From 9b61d34c4c797cb4f36b5d76f775650552ffa3df Mon Sep 17 00:00:00 2001
From: Wang Yan
Date: Sun, 12 Mar 2023 15:28:37 +0800
Subject: [PATCH] add referrers api (#18324)

add referrers api

The referrers API is defined in the upstream OCI distribution spec:
https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#listing-referrers

Signed-off-by: Wang Yan
---
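Per the spec linked above, a client lists referrers with GET /v2/<name>/referrers/<digest>, optionally filtered by an artifactType query parameter, and receives an OCI image index (application/vnd.oci.image.index.v1+json) whose manifests array contains descriptors of the artifacts that refer to that digest. Below is a minimal client-side sketch of that request/response shape using only the Go standard library; it is not the handler added in src/server/registry/referrers.go, and the registry URL, repository, digest, and the local descriptor/index structs are illustrative placeholders.

// A minimal client-side sketch of the listing-referrers call described above.
// It only illustrates the request/response shape from the spec; it is not the
// server-side handler this patch adds. Registry URL, repository, digest and
// artifact type in main() are placeholders.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// descriptor holds the subset of OCI descriptor fields read below.
type descriptor struct {
	MediaType    string            `json:"mediaType"`
	Digest       string            `json:"digest"`
	Size         int64             `json:"size"`
	ArtifactType string            `json:"artifactType,omitempty"`
	Annotations  map[string]string `json:"annotations,omitempty"`
}

// index models the OCI image index returned by the referrers endpoint.
type index struct {
	SchemaVersion int          `json:"schemaVersion"`
	MediaType     string       `json:"mediaType"`
	Manifests     []descriptor `json:"manifests"`
}

// listReferrers issues GET /v2/<name>/referrers/<digest>, optionally filtered
// by artifactType, and decodes the image index from the response body.
func listReferrers(registry, repo, dgst, artifactType string) (*index, error) {
	endpoint := fmt.Sprintf("%s/v2/%s/referrers/%s", registry, repo, dgst)
	if artifactType != "" {
		endpoint += "?artifactType=" + url.QueryEscape(artifactType)
	}
	req, err := http.NewRequest(http.MethodGet, endpoint, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Accept", "application/vnd.oci.image.index.v1+json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status %s from %s", resp.Status, endpoint)
	}
	var idx index
	if err := json.NewDecoder(resp.Body).Decode(&idx); err != nil {
		return nil, err
	}
	return &idx, nil
}

func main() {
	// Placeholder values; point these at a registry that serves the referrers API.
	idx, err := listReferrers(
		"http://127.0.0.1:5000",
		"library/hello-world",
		"sha256:0000000000000000000000000000000000000000000000000000000000000000",
		"", // empty artifactType means no filtering
	)
	if err != nil {
		fmt.Println("list referrers:", err)
		return
	}
	for _, m := range idx.Manifests {
		fmt.Println(m.Digest, m.MediaType, m.ArtifactType)
	}
}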
 src/go.mod | 43 +-
 src/go.sum | 83 +-
 src/lib/patterns.go | 2 +
 src/lib/patterns_test.go | 35 +
 .../middleware/artifactinfo/artifact_info.go | 1 +
 src/server/middleware/metric/metric.go | 2 +
 src/server/registry/referrers.go | 173 +
 src/server/registry/referrers_test.go | 100 +
 src/server/registry/route.go | 5 +
 .../github.com/Azure/go-ansiterm/LICENSE | 21 - .../github.com/Azure/go-ansiterm/README.md | 12 - .../github.com/Azure/go-ansiterm/constants.go | 188 - .../github.com/Azure/go-ansiterm/context.go | 7 - .../Azure/go-ansiterm/csi_entry_state.go | 49 - .../Azure/go-ansiterm/csi_param_state.go | 38 - .../go-ansiterm/escape_intermediate_state.go | 36 - .../Azure/go-ansiterm/escape_state.go | 47 - .../Azure/go-ansiterm/event_handler.go | 90 - .../Azure/go-ansiterm/ground_state.go | 24 - .../Azure/go-ansiterm/osc_string_state.go | 31 - .../github.com/Azure/go-ansiterm/parser.go | 151 - .../go-ansiterm/parser_action_helpers.go | 99 - .../Azure/go-ansiterm/parser_actions.go | 119 - .../github.com/Azure/go-ansiterm/states.go | 71 - .../github.com/Azure/go-ansiterm/utilities.go | 21 - .../Azure/go-ansiterm/winterm/ansi.go | 196 - .../Azure/go-ansiterm/winterm/api.go | 327 - .../go-ansiterm/winterm/attr_translation.go | 100 - .../go-ansiterm/winterm/cursor_helpers.go | 101 - .../go-ansiterm/winterm/erase_helpers.go | 84 - .../go-ansiterm/winterm/scroll_helper.go | 118 - .../Azure/go-ansiterm/winterm/utilities.go | 9 - .../go-ansiterm/winterm/win_event_handler.go | 743 - .../beego/beego/v2/client/cache/README.md | 54 - .../beego/beego/v2/client/cache/cache.go | 107 - .../beego/beego/v2/client/cache/calc_utils.go | 95 - .../beego/beego/v2/client/cache/conv.go | 100 - .../beego/beego/v2/client/cache/error_code.go | 176 - .../beego/beego/v2/client/cache/file.go | 338 - .../beego/beego/v2/client/cache/memory.go | 235 - .../beego/beego/v2/client/cache/module.go | 17 - .../v2/client/cache/random_expired_cache.go | 75 - .../beego/v2/client/cache/redis/redis.go | 290 - .../beego/beego/v2/core/berror/codes.go | 86 - .../beego/beego/v2/core/berror/error.go | 69 - .../beego/v2/core/berror/pre_define_code.go | 52 - .../github.com/containerd/containerd/LICENSE | 191 - .../github.com/containerd/containerd/NOTICE | 16 - .../archive/compression/compression.go | 323 - .../containerd/containerd/content/adaptor.go | 52 - .../containerd/containerd/content/content.go | 182 - .../containerd/containerd/content/helpers.go | 290 - .../containerd/content/local/locks.go | 62 - .../containerd/content/local/readerat.go | 67 - .../containerd/content/local/store.go | 704 - .../containerd/content/local/store_bsd.go | 34 - .../containerd/content/local/store_openbsd.go | 34 - .../containerd/content/local/store_unix.go | 34 - .../containerd/content/local/store_windows.go | 26 - .../containerd/content/local/writer.go | 208 - .../containerd/containerd/errdefs/errors.go | 92 - .../containerd/containerd/errdefs/grpc.go | 147 - .../containerd/containerd/filters/adaptor.go | 33 - .../containerd/containerd/filters/filter.go | 179 - .../containerd/containerd/filters/parser.go | 291 - .../containerd/containerd/filters/quote.go | 252 - .../containerd/containerd/filters/scanner.go | 297 -
.../containerd/images/annotations.go | 23 - .../containerd/containerd/images/diffid.go | 81 - .../containerd/containerd/images/handlers.go | 322 - .../containerd/containerd/images/image.go | 440 - .../containerd/images/importexport.go | 37 - .../containerd/images/mediatypes.go | 204 - .../containerd/containerd/labels/labels.go | 21 - .../containerd/containerd/labels/validate.go | 38 - .../containerd/containerd/log/context.go | 69 - .../containerd/platforms/compare.go | 203 - .../containerd/platforms/cpuinfo.go | 131 - .../containerd/platforms/database.go | 116 - .../containerd/platforms/defaults.go | 27 - .../containerd/platforms/defaults_darwin.go | 45 - .../containerd/platforms/defaults_unix.go | 41 - .../containerd/platforms/defaults_windows.go | 91 - .../containerd/platforms/platforms.go | 261 - .../containerd/reference/reference.go | 166 - .../containerd/remotes/docker/auth/fetch.go | 226 - .../containerd/remotes/docker/auth/parse.go | 203 - .../containerd/remotes/docker/authorizer.go | 361 - .../containerd/remotes/docker/converter.go | 87 - .../containerd/remotes/docker/errcode.go | 283 - .../containerd/remotes/docker/errdesc.go | 154 - .../containerd/remotes/docker/fetcher.go | 215 - .../containerd/remotes/docker/handler.go | 154 - .../remotes/docker/httpreadseeker.go | 168 - .../containerd/remotes/docker/pusher.go | 451 - .../containerd/remotes/docker/registry.go | 244 - .../containerd/remotes/docker/resolver.go | 669 - .../remotes/docker/schema1/converter.go | 604 - .../containerd/remotes/docker/scope.go | 101 - .../containerd/remotes/docker/status.go | 90 - .../containerd/remotes/errors/errors.go | 55 - .../containerd/containerd/remotes/handlers.go | 368 - .../containerd/containerd/remotes/resolver.go | 82 - .../containerd/containerd/version/version.go | 34 - .../cyphar/filepath-securejoin/.travis.yml | 21 - .../cyphar/filepath-securejoin/LICENSE | 28 - .../cyphar/filepath-securejoin/README.md | 79 - .../cyphar/filepath-securejoin/VERSION | 1 - .../cyphar/filepath-securejoin/join.go | 115 - .../cyphar/filepath-securejoin/vfs.go | 41 - src/vendor/github.com/docker/cli/AUTHORS | 771 - src/vendor/github.com/docker/cli/LICENSE | 191 - src/vendor/github.com/docker/cli/NOTICE | 19 - .../docker/cli/cli/config/config.go | 167 - .../docker/cli/cli/config/configfile/file.go | 415 - .../cli/cli/config/configfile/file_unix.go | 36 - .../cli/cli/config/configfile/file_windows.go | 5 - .../cli/cli/config/credentials/credentials.go | 17 - .../cli/config/credentials/default_store.go | 21 - .../credentials/default_store_darwin.go | 5 - .../config/credentials/default_store_linux.go | 13 - .../credentials/default_store_unsupported.go | 8 - .../credentials/default_store_windows.go | 5 - .../cli/cli/config/credentials/file_store.go | 81 - .../cli/config/credentials/native_store.go | 143 - .../docker/cli/cli/config/types/authconfig.go | 22 - .../registry/api/v2/descriptors.go | 1596 - .../distribution/registry/api/v2/doc.go | 9 - .../distribution/registry/api/v2/errors.go | 136 - .../registry/api/v2/headerparser.go | 161 - .../distribution/registry/api/v2/routes.go | 40 - .../distribution/registry/api/v2/urls.go | 254 - .../registry/client/auth/api_version.go | 58 - .../registry/client/auth/session.go | 530 - .../registry/client/blob_writer.go | 162 - .../distribution/registry/client/errors.go | 139 - .../registry/client/repository.go | 872 - .../registry/storage/cache/memory/memory.go | 179 - .../docker/docker-credential-helpers/LICENSE | 20 - .../client/client.go | 121 - .../client/command.go 
| 57 - .../credentials/credentials.go | 186 - .../credentials/error.go | 102 - .../credentials/helper.go | 14 - .../credentials/version.go | 4 - src/vendor/github.com/docker/docker/AUTHORS | 2175 - src/vendor/github.com/docker/docker/LICENSE | 191 - src/vendor/github.com/docker/docker/NOTICE | 19 - .../docker/docker/api/types/auth.go | 22 - .../docker/docker/api/types/blkiodev/blkio.go | 23 - .../docker/docker/api/types/client.go | 419 - .../docker/docker/api/types/configs.go | 66 - .../docker/api/types/container/config.go | 69 - .../api/types/container/container_changes.go | 20 - .../api/types/container/container_create.go | 20 - .../api/types/container/container_top.go | 22 - .../api/types/container/container_update.go | 16 - .../api/types/container/container_wait.go | 28 - .../docker/api/types/container/host_config.go | 447 - .../api/types/container/hostconfig_unix.go | 42 - .../api/types/container/hostconfig_windows.go | 40 - .../api/types/container/waitcondition.go | 22 - .../docker/docker/api/types/error_response.go | 13 - .../docker/api/types/error_response_ext.go | 6 - .../docker/docker/api/types/filters/parse.go | 324 - .../docker/api/types/graph_driver_data.go | 17 - .../docker/docker/api/types/id_response.go | 13 - .../api/types/image_delete_response_item.go | 15 - .../docker/docker/api/types/image_summary.go | 49 - .../docker/docker/api/types/mount/mount.go | 131 - .../docker/api/types/network/network.go | 126 - .../docker/docker/api/types/plugin.go | 203 - .../docker/docker/api/types/plugin_device.go | 25 - .../docker/docker/api/types/plugin_env.go | 25 - .../docker/api/types/plugin_interface_type.go | 21 - .../docker/docker/api/types/plugin_mount.go | 37 - .../docker/api/types/plugin_responses.go | 71 - .../docker/docker/api/types/port.go | 23 - .../docker/api/types/registry/authenticate.go | 21 - .../docker/api/types/registry/registry.go | 119 - .../api/types/service_update_response.go | 12 - .../docker/docker/api/types/stats.go | 181 - .../docker/api/types/strslice/strslice.go | 30 - .../docker/docker/api/types/swarm/common.go | 40 - .../docker/docker/api/types/swarm/config.go | 40 - .../docker/api/types/swarm/container.go | 80 - .../docker/docker/api/types/swarm/network.go | 121 - .../docker/docker/api/types/swarm/node.go | 115 - .../docker/docker/api/types/swarm/runtime.go | 27 - .../docker/api/types/swarm/runtime/gen.go | 3 - .../api/types/swarm/runtime/plugin.pb.go | 754 - .../api/types/swarm/runtime/plugin.proto | 21 - .../docker/docker/api/types/swarm/secret.go | 36 - .../docker/docker/api/types/swarm/service.go | 202 - .../docker/docker/api/types/swarm/swarm.go | 227 - .../docker/docker/api/types/swarm/task.go | 206 - .../docker/docker/api/types/types.go | 635 - .../docker/api/types/versions/README.md | 14 - .../docker/api/types/versions/compare.go | 62 - .../docker/docker/api/types/volume.go | 72 - .../github.com/docker/docker/errdefs/defs.go | 69 - .../github.com/docker/docker/errdefs/doc.go | 8 - .../docker/docker/errdefs/helpers.go | 279 - .../docker/docker/errdefs/http_helpers.go | 53 - .../github.com/docker/docker/errdefs/is.go | 107 - .../docker/pkg/homedir/homedir_linux.go | 93 - .../docker/pkg/homedir/homedir_others.go | 28 - .../docker/docker/pkg/homedir/homedir_unix.go | 39 - .../docker/pkg/homedir/homedir_windows.go | 24 - .../docker/docker/pkg/ioutils/buffer.go | 51 - .../docker/docker/pkg/ioutils/bytespipe.go | 187 - .../docker/docker/pkg/ioutils/fswriters.go | 162 - .../docker/docker/pkg/ioutils/readers.go | 157 - 
.../docker/docker/pkg/ioutils/temp_unix.go | 11 - .../docker/docker/pkg/ioutils/temp_windows.go | 16 - .../docker/docker/pkg/ioutils/writeflusher.go | 92 - .../docker/docker/pkg/ioutils/writers.go | 66 - .../docker/pkg/jsonmessage/jsonmessage.go | 283 - .../docker/docker/pkg/longpath/longpath.go | 26 - .../docker/docker/pkg/stringid/README.md | 1 - .../docker/docker/pkg/stringid/stringid.go | 63 - .../github.com/docker/docker/registry/auth.go | 247 - .../docker/docker/registry/config.go | 433 - .../docker/docker/registry/config_unix.go | 32 - .../docker/docker/registry/config_windows.go | 20 - .../docker/docker/registry/endpoint_v1.go | 195 - .../docker/docker/registry/errors.go | 23 - .../docker/docker/registry/registry.go | 199 - .../docker/docker/registry/service.go | 297 - .../docker/docker/registry/service_v2.go | 79 - .../docker/docker/registry/session.go | 227 - .../docker/docker/registry/types.go | 70 - .../docker/docker/rootless/rootless.go | 25 - .../github.com/docker/go-connections/LICENSE | 191 - .../docker/go-connections/nat/nat.go | 242 - .../docker/go-connections/nat/parse.go | 57 - .../docker/go-connections/nat/sort.go | 96 - .../go-connections/tlsconfig/certpool_go17.go | 18 - .../tlsconfig/certpool_other.go | 13 - .../docker/go-connections/tlsconfig/config.go | 254 - .../tlsconfig/config_client_ciphers.go | 17 - .../tlsconfig/config_legacy_client_ciphers.go | 15 - .../docker/go-units/CONTRIBUTING.md | 67 - src/vendor/github.com/docker/go-units/LICENSE | 191 - .../github.com/docker/go-units/MAINTAINERS | 46 - .../github.com/docker/go-units/README.md | 16 - .../github.com/docker/go-units/circle.yml | 11 - .../github.com/docker/go-units/duration.go | 35 - src/vendor/github.com/docker/go-units/size.go | 108 - .../github.com/docker/go-units/ulimit.go | 123 - .../emicklei/go-restful/v3/.gitignore | 71 - .../emicklei/go-restful/v3/.goconvey | 1 - .../emicklei/go-restful/v3/.travis.yml | 13 - .../emicklei/go-restful/v3/CHANGES.md | 372 - .../github.com/emicklei/go-restful/v3/LICENSE | 22 - .../emicklei/go-restful/v3/Makefile | 8 - .../emicklei/go-restful/v3/README.md | 110 - .../emicklei/go-restful/v3/SECURITY.md | 13 - .../github.com/emicklei/go-restful/v3/Srcfile | 1 - .../emicklei/go-restful/v3/bench_test.sh | 10 - .../emicklei/go-restful/v3/compress.go | 127 - .../go-restful/v3/compressor_cache.go | 103 - .../go-restful/v3/compressor_pools.go | 91 - .../emicklei/go-restful/v3/compressors.go | 54 - .../emicklei/go-restful/v3/constants.go | 30 - .../emicklei/go-restful/v3/container.go | 450 - .../emicklei/go-restful/v3/cors_filter.go | 193 - .../emicklei/go-restful/v3/coverage.sh | 2 - .../emicklei/go-restful/v3/curly.go | 173 - .../emicklei/go-restful/v3/curly_route.go | 54 - .../emicklei/go-restful/v3/custom_verb.go | 29 - .../github.com/emicklei/go-restful/v3/doc.go | 185 - .../go-restful/v3/entity_accessors.go | 162 - .../emicklei/go-restful/v3/extensions.go | 21 - .../emicklei/go-restful/v3/filter.go | 37 - .../github.com/emicklei/go-restful/v3/json.go | 11 - .../emicklei/go-restful/v3/jsoniter.go | 12 - .../emicklei/go-restful/v3/jsr311.go | 326 - .../emicklei/go-restful/v3/log/log.go | 34 - .../emicklei/go-restful/v3/logger.go | 32 - .../github.com/emicklei/go-restful/v3/mime.go | 50 - .../emicklei/go-restful/v3/options_filter.go | 34 - .../emicklei/go-restful/v3/parameter.go | 234 - .../emicklei/go-restful/v3/path_expression.go | 74 - .../emicklei/go-restful/v3/path_processor.go | 74 - .../emicklei/go-restful/v3/request.go | 132 - .../emicklei/go-restful/v3/response.go 
| 256 - .../emicklei/go-restful/v3/route.go | 178 - .../emicklei/go-restful/v3/route_builder.go | 376 - .../emicklei/go-restful/v3/route_reader.go | 66 - .../emicklei/go-restful/v3/router.go | 20 - .../emicklei/go-restful/v3/service_error.go | 32 - .../emicklei/go-restful/v3/web_service.go | 293 - .../go-restful/v3/web_service_container.go | 39 - .../github.com/evanphx/json-patch/.gitignore | 6 - .../github.com/evanphx/json-patch/LICENSE | 25 - .../github.com/evanphx/json-patch/README.md | 317 - .../github.com/evanphx/json-patch/errors.go | 38 - .../github.com/evanphx/json-patch/merge.go | 389 - .../github.com/evanphx/json-patch/patch.go | 809 - .../github.com/go-errors/errors/.travis.yml | 5 - .../github.com/go-errors/errors/LICENSE.MIT | 7 - .../github.com/go-errors/errors/README.md | 66 - .../github.com/go-errors/errors/cover.out | 89 - .../github.com/go-errors/errors/error.go | 217 - .../go-errors/errors/parse_panic.go | 127 - .../github.com/go-errors/errors/stackframe.go | 102 - .../github.com/google/btree/.travis.yml | 1 - src/vendor/github.com/google/btree/LICENSE | 202 - src/vendor/github.com/google/btree/README.md | 12 - src/vendor/github.com/google/btree/btree.go | 890 - src/vendor/github.com/google/gnostic/LICENSE | 203 - .../google/gnostic/compiler/README.md | 4 - .../google/gnostic/compiler/context.go | 49 - .../google/gnostic/compiler/error.go | 70 - .../google/gnostic/compiler/extensions.go | 86 - .../google/gnostic/compiler/helpers.go | 397 - .../google/gnostic/compiler/main.go | 16 - .../google/gnostic/compiler/reader.go | 307 - .../google/gnostic/extensions/README.md | 13 - .../google/gnostic/extensions/extension.pb.go | 461 - .../google/gnostic/extensions/extension.proto | 97 - .../google/gnostic/extensions/extensions.go | 64 - .../google/gnostic/jsonschema/README.md | 4 - .../google/gnostic/jsonschema/base.go | 84 - .../google/gnostic/jsonschema/display.go | 229 - .../google/gnostic/jsonschema/models.go | 228 - .../google/gnostic/jsonschema/operations.go | 394 - .../google/gnostic/jsonschema/reader.go | 442 - .../google/gnostic/jsonschema/schema.json | 150 - .../google/gnostic/jsonschema/writer.go | 369 - .../google/gnostic/openapiv2/OpenAPIv2.go | 8820 --- .../google/gnostic/openapiv2/OpenAPIv2.pb.go | 7342 --- .../google/gnostic/openapiv2/OpenAPIv2.proto | 666 - .../google/gnostic/openapiv2/README.md | 14 - .../google/gnostic/openapiv2/document.go | 42 - .../google/gnostic/openapiv2/openapi-2.0.json | 1610 - .../google/gnostic/openapiv3/OpenAPIv3.go | 8633 --- .../google/gnostic/openapiv3/OpenAPIv3.pb.go | 8053 --- .../google/gnostic/openapiv3/OpenAPIv3.proto | 672 - .../google/gnostic/openapiv3/README.md | 21 - .../google/gnostic/openapiv3/document.go | 42 - .../google/gnostic/openapiv3/openapi-3.0.json | 1251 - .../google/gnostic/openapiv3/openapi-3.1.json | 1250 - src/vendor/github.com/google/shlex/COPYING | 202 - src/vendor/github.com/google/shlex/README | 2 - src/vendor/github.com/google/shlex/shlex.go | 416 - .../gregjones/httpcache/.travis.yml | 19 - .../gregjones/httpcache/LICENSE.txt | 7 - .../github.com/gregjones/httpcache/README.md | 25 - .../gregjones/httpcache/httpcache.go | 551 - .../github.com/imdario/mergo/.deepsource.toml | 12 - .../github.com/imdario/mergo/.gitignore | 33 - .../github.com/imdario/mergo/.travis.yml | 12 - .../imdario/mergo/CODE_OF_CONDUCT.md | 46 - src/vendor/github.com/imdario/mergo/LICENSE | 28 - src/vendor/github.com/imdario/mergo/README.md | 247 - src/vendor/github.com/imdario/mergo/doc.go | 143 - 
src/vendor/github.com/imdario/mergo/map.go | 178 - src/vendor/github.com/imdario/mergo/merge.go | 380 - src/vendor/github.com/imdario/mergo/mergo.go | 78 - .../inconshreveable/mousetrap/LICENSE | 13 - .../inconshreveable/mousetrap/README.md | 23 - .../inconshreveable/mousetrap/trap_others.go | 15 - .../inconshreveable/mousetrap/trap_windows.go | 98 - .../mousetrap/trap_windows_1.4.go | 46 - .../klauspost/compress/.gitattributes | 2 - .../github.com/klauspost/compress/.gitignore | 25 - .../klauspost/compress/.goreleaser.yml | 137 - .../github.com/klauspost/compress/LICENSE | 304 - .../github.com/klauspost/compress/README.md | 438 - .../klauspost/compress/compressible.go | 85 - .../klauspost/compress/fse/README.md | 79 - .../klauspost/compress/fse/bitreader.go | 122 - .../klauspost/compress/fse/bitwriter.go | 168 - .../klauspost/compress/fse/bytereader.go | 47 - .../klauspost/compress/fse/compress.go | 683 - .../klauspost/compress/fse/decompress.go | 374 - .../github.com/klauspost/compress/fse/fse.go | 144 - .../github.com/klauspost/compress/gen.sh | 4 - .../klauspost/compress/huff0/.gitignore | 1 - .../klauspost/compress/huff0/README.md | 89 - .../klauspost/compress/huff0/bitreader.go | 329 - .../klauspost/compress/huff0/bitwriter.go | 210 - .../klauspost/compress/huff0/bytereader.go | 54 - .../klauspost/compress/huff0/compress.go | 720 - .../klauspost/compress/huff0/decompress.go | 1371 - .../klauspost/compress/huff0/huff0.go | 335 - .../compress/internal/snapref/LICENSE | 27 - .../compress/internal/snapref/decode.go | 264 - .../compress/internal/snapref/decode_other.go | 113 - .../compress/internal/snapref/encode.go | 289 - .../compress/internal/snapref/encode_other.go | 236 - .../compress/internal/snapref/snappy.go | 98 - .../github.com/klauspost/compress/s2sx.mod | 4 - .../github.com/klauspost/compress/s2sx.sum | 0 .../klauspost/compress/zstd/README.md | 441 - .../klauspost/compress/zstd/bitreader.go | 136 - .../klauspost/compress/zstd/bitwriter.go | 169 - .../klauspost/compress/zstd/blockdec.go | 736 - .../klauspost/compress/zstd/blockenc.go | 871 - .../compress/zstd/blocktype_string.go | 85 - .../klauspost/compress/zstd/bytebuf.go | 130 - .../klauspost/compress/zstd/bytereader.go | 88 - .../klauspost/compress/zstd/decodeheader.go | 202 - .../klauspost/compress/zstd/decoder.go | 555 - .../compress/zstd/decoder_options.go | 102 - .../klauspost/compress/zstd/dict.go | 122 - .../klauspost/compress/zstd/enc_base.go | 178 - .../klauspost/compress/zstd/enc_best.go | 558 - .../klauspost/compress/zstd/enc_better.go | 1237 - .../klauspost/compress/zstd/enc_dfast.go | 1124 - .../klauspost/compress/zstd/enc_fast.go | 1019 - .../klauspost/compress/zstd/encoder.go | 599 - .../compress/zstd/encoder_options.go | 312 - .../klauspost/compress/zstd/framedec.go | 521 - .../klauspost/compress/zstd/frameenc.go | 137 - .../klauspost/compress/zstd/fse_decoder.go | 385 - .../klauspost/compress/zstd/fse_encoder.go | 725 - .../klauspost/compress/zstd/fse_predefined.go | 158 - .../klauspost/compress/zstd/hash.go | 41 - .../klauspost/compress/zstd/history.go | 89 - .../compress/zstd/internal/xxhash/LICENSE.txt | 22 - .../compress/zstd/internal/xxhash/README.md | 58 - .../compress/zstd/internal/xxhash/xxhash.go | 237 - .../zstd/internal/xxhash/xxhash_amd64.go | 12 - .../zstd/internal/xxhash/xxhash_amd64.s | 215 - .../zstd/internal/xxhash/xxhash_other.go | 77 - .../zstd/internal/xxhash/xxhash_safe.go | 11 - .../klauspost/compress/zstd/seqdec.go | 492 - .../klauspost/compress/zstd/seqenc.go | 114 - 
.../klauspost/compress/zstd/snappy.go | 435 - .../github.com/klauspost/compress/zstd/zip.go | 122 - .../klauspost/compress/zstd/zstd.go | 152 - .../github.com/liggitt/tabwriter/.travis.yml | 11 - .../github.com/liggitt/tabwriter/LICENSE | 27 - .../github.com/liggitt/tabwriter/README.md | 7 - .../github.com/liggitt/tabwriter/tabwriter.go | 637 - .../mitchellh/copystructure/LICENSE | 21 - .../mitchellh/copystructure/README.md | 21 - .../mitchellh/copystructure/copier_time.go | 15 - .../mitchellh/copystructure/copystructure.go | 631 - .../mitchellh/reflectwalk/.travis.yml | 1 - .../github.com/mitchellh/reflectwalk/LICENSE | 21 - .../mitchellh/reflectwalk/README.md | 6 - .../mitchellh/reflectwalk/location.go | 19 - .../mitchellh/reflectwalk/location_string.go | 16 - .../mitchellh/reflectwalk/reflectwalk.go | 420 - src/vendor/github.com/moby/locker/LICENSE | 190 - src/vendor/github.com/moby/locker/README.md | 65 - src/vendor/github.com/moby/locker/locker.go | 112 - src/vendor/github.com/moby/term/.gitignore | 8 - src/vendor/github.com/moby/term/LICENSE | 191 - src/vendor/github.com/moby/term/README.md | 36 - src/vendor/github.com/moby/term/ascii.go | 66 - src/vendor/github.com/moby/term/proxy.go | 88 - src/vendor/github.com/moby/term/tc.go | 19 - src/vendor/github.com/moby/term/term.go | 120 - .../github.com/moby/term/term_windows.go | 231 - src/vendor/github.com/moby/term/termios.go | 35 - .../github.com/moby/term/termios_bsd.go | 12 - .../github.com/moby/term/termios_nonbsd.go | 12 - .../moby/term/windows/ansi_reader.go | 252 - .../moby/term/windows/ansi_writer.go | 56 - .../github.com/moby/term/windows/console.go | 39 - .../github.com/moby/term/windows/doc.go | 5 - src/vendor/github.com/moby/term/winsize.go | 20 - .../monochromegane/go-gitignore/.travis.yml | 6 - .../monochromegane/go-gitignore/LICENSE | 21 - .../monochromegane/go-gitignore/README.md | 95 - .../go-gitignore/depth_holder.go | 79 - .../go-gitignore/full_scan_patterns.go | 31 - .../monochromegane/go-gitignore/gitignore.go | 80 - .../go-gitignore/index_scan_patterns.go | 35 - .../go-gitignore/initial_holder.go | 62 - .../monochromegane/go-gitignore/match.go | 24 - .../monochromegane/go-gitignore/pattern.go | 69 - .../monochromegane/go-gitignore/patterns.go | 22 - .../monochromegane/go-gitignore/util.go | 45 - src/vendor/github.com/morikuni/aec/LICENSE | 21 - src/vendor/github.com/morikuni/aec/README.md | 178 - src/vendor/github.com/morikuni/aec/aec.go | 137 - src/vendor/github.com/morikuni/aec/ansi.go | 59 - src/vendor/github.com/morikuni/aec/builder.go | 388 - src/vendor/github.com/morikuni/aec/sample.gif | Bin 12548 -> 0 bytes src/vendor/github.com/morikuni/aec/sgr.go | 202 - .../github.com/munnerz/goautoneg/LICENSE | 31 - .../github.com/munnerz/goautoneg/Makefile | 13 - .../github.com/munnerz/goautoneg/README.txt | 67 - .../github.com/munnerz/goautoneg/autoneg.go | 189 - .../image-spec/specs-go/v1/annotations.go | 9 + .../image-spec/specs-go/v1/artifact.go | 34 + .../image-spec/specs-go/v1/descriptor.go | 10 +- .../image-spec/specs-go/v1/index.go | 2 +- .../image-spec/specs-go/v1/manifest.go | 7 +- .../image-spec/specs-go/v1/mediatype.go | 3 + .../image-spec/specs-go/version.go | 6 +- .../github.com/peterbourgon/diskv/LICENSE | 19 - .../github.com/peterbourgon/diskv/README.md | 141 - .../peterbourgon/diskv/compression.go | 64 - .../github.com/peterbourgon/diskv/diskv.go | 624 - .../github.com/peterbourgon/diskv/index.go | 115 - src/vendor/github.com/spf13/cobra/.gitignore | 39 - .../github.com/spf13/cobra/.golangci.yml | 48 - 
src/vendor/github.com/spf13/cobra/.mailmap | 3 - src/vendor/github.com/spf13/cobra/CONDUCT.md | 37 - .../github.com/spf13/cobra/CONTRIBUTING.md | 50 - src/vendor/github.com/spf13/cobra/LICENSE.txt | 174 - src/vendor/github.com/spf13/cobra/MAINTAINERS | 13 - src/vendor/github.com/spf13/cobra/Makefile | 35 - src/vendor/github.com/spf13/cobra/README.md | 111 - .../github.com/spf13/cobra/active_help.go | 49 - .../github.com/spf13/cobra/active_help.md | 157 - src/vendor/github.com/spf13/cobra/args.go | 121 - .../spf13/cobra/bash_completions.go | 698 - .../spf13/cobra/bash_completions.md | 93 - .../spf13/cobra/bash_completionsV2.go | 369 - src/vendor/github.com/spf13/cobra/cobra.go | 222 - src/vendor/github.com/spf13/cobra/command.go | 1696 - .../github.com/spf13/cobra/command_notwin.go | 6 - .../github.com/spf13/cobra/command_win.go | 27 - .../github.com/spf13/cobra/completions.go | 832 - .../spf13/cobra/fish_completions.go | 220 - .../spf13/cobra/fish_completions.md | 4 - .../github.com/spf13/cobra/flag_groups.go | 223 - .../spf13/cobra/powershell_completions.go | 288 - .../spf13/cobra/powershell_completions.md | 3 - .../spf13/cobra/projects_using_cobra.md | 54 - .../spf13/cobra/shell_completions.go | 84 - .../spf13/cobra/shell_completions.md | 548 - .../github.com/spf13/cobra/user_guide.md | 666 - .../github.com/spf13/cobra/zsh_completions.go | 287 - .../github.com/spf13/cobra/zsh_completions.md | 48 - .../gojsonpointer/LICENSE-APACHE-2.0.txt | 202 - .../xeipuuv/gojsonpointer/README.md | 41 - .../xeipuuv/gojsonpointer/pointer.go | 211 - .../gojsonreference/LICENSE-APACHE-2.0.txt | 202 - .../xeipuuv/gojsonreference/README.md | 10 - .../xeipuuv/gojsonreference/reference.go | 147 - .../xeipuuv/gojsonschema/.gitignore | 3 - .../xeipuuv/gojsonschema/.travis.yml | 9 - .../gojsonschema/LICENSE-APACHE-2.0.txt | 202 - .../github.com/xeipuuv/gojsonschema/README.md | 466 - .../github.com/xeipuuv/gojsonschema/draft.go | 125 - .../github.com/xeipuuv/gojsonschema/errors.go | 364 - .../xeipuuv/gojsonschema/format_checkers.go | 368 - .../xeipuuv/gojsonschema/glide.yaml | 13 - .../xeipuuv/gojsonschema/internalLog.go | 37 - .../xeipuuv/gojsonschema/jsonContext.go | 73 - .../xeipuuv/gojsonschema/jsonLoader.go | 386 - .../xeipuuv/gojsonschema/locales.go | 472 - .../github.com/xeipuuv/gojsonschema/result.go | 220 - .../github.com/xeipuuv/gojsonschema/schema.go | 1087 - .../xeipuuv/gojsonschema/schemaLoader.go | 206 - .../xeipuuv/gojsonschema/schemaPool.go | 215 - .../gojsonschema/schemaReferencePool.go | 68 - .../xeipuuv/gojsonschema/schemaType.go | 83 - .../xeipuuv/gojsonschema/subSchema.go | 149 - .../github.com/xeipuuv/gojsonschema/types.go | 62 - .../github.com/xeipuuv/gojsonschema/utils.go | 197 - .../xeipuuv/gojsonschema/validation.go | 858 - src/vendor/github.com/xlab/treeprint/LICENSE | 20 - .../github.com/xlab/treeprint/README.md | 154 - .../github.com/xlab/treeprint/helpers.go | 47 - .../github.com/xlab/treeprint/struct.go | 322 - .../github.com/xlab/treeprint/treeprint.go | 294 - src/vendor/go.starlark.net/LICENSE | 29 - .../internal/compile/compile.go | 1903 - .../internal/compile/serial.go | 389 - .../go.starlark.net/internal/spell/spell.go | 115 - src/vendor/go.starlark.net/resolve/binding.go | 74 - src/vendor/go.starlark.net/resolve/resolve.go | 978 - src/vendor/go.starlark.net/starlark/debug.go | 42 - src/vendor/go.starlark.net/starlark/empty.s | 3 - src/vendor/go.starlark.net/starlark/eval.go | 1497 - .../go.starlark.net/starlark/hashtable.go | 373 - src/vendor/go.starlark.net/starlark/int.go | 
350 - src/vendor/go.starlark.net/starlark/interp.go | 637 - .../go.starlark.net/starlark/library.go | 2104 - .../go.starlark.net/starlark/profile.go | 449 - src/vendor/go.starlark.net/starlark/unpack.go | 258 - src/vendor/go.starlark.net/starlark/value.go | 1293 - .../go.starlark.net/starlarkstruct/module.go | 43 - .../go.starlark.net/starlarkstruct/struct.go | 281 - src/vendor/go.starlark.net/syntax/grammar.txt | 129 - src/vendor/go.starlark.net/syntax/parse.go | 1029 - src/vendor/go.starlark.net/syntax/quote.go | 269 - src/vendor/go.starlark.net/syntax/scan.go | 1089 - src/vendor/go.starlark.net/syntax/syntax.go | 529 - src/vendor/go.starlark.net/syntax/walk.go | 163 - src/vendor/golang.org/x/crypto/cast5/cast5.go | 533 - .../x/crypto/openpgp/armor/armor.go | 232 - .../x/crypto/openpgp/armor/encode.go | 161 - .../x/crypto/openpgp/canonical_text.go | 59 - .../x/crypto/openpgp/clearsign/clearsign.go | 424 - .../x/crypto/openpgp/elgamal/elgamal.go | 130 - .../x/crypto/openpgp/errors/errors.go | 78 - .../golang.org/x/crypto/openpgp/keys.go | 693 - .../x/crypto/openpgp/packet/compressed.go | 123 - .../x/crypto/openpgp/packet/config.go | 91 - .../x/crypto/openpgp/packet/encrypted_key.go | 208 - .../x/crypto/openpgp/packet/literal.go | 89 - .../x/crypto/openpgp/packet/ocfb.go | 143 - .../openpgp/packet/one_pass_signature.go | 73 - .../x/crypto/openpgp/packet/opaque.go | 161 - .../x/crypto/openpgp/packet/packet.go | 590 - .../x/crypto/openpgp/packet/private_key.go | 384 - .../x/crypto/openpgp/packet/public_key.go | 753 - .../x/crypto/openpgp/packet/public_key_v3.go | 279 - .../x/crypto/openpgp/packet/reader.go | 76 - .../x/crypto/openpgp/packet/signature.go | 731 - .../x/crypto/openpgp/packet/signature_v3.go | 146 - .../openpgp/packet/symmetric_key_encrypted.go | 155 - .../openpgp/packet/symmetrically_encrypted.go | 290 - .../x/crypto/openpgp/packet/userattribute.go | 90 - .../x/crypto/openpgp/packet/userid.go | 159 - .../golang.org/x/crypto/openpgp/read.go | 448 - .../golang.org/x/crypto/openpgp/s2k/s2k.go | 279 - .../golang.org/x/crypto/openpgp/write.go | 418 - src/vendor/golang.org/x/sync/LICENSE | 27 - src/vendor/golang.org/x/sync/PATENTS | 22 - .../golang.org/x/sync/errgroup/errgroup.go | 132 - .../golang.org/x/sync/semaphore/semaphore.go | 136 - .../golang.org/x/sys/execabs/execabs.go | 102 - .../golang.org/x/sys/execabs/execabs_go118.go | 12 - .../golang.org/x/sys/execabs/execabs_go119.go | 15 - .../golang.org/x/text/encoding/encoding.go | 335 - .../internal/identifier/identifier.go | 81 - .../text/encoding/internal/identifier/mib.go | 1627 - .../x/text/encoding/internal/internal.go | 75 - .../x/text/encoding/unicode/override.go | 82 - .../x/text/encoding/unicode/unicode.go | 512 - .../internal/utf8internal/utf8internal.go | 87 - .../helm.sh/helm/v3/cmd/helm/search/search.go | 227 - .../helm/v3/internal/fileutil/fileutil.go | 51 - .../helm/v3/internal/third_party/dep/fs/fs.go | 372 - .../v3/internal/third_party/dep/fs/rename.go | 58 - .../third_party/dep/fs/rename_windows.go | 69 - .../helm.sh/helm/v3/internal/tlsutil/cfg.go | 58 - .../helm.sh/helm/v3/internal/tlsutil/tls.go | 76 - .../helm/v3/internal/urlutil/urlutil.go | 73 - .../helm/v3/internal/version/version.go | 81 - .../helm/v3/pkg/chartutil/capabilities.go | 126 - .../helm/v3/pkg/chartutil/chartfile.go | 93 - .../helm.sh/helm/v3/pkg/chartutil/coalesce.go | 227 - .../helm/v3/pkg/chartutil/compatible.go | 34 - .../helm.sh/helm/v3/pkg/chartutil/create.go | 687 - .../helm/v3/pkg/chartutil/dependencies.go | 285 - 
.../helm.sh/helm/v3/pkg/chartutil/doc.go | 44 - .../helm.sh/helm/v3/pkg/chartutil/errors.go | 35 - .../helm.sh/helm/v3/pkg/chartutil/expand.go | 91 - .../helm/v3/pkg/chartutil/jsonschema.go | 93 - .../helm.sh/helm/v3/pkg/chartutil/save.go | 244 - .../helm/v3/pkg/chartutil/validate_name.go | 112 - .../helm.sh/helm/v3/pkg/chartutil/values.go | 212 - .../helm.sh/helm/v3/pkg/cli/environment.go | 230 - src/vendor/helm.sh/helm/v3/pkg/getter/doc.go | 21 - .../helm.sh/helm/v3/pkg/getter/getter.go | 193 - .../helm.sh/helm/v3/pkg/getter/httpgetter.go | 158 - .../helm.sh/helm/v3/pkg/getter/ocigetter.go | 84 - .../helm/v3/pkg/getter/plugingetter.go | 102 - .../helm.sh/helm/v3/pkg/helmpath/home.go | 44 - .../helm.sh/helm/v3/pkg/helmpath/lazypath.go | 72 - .../helm/v3/pkg/helmpath/lazypath_darwin.go | 34 - .../helm/v3/pkg/helmpath/lazypath_unix.go | 45 - .../helm/v3/pkg/helmpath/lazypath_windows.go | 24 - .../helm.sh/helm/v3/pkg/helmpath/xdg/xdg.go | 34 - .../helm.sh/helm/v3/pkg/plugin/hooks.go | 29 - .../helm.sh/helm/v3/pkg/plugin/plugin.go | 282 - .../helm.sh/helm/v3/pkg/provenance/doc.go | 37 - .../helm.sh/helm/v3/pkg/provenance/sign.go | 424 - .../helm.sh/helm/v3/pkg/registry/client.go | 643 - .../helm.sh/helm/v3/pkg/registry/constants.go | 37 - .../helm.sh/helm/v3/pkg/registry/util.go | 131 - .../helm.sh/helm/v3/pkg/repo/chartrepo.go | 313 - src/vendor/helm.sh/helm/v3/pkg/repo/doc.go | 93 - src/vendor/helm.sh/helm/v3/pkg/repo/index.go | 364 - src/vendor/helm.sh/helm/v3/pkg/repo/repo.go | 126 - .../k8s.io/apiextensions-apiserver/LICENSE | 202 - .../pkg/apis/apiextensions/deepcopy.go | 300 - .../pkg/apis/apiextensions/doc.go | 21 - .../pkg/apis/apiextensions/helpers.go | 257 - .../pkg/apis/apiextensions/register.go | 51 - .../pkg/apis/apiextensions/types.go | 422 - .../apis/apiextensions/types_jsonschema.go | 238 - .../apiextensions/v1/.import-restrictions | 5 - .../pkg/apis/apiextensions/v1/conversion.go | 215 - .../pkg/apis/apiextensions/v1/deepcopy.go | 260 - .../pkg/apis/apiextensions/v1/defaults.go | 61 - .../pkg/apis/apiextensions/v1/doc.go | 25 - .../pkg/apis/apiextensions/v1/generated.pb.go | 9257 ---- .../pkg/apis/apiextensions/v1/generated.proto | 729 - .../pkg/apis/apiextensions/v1/marshal.go | 136 - .../pkg/apis/apiextensions/v1/register.go | 62 - .../pkg/apis/apiextensions/v1/types.go | 485 - .../apis/apiextensions/v1/types_jsonschema.go | 331 - .../v1/zz_generated.conversion.go | 1318 - .../apiextensions/v1/zz_generated.deepcopy.go | 705 - .../apiextensions/v1/zz_generated.defaults.go | 58 - .../v1beta1/.import-restrictions | 5 - .../apis/apiextensions/v1beta1/conversion.go | 70 - .../apis/apiextensions/v1beta1/deepcopy.go | 276 - .../apis/apiextensions/v1beta1/defaults.go | 82 - .../pkg/apis/apiextensions/v1beta1/doc.go | 26 - .../apiextensions/v1beta1/generated.pb.go | 9297 ---- .../apiextensions/v1beta1/generated.proto | 766 - .../pkg/apis/apiextensions/v1beta1/marshal.go | 136 - .../apis/apiextensions/v1beta1/register.go | 62 - .../pkg/apis/apiextensions/v1beta1/types.go | 531 - .../apiextensions/v1beta1/types_jsonschema.go | 331 - .../v1beta1/zz_generated.conversion.go | 1366 - .../v1beta1/zz_generated.deepcopy.go | 704 - .../v1beta1/zz_generated.defaults.go | 56 - .../zz_generated.prerelease-lifecycle.go | 98 - .../apiextensions/zz_generated.deepcopy.go | 596 - .../apimachinery/pkg/api/equality/semantic.go | 49 - .../k8s.io/apimachinery/pkg/api/meta/OWNERS | 14 - .../apimachinery/pkg/api/meta/conditions.go | 102 - .../k8s.io/apimachinery/pkg/api/meta/doc.go | 19 - 
.../apimachinery/pkg/api/meta/errors.go | 121 - .../pkg/api/meta/firsthit_restmapper.go | 105 - .../k8s.io/apimachinery/pkg/api/meta/help.go | 265 - .../apimachinery/pkg/api/meta/interfaces.go | 143 - .../k8s.io/apimachinery/pkg/api/meta/lazy.go | 112 - .../k8s.io/apimachinery/pkg/api/meta/meta.go | 643 - .../pkg/api/meta/multirestmapper.go | 220 - .../apimachinery/pkg/api/meta/priority.go | 230 - .../apimachinery/pkg/api/meta/restmapper.go | 529 - .../unstructured/unstructuredscheme/scheme.go | 129 - .../pkg/util/duration/duration.go | 93 - src/vendor/k8s.io/cli-runtime/LICENSE | 202 - .../pkg/genericclioptions/builder_flags.go | 231 - .../genericclioptions/builder_flags_fake.go | 54 - .../pkg/genericclioptions/client_config.go | 72 - .../pkg/genericclioptions/command_headers.go | 94 - .../pkg/genericclioptions/config_flags.go | 466 - .../genericclioptions/config_flags_fake.go | 127 - .../cli-runtime/pkg/genericclioptions/doc.go | 19 - .../pkg/genericclioptions/filename_flags.go | 82 - .../pkg/genericclioptions/io_options.go | 57 - .../pkg/genericclioptions/json_yaml_flags.go | 79 - .../pkg/genericclioptions/jsonpath_flags.go | 137 - .../genericclioptions/kube_template_flags.go | 94 - .../pkg/genericclioptions/name_flags.go | 83 - .../pkg/genericclioptions/print_flags.go | 171 - .../pkg/genericclioptions/record_flags.go | 201 - .../pkg/genericclioptions/template_flags.go | 136 - .../cli-runtime/pkg/printers/discard.go | 30 - .../k8s.io/cli-runtime/pkg/printers/doc.go | 19 - .../cli-runtime/pkg/printers/interface.go | 54 - .../k8s.io/cli-runtime/pkg/printers/json.go | 79 - .../cli-runtime/pkg/printers/jsonpath.go | 147 - .../cli-runtime/pkg/printers/managedfields.go | 59 - .../k8s.io/cli-runtime/pkg/printers/name.go | 130 - .../cli-runtime/pkg/printers/sourcechecker.go | 60 - .../cli-runtime/pkg/printers/tableprinter.go | 588 - .../cli-runtime/pkg/printers/tabwriter.go | 36 - .../cli-runtime/pkg/printers/template.go | 118 - .../cli-runtime/pkg/printers/typesetter.go | 95 - .../pkg/printers/warningprinter.go | 55 - .../k8s.io/cli-runtime/pkg/printers/yaml.go | 85 - .../cli-runtime/pkg/resource/builder.go | 1247 - .../k8s.io/cli-runtime/pkg/resource/client.go | 69 - .../cli-runtime/pkg/resource/crd_finder.go | 110 - .../k8s.io/cli-runtime/pkg/resource/doc.go | 24 - .../k8s.io/cli-runtime/pkg/resource/fake.go | 40 - .../k8s.io/cli-runtime/pkg/resource/helper.go | 321 - .../cli-runtime/pkg/resource/interfaces.go | 103 - .../pkg/resource/kustomizevisitor.go | 54 - .../k8s.io/cli-runtime/pkg/resource/mapper.go | 166 - .../pkg/resource/metadata_decoder.go | 56 - .../pkg/resource/query_param_verifier.go | 166 - .../k8s.io/cli-runtime/pkg/resource/result.go | 242 - .../k8s.io/cli-runtime/pkg/resource/scheme.go | 82 - .../cli-runtime/pkg/resource/selector.go | 92 - .../cli-runtime/pkg/resource/visitor.go | 742 - .../discovery/cached/disk/cached_discovery.go | 312 - .../discovery/cached/disk/round_tripper.go | 120 - .../client-go/discovery/discovery_client.go | 529 - src/vendor/k8s.io/client-go/discovery/doc.go | 19 - .../k8s.io/client-go/discovery/helper.go | 146 - .../k8s.io/client-go/dynamic/interface.go | 63 - src/vendor/k8s.io/client-go/dynamic/scheme.go | 108 - src/vendor/k8s.io/client-go/dynamic/simple.go | 388 - .../k8s.io/client-go/openapi/cached/client.go | 54 - .../client-go/openapi/cached/groupversion.go | 45 - src/vendor/k8s.io/client-go/openapi/client.go | 64 - .../k8s.io/client-go/openapi/groupversion.go | 59 - .../restmapper/category_expansion.go | 119 - 
.../k8s.io/client-go/restmapper/discovery.go | 338 - .../k8s.io/client-go/restmapper/shortcut.go | 187 - .../third_party/forked/golang/LICENSE | 27 - .../third_party/forked/golang/PATENTS | 22 - .../forked/golang/template/exec.go | 52 - .../forked/golang/template/funcs.go | 177 - src/vendor/k8s.io/client-go/tools/auth/OWNERS | 8 - .../k8s.io/client-go/tools/auth/clientauth.go | 126 - .../tools/clientcmd/api/latest/latest.go | 61 - .../tools/clientcmd/api/v1/conversion.go | 174 - .../tools/clientcmd/api/v1/defaults.go | 37 - .../client-go/tools/clientcmd/api/v1/doc.go | 21 - .../tools/clientcmd/api/v1/register.go | 56 - .../client-go/tools/clientcmd/api/v1/types.go | 266 - .../api/v1/zz_generated.conversion.go | 456 - .../clientcmd/api/v1/zz_generated.deepcopy.go | 349 - .../clientcmd/api/v1/zz_generated.defaults.go | 43 - .../client-go/tools/clientcmd/auth_loaders.go | 111 - .../tools/clientcmd/client_config.go | 637 - .../client-go/tools/clientcmd/config.go | 500 - .../k8s.io/client-go/tools/clientcmd/doc.go | 37 - .../k8s.io/client-go/tools/clientcmd/flag.go | 49 - .../client-go/tools/clientcmd/helpers.go | 50 - .../client-go/tools/clientcmd/loader.go | 648 - .../tools/clientcmd/merged_client_builder.go | 172 - .../client-go/tools/clientcmd/overrides.go | 259 - .../client-go/tools/clientcmd/validation.go | 371 - .../k8s.io/client-go/util/homedir/homedir.go | 92 - .../k8s.io/client-go/util/jsonpath/doc.go | 20 - .../client-go/util/jsonpath/jsonpath.go | 579 - .../k8s.io/client-go/util/jsonpath/node.go | 256 - .../k8s.io/client-go/util/jsonpath/parser.go | 527 - src/vendor/k8s.io/kube-openapi/LICENSE | 202 - .../kube-openapi/pkg/builder3/util/util.go | 51 - .../k8s.io/kube-openapi/pkg/common/common.go | 320 - .../k8s.io/kube-openapi/pkg/common/doc.go | 19 - .../kube-openapi/pkg/common/interfaces.go | 88 - .../kube-openapi/pkg/handler3/handler.go | 291 - .../pkg/internal/handler/handler_cache.go | 57 - .../kube-openapi/pkg/openapiconv/convert.go | 322 - .../kube-openapi/pkg/schemamutation/walker.go | 519 - .../kube-openapi/pkg/spec3/component.go | 47 - .../k8s.io/kube-openapi/pkg/spec3/encoding.go | 64 - .../k8s.io/kube-openapi/pkg/spec3/example.go | 73 - .../pkg/spec3/external_documentation.go | 58 - .../k8s.io/kube-openapi/pkg/spec3/header.go | 90 - .../kube-openapi/pkg/spec3/media_type.go | 66 - .../kube-openapi/pkg/spec3/operation.go | 79 - .../kube-openapi/pkg/spec3/parameter.go | 94 - .../k8s.io/kube-openapi/pkg/spec3/path.go | 142 - .../kube-openapi/pkg/spec3/request_body.go | 73 - .../k8s.io/kube-openapi/pkg/spec3/response.go | 203 - .../pkg/spec3/security_requirement.go | 56 - .../kube-openapi/pkg/spec3/security_scheme.go | 118 - .../k8s.io/kube-openapi/pkg/spec3/server.go | 98 - .../k8s.io/kube-openapi/pkg/spec3/spec.go | 37 - .../pkg/validation/spec/.gitignore | 2 - .../kube-openapi/pkg/validation/spec/LICENSE | 202 - .../pkg/validation/spec/contact_info.go | 24 - .../pkg/validation/spec/external_docs.go | 24 - .../pkg/validation/spec/gnostic.go | 1515 - .../pkg/validation/spec/header.go | 75 - .../kube-openapi/pkg/validation/spec/info.go | 174 - .../kube-openapi/pkg/validation/spec/items.go | 109 - .../pkg/validation/spec/license.go | 23 - .../pkg/validation/spec/operation.go | 96 - .../pkg/validation/spec/parameter.go | 111 - .../pkg/validation/spec/path_item.go | 74 - .../kube-openapi/pkg/validation/spec/paths.go | 85 - .../kube-openapi/pkg/validation/spec/ref.go | 167 - .../pkg/validation/spec/response.go | 78 - .../pkg/validation/spec/responses.go | 110 - 
.../pkg/validation/spec/schema.go | 513 - .../pkg/validation/spec/security_scheme.go | 64 - .../pkg/validation/spec/swagger.go | 286 - .../kube-openapi/pkg/validation/spec/tag.go | 59 - src/vendor/k8s.io/utils/pointer/OWNERS | 10 - src/vendor/k8s.io/utils/pointer/README.md | 3 - src/vendor/k8s.io/utils/pointer/pointer.go | 300 - src/vendor/modules.txt | 335 +- src/vendor/oras.land/oras-go/LICENSE | 201 - .../oras.land/oras-go/pkg/artifact/consts.go | 22 - .../oras.land/oras-go/pkg/auth/client.go | 45 - .../oras.land/oras-go/pkg/auth/client_opts.go | 123 - .../oras-go/pkg/auth/docker/client.go | 123 - .../oras-go/pkg/auth/docker/login.go | 103 - .../oras-go/pkg/auth/docker/login_tls.go | 220 - .../oras-go/pkg/auth/docker/logout.go | 42 - .../oras-go/pkg/auth/docker/resolver.go | 86 - .../oras.land/oras-go/pkg/content/consts.go | 57 - .../oras-go/pkg/content/decompress.go | 151 - .../oras.land/oras-go/pkg/content/errors.go | 33 - .../oras.land/oras-go/pkg/content/file.go | 534 - .../oras.land/oras-go/pkg/content/gunzip.go | 72 - .../oras-go/pkg/content/interface.go | 26 - .../oras.land/oras-go/pkg/content/iowriter.go | 112 - .../oras.land/oras-go/pkg/content/manifest.go | 95 - .../oras.land/oras-go/pkg/content/memory.go | 284 - .../oras-go/pkg/content/multireader.go | 56 - .../oras-go/pkg/content/multiwriter.go | 42 - .../oras.land/oras-go/pkg/content/oci.go | 336 - .../oras.land/oras-go/pkg/content/opts.go | 112 - .../oras-go/pkg/content/passthrough.go | 286 - .../oras.land/oras-go/pkg/content/readerat.go | 68 - .../oras.land/oras-go/pkg/content/registry.go | 84 - .../oras.land/oras-go/pkg/content/untar.go | 157 - .../oras.land/oras-go/pkg/content/utils.go | 223 - .../oras.land/oras-go/pkg/context/context.go | 24 - .../oras.land/oras-go/pkg/context/logger.go | 50 - src/vendor/oras.land/oras-go/pkg/oras/copy.go | 213 - .../oras.land/oras-go/pkg/oras/errors.go | 42 - src/vendor/oras.land/oras-go/pkg/oras/opts.go | 254 - .../oras.land/oras-go/pkg/oras/provider.go | 79 - .../oras.land/oras-go/pkg/oras/store.go | 213 - .../oras-go/pkg/registry/reference.go | 177 - .../oras-go/pkg/registry/remote/auth/cache.go | 158 - .../pkg/registry/remote/auth/challenge.go | 166 - .../pkg/registry/remote/auth/client.go | 367 - .../pkg/registry/remote/auth/credential.go | 39 - .../oras-go/pkg/registry/remote/auth/scope.go | 231 - .../remote/internal/errutil/errors.go | 83 - .../registry/remote/internal/syncutil/once.go | 69 - .../oras-go/pkg/registry/remote/repository.go | 171 - .../oras-go/pkg/registry/remote/url.go | 42 - .../oras-go/pkg/registry/remote/utils.go | 72 - .../oras-go/pkg/registry/repository.go | 57 - .../oras.land/oras-go/pkg/target/target.go | 26 - src/vendor/sigs.k8s.io/kustomize/api/LICENSE | 201 - .../api/filters/annotations/annotations.go | 52 - .../kustomize/api/filters/annotations/doc.go | 6 - .../kustomize/api/filters/fieldspec/doc.go | 6 - .../api/filters/fieldspec/fieldspec.go | 182 - .../api/filters/filtersutil/setters.go | 105 - .../kustomize/api/filters/fsslice/doc.go | 6 - .../kustomize/api/filters/fsslice/fsslice.go | 47 - .../api/filters/iampolicygenerator/doc.go | 6 - .../iampolicygenerator/iampolicygenerator.go | 55 - .../kustomize/api/filters/imagetag/doc.go | 12 - .../api/filters/imagetag/imagetag.go | 72 - .../kustomize/api/filters/imagetag/legacy.go | 104 - .../kustomize/api/filters/imagetag/updater.go | 71 - .../kustomize/api/filters/labels/doc.go | 6 - .../kustomize/api/filters/labels/labels.go | 53 - .../kustomize/api/filters/nameref/doc.go | 6 - 
.../kustomize/api/filters/nameref/nameref.go | 413 - .../api/filters/nameref/seqfilter.go | 60 - .../kustomize/api/filters/namespace/doc.go | 9 - .../api/filters/namespace/namespace.go | 209 - .../api/filters/patchjson6902/doc.go | 6 - .../filters/patchjson6902/patchjson6902.go | 65 - .../api/filters/patchstrategicmerge/doc.go | 6 - .../patchstrategicmerge.go | 36 - .../kustomize/api/filters/prefix/doc.go | 6 - .../kustomize/api/filters/prefix/prefix.go | 50 - .../kustomize/api/filters/refvar/doc.go | 6 - .../kustomize/api/filters/refvar/expand.go | 147 - .../kustomize/api/filters/refvar/refvar.go | 113 - .../kustomize/api/filters/replacement/doc.go | 7 - .../api/filters/replacement/replacement.go | 258 - .../kustomize/api/filters/replicacount/doc.go | 6 - .../api/filters/replicacount/replicacount.go | 48 - .../kustomize/api/filters/suffix/doc.go | 6 - .../kustomize/api/filters/suffix/suffix.go | 50 - .../api/filters/valueadd/valueadd.go | 134 - .../kustomize/api/hasher/hasher.go | 155 - .../sigs.k8s.io/kustomize/api/ifc/ifc.go | 48 - .../sigs.k8s.io/kustomize/api/image/image.go | 66 - .../accumulator/loadconfigfromcrds.go | 198 - .../accumulator/namereferencetransformer.go | 164 - .../internal/accumulator/refvartransformer.go | 57 - .../internal/accumulator/resaccumulator.go | 189 - .../builtins/AnnotationsTransformer.go | 38 - .../internal/builtins/ConfigMapGenerator.go | 39 - .../api/internal/builtins/HashTransformer.go | 40 - .../builtins/HelmChartInflationGenerator.go | 339 - .../internal/builtins/IAMPolicyGenerator.go | 33 - .../internal/builtins/ImageTagTransformer.go | 41 - .../api/internal/builtins/LabelTransformer.go | 38 - .../builtins/LegacyOrderTransformer.go | 46 - .../internal/builtins/NamespaceTransformer.go | 74 - .../builtins/PatchJson6902Transformer.go | 105 - .../PatchStrategicMergeTransformer.go | 89 - .../api/internal/builtins/PatchTransformer.go | 153 - .../internal/builtins/PrefixTransformer.go | 96 - .../builtins/ReplacementTransformer.go | 78 - .../builtins/ReplicaCountTransformer.go | 73 - .../api/internal/builtins/SecretGenerator.go | 39 - .../internal/builtins/SuffixTransformer.go | 96 - .../internal/builtins/ValueAddTransformer.go | 141 - .../kustomize/api/internal/builtins/doc.go | 8 - .../api/internal/generators/configmap.go | 52 - .../api/internal/generators/secret.go | 59 - .../api/internal/generators/utils.go | 97 - .../kustomize/api/internal/git/cloner.go | 54 - .../kustomize/api/internal/git/gitrunner.go | 55 - .../kustomize/api/internal/git/repospec.go | 267 - .../api/internal/kusterr/yamlformaterror.go | 55 - .../api/internal/plugins/builtinconfig/doc.go | 10 - .../builtinconfig/loaddefaultconfig.go | 42 - .../builtinconfig/namebackreferences.go | 99 - .../builtinconfig/transformerconfig.go | 148 - .../builtinplugintype_string.go | 42 - .../plugins/builtinhelpers/builtins.go | 114 - .../internal/plugins/execplugin/execplugin.go | 192 - .../api/internal/plugins/fnplugin/fnplugin.go | 199 - .../api/internal/plugins/loader/loader.go | 317 - .../api/internal/plugins/utils/utils.go | 240 - .../target/errmissingkustomization.go | 48 - .../api/internal/target/kusttarget.go | 565 - .../target/kusttarget_configplugin.go | 446 - .../api/internal/target/multitransformer.go | 41 - .../api/internal/utils/annotations.go | 29 - .../api/internal/utils/errtimeout.go | 36 - .../api/internal/utils/makeResIds.go | 67 - .../api/internal/utils/stringslice.go | 44 - .../kustomize/api/internal/utils/timedcall.go | 23 - .../api/internal/validate/fieldvalidator.go | 68 - 
.../builtinpluginconsts/commonannotations.go | 47 - .../builtinpluginconsts/commonlabels.go | 159 - .../builtinpluginconsts/defaultconfig.go | 40 - .../api/konfig/builtinpluginconsts/doc.go | 8 - .../api/konfig/builtinpluginconsts/images.go | 18 - .../konfig/builtinpluginconsts/nameprefix.go | 11 - .../builtinpluginconsts/namereference.go | 427 - .../konfig/builtinpluginconsts/namespace.go | 20 - .../konfig/builtinpluginconsts/namesuffix.go | 11 - .../konfig/builtinpluginconsts/replicas.go | 23 - .../builtinpluginconsts/varreference.go | 223 - .../sigs.k8s.io/kustomize/api/konfig/doc.go | 7 - .../kustomize/api/konfig/general.go | 49 - .../kustomize/api/konfig/plugins.go | 138 - .../sigs.k8s.io/kustomize/api/krusty/doc.go | 11 - .../kustomize/api/krusty/kustomizer.go | 121 - .../kustomize/api/krusty/options.go | 58 - src/vendor/sigs.k8s.io/kustomize/api/kv/kv.go | 229 - .../kustomize/api/loader/errors.go | 11 - .../kustomize/api/loader/fileloader.go | 324 - .../kustomize/api/loader/loader.go | 35 - .../kustomize/api/loader/loadrestrictions.go | 35 - .../kustomize/api/provenance/provenance.go | 68 - .../kustomize/api/provider/depprovider.go | 42 - .../kustomize/api/resmap/factory.go | 145 - .../kustomize/api/resmap/idslice.go | 37 - .../kustomize/api/resmap/resmap.go | 333 - .../kustomize/api/resmap/reswrangler.go | 764 - .../sigs.k8s.io/kustomize/api/resource/doc.go | 5 - .../kustomize/api/resource/factory.go | 293 - .../kustomize/api/resource/idset.go | 30 - .../kustomize/api/resource/origin.go | 106 - .../kustomize/api/resource/resource.go | 527 - .../builtinpluginloadingoptions_string.go | 25 - .../kustomize/api/types/configmapargs.go | 10 - .../sigs.k8s.io/kustomize/api/types/doc.go | 9 - .../api/types/erronlybuiltinpluginsallowed.go | 33 - .../kustomize/api/types/errunabletofind.go | 40 - .../kustomize/api/types/fieldspec.go | 91 - .../sigs.k8s.io/kustomize/api/types/fix.go | 54 - .../kustomize/api/types/generationbehavior.go | 46 - .../kustomize/api/types/generatorargs.go | 27 - .../kustomize/api/types/generatoroptions.go | 76 - .../kustomize/api/types/helmchartargs.go | 122 - .../kustomize/api/types/iampolicygenerator.go | 36 - .../sigs.k8s.io/kustomize/api/types/image.go | 25 - .../kustomize/api/types/inventory.go | 16 - .../kustomize/api/types/kustomization.go | 274 - .../kustomize/api/types/kvpairsources.go | 36 - .../sigs.k8s.io/kustomize/api/types/labels.go | 30 - .../kustomize/api/types/loadrestrictions.go | 24 - .../api/types/loadrestrictions_string.go | 25 - .../kustomize/api/types/objectmeta.go | 13 - .../sigs.k8s.io/kustomize/api/types/pair.go | 10 - .../sigs.k8s.io/kustomize/api/types/patch.go | 34 - .../api/types/patchstrategicmerge.go | 9 - .../kustomize/api/types/pluginconfig.go | 47 - .../kustomize/api/types/pluginrestrictions.go | 62 - .../api/types/pluginrestrictions_string.go | 25 - .../kustomize/api/types/replacement.go | 87 - .../kustomize/api/types/replacementfield.go | 9 - .../kustomize/api/types/replica.go | 16 - .../kustomize/api/types/secretargs.go | 19 - .../kustomize/api/types/selector.go | 124 - .../kustomize/api/types/typemeta.go | 11 - .../sigs.k8s.io/kustomize/api/types/var.go | 211 - .../sigs.k8s.io/kustomize/kyaml/LICENSE | 201 - .../kustomize/kyaml/comments/comments.go | 83 - .../kustomize/kyaml/errors/errors.go | 40 - .../sigs.k8s.io/kustomize/kyaml/ext/ext.go | 10 - .../kustomize/kyaml/fieldmeta/fieldmeta.go | 275 - .../kustomize/kyaml/filesys/confirmeddir.go | 79 - .../kustomize/kyaml/filesys/doc.go | 7 - .../kustomize/kyaml/filesys/file.go | 15 - 
.../kustomize/kyaml/filesys/fileinfo.go | 34 - .../kustomize/kyaml/filesys/fileondisk.go | 27 - .../kustomize/kyaml/filesys/filesystem.go | 153 - .../kustomize/kyaml/filesys/fsnode.go | 647 - .../kustomize/kyaml/filesys/fsondisk.go | 139 - .../kustomize/kyaml/filesys/fsondisk_unix.go | 15 - .../kyaml/filesys/fsondisk_windows.go | 18 - .../kustomize/kyaml/filesys/util.go | 143 - .../kyaml/fn/runtime/container/container.go | 208 - .../kustomize/kyaml/fn/runtime/exec/doc.go | 5 - .../kustomize/kyaml/fn/runtime/exec/exec.go | 54 - .../kyaml/fn/runtime/runtimeutil/doc.go | 5 - .../fn/runtime/runtimeutil/functiontypes.go | 300 - .../fn/runtime/runtimeutil/runtimeutil.go | 281 - .../kyaml/fn/runtime/runtimeutil/types.go | 8 - .../kyaml/fn/runtime/starlark/context.go | 79 - .../kyaml/fn/runtime/starlark/doc.go | 36 - .../kyaml/fn/runtime/starlark/starlark.go | 180 - .../forked/github.com/go-yaml/yaml/LICENSE | 50 - .../forked/github.com/go-yaml/yaml/NOTICE | 13 - .../forked/github.com/go-yaml/yaml/README.md | 150 - .../forked/github.com/go-yaml/yaml/apic.go | 747 - .../forked/github.com/go-yaml/yaml/decode.go | 950 - .../github.com/go-yaml/yaml/emitterc.go | 2028 - .../forked/github.com/go-yaml/yaml/encode.go | 577 - .../forked/github.com/go-yaml/yaml/parserc.go | 1249 - .../forked/github.com/go-yaml/yaml/readerc.go | 434 - .../forked/github.com/go-yaml/yaml/resolve.go | 326 - .../github.com/go-yaml/yaml/scannerc.go | 3038 -- .../forked/github.com/go-yaml/yaml/sorter.go | 134 - .../forked/github.com/go-yaml/yaml/writerc.go | 48 - .../forked/github.com/go-yaml/yaml/yaml.go | 708 - .../forked/github.com/go-yaml/yaml/yamlh.go | 809 - .../github.com/go-yaml/yaml/yamlprivateh.go | 198 - .../github.com/qri-io/starlib/util/LICENSE | 21 - .../github.com/qri-io/starlib/util/doc.go | 25 - .../github.com/qri-io/starlib/util/util.go | 273 - .../kustomize/kyaml/kio/byteio_reader.go | 349 - .../kustomize/kyaml/kio/byteio_writer.go | 198 - .../sigs.k8s.io/kustomize/kyaml/kio/doc.go | 35 - .../kustomize/kyaml/kio/filters/filters.go | 210 - .../kustomize/kyaml/kio/filters/fmtr.go | 314 - .../kustomize/kyaml/kio/filters/grep.go | 117 - .../kustomize/kyaml/kio/filters/local.go | 38 - .../kustomize/kyaml/kio/filters/merge.go | 86 - .../kustomize/kyaml/kio/filters/merge3.go | 317 - .../kustomize/kyaml/kio/filters/modify.go | 4 - .../kyaml/kio/filters/stripcomments.go | 32 - .../kustomize/kyaml/kio/ignorefilesmatcher.go | 105 - .../sigs.k8s.io/kustomize/kyaml/kio/kio.go | 447 - .../kustomize/kyaml/kio/kioutil/kioutil.go | 420 - .../kustomize/kyaml/kio/pkgio_reader.go | 360 - .../kustomize/kyaml/kio/pkgio_writer.go | 150 - .../sigs.k8s.io/kustomize/kyaml/kio/tree.go | 519 - .../kustomize/kyaml/openapi/Makefile | 62 - .../kustomize/kyaml/openapi/README.md | 94 - .../openapi/kubernetesapi/openapiinfo.go | 18 - .../openapi/kubernetesapi/v1212/swagger.go | 249 - .../openapi/kubernetesapi/v1212/swagger.pb | 44195 ---------------- .../kyaml/openapi/kustomizationapi/swagger.go | 249 - .../openapi/kustomizationapi/swagger.json | 130 - .../kustomize/kyaml/openapi/openapi.go | 749 - .../kustomize/kyaml/order/syncorder.go | 121 - .../sigs.k8s.io/kustomize/kyaml/resid/gvk.go | 258 - .../kustomize/kyaml/resid/resid.go | 164 - .../kustomize/kyaml/runfn/runfn.go | 546 - .../kustomize/kyaml/sets/string.go | 64 - .../kustomize/kyaml/sets/stringlist.go | 44 - .../kustomize/kyaml/sliceutil/slice.go | 25 - .../kustomize/kyaml/utils/pathsplitter.go | 64 - .../sigs.k8s.io/kustomize/kyaml/yaml/alias.go | 99 - 
.../kustomize/kyaml/yaml/compatibility.go | 100 - .../sigs.k8s.io/kustomize/kyaml/yaml/const.go | 30 - .../kustomize/kyaml/yaml/datamap.go | 121 - .../sigs.k8s.io/kustomize/kyaml/yaml/doc.go | 49 - .../kustomize/kyaml/yaml/filters.go | 146 - .../sigs.k8s.io/kustomize/kyaml/yaml/fns.go | 867 - .../k8sgen/pkg/labels/copied.deepcopy.go | 44 - .../yaml/internal/k8sgen/pkg/labels/labels.go | 192 - .../internal/k8sgen/pkg/labels/selector.go | 925 - .../internal/k8sgen/pkg/selection/operator.go | 36 - .../internal/k8sgen/pkg/util/errors/errors.go | 252 - .../internal/k8sgen/pkg/util/sets/empty.go | 24 - .../internal/k8sgen/pkg/util/sets/string.go | 206 - .../pkg/util/validation/field/errors.go | 275 - .../k8sgen/pkg/util/validation/field/path.go | 94 - .../k8sgen/pkg/util/validation/validation.go | 506 - .../sigs.k8s.io/kustomize/kyaml/yaml/kfns.go | 137 - .../kustomize/kyaml/yaml/mapnode.go | 40 - .../sigs.k8s.io/kustomize/kyaml/yaml/match.go | 271 - .../kustomize/kyaml/yaml/merge2/merge2.go | 182 - .../kyaml/yaml/merge2/smpdirective.go | 101 - .../kyaml/yaml/merge2/smpdirective_string.go | 26 - .../kustomize/kyaml/yaml/merge3/merge3.go | 45 - .../kustomize/kyaml/yaml/merge3/visitor.go | 172 - .../sigs.k8s.io/kustomize/kyaml/yaml/order.go | 107 - .../sigs.k8s.io/kustomize/kyaml/yaml/rnode.go | 1294 - .../kustomize/kyaml/yaml/schema/schema.go | 44 - .../sigs.k8s.io/kustomize/kyaml/yaml/types.go | 240 - .../sigs.k8s.io/kustomize/kyaml/yaml/util.go | 70 - .../kyaml/yaml/walk/associative_sequence.go | 385 - .../kustomize/kyaml/yaml/walk/map.go | 173 - .../yaml/walk/nonassociative_sequence.go | 13 - .../kustomize/kyaml/yaml/walk/scalar.go | 11 - .../kustomize/kyaml/yaml/walk/visitor.go | 28 - .../kustomize/kyaml/yaml/walk/walk.go | 186 - 1186 files changed, 391 insertions(+), 309312 deletions(-) create mode 100644 src/server/registry/referrers.go create mode 100644 src/server/registry/referrers_test.go delete mode 100644 src/vendor/github.com/Azure/go-ansiterm/LICENSE delete mode 100644 src/vendor/github.com/Azure/go-ansiterm/README.md delete mode 100644 src/vendor/github.com/Azure/go-ansiterm/constants.go delete mode 100644 src/vendor/github.com/Azure/go-ansiterm/context.go delete mode 100644 src/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go delete mode 100644 src/vendor/github.com/Azure/go-ansiterm/csi_param_state.go delete mode 100644 src/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go delete mode 100644 src/vendor/github.com/Azure/go-ansiterm/escape_state.go delete mode 100644 src/vendor/github.com/Azure/go-ansiterm/event_handler.go delete mode 100644 src/vendor/github.com/Azure/go-ansiterm/ground_state.go delete mode 100644 src/vendor/github.com/Azure/go-ansiterm/osc_string_state.go delete mode 100644 src/vendor/github.com/Azure/go-ansiterm/parser.go delete mode 100644 src/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go delete mode 100644 src/vendor/github.com/Azure/go-ansiterm/parser_actions.go delete mode 100644 src/vendor/github.com/Azure/go-ansiterm/states.go delete mode 100644 src/vendor/github.com/Azure/go-ansiterm/utilities.go delete mode 100644 src/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go delete mode 100644 src/vendor/github.com/Azure/go-ansiterm/winterm/api.go delete mode 100644 src/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go delete mode 100644 src/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go delete mode 100644 src/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go delete mode 100644 
src/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go delete mode 100644 src/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go delete mode 100644 src/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go delete mode 100644 src/vendor/github.com/beego/beego/v2/client/cache/README.md delete mode 100644 src/vendor/github.com/beego/beego/v2/client/cache/cache.go delete mode 100644 src/vendor/github.com/beego/beego/v2/client/cache/calc_utils.go delete mode 100644 src/vendor/github.com/beego/beego/v2/client/cache/conv.go delete mode 100644 src/vendor/github.com/beego/beego/v2/client/cache/error_code.go delete mode 100644 src/vendor/github.com/beego/beego/v2/client/cache/file.go delete mode 100644 src/vendor/github.com/beego/beego/v2/client/cache/memory.go delete mode 100644 src/vendor/github.com/beego/beego/v2/client/cache/module.go delete mode 100644 src/vendor/github.com/beego/beego/v2/client/cache/random_expired_cache.go delete mode 100644 src/vendor/github.com/beego/beego/v2/client/cache/redis/redis.go delete mode 100644 src/vendor/github.com/beego/beego/v2/core/berror/codes.go delete mode 100644 src/vendor/github.com/beego/beego/v2/core/berror/error.go delete mode 100644 src/vendor/github.com/beego/beego/v2/core/berror/pre_define_code.go delete mode 100644 src/vendor/github.com/containerd/containerd/LICENSE delete mode 100644 src/vendor/github.com/containerd/containerd/NOTICE delete mode 100644 src/vendor/github.com/containerd/containerd/archive/compression/compression.go delete mode 100644 src/vendor/github.com/containerd/containerd/content/adaptor.go delete mode 100644 src/vendor/github.com/containerd/containerd/content/content.go delete mode 100644 src/vendor/github.com/containerd/containerd/content/helpers.go delete mode 100644 src/vendor/github.com/containerd/containerd/content/local/locks.go delete mode 100644 src/vendor/github.com/containerd/containerd/content/local/readerat.go delete mode 100644 src/vendor/github.com/containerd/containerd/content/local/store.go delete mode 100644 src/vendor/github.com/containerd/containerd/content/local/store_bsd.go delete mode 100644 src/vendor/github.com/containerd/containerd/content/local/store_openbsd.go delete mode 100644 src/vendor/github.com/containerd/containerd/content/local/store_unix.go delete mode 100644 src/vendor/github.com/containerd/containerd/content/local/store_windows.go delete mode 100644 src/vendor/github.com/containerd/containerd/content/local/writer.go delete mode 100644 src/vendor/github.com/containerd/containerd/errdefs/errors.go delete mode 100644 src/vendor/github.com/containerd/containerd/errdefs/grpc.go delete mode 100644 src/vendor/github.com/containerd/containerd/filters/adaptor.go delete mode 100644 src/vendor/github.com/containerd/containerd/filters/filter.go delete mode 100644 src/vendor/github.com/containerd/containerd/filters/parser.go delete mode 100644 src/vendor/github.com/containerd/containerd/filters/quote.go delete mode 100644 src/vendor/github.com/containerd/containerd/filters/scanner.go delete mode 100644 src/vendor/github.com/containerd/containerd/images/annotations.go delete mode 100644 src/vendor/github.com/containerd/containerd/images/diffid.go delete mode 100644 src/vendor/github.com/containerd/containerd/images/handlers.go delete mode 100644 src/vendor/github.com/containerd/containerd/images/image.go delete mode 100644 src/vendor/github.com/containerd/containerd/images/importexport.go delete mode 100644 src/vendor/github.com/containerd/containerd/images/mediatypes.go 
delete mode 100644 src/vendor/github.com/containerd/containerd/labels/labels.go delete mode 100644 src/vendor/github.com/containerd/containerd/labels/validate.go delete mode 100644 src/vendor/github.com/containerd/containerd/log/context.go delete mode 100644 src/vendor/github.com/containerd/containerd/platforms/compare.go delete mode 100644 src/vendor/github.com/containerd/containerd/platforms/cpuinfo.go delete mode 100644 src/vendor/github.com/containerd/containerd/platforms/database.go delete mode 100644 src/vendor/github.com/containerd/containerd/platforms/defaults.go delete mode 100644 src/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go delete mode 100644 src/vendor/github.com/containerd/containerd/platforms/defaults_unix.go delete mode 100644 src/vendor/github.com/containerd/containerd/platforms/defaults_windows.go delete mode 100644 src/vendor/github.com/containerd/containerd/platforms/platforms.go delete mode 100644 src/vendor/github.com/containerd/containerd/reference/reference.go delete mode 100644 src/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go delete mode 100644 src/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go delete mode 100644 src/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go delete mode 100644 src/vendor/github.com/containerd/containerd/remotes/docker/converter.go delete mode 100644 src/vendor/github.com/containerd/containerd/remotes/docker/errcode.go delete mode 100644 src/vendor/github.com/containerd/containerd/remotes/docker/errdesc.go delete mode 100644 src/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go delete mode 100644 src/vendor/github.com/containerd/containerd/remotes/docker/handler.go delete mode 100644 src/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go delete mode 100644 src/vendor/github.com/containerd/containerd/remotes/docker/pusher.go delete mode 100644 src/vendor/github.com/containerd/containerd/remotes/docker/registry.go delete mode 100644 src/vendor/github.com/containerd/containerd/remotes/docker/resolver.go delete mode 100644 src/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go delete mode 100644 src/vendor/github.com/containerd/containerd/remotes/docker/scope.go delete mode 100644 src/vendor/github.com/containerd/containerd/remotes/docker/status.go delete mode 100644 src/vendor/github.com/containerd/containerd/remotes/errors/errors.go delete mode 100644 src/vendor/github.com/containerd/containerd/remotes/handlers.go delete mode 100644 src/vendor/github.com/containerd/containerd/remotes/resolver.go delete mode 100644 src/vendor/github.com/containerd/containerd/version/version.go delete mode 100644 src/vendor/github.com/cyphar/filepath-securejoin/.travis.yml delete mode 100644 src/vendor/github.com/cyphar/filepath-securejoin/LICENSE delete mode 100644 src/vendor/github.com/cyphar/filepath-securejoin/README.md delete mode 100644 src/vendor/github.com/cyphar/filepath-securejoin/VERSION delete mode 100644 src/vendor/github.com/cyphar/filepath-securejoin/join.go delete mode 100644 src/vendor/github.com/cyphar/filepath-securejoin/vfs.go delete mode 100644 src/vendor/github.com/docker/cli/AUTHORS delete mode 100644 src/vendor/github.com/docker/cli/LICENSE delete mode 100644 src/vendor/github.com/docker/cli/NOTICE delete mode 100644 src/vendor/github.com/docker/cli/cli/config/config.go delete mode 100644 src/vendor/github.com/docker/cli/cli/config/configfile/file.go delete mode 100644 
src/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go delete mode 100644 src/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go delete mode 100644 src/vendor/github.com/docker/cli/cli/config/credentials/credentials.go delete mode 100644 src/vendor/github.com/docker/cli/cli/config/credentials/default_store.go delete mode 100644 src/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go delete mode 100644 src/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go delete mode 100644 src/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go delete mode 100644 src/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go delete mode 100644 src/vendor/github.com/docker/cli/cli/config/credentials/file_store.go delete mode 100644 src/vendor/github.com/docker/cli/cli/config/credentials/native_store.go delete mode 100644 src/vendor/github.com/docker/cli/cli/config/types/authconfig.go delete mode 100644 src/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go delete mode 100644 src/vendor/github.com/docker/distribution/registry/api/v2/doc.go delete mode 100644 src/vendor/github.com/docker/distribution/registry/api/v2/errors.go delete mode 100644 src/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go delete mode 100644 src/vendor/github.com/docker/distribution/registry/api/v2/routes.go delete mode 100644 src/vendor/github.com/docker/distribution/registry/api/v2/urls.go delete mode 100644 src/vendor/github.com/docker/distribution/registry/client/auth/api_version.go delete mode 100644 src/vendor/github.com/docker/distribution/registry/client/auth/session.go delete mode 100644 src/vendor/github.com/docker/distribution/registry/client/blob_writer.go delete mode 100644 src/vendor/github.com/docker/distribution/registry/client/errors.go delete mode 100644 src/vendor/github.com/docker/distribution/registry/client/repository.go delete mode 100644 src/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go delete mode 100644 src/vendor/github.com/docker/docker-credential-helpers/LICENSE delete mode 100644 src/vendor/github.com/docker/docker-credential-helpers/client/client.go delete mode 100644 src/vendor/github.com/docker/docker-credential-helpers/client/command.go delete mode 100644 src/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go delete mode 100644 src/vendor/github.com/docker/docker-credential-helpers/credentials/error.go delete mode 100644 src/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go delete mode 100644 src/vendor/github.com/docker/docker-credential-helpers/credentials/version.go delete mode 100644 src/vendor/github.com/docker/docker/AUTHORS delete mode 100644 src/vendor/github.com/docker/docker/LICENSE delete mode 100644 src/vendor/github.com/docker/docker/NOTICE delete mode 100644 src/vendor/github.com/docker/docker/api/types/auth.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/client.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/configs.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/container/config.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/container/container_changes.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/container/container_create.go delete mode 100644 
src/vendor/github.com/docker/docker/api/types/container/container_top.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/container/container_update.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/container/container_wait.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/container/host_config.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/container/waitcondition.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/error_response.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/error_response_ext.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/filters/parse.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/graph_driver_data.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/id_response.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/image_delete_response_item.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/image_summary.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/mount/mount.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/network/network.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/plugin.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/plugin_device.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/plugin_env.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/plugin_interface_type.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/plugin_mount.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/plugin_responses.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/port.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/registry/authenticate.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/registry/registry.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/service_update_response.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/stats.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/strslice/strslice.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/swarm/common.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/swarm/config.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/swarm/container.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/swarm/network.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/swarm/node.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/swarm/runtime.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto delete mode 100644 src/vendor/github.com/docker/docker/api/types/swarm/secret.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/swarm/service.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/swarm/swarm.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/swarm/task.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/types.go delete mode 
100644 src/vendor/github.com/docker/docker/api/types/versions/README.md delete mode 100644 src/vendor/github.com/docker/docker/api/types/versions/compare.go delete mode 100644 src/vendor/github.com/docker/docker/api/types/volume.go delete mode 100644 src/vendor/github.com/docker/docker/errdefs/defs.go delete mode 100644 src/vendor/github.com/docker/docker/errdefs/doc.go delete mode 100644 src/vendor/github.com/docker/docker/errdefs/helpers.go delete mode 100644 src/vendor/github.com/docker/docker/errdefs/http_helpers.go delete mode 100644 src/vendor/github.com/docker/docker/errdefs/is.go delete mode 100644 src/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go delete mode 100644 src/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go delete mode 100644 src/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go delete mode 100644 src/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go delete mode 100644 src/vendor/github.com/docker/docker/pkg/ioutils/buffer.go delete mode 100644 src/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go delete mode 100644 src/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go delete mode 100644 src/vendor/github.com/docker/docker/pkg/ioutils/readers.go delete mode 100644 src/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go delete mode 100644 src/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go delete mode 100644 src/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go delete mode 100644 src/vendor/github.com/docker/docker/pkg/ioutils/writers.go delete mode 100644 src/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go delete mode 100644 src/vendor/github.com/docker/docker/pkg/longpath/longpath.go delete mode 100644 src/vendor/github.com/docker/docker/pkg/stringid/README.md delete mode 100644 src/vendor/github.com/docker/docker/pkg/stringid/stringid.go delete mode 100644 src/vendor/github.com/docker/docker/registry/auth.go delete mode 100644 src/vendor/github.com/docker/docker/registry/config.go delete mode 100644 src/vendor/github.com/docker/docker/registry/config_unix.go delete mode 100644 src/vendor/github.com/docker/docker/registry/config_windows.go delete mode 100644 src/vendor/github.com/docker/docker/registry/endpoint_v1.go delete mode 100644 src/vendor/github.com/docker/docker/registry/errors.go delete mode 100644 src/vendor/github.com/docker/docker/registry/registry.go delete mode 100644 src/vendor/github.com/docker/docker/registry/service.go delete mode 100644 src/vendor/github.com/docker/docker/registry/service_v2.go delete mode 100644 src/vendor/github.com/docker/docker/registry/session.go delete mode 100644 src/vendor/github.com/docker/docker/registry/types.go delete mode 100644 src/vendor/github.com/docker/docker/rootless/rootless.go delete mode 100644 src/vendor/github.com/docker/go-connections/LICENSE delete mode 100644 src/vendor/github.com/docker/go-connections/nat/nat.go delete mode 100644 src/vendor/github.com/docker/go-connections/nat/parse.go delete mode 100644 src/vendor/github.com/docker/go-connections/nat/sort.go delete mode 100644 src/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go delete mode 100644 src/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go delete mode 100644 src/vendor/github.com/docker/go-connections/tlsconfig/config.go delete mode 100644 src/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go delete mode 100644 
src/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go delete mode 100644 src/vendor/github.com/docker/go-units/CONTRIBUTING.md delete mode 100644 src/vendor/github.com/docker/go-units/LICENSE delete mode 100644 src/vendor/github.com/docker/go-units/MAINTAINERS delete mode 100644 src/vendor/github.com/docker/go-units/README.md delete mode 100644 src/vendor/github.com/docker/go-units/circle.yml delete mode 100644 src/vendor/github.com/docker/go-units/duration.go delete mode 100644 src/vendor/github.com/docker/go-units/size.go delete mode 100644 src/vendor/github.com/docker/go-units/ulimit.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/.gitignore delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/.goconvey delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/.travis.yml delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/CHANGES.md delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/LICENSE delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/Makefile delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/README.md delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/SECURITY.md delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/Srcfile delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/bench_test.sh delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/compress.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/compressor_cache.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/compressor_pools.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/compressors.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/constants.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/container.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/cors_filter.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/coverage.sh delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/curly.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/curly_route.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/custom_verb.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/doc.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/extensions.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/filter.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/json.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/jsoniter.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/jsr311.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/log/log.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/logger.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/mime.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/options_filter.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/parameter.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/path_expression.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/path_processor.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/request.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/response.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/route.go delete mode 100644 
src/vendor/github.com/emicklei/go-restful/v3/route_builder.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/route_reader.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/router.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/service_error.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/web_service.go delete mode 100644 src/vendor/github.com/emicklei/go-restful/v3/web_service_container.go delete mode 100644 src/vendor/github.com/evanphx/json-patch/.gitignore delete mode 100644 src/vendor/github.com/evanphx/json-patch/LICENSE delete mode 100644 src/vendor/github.com/evanphx/json-patch/README.md delete mode 100644 src/vendor/github.com/evanphx/json-patch/errors.go delete mode 100644 src/vendor/github.com/evanphx/json-patch/merge.go delete mode 100644 src/vendor/github.com/evanphx/json-patch/patch.go delete mode 100644 src/vendor/github.com/go-errors/errors/.travis.yml delete mode 100644 src/vendor/github.com/go-errors/errors/LICENSE.MIT delete mode 100644 src/vendor/github.com/go-errors/errors/README.md delete mode 100644 src/vendor/github.com/go-errors/errors/cover.out delete mode 100644 src/vendor/github.com/go-errors/errors/error.go delete mode 100644 src/vendor/github.com/go-errors/errors/parse_panic.go delete mode 100644 src/vendor/github.com/go-errors/errors/stackframe.go delete mode 100644 src/vendor/github.com/google/btree/.travis.yml delete mode 100644 src/vendor/github.com/google/btree/LICENSE delete mode 100644 src/vendor/github.com/google/btree/README.md delete mode 100644 src/vendor/github.com/google/btree/btree.go delete mode 100644 src/vendor/github.com/google/gnostic/LICENSE delete mode 100644 src/vendor/github.com/google/gnostic/compiler/README.md delete mode 100644 src/vendor/github.com/google/gnostic/compiler/context.go delete mode 100644 src/vendor/github.com/google/gnostic/compiler/error.go delete mode 100644 src/vendor/github.com/google/gnostic/compiler/extensions.go delete mode 100644 src/vendor/github.com/google/gnostic/compiler/helpers.go delete mode 100644 src/vendor/github.com/google/gnostic/compiler/main.go delete mode 100644 src/vendor/github.com/google/gnostic/compiler/reader.go delete mode 100644 src/vendor/github.com/google/gnostic/extensions/README.md delete mode 100644 src/vendor/github.com/google/gnostic/extensions/extension.pb.go delete mode 100644 src/vendor/github.com/google/gnostic/extensions/extension.proto delete mode 100644 src/vendor/github.com/google/gnostic/extensions/extensions.go delete mode 100644 src/vendor/github.com/google/gnostic/jsonschema/README.md delete mode 100644 src/vendor/github.com/google/gnostic/jsonschema/base.go delete mode 100644 src/vendor/github.com/google/gnostic/jsonschema/display.go delete mode 100644 src/vendor/github.com/google/gnostic/jsonschema/models.go delete mode 100644 src/vendor/github.com/google/gnostic/jsonschema/operations.go delete mode 100644 src/vendor/github.com/google/gnostic/jsonschema/reader.go delete mode 100644 src/vendor/github.com/google/gnostic/jsonschema/schema.json delete mode 100644 src/vendor/github.com/google/gnostic/jsonschema/writer.go delete mode 100644 src/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go delete mode 100644 src/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.pb.go delete mode 100644 src/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.proto delete mode 100644 src/vendor/github.com/google/gnostic/openapiv2/README.md delete mode 100644 
src/vendor/github.com/google/gnostic/openapiv2/document.go delete mode 100644 src/vendor/github.com/google/gnostic/openapiv2/openapi-2.0.json delete mode 100644 src/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go delete mode 100644 src/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go delete mode 100644 src/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto delete mode 100644 src/vendor/github.com/google/gnostic/openapiv3/README.md delete mode 100644 src/vendor/github.com/google/gnostic/openapiv3/document.go delete mode 100644 src/vendor/github.com/google/gnostic/openapiv3/openapi-3.0.json delete mode 100644 src/vendor/github.com/google/gnostic/openapiv3/openapi-3.1.json delete mode 100644 src/vendor/github.com/google/shlex/COPYING delete mode 100644 src/vendor/github.com/google/shlex/README delete mode 100644 src/vendor/github.com/google/shlex/shlex.go delete mode 100644 src/vendor/github.com/gregjones/httpcache/.travis.yml delete mode 100644 src/vendor/github.com/gregjones/httpcache/LICENSE.txt delete mode 100644 src/vendor/github.com/gregjones/httpcache/README.md delete mode 100644 src/vendor/github.com/gregjones/httpcache/httpcache.go delete mode 100644 src/vendor/github.com/imdario/mergo/.deepsource.toml delete mode 100644 src/vendor/github.com/imdario/mergo/.gitignore delete mode 100644 src/vendor/github.com/imdario/mergo/.travis.yml delete mode 100644 src/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md delete mode 100644 src/vendor/github.com/imdario/mergo/LICENSE delete mode 100644 src/vendor/github.com/imdario/mergo/README.md delete mode 100644 src/vendor/github.com/imdario/mergo/doc.go delete mode 100644 src/vendor/github.com/imdario/mergo/map.go delete mode 100644 src/vendor/github.com/imdario/mergo/merge.go delete mode 100644 src/vendor/github.com/imdario/mergo/mergo.go delete mode 100644 src/vendor/github.com/inconshreveable/mousetrap/LICENSE delete mode 100644 src/vendor/github.com/inconshreveable/mousetrap/README.md delete mode 100644 src/vendor/github.com/inconshreveable/mousetrap/trap_others.go delete mode 100644 src/vendor/github.com/inconshreveable/mousetrap/trap_windows.go delete mode 100644 src/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go delete mode 100644 src/vendor/github.com/klauspost/compress/.gitattributes delete mode 100644 src/vendor/github.com/klauspost/compress/.gitignore delete mode 100644 src/vendor/github.com/klauspost/compress/.goreleaser.yml delete mode 100644 src/vendor/github.com/klauspost/compress/LICENSE delete mode 100644 src/vendor/github.com/klauspost/compress/README.md delete mode 100644 src/vendor/github.com/klauspost/compress/compressible.go delete mode 100644 src/vendor/github.com/klauspost/compress/fse/README.md delete mode 100644 src/vendor/github.com/klauspost/compress/fse/bitreader.go delete mode 100644 src/vendor/github.com/klauspost/compress/fse/bitwriter.go delete mode 100644 src/vendor/github.com/klauspost/compress/fse/bytereader.go delete mode 100644 src/vendor/github.com/klauspost/compress/fse/compress.go delete mode 100644 src/vendor/github.com/klauspost/compress/fse/decompress.go delete mode 100644 src/vendor/github.com/klauspost/compress/fse/fse.go delete mode 100644 src/vendor/github.com/klauspost/compress/gen.sh delete mode 100644 src/vendor/github.com/klauspost/compress/huff0/.gitignore delete mode 100644 src/vendor/github.com/klauspost/compress/huff0/README.md delete mode 100644 src/vendor/github.com/klauspost/compress/huff0/bitreader.go delete mode 100644 
src/vendor/github.com/klauspost/compress/huff0/bitwriter.go delete mode 100644 src/vendor/github.com/klauspost/compress/huff0/bytereader.go delete mode 100644 src/vendor/github.com/klauspost/compress/huff0/compress.go delete mode 100644 src/vendor/github.com/klauspost/compress/huff0/decompress.go delete mode 100644 src/vendor/github.com/klauspost/compress/huff0/huff0.go delete mode 100644 src/vendor/github.com/klauspost/compress/internal/snapref/LICENSE delete mode 100644 src/vendor/github.com/klauspost/compress/internal/snapref/decode.go delete mode 100644 src/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go delete mode 100644 src/vendor/github.com/klauspost/compress/internal/snapref/encode.go delete mode 100644 src/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go delete mode 100644 src/vendor/github.com/klauspost/compress/internal/snapref/snappy.go delete mode 100644 src/vendor/github.com/klauspost/compress/s2sx.mod delete mode 100644 src/vendor/github.com/klauspost/compress/s2sx.sum delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/README.md delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/bitreader.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/bitwriter.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/blockdec.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/blockenc.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/blocktype_string.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/bytebuf.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/bytereader.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/decodeheader.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/decoder.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/decoder_options.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/dict.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/enc_base.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/enc_best.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/enc_better.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/enc_dfast.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/enc_fast.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/encoder.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/encoder_options.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/framedec.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/frameenc.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/fse_decoder.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/fse_encoder.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/fse_predefined.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/hash.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/history.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s delete 
mode 100644 src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/seqdec.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/seqenc.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/snappy.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/zip.go delete mode 100644 src/vendor/github.com/klauspost/compress/zstd/zstd.go delete mode 100644 src/vendor/github.com/liggitt/tabwriter/.travis.yml delete mode 100644 src/vendor/github.com/liggitt/tabwriter/LICENSE delete mode 100644 src/vendor/github.com/liggitt/tabwriter/README.md delete mode 100644 src/vendor/github.com/liggitt/tabwriter/tabwriter.go delete mode 100644 src/vendor/github.com/mitchellh/copystructure/LICENSE delete mode 100644 src/vendor/github.com/mitchellh/copystructure/README.md delete mode 100644 src/vendor/github.com/mitchellh/copystructure/copier_time.go delete mode 100644 src/vendor/github.com/mitchellh/copystructure/copystructure.go delete mode 100644 src/vendor/github.com/mitchellh/reflectwalk/.travis.yml delete mode 100644 src/vendor/github.com/mitchellh/reflectwalk/LICENSE delete mode 100644 src/vendor/github.com/mitchellh/reflectwalk/README.md delete mode 100644 src/vendor/github.com/mitchellh/reflectwalk/location.go delete mode 100644 src/vendor/github.com/mitchellh/reflectwalk/location_string.go delete mode 100644 src/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go delete mode 100644 src/vendor/github.com/moby/locker/LICENSE delete mode 100644 src/vendor/github.com/moby/locker/README.md delete mode 100644 src/vendor/github.com/moby/locker/locker.go delete mode 100644 src/vendor/github.com/moby/term/.gitignore delete mode 100644 src/vendor/github.com/moby/term/LICENSE delete mode 100644 src/vendor/github.com/moby/term/README.md delete mode 100644 src/vendor/github.com/moby/term/ascii.go delete mode 100644 src/vendor/github.com/moby/term/proxy.go delete mode 100644 src/vendor/github.com/moby/term/tc.go delete mode 100644 src/vendor/github.com/moby/term/term.go delete mode 100644 src/vendor/github.com/moby/term/term_windows.go delete mode 100644 src/vendor/github.com/moby/term/termios.go delete mode 100644 src/vendor/github.com/moby/term/termios_bsd.go delete mode 100644 src/vendor/github.com/moby/term/termios_nonbsd.go delete mode 100644 src/vendor/github.com/moby/term/windows/ansi_reader.go delete mode 100644 src/vendor/github.com/moby/term/windows/ansi_writer.go delete mode 100644 src/vendor/github.com/moby/term/windows/console.go delete mode 100644 src/vendor/github.com/moby/term/windows/doc.go delete mode 100644 src/vendor/github.com/moby/term/winsize.go delete mode 100644 src/vendor/github.com/monochromegane/go-gitignore/.travis.yml delete mode 100644 src/vendor/github.com/monochromegane/go-gitignore/LICENSE delete mode 100644 src/vendor/github.com/monochromegane/go-gitignore/README.md delete mode 100644 src/vendor/github.com/monochromegane/go-gitignore/depth_holder.go delete mode 100644 src/vendor/github.com/monochromegane/go-gitignore/full_scan_patterns.go delete mode 100644 src/vendor/github.com/monochromegane/go-gitignore/gitignore.go delete mode 100644 src/vendor/github.com/monochromegane/go-gitignore/index_scan_patterns.go delete mode 100644 src/vendor/github.com/monochromegane/go-gitignore/initial_holder.go delete mode 100644 
src/vendor/github.com/monochromegane/go-gitignore/match.go delete mode 100644 src/vendor/github.com/monochromegane/go-gitignore/pattern.go delete mode 100644 src/vendor/github.com/monochromegane/go-gitignore/patterns.go delete mode 100644 src/vendor/github.com/monochromegane/go-gitignore/util.go delete mode 100644 src/vendor/github.com/morikuni/aec/LICENSE delete mode 100644 src/vendor/github.com/morikuni/aec/README.md delete mode 100644 src/vendor/github.com/morikuni/aec/aec.go delete mode 100644 src/vendor/github.com/morikuni/aec/ansi.go delete mode 100644 src/vendor/github.com/morikuni/aec/builder.go delete mode 100644 src/vendor/github.com/morikuni/aec/sample.gif delete mode 100644 src/vendor/github.com/morikuni/aec/sgr.go delete mode 100644 src/vendor/github.com/munnerz/goautoneg/LICENSE delete mode 100644 src/vendor/github.com/munnerz/goautoneg/Makefile delete mode 100644 src/vendor/github.com/munnerz/goautoneg/README.txt delete mode 100644 src/vendor/github.com/munnerz/goautoneg/autoneg.go create mode 100644 src/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go delete mode 100644 src/vendor/github.com/peterbourgon/diskv/LICENSE delete mode 100644 src/vendor/github.com/peterbourgon/diskv/README.md delete mode 100644 src/vendor/github.com/peterbourgon/diskv/compression.go delete mode 100644 src/vendor/github.com/peterbourgon/diskv/diskv.go delete mode 100644 src/vendor/github.com/peterbourgon/diskv/index.go delete mode 100644 src/vendor/github.com/spf13/cobra/.gitignore delete mode 100644 src/vendor/github.com/spf13/cobra/.golangci.yml delete mode 100644 src/vendor/github.com/spf13/cobra/.mailmap delete mode 100644 src/vendor/github.com/spf13/cobra/CONDUCT.md delete mode 100644 src/vendor/github.com/spf13/cobra/CONTRIBUTING.md delete mode 100644 src/vendor/github.com/spf13/cobra/LICENSE.txt delete mode 100644 src/vendor/github.com/spf13/cobra/MAINTAINERS delete mode 100644 src/vendor/github.com/spf13/cobra/Makefile delete mode 100644 src/vendor/github.com/spf13/cobra/README.md delete mode 100644 src/vendor/github.com/spf13/cobra/active_help.go delete mode 100644 src/vendor/github.com/spf13/cobra/active_help.md delete mode 100644 src/vendor/github.com/spf13/cobra/args.go delete mode 100644 src/vendor/github.com/spf13/cobra/bash_completions.go delete mode 100644 src/vendor/github.com/spf13/cobra/bash_completions.md delete mode 100644 src/vendor/github.com/spf13/cobra/bash_completionsV2.go delete mode 100644 src/vendor/github.com/spf13/cobra/cobra.go delete mode 100644 src/vendor/github.com/spf13/cobra/command.go delete mode 100644 src/vendor/github.com/spf13/cobra/command_notwin.go delete mode 100644 src/vendor/github.com/spf13/cobra/command_win.go delete mode 100644 src/vendor/github.com/spf13/cobra/completions.go delete mode 100644 src/vendor/github.com/spf13/cobra/fish_completions.go delete mode 100644 src/vendor/github.com/spf13/cobra/fish_completions.md delete mode 100644 src/vendor/github.com/spf13/cobra/flag_groups.go delete mode 100644 src/vendor/github.com/spf13/cobra/powershell_completions.go delete mode 100644 src/vendor/github.com/spf13/cobra/powershell_completions.md delete mode 100644 src/vendor/github.com/spf13/cobra/projects_using_cobra.md delete mode 100644 src/vendor/github.com/spf13/cobra/shell_completions.go delete mode 100644 src/vendor/github.com/spf13/cobra/shell_completions.md delete mode 100644 src/vendor/github.com/spf13/cobra/user_guide.md delete mode 100644 src/vendor/github.com/spf13/cobra/zsh_completions.go delete mode 100644 
src/vendor/github.com/spf13/cobra/zsh_completions.md delete mode 100644 src/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt delete mode 100644 src/vendor/github.com/xeipuuv/gojsonpointer/README.md delete mode 100644 src/vendor/github.com/xeipuuv/gojsonpointer/pointer.go delete mode 100644 src/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt delete mode 100644 src/vendor/github.com/xeipuuv/gojsonreference/README.md delete mode 100644 src/vendor/github.com/xeipuuv/gojsonreference/reference.go delete mode 100644 src/vendor/github.com/xeipuuv/gojsonschema/.gitignore delete mode 100644 src/vendor/github.com/xeipuuv/gojsonschema/.travis.yml delete mode 100644 src/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt delete mode 100644 src/vendor/github.com/xeipuuv/gojsonschema/README.md delete mode 100644 src/vendor/github.com/xeipuuv/gojsonschema/draft.go delete mode 100644 src/vendor/github.com/xeipuuv/gojsonschema/errors.go delete mode 100644 src/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go delete mode 100644 src/vendor/github.com/xeipuuv/gojsonschema/glide.yaml delete mode 100644 src/vendor/github.com/xeipuuv/gojsonschema/internalLog.go delete mode 100644 src/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go delete mode 100644 src/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go delete mode 100644 src/vendor/github.com/xeipuuv/gojsonschema/locales.go delete mode 100644 src/vendor/github.com/xeipuuv/gojsonschema/result.go delete mode 100644 src/vendor/github.com/xeipuuv/gojsonschema/schema.go delete mode 100644 src/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go delete mode 100644 src/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go delete mode 100644 src/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go delete mode 100644 src/vendor/github.com/xeipuuv/gojsonschema/schemaType.go delete mode 100644 src/vendor/github.com/xeipuuv/gojsonschema/subSchema.go delete mode 100644 src/vendor/github.com/xeipuuv/gojsonschema/types.go delete mode 100644 src/vendor/github.com/xeipuuv/gojsonschema/utils.go delete mode 100644 src/vendor/github.com/xeipuuv/gojsonschema/validation.go delete mode 100644 src/vendor/github.com/xlab/treeprint/LICENSE delete mode 100644 src/vendor/github.com/xlab/treeprint/README.md delete mode 100644 src/vendor/github.com/xlab/treeprint/helpers.go delete mode 100644 src/vendor/github.com/xlab/treeprint/struct.go delete mode 100644 src/vendor/github.com/xlab/treeprint/treeprint.go delete mode 100644 src/vendor/go.starlark.net/LICENSE delete mode 100644 src/vendor/go.starlark.net/internal/compile/compile.go delete mode 100644 src/vendor/go.starlark.net/internal/compile/serial.go delete mode 100644 src/vendor/go.starlark.net/internal/spell/spell.go delete mode 100644 src/vendor/go.starlark.net/resolve/binding.go delete mode 100644 src/vendor/go.starlark.net/resolve/resolve.go delete mode 100644 src/vendor/go.starlark.net/starlark/debug.go delete mode 100644 src/vendor/go.starlark.net/starlark/empty.s delete mode 100644 src/vendor/go.starlark.net/starlark/eval.go delete mode 100644 src/vendor/go.starlark.net/starlark/hashtable.go delete mode 100644 src/vendor/go.starlark.net/starlark/int.go delete mode 100644 src/vendor/go.starlark.net/starlark/interp.go delete mode 100644 src/vendor/go.starlark.net/starlark/library.go delete mode 100644 src/vendor/go.starlark.net/starlark/profile.go delete mode 100644 src/vendor/go.starlark.net/starlark/unpack.go delete mode 100644 
src/vendor/go.starlark.net/starlark/value.go delete mode 100644 src/vendor/go.starlark.net/starlarkstruct/module.go delete mode 100644 src/vendor/go.starlark.net/starlarkstruct/struct.go delete mode 100644 src/vendor/go.starlark.net/syntax/grammar.txt delete mode 100644 src/vendor/go.starlark.net/syntax/parse.go delete mode 100644 src/vendor/go.starlark.net/syntax/quote.go delete mode 100644 src/vendor/go.starlark.net/syntax/scan.go delete mode 100644 src/vendor/go.starlark.net/syntax/syntax.go delete mode 100644 src/vendor/go.starlark.net/syntax/walk.go delete mode 100644 src/vendor/golang.org/x/crypto/cast5/cast5.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/armor/armor.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/armor/encode.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/canonical_text.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/errors/errors.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/keys.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/packet/compressed.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/packet/config.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/packet/literal.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/packet/opaque.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/packet/packet.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/packet/private_key.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/packet/public_key.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/packet/reader.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/packet/signature.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/packet/userid.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/read.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go delete mode 100644 src/vendor/golang.org/x/crypto/openpgp/write.go delete mode 100644 src/vendor/golang.org/x/sync/LICENSE delete mode 100644 src/vendor/golang.org/x/sync/PATENTS delete mode 100644 src/vendor/golang.org/x/sync/errgroup/errgroup.go delete mode 100644 src/vendor/golang.org/x/sync/semaphore/semaphore.go delete mode 100644 src/vendor/golang.org/x/sys/execabs/execabs.go delete mode 100644 src/vendor/golang.org/x/sys/execabs/execabs_go118.go delete mode 100644 src/vendor/golang.org/x/sys/execabs/execabs_go119.go delete mode 100644 src/vendor/golang.org/x/text/encoding/encoding.go delete mode 100644 src/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go delete mode 100644 src/vendor/golang.org/x/text/encoding/internal/identifier/mib.go delete mode 100644 
src/vendor/golang.org/x/text/encoding/internal/internal.go delete mode 100644 src/vendor/golang.org/x/text/encoding/unicode/override.go delete mode 100644 src/vendor/golang.org/x/text/encoding/unicode/unicode.go delete mode 100644 src/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go delete mode 100644 src/vendor/helm.sh/helm/v3/cmd/helm/search/search.go delete mode 100644 src/vendor/helm.sh/helm/v3/internal/fileutil/fileutil.go delete mode 100644 src/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go delete mode 100644 src/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename.go delete mode 100644 src/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename_windows.go delete mode 100644 src/vendor/helm.sh/helm/v3/internal/tlsutil/cfg.go delete mode 100644 src/vendor/helm.sh/helm/v3/internal/tlsutil/tls.go delete mode 100644 src/vendor/helm.sh/helm/v3/internal/urlutil/urlutil.go delete mode 100644 src/vendor/helm.sh/helm/v3/internal/version/version.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/chartutil/chartfile.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/chartutil/compatible.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/chartutil/create.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/chartutil/doc.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/chartutil/errors.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/chartutil/expand.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/chartutil/save.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/chartutil/validate_name.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/chartutil/values.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/cli/environment.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/getter/doc.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/getter/getter.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/getter/ocigetter.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/getter/plugingetter.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/helmpath/home.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_darwin.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_unix.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_windows.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/helmpath/xdg/xdg.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/plugin/hooks.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/provenance/doc.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/provenance/sign.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/registry/client.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/registry/constants.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/registry/util.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/repo/doc.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/repo/index.go delete mode 100644 src/vendor/helm.sh/helm/v3/pkg/repo/repo.go delete mode 100644 
src/vendor/k8s.io/apiextensions-apiserver/LICENSE delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/doc.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/register.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/.import-restrictions delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/deepcopy.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/defaults.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/register.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.defaults.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/.import-restrictions delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/conversion.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/register.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go delete mode 100644 
src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.prerelease-lifecycle.go delete mode 100644 src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go delete mode 100644 src/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go delete mode 100644 src/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS delete mode 100644 src/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go delete mode 100644 src/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go delete mode 100644 src/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go delete mode 100644 src/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go delete mode 100644 src/vendor/k8s.io/apimachinery/pkg/api/meta/help.go delete mode 100644 src/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go delete mode 100644 src/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go delete mode 100644 src/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go delete mode 100644 src/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go delete mode 100644 src/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go delete mode 100644 src/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go delete mode 100644 src/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme/scheme.go delete mode 100644 src/vendor/k8s.io/apimachinery/pkg/util/duration/duration.go delete mode 100644 src/vendor/k8s.io/cli-runtime/LICENSE delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags_fake.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/client_config.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/command_headers.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags_fake.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/doc.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/filename_flags.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/io_options.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/json_yaml_flags.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/kube_template_flags.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/name_flags.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/print_flags.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/record_flags.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/printers/discard.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/printers/doc.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/printers/interface.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/printers/json.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/printers/jsonpath.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/printers/managedfields.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/printers/name.go delete mode 100644 
src/vendor/k8s.io/cli-runtime/pkg/printers/sourcechecker.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/printers/tableprinter.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/printers/tabwriter.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/printers/template.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/printers/typesetter.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/printers/warningprinter.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/printers/yaml.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/resource/builder.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/resource/client.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/resource/crd_finder.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/resource/doc.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/resource/fake.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/resource/helper.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/resource/interfaces.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/resource/kustomizevisitor.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/resource/mapper.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/resource/metadata_decoder.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/resource/query_param_verifier.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/resource/result.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/resource/scheme.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/resource/selector.go delete mode 100644 src/vendor/k8s.io/cli-runtime/pkg/resource/visitor.go delete mode 100644 src/vendor/k8s.io/client-go/discovery/cached/disk/cached_discovery.go delete mode 100644 src/vendor/k8s.io/client-go/discovery/cached/disk/round_tripper.go delete mode 100644 src/vendor/k8s.io/client-go/discovery/discovery_client.go delete mode 100644 src/vendor/k8s.io/client-go/discovery/doc.go delete mode 100644 src/vendor/k8s.io/client-go/discovery/helper.go delete mode 100644 src/vendor/k8s.io/client-go/dynamic/interface.go delete mode 100644 src/vendor/k8s.io/client-go/dynamic/scheme.go delete mode 100644 src/vendor/k8s.io/client-go/dynamic/simple.go delete mode 100644 src/vendor/k8s.io/client-go/openapi/cached/client.go delete mode 100644 src/vendor/k8s.io/client-go/openapi/cached/groupversion.go delete mode 100644 src/vendor/k8s.io/client-go/openapi/client.go delete mode 100644 src/vendor/k8s.io/client-go/openapi/groupversion.go delete mode 100644 src/vendor/k8s.io/client-go/restmapper/category_expansion.go delete mode 100644 src/vendor/k8s.io/client-go/restmapper/discovery.go delete mode 100644 src/vendor/k8s.io/client-go/restmapper/shortcut.go delete mode 100644 src/vendor/k8s.io/client-go/third_party/forked/golang/LICENSE delete mode 100644 src/vendor/k8s.io/client-go/third_party/forked/golang/PATENTS delete mode 100644 src/vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go delete mode 100644 src/vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go delete mode 100644 src/vendor/k8s.io/client-go/tools/auth/OWNERS delete mode 100644 src/vendor/k8s.io/client-go/tools/auth/clientauth.go delete mode 100644 src/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go delete mode 100644 src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go delete mode 100644 src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/defaults.go delete mode 100644 src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go delete mode 100644 
src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go delete mode 100644 src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go delete mode 100644 src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go delete mode 100644 src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go delete mode 100644 src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.defaults.go delete mode 100644 src/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go delete mode 100644 src/vendor/k8s.io/client-go/tools/clientcmd/client_config.go delete mode 100644 src/vendor/k8s.io/client-go/tools/clientcmd/config.go delete mode 100644 src/vendor/k8s.io/client-go/tools/clientcmd/doc.go delete mode 100644 src/vendor/k8s.io/client-go/tools/clientcmd/flag.go delete mode 100644 src/vendor/k8s.io/client-go/tools/clientcmd/helpers.go delete mode 100644 src/vendor/k8s.io/client-go/tools/clientcmd/loader.go delete mode 100644 src/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go delete mode 100644 src/vendor/k8s.io/client-go/tools/clientcmd/overrides.go delete mode 100644 src/vendor/k8s.io/client-go/tools/clientcmd/validation.go delete mode 100644 src/vendor/k8s.io/client-go/util/homedir/homedir.go delete mode 100644 src/vendor/k8s.io/client-go/util/jsonpath/doc.go delete mode 100644 src/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go delete mode 100644 src/vendor/k8s.io/client-go/util/jsonpath/node.go delete mode 100644 src/vendor/k8s.io/client-go/util/jsonpath/parser.go delete mode 100644 src/vendor/k8s.io/kube-openapi/LICENSE delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/builder3/util/util.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/common/common.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/common/doc.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/common/interfaces.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/internal/handler/handler_cache.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/openapiconv/convert.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/schemamutation/walker.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/spec3/component.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/spec3/example.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/spec3/header.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/spec3/path.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/spec3/response.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/spec3/server.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/validation/spec/.gitignore delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/validation/spec/LICENSE delete mode 100644 
src/vendor/k8s.io/kube-openapi/pkg/validation/spec/contact_info.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/validation/spec/external_docs.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/validation/spec/license.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go delete mode 100644 src/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go delete mode 100644 src/vendor/k8s.io/utils/pointer/OWNERS delete mode 100644 src/vendor/k8s.io/utils/pointer/README.md delete mode 100644 src/vendor/k8s.io/utils/pointer/pointer.go delete mode 100644 src/vendor/oras.land/oras-go/LICENSE delete mode 100644 src/vendor/oras.land/oras-go/pkg/artifact/consts.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/auth/client.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/auth/client_opts.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/auth/docker/client.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/auth/docker/login.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/auth/docker/login_tls.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/auth/docker/logout.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/auth/docker/resolver.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/content/consts.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/content/decompress.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/content/errors.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/content/file.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/content/gunzip.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/content/interface.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/content/iowriter.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/content/manifest.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/content/memory.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/content/multireader.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/content/multiwriter.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/content/oci.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/content/opts.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/content/passthrough.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/content/readerat.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/content/registry.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/content/untar.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/content/utils.go 
delete mode 100644 src/vendor/oras.land/oras-go/pkg/context/context.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/context/logger.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/oras/copy.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/oras/errors.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/oras/opts.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/oras/provider.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/oras/store.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/registry/reference.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/registry/remote/auth/cache.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/registry/remote/auth/challenge.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/registry/remote/auth/client.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/registry/remote/auth/credential.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/registry/remote/auth/scope.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/registry/remote/internal/errutil/errors.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/registry/remote/internal/syncutil/once.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/registry/remote/repository.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/registry/remote/url.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/registry/remote/utils.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/registry/repository.go delete mode 100644 src/vendor/oras.land/oras-go/pkg/target/target.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/LICENSE delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/annotations/annotations.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/annotations/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/fieldspec/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/fieldspec/fieldspec.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/filtersutil/setters.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/fsslice/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/fsslice/fsslice.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/iampolicygenerator/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/iampolicygenerator/iampolicygenerator.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/imagetag.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/legacy.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/updater.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/labels/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/labels/labels.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/nameref/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/nameref/nameref.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/nameref/seqfilter.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/namespace/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/namespace/namespace.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/patchjson6902/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/patchjson6902/patchjson6902.go delete mode 100644 
src/vendor/sigs.k8s.io/kustomize/api/filters/patchstrategicmerge/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/patchstrategicmerge/patchstrategicmerge.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/prefix/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/prefix/prefix.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/refvar/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/refvar/expand.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/refvar/refvar.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/replacement/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/replacement/replacement.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/replicacount/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/replicacount/replicacount.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/suffix/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/suffix/suffix.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/filters/valueadd/valueadd.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/hasher/hasher.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/ifc/ifc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/image/image.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/loadconfigfromcrds.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/namereferencetransformer.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/refvartransformer.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/resaccumulator.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/AnnotationsTransformer.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ConfigMapGenerator.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/HashTransformer.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/HelmChartInflationGenerator.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/IAMPolicyGenerator.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ImageTagTransformer.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/LabelTransformer.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/LegacyOrderTransformer.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/NamespaceTransformer.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchJson6902Transformer.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchStrategicMergeTransformer.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchTransformer.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PrefixTransformer.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ReplacementTransformer.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ReplicaCountTransformer.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SecretGenerator.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SuffixTransformer.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ValueAddTransformer.go delete 
mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/generators/configmap.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/generators/secret.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/generators/utils.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/git/cloner.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/git/gitrunner.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/git/repospec.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/kusterr/yamlformaterror.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/loaddefaultconfig.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/namebackreferences.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/transformerconfig.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers/builtinplugintype_string.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers/builtins.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/execplugin/execplugin.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/fnplugin/fnplugin.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/loader.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/utils/utils.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/target/errmissingkustomization.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget_configplugin.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/target/multitransformer.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/utils/annotations.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/utils/errtimeout.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/utils/makeResIds.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/utils/stringslice.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/utils/timedcall.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/internal/validate/fieldvalidator.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/commonannotations.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/commonlabels.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/defaultconfig.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/images.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/nameprefix.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namereference.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namespace.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namesuffix.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/replicas.go delete mode 
100644 src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/varreference.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/konfig/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/konfig/general.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/konfig/plugins.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/krusty/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/krusty/kustomizer.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/krusty/options.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/kv/kv.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/loader/errors.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/loader/fileloader.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/loader/loader.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/loader/loadrestrictions.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/provenance/provenance.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/provider/depprovider.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/resmap/factory.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/resmap/idslice.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/resmap/resmap.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/resmap/reswrangler.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/resource/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/resource/factory.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/resource/idset.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/resource/origin.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/resource/resource.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/builtinpluginloadingoptions_string.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/configmapargs.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/erronlybuiltinpluginsallowed.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/errunabletofind.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/fieldspec.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/fix.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/generationbehavior.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/generatorargs.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/generatoroptions.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/helmchartargs.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/iampolicygenerator.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/image.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/inventory.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/kustomization.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/kvpairsources.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/labels.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/loadrestrictions.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/loadrestrictions_string.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/objectmeta.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/pair.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/patch.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/patchstrategicmerge.go 
delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/pluginconfig.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/pluginrestrictions.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/pluginrestrictions_string.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/replacement.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/replacementfield.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/replica.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/secretargs.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/selector.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/typemeta.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/api/types/var.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/LICENSE delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/comments/comments.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/errors/errors.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/ext/ext.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/fieldmeta/fieldmeta.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/confirmeddir.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/file.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fileinfo.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fileondisk.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/filesystem.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsnode.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsondisk.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsondisk_unix.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsondisk_windows.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/util.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/container/container.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/exec/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/exec/exec.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/functiontypes.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/runtimeutil.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/types.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark/context.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark/starlark.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/LICENSE delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/NOTICE delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/README.md delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/apic.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/decode.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/emitterc.go delete 
mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/encode.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/parserc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/readerc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/resolve.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/scannerc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/sorter.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/writerc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yaml.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yamlh.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yamlprivateh.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/LICENSE delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/util.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/kio/byteio_reader.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/kio/byteio_writer.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/kio/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/filters.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/fmtr.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/grep.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/local.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/merge.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/merge3.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/modify.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/stripcomments.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/kio/ignorefilesmatcher.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/kio/kio.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/kio/kioutil/kioutil.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/kio/pkgio_reader.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/kio/pkgio_writer.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/kio/tree.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/Makefile delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/README.md delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/openapiinfo.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1212/swagger.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1212/swagger.pb delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi/swagger.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi/swagger.json delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/openapi.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/order/syncorder.go delete mode 100644 
src/vendor/sigs.k8s.io/kustomize/kyaml/resid/gvk.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/resid/resid.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/runfn/runfn.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/sets/string.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/sets/stringlist.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/sliceutil/slice.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/utils/pathsplitter.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/alias.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/compatibility.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/const.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/datamap.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/doc.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/filters.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/fns.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels/copied.deepcopy.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels/labels.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels/selector.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/selection/operator.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/errors/errors.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets/empty.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets/string.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/field/errors.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/field/path.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/validation.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/kfns.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/mapnode.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/match.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/merge2.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/smpdirective.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/smpdirective_string.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge3/merge3.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge3/visitor.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/order.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/rnode.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/schema/schema.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/types.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/util.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/associative_sequence.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/map.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/nonassociative_sequence.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/scalar.go delete mode 100644 src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/visitor.go delete mode 100644 
src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/walk.go diff --git a/src/go.mod b/src/go.mod index 5986daa2d..3c984aebd 100644 --- a/src/go.mod +++ b/src/go.mod @@ -46,7 +46,7 @@ require ( github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 github.com/olekukonko/tablewriter v0.0.5 github.com/opencontainers/go-digest v1.0.0 - github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 + github.com/opencontainers/image-spec v1.1.0-rc2 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.13.0 github.com/robfig/cron v1.0.0 // indirect @@ -84,7 +84,6 @@ require ( require ( cloud.google.com/go v0.99.0 // indirect github.com/Azure/azure-sdk-for-go v37.2.0+incompatible // indirect - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.27 // indirect github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect @@ -95,6 +94,7 @@ require ( github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c // indirect github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible // indirect github.com/Masterminds/semver/v3 v3.1.1 // indirect + github.com/Microsoft/go-winio v0.5.1 // indirect github.com/PuerkitoBio/purell v1.1.1 // indirect github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/Unknwon/goconfig v0.0.0-20160216183935-5f601ca6ef4d // indirect @@ -105,24 +105,14 @@ require ( github.com/bugsnag/panicwrap v1.2.0 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/cloudflare/cfssl v0.0.0-20190510060611-9c027c93ba9e // indirect - github.com/containerd/containerd v1.6.6 // indirect - github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/denverdino/aliyungo v0.0.0-20191227032621-df38c6fa730c // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/dnaeon/go-vcr v1.2.0 // indirect - github.com/docker/cli v20.10.17+incompatible // indirect - github.com/docker/docker v20.10.17+incompatible // indirect - github.com/docker/docker-credential-helpers v0.6.4 // indirect github.com/docker/go v0.0.0-20160303222718-d30aec9fd63c // indirect - github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-metrics v0.0.1 // indirect - github.com/docker/go-units v0.4.0 // indirect - github.com/emicklei/go-restful/v3 v3.8.0 // indirect - github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/felixge/httpsnoop v1.0.2 // indirect github.com/fsnotify/fsnotify v1.4.9 // indirect - github.com/go-errors/errors v1.0.1 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/analysis v0.19.10 // indirect @@ -131,22 +121,16 @@ require ( github.com/go-stack/stack v1.8.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/google/btree v1.0.1 // indirect github.com/google/certificate-transparency-go v1.0.21 // indirect - github.com/google/gnostic v0.5.7-v3refs // indirect github.com/google/go-querystring v1.0.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/gorilla/securecookie v1.1.1 // indirect - github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect 
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/imdario/mergo v0.3.12 // indirect - github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451 // indirect github.com/jackc/pgio v1.0.0 // indirect @@ -157,25 +141,15 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.13.6 // indirect - github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/magiconair/properties v1.8.5 // indirect github.com/mailru/easyjson v0.7.6 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/locker v1.0.1 // indirect - github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect - github.com/morikuni/aec v1.0.0 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pelletier/go-toml v1.9.3 // indirect - github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.37.0 // indirect @@ -185,16 +159,11 @@ require ( github.com/sirupsen/logrus v1.8.1 // indirect github.com/spf13/afero v1.6.0 // indirect github.com/spf13/cast v1.4.1 // indirect - github.com/spf13/cobra v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.4.0 // indirect github.com/subosito/gotenv v1.2.0 // indirect github.com/vmihailenco/tagparser v0.1.2 // indirect - github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect - github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect - github.com/xeipuuv/gojsonschema v1.2.0 // indirect - github.com/xlab/treeprint v1.1.0 // indirect go.mongodb.org/mongo-driver v1.7.0 // indirect go.opentelemetry.io/contrib v0.22.0 // indirect go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0 // indirect @@ -202,9 +171,7 @@ require ( go.opentelemetry.io/otel/internal/metric v0.22.0 // indirect go.opentelemetry.io/otel/metric v0.22.0 // indirect go.opentelemetry.io/proto/otlp v0.11.0 // indirect - go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect go.uber.org/atomic v1.7.0 // indirect - golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect golang.org/x/sys v0.2.0 // indirect golang.org/x/term v0.2.0 // indirect google.golang.org/api v0.61.0 // indirect @@ -220,15 +187,9 @@ require ( gopkg.in/ini.v1 v1.62.0 // indirect gopkg.in/square/go-jose.v2 v2.5.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - 
k8s.io/apiextensions-apiserver v0.25.2 // indirect - k8s.io/cli-runtime v0.25.2 // indirect k8s.io/klog/v2 v2.70.1 // indirect - k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect - oras.land/oras-go v1.2.0 // indirect sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect - sigs.k8s.io/kustomize/api v0.12.1 // indirect - sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/src/go.sum b/src/go.sum index 234a2e0f6..bbc0763fd 100644 --- a/src/go.sum +++ b/src/go.sum @@ -99,6 +99,7 @@ github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JP github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= +github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= @@ -107,7 +108,6 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2 github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= -github.com/Microsoft/hcsshim v0.9.3 h1:k371PzBuRrz2b+ebGuI2nVgVhgsVX60jMfSw80NECxo= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -206,7 +206,6 @@ github.com/bmatcuk/doublestar v1.1.1 h1:YroD6BJCZBYx06yYFEWvUuKVWQn3vLLQAVmDmvTS github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/bugsnag/bugsnag-go v1.5.2 h1:fdaGJJEReigPzSE6HajOhpJwE2IEP/TdHDHXKGeOJtc= github.com/bugsnag/bugsnag-go v1.5.2/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= @@ -267,7 +266,6 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= -github.com/containerd/cgroups v1.0.3 
h1:ADZftAkglvCiD44c77s5YmMqaP2pzVCFZvBmAlBdAP4= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= @@ -287,8 +285,6 @@ github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09Zvgq github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= -github.com/containerd/containerd v1.6.6 h1:xJNPhbrmz8xAMDNoVjHy9YHtWwEQNS+CDkcIRh7t8Y0= -github.com/containerd/containerd v1.6.6/go.mod h1:ZoP1geJldzCVY3Tonoz7b1IXk8rIX0Nltt5QE4OMNk0= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -358,20 +354,15 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= -github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= -github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= -github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -389,22 +380,15 @@ github.com/dhui/dktest v0.3.7 h1:jWjWgHAPDAdqgUr7lAsB3bqB2DKWC3OaA+isfekjRew= github.com/dhui/dktest v0.3.7/go.mod 
h1:nYMOkafiA07WchSwKnKFUSbGMb2hMm5DrCGiXYG6gwM= github.com/distribution/distribution v2.8.1+incompatible h1:8iXUoOqRPx30bhzIEPUmNIqlmBlWdrieW1bqr6LrX30= github.com/distribution/distribution v2.8.1+incompatible/go.mod h1:EgLm2NgWtdKgzF9NpMzUKgzmR7AMmb0VQi2B+ZzDRjc= -github.com/distribution/distribution/v3 v3.0.0-20220526142353-ffbd94cbe269 h1:hbCT8ZPPMqefiAWD2ZKjn7ypokIGViTvBBg/ExLSdCk= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M= -github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/docker v20.10.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE= -github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o= -github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c= github.com/docker/go v0.0.0-20160303222718-d30aec9fd63c h1:Ggg7IiOtghyZzn3ozi31kPHpV6qSjMgmesXaWCijYNM= github.com/docker/go v0.0.0-20160303222718-d30aec9fd63c/go.mod h1:CADgU4DSXK5QUlFslkQu2yW2TKzFZcXq/leZfM0UH5Q= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= -github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= @@ -427,7 +411,6 @@ github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkg github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= -github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -440,8 +423,6 @@ github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go. 
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o= @@ -467,8 +448,6 @@ github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0 github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-asn1-ber/asn1-ber v1.5.1 h1:pDbRAunXzIUXfx4CB2QJFv5IuPiuoW+sWvr/Us009o8= github.com/go-asn1-ber/asn1-ber v1.5.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= -github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= -github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= @@ -637,7 +616,6 @@ github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -673,13 +651,10 @@ github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNu github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/certificate-transparency-go v1.0.21 h1:Yf1aXowfZ2nuboBsg7iYGLmwsOARdV86pfH3g95wXmE= github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= 
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -712,8 +687,6 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20210715191844-86eeefc3e471/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -744,7 +717,6 @@ github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoA github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/graph-gophers/dataloader v5.0.0+incompatible h1:R+yjsbrNq1Mo3aPG+Z/EKYrXrXXUNJHOgbRt+U6jOug= github.com/graph-gophers/dataloader v5.0.0+incompatible/go.mod h1:jk4jk0c5ZISbKaMe8WsVopGB5/15GvGHMdMdPtwlRp4= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -798,9 +770,7 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= @@ -917,7 +887,6 @@ github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= github.com/klauspost/compress v1.13.6/go.mod 
h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -943,8 +912,6 @@ github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= -github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= -github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= @@ -991,8 +958,6 @@ github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= @@ -1005,13 +970,9 @@ github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= -github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= -github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= @@ -1024,8 +985,6 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/modern-go/reflect2 v1.0.2 
h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= -github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= -github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= @@ -1090,8 +1049,8 @@ github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go. github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= +github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -1128,9 +1087,7 @@ github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrap github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= @@ -1210,7 +1167,6 @@ github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod 
h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= @@ -1219,7 +1175,6 @@ github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/shiena/ansicolor v0.0.0-20200904210342-c7312218db18 h1:DAYUYH5869yV94zvCES9F51oYtN5oGlwjxJJz7ZCnik= github.com/shiena/ansicolor v0.0.0-20200904210342-c7312218db18/go.mod h1:nkxAfR/5quYxwPZhyDxgasBMnRtBZd0FCEpawpjMUFg= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= @@ -1257,8 +1212,6 @@ github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU= -github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -1272,7 +1225,6 @@ github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/y github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= @@ -1329,16 +1281,10 @@ github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+ github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 
h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= -github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1346,9 +1292,6 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI= -github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE= -github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -1372,7 +1315,6 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/contrib v0.22.0 h1:0F7gDEjgb1WGn4ODIjaCAg75hmqF+UN0LiVgwxsCodc= go.opentelemetry.io/contrib v0.22.0/go.mod h1:EH4yDYeNoaTqn/8yCWQmfNB78VHfGX2Jt2bvnvzBlGM= @@ -1416,8 +1358,6 @@ go.opentelemetry.io/otel/trace v1.8.0/go.mod h1:0Bt3PXY8w+3pheS3hQUt+wow8b1ojPaT go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.11.0 h1:cLDgIBTf4lLOlztkhzAEdQsJ4Lj+i5Wc9k6Nn0K1VyU= go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= -go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc= -go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod 
h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -1598,8 +1538,6 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1634,7 +1572,6 @@ golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1844,7 +1781,6 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210207032614-bba0dbe2a9ea/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= @@ -1961,10 +1897,8 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/postgres v1.0.8/go.mod h1:4eOzrI1MUfm6ObJU/UcmbXyiHSs8jSwH95G5P5dxcAg= gorm.io/gorm v1.20.12/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= gorm.io/gorm v1.21.4/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= helm.sh/helm/v3 v3.10.3 
h1:wL7IUZ7Zyukm5Kz0OUmIFZgKHuAgByCrUcJBtY0kDyw= helm.sh/helm/v3 v3.10.3/go.mod h1:CXOcs02AYvrlPMWARNYNRgf2rNP7gLJQsi/Ubd4EDrI= @@ -1981,8 +1915,6 @@ k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/api v0.25.2 h1:v6G8RyFcwf0HR5jQGIAYlvtRNrxMJQG1xJzaSeVnIS8= k8s.io/api v0.25.2/go.mod h1:qP1Rn4sCVFwx/xIhe+we2cwBLTXNcheRyYXwajonhy0= -k8s.io/apiextensions-apiserver v0.25.2 h1:8uOQX17RE7XL02ngtnh3TgifY7EhekpK+/piwzQNnBo= -k8s.io/apiextensions-apiserver v0.25.2/go.mod h1:iRwwRDlWPfaHhuBfQ0WMa5skdQfrE18QXJaJvIDLvE8= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= @@ -1991,8 +1923,6 @@ k8s.io/apimachinery v0.25.2/go.mod h1:hqqA1X0bsgsxI6dXsJ4HnNTBOmJNxyPp8dw3u2fSHw k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= -k8s.io/cli-runtime v0.25.2 h1:XOx+SKRjBpYMLY/J292BHTkmyDffl/qOx3YSuFZkTuc= -k8s.io/cli-runtime v0.25.2/go.mod h1:OQx3+/0st6x5YpkkJQlEWLC73V0wHsOFMC1/roxV8Oc= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= @@ -2012,7 +1942,6 @@ k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ= k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= @@ -2043,8 +1972,6 @@ modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/z v1.0.1-0.20210308123920-1f282aa71362/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= modernc.org/z v1.0.1/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= modernc.org/zappy v1.0.0/go.mod h1:hHe+oGahLVII/aTTyWK/b53VDHMAGCBYYeZ9sn83HC4= -oras.land/oras-go v1.2.0 h1:yoKosVIbsPoFMqAIFHTnrmOuafHal+J/r+I5bdbVWu4= -oras.land/oras-go v1.2.0/go.mod h1:pFNs7oHp2dYsYMSS82HaX5l4mpnGO7hbpPN6EWH2ltc= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= @@ -2053,10 +1980,6 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyz sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod 
h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM= -sigs.k8s.io/kustomize/api v0.12.1/go.mod h1:y3JUhimkZkR6sbLNwfJHxvo1TCLwuwm14sCYnkH6S1s= -sigs.k8s.io/kustomize/kyaml v0.13.9 h1:Qz53EAaFFANyNgyOEJbT/yoIHygK40/ZcvU3rgry2Tk= -sigs.k8s.io/kustomize/kyaml v0.13.9/go.mod h1:QsRbD0/KcU+wdk0/L0fIp2KLnohkVzs6fQ85/nOXac4= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= diff --git a/src/lib/patterns.go b/src/lib/patterns.go index 9af942435..7ae9d1df4 100644 --- a/src/lib/patterns.go +++ b/src/lib/patterns.go @@ -28,6 +28,8 @@ var ( V2BlobUploadURLRe = regexp.MustCompile(fmt.Sprintf(`^/v2/(?P<%s>%s)/blobs/uploads[/a-zA-Z0-9\-_\.=]*$`, RepositorySubexp, reference.NameRegexp.String())) // V2CatalogURLRe is the regular expression for matching the request to v2 handler to list catalog V2CatalogURLRe = regexp.MustCompile(`^/v2/_catalog(/.*)?$`) + // V2ReferrersURLRe is the regular expression for matching request to v2 handler to list referrers + V2ReferrersURLRe = regexp.MustCompile(fmt.Sprintf(`^/v2/(?P<%s>%s)/referrers/(?P<%s>.*)$`, RepositorySubexp, reference.NameRegexp.String(), ReferenceSubexp)) // RepositoryNameRe is the regular expression for matching repository name RepositoryNameRe = regexp.MustCompile(fmt.Sprintf("^%s$", reference.NameRegexp)) ) diff --git a/src/lib/patterns_test.go b/src/lib/patterns_test.go index c4270b514..842679127 100644 --- a/src/lib/patterns_test.go +++ b/src/lib/patterns_test.go @@ -109,6 +109,41 @@ func TestMatchCatalogURLPattern(t *testing.T) { } } +func TestMatchReferrersURLPattern(t *testing.T) { + cases := []struct { + url string + match bool + }{ + { + url: "/v2/library/hello-world/referrers/!@#!@#%", + match: true, + }, + { + url: "/v2/library/hello-world/referrers/test", + match: true, + }, + { + url: "/v2/library/hello-world/referrers/sha256:e5785cb0c62cebbed4965129bae371f0589cadd6d84798fb58c2c5f9e237efd9", + match: true, + }, + { + url: "/v2/library/hello-world/referrers/e5785cb0c62cebbed4965129bae371f0589cadd6d84798fb58c2c5f9e237efd9", + match: true, + }, + { + url: "/v2/library/hello-world/referrers/.Invalid", + match: true, + }, + { + url: "/v2/library/hello-world/referrers//v2/library/photon/referrers/sha256:0000000000000000000000000000000000000000000000000000000000000000", + match: true, + }, + } + for _, c := range cases { + assert.Equal(t, c.match, V2ReferrersURLRe.MatchString(c.url), "failed for %s", c.url) + } +} + func TestRepositoryNamePattern(t *testing.T) { assert := assert.New(t) assert.False(RepositoryNameRe.MatchString("a/*")) diff --git a/src/server/middleware/artifactinfo/artifact_info.go b/src/server/middleware/artifactinfo/artifact_info.go index 20344bc17..36658d9d6 100644 --- a/src/server/middleware/artifactinfo/artifact_info.go +++ b/src/server/middleware/artifactinfo/artifact_info.go @@ -44,6 +44,7 @@ var ( "tag_list": lib.V2TagListURLRe, "blob_upload": lib.V2BlobUploadURLRe, "blob": lib.V2BlobURLRe, + "referrers": lib.V2ReferrersURLRe, } ) diff --git a/src/server/middleware/metric/metric.go b/src/server/middleware/metric/metric.go index dfd43b3ff..6480d7816 100644 --- a/src/server/middleware/metric/metric.go +++ b/src/server/middleware/metric/metric.go @@ -25,6 +25,8 @@ const ( BlobsOperationID = "v2_blob" // 
BlobsUploadOperationID ... BlobsUploadOperationID = "v2_blob_upload" + // ReferrersOperationID ... + ReferrersOperationID = "v2_referrers" // OthersOperationID ... OthersOperationID = "v2_others" ) diff --git a/src/server/registry/referrers.go b/src/server/registry/referrers.go new file mode 100644 index 000000000..a6099bc39 --- /dev/null +++ b/src/server/registry/referrers.go @@ -0,0 +1,173 @@ +package registry + +import ( + "encoding/json" + "net/http" + + "github.com/go-openapi/swag" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + + "github.com/goharbor/harbor/src/lib/errors" + lib_http "github.com/goharbor/harbor/src/lib/http" + "github.com/goharbor/harbor/src/lib/q" + "github.com/goharbor/harbor/src/pkg/accessory" + "github.com/goharbor/harbor/src/pkg/artifact" + "github.com/goharbor/harbor/src/server/router" + "github.com/goharbor/harbor/src/server/v2.0/handler" +) + +const ReferrersSchemaVersion = 2 +const ReferrersMediaType = "application/vnd.oci.image.index.v1+json" + +func newReferrersHandler() http.Handler { + return &referrersHandler{ + artifactManager: artifact.NewManager(), + accessoryManager: accessory.NewManager(), + } +} + +type referrersHandler struct { + artifactManager artifact.Manager + accessoryManager accessory.Manager +} + +func (r *referrersHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + + repository := router.Param(ctx, ":splat") + reference := router.Param(ctx, ":reference") + at := req.URL.Query().Get("artifactType") + + // Check if the reference is a valid digest + if _, err := digest.Parse(reference); err != nil { + lib_http.SendError(w, errors.Wrapf(err, "unsupported digest %s", reference).WithCode(errors.BadRequestCode)) + return + } + + result := &ocispec.Index{} + + // Get the artifact by reference + art, err := r.artifactManager.GetByDigest(ctx, repository, reference) + if err != nil { + if errors.IsNotFoundErr(err) { + // If artifact not found, return empty index + newListReferrersOK().WithPayload(result).WriteResponse(w) + return + } + lib_http.SendError(w, err) + return + } + + // Query accessories with matching subject artifact digest and artifactType + query := q.New(q.KeyWords{"SubjectArtifactDigest": art.Digest}) + if at != "" { + query = q.New(q.KeyWords{"SubjectArtifactDigest": art.Digest, "Type": at}) + } + total, err := r.accessoryManager.Count(ctx, query) + if err != nil { + lib_http.SendError(w, err) + return + } + accs, err := r.accessoryManager.List(ctx, query) + if err != nil { + lib_http.SendError(w, err) + return + } + + // Build index manifest from accessories + var mfs []ocispec.Descriptor + for _, acc := range accs { + accArt, err := r.artifactManager.GetByDigest(ctx, repository, acc.GetData().Digest) + if err != nil { + lib_http.SendError(w, err) + return + } + mf := ocispec.Descriptor{ + MediaType: accArt.ManifestMediaType, + Size: accArt.Size, + Digest: digest.Digest(accArt.Digest), + Annotations: accArt.Annotations, + ArtifactType: acc.GetData().Type, + } + mfs = append(mfs, mf) + } + + // Populate index manifest + result.SchemaVersion = ReferrersSchemaVersion + result.MediaType = ReferrersMediaType + result.Manifests = mfs + + // Write response with index manifest and headers + baseAPI := &handler.BaseAPI{} + newListReferrersOK(). + WithXTotalCount(total). + WithLink(baseAPI.Links(ctx, req.URL, total, query.PageNumber, query.PageSize).String()). 
+ WithPayload(result).WriteResponse(w) +} + +type listReferrersOK struct { + /*Link refers to the previous and next pages + + */ + Link string `json:"Link"` + /*The total count of accessories + + */ + XTotalCount int64 `json:"X-Total-Count"` + + /* + In: Body + */ + Payload *ocispec.Index `json:"body,omitempty"` +} + +// newListReferrersOK creates a listReferrersOK with default header values +func newListReferrersOK() *listReferrersOK { + return &listReferrersOK{} } + +// WithLink adds the link to the list referrers OK response +func (o *listReferrersOK) WithLink(link string) *listReferrersOK { + o.Link = link + return o +} + +// WithXTotalCount adds the xTotalCount to the list referrers OK response +func (o *listReferrersOK) WithXTotalCount(xTotalCount int64) *listReferrersOK { + o.XTotalCount = xTotalCount + return o +} + +// WithPayload adds the payload to the list referrers OK response +func (o *listReferrersOK) WithPayload(payload *ocispec.Index) *listReferrersOK { + o.Payload = payload + return o +} + +// WriteResponse to the client +func (o *listReferrersOK) WriteResponse(rw http.ResponseWriter) { + rw.Header().Set("Content-Type", "application/json; charset=utf-8") + + link := o.Link + if link != "" { + rw.Header().Set("Link", link) + } + xTotalCount := swag.FormatInt64(o.XTotalCount) + if xTotalCount != "" { + rw.Header().Set("X-Total-Count", xTotalCount) + } + + rw.WriteHeader(200) + payload := o.Payload + if payload == nil { + // return empty index + payload = &ocispec.Index{} + } + + enc := json.NewEncoder(rw) + if err := enc.Encode(payload); err != nil { + lib_http.SendError(rw, err) + return + } +} diff --git a/src/server/registry/referrers_test.go b/src/server/registry/referrers_test.go new file mode 100644 index 000000000..4c1a26fb7 --- /dev/null +++ b/src/server/registry/referrers_test.go @@ -0,0 +1,100 @@ +package registry + +import ( + "context" + "encoding/json" + beegocontext "github.com/beego/beego/v2/server/web/context" + "github.com/goharbor/harbor/src/lib/q" + accessorymodel "github.com/goharbor/harbor/src/pkg/accessory/model" + basemodel "github.com/goharbor/harbor/src/pkg/accessory/model/base" + "github.com/goharbor/harbor/src/pkg/artifact" + "github.com/goharbor/harbor/src/server/router" + "github.com/goharbor/harbor/src/testing/mock" + accessorytesting "github.com/goharbor/harbor/src/testing/pkg/accessory" + arttesting "github.com/goharbor/harbor/src/testing/pkg/artifact" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "net/http" + "net/http/httptest" + "testing" +) + +func TestReferrersHandlerOK(t *testing.T) { + rec := httptest.NewRecorder() + digestVal := "sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b" + req, err := http.NewRequest("GET", "/v2/test/repository/referrers/sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b", nil) + if err != nil { + t.Fatal(err) + } + input := &beegocontext.BeegoInput{} + input.SetParam(":reference", digestVal) + *req = *(req.WithContext(context.WithValue(req.Context(), router.ContextKeyInput{}, input))) + + artifactMock := &arttesting.Manager{} + accessoryMock := &accessorytesting.Manager{} + + artifactMock.On("GetByDigest", mock.Anything, mock.Anything, mock.Anything).
+ Return(&artifact.Artifact{ + Digest: digestVal, + ManifestMediaType: "application/vnd.oci.image.manifest.v1+json", + Size: 1000, + Annotations: map[string]string{ + "name": "test-image", + }, + }, nil) + + accessoryMock.On("Count", mock.Anything, q.New(q.KeyWords{"SubjectArtifactDigest": digestVal})). + Return(int64(1), nil) + accessoryMock.On("List", mock.Anything, q.New(q.KeyWords{"SubjectArtifactDigest": digestVal})). + Return([]accessorymodel.Accessory{ + &basemodel.Default{ + Data: accessorymodel.AccessoryData{ + ID: 1, + ArtifactID: 2, + SubArtifactDigest: digestVal, + Type: accessorymodel.TypeCosignSignature, + }, + }, + }, nil) + + handler := &referrersHandler{ + artifactManager: artifactMock, + accessoryManager: accessoryMock, + } + + handler.ServeHTTP(rec, req) + + // check that the response has the expected status code (200 OK) + if rec.Code != http.StatusOK { + t.Errorf("Expected status code %d, but got %d", http.StatusOK, rec.Code) + } + index := &ocispec.Index{} + json.Unmarshal([]byte(rec.Body.String()), index) + if index.Manifests[0].ArtifactType != "signature.cosign" { + t.Errorf("Expected response body %s, but got %s", "signature.cosign", rec.Body.String()) + } +} + +func TestReferrersHandler400(t *testing.T) { + rec := httptest.NewRecorder() + digestVal := "invalid" + req, err := http.NewRequest("GET", "/v2/test/repository/referrers/invalid", nil) + if err != nil { + t.Fatal(err) + } + input := &beegocontext.BeegoInput{} + input.SetParam(":reference", digestVal) + *req = *(req.WithContext(context.WithValue(req.Context(), router.ContextKeyInput{}, input))) + + artifactMock := &arttesting.Manager{} + accessoryMock := &accessorytesting.Manager{} + handler := &referrersHandler{ + artifactManager: artifactMock, + accessoryManager: accessoryMock, + } + + handler.ServeHTTP(rec, req) + // check that the response has the expected status code (400 Bad Request) + if rec.Code != http.StatusBadRequest { + t.Errorf("Expected status code %d, but got %d", http.StatusBadRequest, rec.Code) + } +} diff --git a/src/server/registry/route.go b/src/server/registry/route.go index 44fd02fba..cc4bb1b86 100644 --- a/src/server/registry/route.go +++ b/src/server/registry/route.go @@ -119,6 +119,11 @@ func RegisterRoutes() { Middleware(quota.PutBlobUploadMiddleware()). Middleware(blob.PutBlobUploadMiddleware()). Handler(proxy) + root.NewRoute(). + Method(http.MethodGet). + Path("/*/referrers/:reference"). + Middleware(metric.InjectOpIDMiddleware(metric.ReferrersOperationID)). + Handler(newReferrersHandler()) // others root.NewRoute().Path("/*").Middleware(metric.InjectOpIDMiddleware(metric.OthersOperationID)).Handler(proxy) } diff --git a/src/vendor/github.com/Azure/go-ansiterm/LICENSE b/src/vendor/github.com/Azure/go-ansiterm/LICENSE deleted file mode 100644 index e3d9a64d1..000000000 --- a/src/vendor/github.com/Azure/go-ansiterm/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Microsoft Corporation - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software.
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/src/vendor/github.com/Azure/go-ansiterm/README.md b/src/vendor/github.com/Azure/go-ansiterm/README.md deleted file mode 100644 index 261c041e7..000000000 --- a/src/vendor/github.com/Azure/go-ansiterm/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# go-ansiterm - -This is a cross platform Ansi Terminal Emulation library. It reads a stream of Ansi characters and produces the appropriate function calls. The results of the function calls are platform dependent. - -For example the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform specific work must be done to cause the cursor to move up one position. - -The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go). - -See parser_test.go for examples exercising the state machine and generating appropriate function calls. - ------ -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 
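(Aside on the vendored parser being dropped above: the README's example of the byte stream "ESC, [, A" driving a cursor-up call can be illustrated without the library. The sketch below is a deliberately minimal, standalone dispatcher for just that one CSI sequence; it does not use go-ansiterm's actual API, and names such as cursorUp and feed are hypothetical.)

package main

import "fmt"

// cursorUp stands in for the CUU() handler the README describes; in the real
// library this would be a method on an AnsiEventHandler implementation.
func cursorUp(n int) { fmt.Printf("cursor up %d\n", n) }

// feed scans a byte stream and dispatches only the CSI sequence ESC '[' <n> 'A'
// (cursor up); every other byte is treated as printable output. This is a toy
// dispatcher, not the vendored state machine.
func feed(b []byte) {
	for i := 0; i < len(b); i++ {
		if b[i] == 0x1B && i+1 < len(b) && b[i+1] == '[' {
			j, n := i+2, 0
			for j < len(b) && b[j] >= '0' && b[j] <= '9' {
				n = n*10 + int(b[j]-'0')
				j++
			}
			if j < len(b) && b[j] == 'A' {
				if n == 0 {
					n = 1 // a missing parameter defaults to 1, as in VT100
				}
				cursorUp(n)
				i = j
				continue
			}
		}
		fmt.Printf("print %q\n", b[i])
	}
}

func main() {
	feed([]byte("hi\x1b[2Athere")) // prints 'h', 'i', reports "cursor up 2", then prints the rest
}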
diff --git a/src/vendor/github.com/Azure/go-ansiterm/constants.go b/src/vendor/github.com/Azure/go-ansiterm/constants.go deleted file mode 100644 index 96504a33b..000000000 --- a/src/vendor/github.com/Azure/go-ansiterm/constants.go +++ /dev/null @@ -1,188 +0,0 @@ -package ansiterm - -const LogEnv = "DEBUG_TERMINAL" - -// ANSI constants -// References: -// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm -// -- http://man7.org/linux/man-pages/man4/console_codes.4.html -// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html -// -- http://en.wikipedia.org/wiki/ANSI_escape_code -// -- http://vt100.net/emu/dec_ansi_parser -// -- http://vt100.net/emu/vt500_parser.svg -// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html -// -- http://www.inwap.com/pdp10/ansicode.txt -const ( - // ECMA-48 Set Graphics Rendition - // Note: - // -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved - // -- Fonts could possibly be supported via SetCurrentConsoleFontEx - // -- Windows does not expose the per-window cursor (i.e., caret) blink times - ANSI_SGR_RESET = 0 - ANSI_SGR_BOLD = 1 - ANSI_SGR_DIM = 2 - _ANSI_SGR_ITALIC = 3 - ANSI_SGR_UNDERLINE = 4 - _ANSI_SGR_BLINKSLOW = 5 - _ANSI_SGR_BLINKFAST = 6 - ANSI_SGR_REVERSE = 7 - _ANSI_SGR_INVISIBLE = 8 - _ANSI_SGR_LINETHROUGH = 9 - _ANSI_SGR_FONT_00 = 10 - _ANSI_SGR_FONT_01 = 11 - _ANSI_SGR_FONT_02 = 12 - _ANSI_SGR_FONT_03 = 13 - _ANSI_SGR_FONT_04 = 14 - _ANSI_SGR_FONT_05 = 15 - _ANSI_SGR_FONT_06 = 16 - _ANSI_SGR_FONT_07 = 17 - _ANSI_SGR_FONT_08 = 18 - _ANSI_SGR_FONT_09 = 19 - _ANSI_SGR_FONT_10 = 20 - _ANSI_SGR_DOUBLEUNDERLINE = 21 - ANSI_SGR_BOLD_DIM_OFF = 22 - _ANSI_SGR_ITALIC_OFF = 23 - ANSI_SGR_UNDERLINE_OFF = 24 - _ANSI_SGR_BLINK_OFF = 25 - _ANSI_SGR_RESERVED_00 = 26 - ANSI_SGR_REVERSE_OFF = 27 - _ANSI_SGR_INVISIBLE_OFF = 28 - _ANSI_SGR_LINETHROUGH_OFF = 29 - ANSI_SGR_FOREGROUND_BLACK = 30 - ANSI_SGR_FOREGROUND_RED = 31 - ANSI_SGR_FOREGROUND_GREEN = 32 - ANSI_SGR_FOREGROUND_YELLOW = 33 - ANSI_SGR_FOREGROUND_BLUE = 34 - ANSI_SGR_FOREGROUND_MAGENTA = 35 - ANSI_SGR_FOREGROUND_CYAN = 36 - ANSI_SGR_FOREGROUND_WHITE = 37 - _ANSI_SGR_RESERVED_01 = 38 - ANSI_SGR_FOREGROUND_DEFAULT = 39 - ANSI_SGR_BACKGROUND_BLACK = 40 - ANSI_SGR_BACKGROUND_RED = 41 - ANSI_SGR_BACKGROUND_GREEN = 42 - ANSI_SGR_BACKGROUND_YELLOW = 43 - ANSI_SGR_BACKGROUND_BLUE = 44 - ANSI_SGR_BACKGROUND_MAGENTA = 45 - ANSI_SGR_BACKGROUND_CYAN = 46 - ANSI_SGR_BACKGROUND_WHITE = 47 - _ANSI_SGR_RESERVED_02 = 48 - ANSI_SGR_BACKGROUND_DEFAULT = 49 - // 50 - 65: Unsupported - - ANSI_MAX_CMD_LENGTH = 4096 - - MAX_INPUT_EVENTS = 128 - DEFAULT_WIDTH = 80 - DEFAULT_HEIGHT = 24 - - ANSI_BEL = 0x07 - ANSI_BACKSPACE = 0x08 - ANSI_TAB = 0x09 - ANSI_LINE_FEED = 0x0A - ANSI_VERTICAL_TAB = 0x0B - ANSI_FORM_FEED = 0x0C - ANSI_CARRIAGE_RETURN = 0x0D - ANSI_ESCAPE_PRIMARY = 0x1B - ANSI_ESCAPE_SECONDARY = 0x5B - ANSI_OSC_STRING_ENTRY = 0x5D - ANSI_COMMAND_FIRST = 0x40 - ANSI_COMMAND_LAST = 0x7E - DCS_ENTRY = 0x90 - CSI_ENTRY = 0x9B - OSC_STRING = 0x9D - ANSI_PARAMETER_SEP = ";" - ANSI_CMD_G0 = '(' - ANSI_CMD_G1 = ')' - ANSI_CMD_G2 = '*' - ANSI_CMD_G3 = '+' - ANSI_CMD_DECPNM = '>' - ANSI_CMD_DECPAM = '=' - ANSI_CMD_OSC = ']' - ANSI_CMD_STR_TERM = '\\' - - KEY_CONTROL_PARAM_2 = ";2" - KEY_CONTROL_PARAM_3 = ";3" - KEY_CONTROL_PARAM_4 = ";4" - KEY_CONTROL_PARAM_5 = ";5" - KEY_CONTROL_PARAM_6 = ";6" - KEY_CONTROL_PARAM_7 = ";7" - KEY_CONTROL_PARAM_8 = ";8" - KEY_ESC_CSI = "\x1B[" - KEY_ESC_N = "\x1BN" - KEY_ESC_O = "\x1BO" - - FILL_CHARACTER = ' ' 
-) - -func getByteRange(start byte, end byte) []byte { - bytes := make([]byte, 0, 32) - for i := start; i <= end; i++ { - bytes = append(bytes, byte(i)) - } - - return bytes -} - -var toGroundBytes = getToGroundBytes() -var executors = getExecuteBytes() - -// SPACE 20+A0 hex Always and everywhere a blank space -// Intermediate 20-2F hex !"#$%&'()*+,-./ -var intermeds = getByteRange(0x20, 0x2F) - -// Parameters 30-3F hex 0123456789:;<=>? -// CSI Parameters 30-39, 3B hex 0123456789; -var csiParams = getByteRange(0x30, 0x3F) - -var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...) - -// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_ -var upperCase = getByteRange(0x40, 0x5F) - -// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~ -var lowerCase = getByteRange(0x60, 0x7E) - -// Alphabetics 40-7E hex (all of upper and lower case) -var alphabetics = append(upperCase, lowerCase...) - -var printables = getByteRange(0x20, 0x7F) - -var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E) -var escapeToGroundBytes = getEscapeToGroundBytes() - -// See http://www.vt100.net/emu/vt500_parser.png for description of the complex -// byte ranges below - -func getEscapeToGroundBytes() []byte { - escapeToGroundBytes := getByteRange(0x30, 0x4F) - escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...) - escapeToGroundBytes = append(escapeToGroundBytes, 0x59) - escapeToGroundBytes = append(escapeToGroundBytes, 0x5A) - escapeToGroundBytes = append(escapeToGroundBytes, 0x5C) - escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...) - return escapeToGroundBytes -} - -func getExecuteBytes() []byte { - executeBytes := getByteRange(0x00, 0x17) - executeBytes = append(executeBytes, 0x19) - executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...) - return executeBytes -} - -func getToGroundBytes() []byte { - groundBytes := []byte{0x18} - groundBytes = append(groundBytes, 0x1A) - groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...) - groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...) 
- groundBytes = append(groundBytes, 0x99) - groundBytes = append(groundBytes, 0x9A) - groundBytes = append(groundBytes, 0x9C) - return groundBytes -} - -// Delete 7F hex Always and everywhere ignored -// C1 Control 80-9F hex 32 additional control characters -// G1 Displayable A1-FE hex 94 additional displayable characters -// Special A0+FF hex Same as SPACE and DELETE diff --git a/src/vendor/github.com/Azure/go-ansiterm/context.go b/src/vendor/github.com/Azure/go-ansiterm/context.go deleted file mode 100644 index 8d66e777c..000000000 --- a/src/vendor/github.com/Azure/go-ansiterm/context.go +++ /dev/null @@ -1,7 +0,0 @@ -package ansiterm - -type ansiContext struct { - currentChar byte - paramBuffer []byte - interBuffer []byte -} diff --git a/src/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/src/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go deleted file mode 100644 index bcbe00d0c..000000000 --- a/src/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go +++ /dev/null @@ -1,49 +0,0 @@ -package ansiterm - -type csiEntryState struct { - baseState -} - -func (csiState csiEntryState) Handle(b byte) (s state, e error) { - csiState.parser.logf("CsiEntry::Handle %#x", b) - - nextState, err := csiState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(alphabetics, b): - return csiState.parser.ground, nil - case sliceContains(csiCollectables, b): - return csiState.parser.csiParam, nil - case sliceContains(executors, b): - return csiState, csiState.parser.execute() - } - - return csiState, nil -} - -func (csiState csiEntryState) Transition(s state) error { - csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) - csiState.baseState.Transition(s) - - switch s { - case csiState.parser.ground: - return csiState.parser.csiDispatch() - case csiState.parser.csiParam: - switch { - case sliceContains(csiParams, csiState.parser.context.currentChar): - csiState.parser.collectParam() - case sliceContains(intermeds, csiState.parser.context.currentChar): - csiState.parser.collectInter() - } - } - - return nil -} - -func (csiState csiEntryState) Enter() error { - csiState.parser.clear() - return nil -} diff --git a/src/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/src/vendor/github.com/Azure/go-ansiterm/csi_param_state.go deleted file mode 100644 index 7ed5e01c3..000000000 --- a/src/vendor/github.com/Azure/go-ansiterm/csi_param_state.go +++ /dev/null @@ -1,38 +0,0 @@ -package ansiterm - -type csiParamState struct { - baseState -} - -func (csiState csiParamState) Handle(b byte) (s state, e error) { - csiState.parser.logf("CsiParam::Handle %#x", b) - - nextState, err := csiState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(alphabetics, b): - return csiState.parser.ground, nil - case sliceContains(csiCollectables, b): - csiState.parser.collectParam() - return csiState, nil - case sliceContains(executors, b): - return csiState, csiState.parser.execute() - } - - return csiState, nil -} - -func (csiState csiParamState) Transition(s state) error { - csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) - csiState.baseState.Transition(s) - - switch s { - case csiState.parser.ground: - return csiState.parser.csiDispatch() - } - - return nil -} diff --git a/src/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/src/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go 
deleted file mode 100644 index 1c719db9e..000000000 --- a/src/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go +++ /dev/null @@ -1,36 +0,0 @@ -package ansiterm - -type escapeIntermediateState struct { - baseState -} - -func (escState escapeIntermediateState) Handle(b byte) (s state, e error) { - escState.parser.logf("escapeIntermediateState::Handle %#x", b) - nextState, err := escState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(intermeds, b): - return escState, escState.parser.collectInter() - case sliceContains(executors, b): - return escState, escState.parser.execute() - case sliceContains(escapeIntermediateToGroundBytes, b): - return escState.parser.ground, nil - } - - return escState, nil -} - -func (escState escapeIntermediateState) Transition(s state) error { - escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) - escState.baseState.Transition(s) - - switch s { - case escState.parser.ground: - return escState.parser.escDispatch() - } - - return nil -} diff --git a/src/vendor/github.com/Azure/go-ansiterm/escape_state.go b/src/vendor/github.com/Azure/go-ansiterm/escape_state.go deleted file mode 100644 index 6390abd23..000000000 --- a/src/vendor/github.com/Azure/go-ansiterm/escape_state.go +++ /dev/null @@ -1,47 +0,0 @@ -package ansiterm - -type escapeState struct { - baseState -} - -func (escState escapeState) Handle(b byte) (s state, e error) { - escState.parser.logf("escapeState::Handle %#x", b) - nextState, err := escState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case b == ANSI_ESCAPE_SECONDARY: - return escState.parser.csiEntry, nil - case b == ANSI_OSC_STRING_ENTRY: - return escState.parser.oscString, nil - case sliceContains(executors, b): - return escState, escState.parser.execute() - case sliceContains(escapeToGroundBytes, b): - return escState.parser.ground, nil - case sliceContains(intermeds, b): - return escState.parser.escapeIntermediate, nil - } - - return escState, nil -} - -func (escState escapeState) Transition(s state) error { - escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name()) - escState.baseState.Transition(s) - - switch s { - case escState.parser.ground: - return escState.parser.escDispatch() - case escState.parser.escapeIntermediate: - return escState.parser.collectInter() - } - - return nil -} - -func (escState escapeState) Enter() error { - escState.parser.clear() - return nil -} diff --git a/src/vendor/github.com/Azure/go-ansiterm/event_handler.go b/src/vendor/github.com/Azure/go-ansiterm/event_handler.go deleted file mode 100644 index 98087b38c..000000000 --- a/src/vendor/github.com/Azure/go-ansiterm/event_handler.go +++ /dev/null @@ -1,90 +0,0 @@ -package ansiterm - -type AnsiEventHandler interface { - // Print - Print(b byte) error - - // Execute C0 commands - Execute(b byte) error - - // CUrsor Up - CUU(int) error - - // CUrsor Down - CUD(int) error - - // CUrsor Forward - CUF(int) error - - // CUrsor Backward - CUB(int) error - - // Cursor to Next Line - CNL(int) error - - // Cursor to Previous Line - CPL(int) error - - // Cursor Horizontal position Absolute - CHA(int) error - - // Vertical line Position Absolute - VPA(int) error - - // CUrsor Position - CUP(int, int) error - - // Horizontal and Vertical Position (depends on PUM) - HVP(int, int) error - - // Text Cursor Enable Mode - DECTCEM(bool) error - - // Origin Mode - 
DECOM(bool) error - - // 132 Column Mode - DECCOLM(bool) error - - // Erase in Display - ED(int) error - - // Erase in Line - EL(int) error - - // Insert Line - IL(int) error - - // Delete Line - DL(int) error - - // Insert Character - ICH(int) error - - // Delete Character - DCH(int) error - - // Set Graphics Rendition - SGR([]int) error - - // Pan Down - SU(int) error - - // Pan Up - SD(int) error - - // Device Attributes - DA([]string) error - - // Set Top and Bottom Margins - DECSTBM(int, int) error - - // Index - IND() error - - // Reverse Index - RI() error - - // Flush updates from previous commands - Flush() error -} diff --git a/src/vendor/github.com/Azure/go-ansiterm/ground_state.go b/src/vendor/github.com/Azure/go-ansiterm/ground_state.go deleted file mode 100644 index 52451e946..000000000 --- a/src/vendor/github.com/Azure/go-ansiterm/ground_state.go +++ /dev/null @@ -1,24 +0,0 @@ -package ansiterm - -type groundState struct { - baseState -} - -func (gs groundState) Handle(b byte) (s state, e error) { - gs.parser.context.currentChar = b - - nextState, err := gs.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(printables, b): - return gs, gs.parser.print() - - case sliceContains(executors, b): - return gs, gs.parser.execute() - } - - return gs, nil -} diff --git a/src/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/src/vendor/github.com/Azure/go-ansiterm/osc_string_state.go deleted file mode 100644 index 593b10ab6..000000000 --- a/src/vendor/github.com/Azure/go-ansiterm/osc_string_state.go +++ /dev/null @@ -1,31 +0,0 @@ -package ansiterm - -type oscStringState struct { - baseState -} - -func (oscState oscStringState) Handle(b byte) (s state, e error) { - oscState.parser.logf("OscString::Handle %#x", b) - nextState, err := oscState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case isOscStringTerminator(b): - return oscState.parser.ground, nil - } - - return oscState, nil -} - -// See below for OSC string terminators for linux -// http://man7.org/linux/man-pages/man4/console_codes.4.html -func isOscStringTerminator(b byte) bool { - - if b == ANSI_BEL || b == 0x5C { - return true - } - - return false -} diff --git a/src/vendor/github.com/Azure/go-ansiterm/parser.go b/src/vendor/github.com/Azure/go-ansiterm/parser.go deleted file mode 100644 index 03cec7ada..000000000 --- a/src/vendor/github.com/Azure/go-ansiterm/parser.go +++ /dev/null @@ -1,151 +0,0 @@ -package ansiterm - -import ( - "errors" - "log" - "os" -) - -type AnsiParser struct { - currState state - eventHandler AnsiEventHandler - context *ansiContext - csiEntry state - csiParam state - dcsEntry state - escape state - escapeIntermediate state - error state - ground state - oscString state - stateMap []state - - logf func(string, ...interface{}) -} - -type Option func(*AnsiParser) - -func WithLogf(f func(string, ...interface{})) Option { - return func(ap *AnsiParser) { - ap.logf = f - } -} - -func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser { - ap := &AnsiParser{ - eventHandler: evtHandler, - context: &ansiContext{}, - } - for _, o := range opts { - o(ap) - } - - if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { - logFile, _ := os.Create("ansiParser.log") - logger := log.New(logFile, "", log.LstdFlags) - if ap.logf != nil { - l := ap.logf - ap.logf = func(s string, v ...interface{}) { - l(s, v...) - logger.Printf(s, v...) 
- } - } else { - ap.logf = logger.Printf - } - } - - if ap.logf == nil { - ap.logf = func(string, ...interface{}) {} - } - - ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}} - ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}} - ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}} - ap.escape = escapeState{baseState{name: "Escape", parser: ap}} - ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}} - ap.error = errorState{baseState{name: "Error", parser: ap}} - ap.ground = groundState{baseState{name: "Ground", parser: ap}} - ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}} - - ap.stateMap = []state{ - ap.csiEntry, - ap.csiParam, - ap.dcsEntry, - ap.escape, - ap.escapeIntermediate, - ap.error, - ap.ground, - ap.oscString, - } - - ap.currState = getState(initialState, ap.stateMap) - - ap.logf("CreateParser: parser %p", ap) - return ap -} - -func getState(name string, states []state) state { - for _, el := range states { - if el.Name() == name { - return el - } - } - - return nil -} - -func (ap *AnsiParser) Parse(bytes []byte) (int, error) { - for i, b := range bytes { - if err := ap.handle(b); err != nil { - return i, err - } - } - - return len(bytes), ap.eventHandler.Flush() -} - -func (ap *AnsiParser) handle(b byte) error { - ap.context.currentChar = b - newState, err := ap.currState.Handle(b) - if err != nil { - return err - } - - if newState == nil { - ap.logf("WARNING: newState is nil") - return errors.New("New state of 'nil' is invalid.") - } - - if newState != ap.currState { - if err := ap.changeState(newState); err != nil { - return err - } - } - - return nil -} - -func (ap *AnsiParser) changeState(newState state) error { - ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name()) - - // Exit old state - if err := ap.currState.Exit(); err != nil { - ap.logf("Exit state '%s' failed with : '%v'", ap.currState.Name(), err) - return err - } - - // Perform transition action - if err := ap.currState.Transition(newState); err != nil { - ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err) - return err - } - - // Enter new state - if err := newState.Enter(); err != nil { - ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err) - return err - } - - ap.currState = newState - return nil -} diff --git a/src/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/src/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go deleted file mode 100644 index de0a1f9cd..000000000 --- a/src/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go +++ /dev/null @@ -1,99 +0,0 @@ -package ansiterm - -import ( - "strconv" -) - -func parseParams(bytes []byte) ([]string, error) { - paramBuff := make([]byte, 0, 0) - params := []string{} - - for _, v := range bytes { - if v == ';' { - if len(paramBuff) > 0 { - // Completed parameter, append it to the list - s := string(paramBuff) - params = append(params, s) - paramBuff = make([]byte, 0, 0) - } - } else { - paramBuff = append(paramBuff, v) - } - } - - // Last parameter may not be terminated with ';' - if len(paramBuff) > 0 { - s := string(paramBuff) - params = append(params, s) - } - - return params, nil -} - -func parseCmd(context ansiContext) (string, error) { - return string(context.currentChar), nil -} - -func getInt(params []string, dflt int) int { - i := getInts(params, 1, dflt)[0] - return i -} - -func getInts(params []string, minCount int, dflt 
int) []int { - ints := []int{} - - for _, v := range params { - i, _ := strconv.Atoi(v) - // Zero is mapped to the default value in VT100. - if i == 0 { - i = dflt - } - ints = append(ints, i) - } - - if len(ints) < minCount { - remaining := minCount - len(ints) - for i := 0; i < remaining; i++ { - ints = append(ints, dflt) - } - } - - return ints -} - -func (ap *AnsiParser) modeDispatch(param string, set bool) error { - switch param { - case "?3": - return ap.eventHandler.DECCOLM(set) - case "?6": - return ap.eventHandler.DECOM(set) - case "?25": - return ap.eventHandler.DECTCEM(set) - } - return nil -} - -func (ap *AnsiParser) hDispatch(params []string) error { - if len(params) == 1 { - return ap.modeDispatch(params[0], true) - } - - return nil -} - -func (ap *AnsiParser) lDispatch(params []string) error { - if len(params) == 1 { - return ap.modeDispatch(params[0], false) - } - - return nil -} - -func getEraseParam(params []string) int { - param := getInt(params, 0) - if param < 0 || 3 < param { - param = 0 - } - - return param -} diff --git a/src/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/src/vendor/github.com/Azure/go-ansiterm/parser_actions.go deleted file mode 100644 index 0bb5e51e9..000000000 --- a/src/vendor/github.com/Azure/go-ansiterm/parser_actions.go +++ /dev/null @@ -1,119 +0,0 @@ -package ansiterm - -func (ap *AnsiParser) collectParam() error { - currChar := ap.context.currentChar - ap.logf("collectParam %#x", currChar) - ap.context.paramBuffer = append(ap.context.paramBuffer, currChar) - return nil -} - -func (ap *AnsiParser) collectInter() error { - currChar := ap.context.currentChar - ap.logf("collectInter %#x", currChar) - ap.context.paramBuffer = append(ap.context.interBuffer, currChar) - return nil -} - -func (ap *AnsiParser) escDispatch() error { - cmd, _ := parseCmd(*ap.context) - intermeds := ap.context.interBuffer - ap.logf("escDispatch currentChar: %#x", ap.context.currentChar) - ap.logf("escDispatch: %v(%v)", cmd, intermeds) - - switch cmd { - case "D": // IND - return ap.eventHandler.IND() - case "E": // NEL, equivalent to CRLF - err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN) - if err == nil { - err = ap.eventHandler.Execute(ANSI_LINE_FEED) - } - return err - case "M": // RI - return ap.eventHandler.RI() - } - - return nil -} - -func (ap *AnsiParser) csiDispatch() error { - cmd, _ := parseCmd(*ap.context) - params, _ := parseParams(ap.context.paramBuffer) - ap.logf("Parsed params: %v with length: %d", params, len(params)) - - ap.logf("csiDispatch: %v(%v)", cmd, params) - - switch cmd { - case "@": - return ap.eventHandler.ICH(getInt(params, 1)) - case "A": - return ap.eventHandler.CUU(getInt(params, 1)) - case "B": - return ap.eventHandler.CUD(getInt(params, 1)) - case "C": - return ap.eventHandler.CUF(getInt(params, 1)) - case "D": - return ap.eventHandler.CUB(getInt(params, 1)) - case "E": - return ap.eventHandler.CNL(getInt(params, 1)) - case "F": - return ap.eventHandler.CPL(getInt(params, 1)) - case "G": - return ap.eventHandler.CHA(getInt(params, 1)) - case "H": - ints := getInts(params, 2, 1) - x, y := ints[0], ints[1] - return ap.eventHandler.CUP(x, y) - case "J": - param := getEraseParam(params) - return ap.eventHandler.ED(param) - case "K": - param := getEraseParam(params) - return ap.eventHandler.EL(param) - case "L": - return ap.eventHandler.IL(getInt(params, 1)) - case "M": - return ap.eventHandler.DL(getInt(params, 1)) - case "P": - return ap.eventHandler.DCH(getInt(params, 1)) - case "S": - return 
ap.eventHandler.SU(getInt(params, 1)) - case "T": - return ap.eventHandler.SD(getInt(params, 1)) - case "c": - return ap.eventHandler.DA(params) - case "d": - return ap.eventHandler.VPA(getInt(params, 1)) - case "f": - ints := getInts(params, 2, 1) - x, y := ints[0], ints[1] - return ap.eventHandler.HVP(x, y) - case "h": - return ap.hDispatch(params) - case "l": - return ap.lDispatch(params) - case "m": - return ap.eventHandler.SGR(getInts(params, 1, 0)) - case "r": - ints := getInts(params, 2, 1) - top, bottom := ints[0], ints[1] - return ap.eventHandler.DECSTBM(top, bottom) - default: - ap.logf("ERROR: Unsupported CSI command: '%s', with full context: %v", cmd, ap.context) - return nil - } - -} - -func (ap *AnsiParser) print() error { - return ap.eventHandler.Print(ap.context.currentChar) -} - -func (ap *AnsiParser) clear() error { - ap.context = &ansiContext{} - return nil -} - -func (ap *AnsiParser) execute() error { - return ap.eventHandler.Execute(ap.context.currentChar) -} diff --git a/src/vendor/github.com/Azure/go-ansiterm/states.go b/src/vendor/github.com/Azure/go-ansiterm/states.go deleted file mode 100644 index f2ea1fcd1..000000000 --- a/src/vendor/github.com/Azure/go-ansiterm/states.go +++ /dev/null @@ -1,71 +0,0 @@ -package ansiterm - -type stateID int - -type state interface { - Enter() error - Exit() error - Handle(byte) (state, error) - Name() string - Transition(state) error -} - -type baseState struct { - name string - parser *AnsiParser -} - -func (base baseState) Enter() error { - return nil -} - -func (base baseState) Exit() error { - return nil -} - -func (base baseState) Handle(b byte) (s state, e error) { - - switch { - case b == CSI_ENTRY: - return base.parser.csiEntry, nil - case b == DCS_ENTRY: - return base.parser.dcsEntry, nil - case b == ANSI_ESCAPE_PRIMARY: - return base.parser.escape, nil - case b == OSC_STRING: - return base.parser.oscString, nil - case sliceContains(toGroundBytes, b): - return base.parser.ground, nil - } - - return nil, nil -} - -func (base baseState) Name() string { - return base.name -} - -func (base baseState) Transition(s state) error { - if s == base.parser.ground { - execBytes := []byte{0x18} - execBytes = append(execBytes, 0x1A) - execBytes = append(execBytes, getByteRange(0x80, 0x8F)...) - execBytes = append(execBytes, getByteRange(0x91, 0x97)...) 
- execBytes = append(execBytes, 0x99) - execBytes = append(execBytes, 0x9A) - - if sliceContains(execBytes, base.parser.context.currentChar) { - return base.parser.execute() - } - } - - return nil -} - -type dcsEntryState struct { - baseState -} - -type errorState struct { - baseState -} diff --git a/src/vendor/github.com/Azure/go-ansiterm/utilities.go b/src/vendor/github.com/Azure/go-ansiterm/utilities.go deleted file mode 100644 index 392114493..000000000 --- a/src/vendor/github.com/Azure/go-ansiterm/utilities.go +++ /dev/null @@ -1,21 +0,0 @@ -package ansiterm - -import ( - "strconv" -) - -func sliceContains(bytes []byte, b byte) bool { - for _, v := range bytes { - if v == b { - return true - } - } - - return false -} - -func convertBytesToInteger(bytes []byte) int { - s := string(bytes) - i, _ := strconv.Atoi(s) - return i -} diff --git a/src/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/src/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go deleted file mode 100644 index 5599082ae..000000000 --- a/src/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go +++ /dev/null @@ -1,196 +0,0 @@ -// +build windows - -package winterm - -import ( - "fmt" - "os" - "strconv" - "strings" - "syscall" - - "github.com/Azure/go-ansiterm" - windows "golang.org/x/sys/windows" -) - -// Windows keyboard constants -// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx. -const ( - VK_PRIOR = 0x21 // PAGE UP key - VK_NEXT = 0x22 // PAGE DOWN key - VK_END = 0x23 // END key - VK_HOME = 0x24 // HOME key - VK_LEFT = 0x25 // LEFT ARROW key - VK_UP = 0x26 // UP ARROW key - VK_RIGHT = 0x27 // RIGHT ARROW key - VK_DOWN = 0x28 // DOWN ARROW key - VK_SELECT = 0x29 // SELECT key - VK_PRINT = 0x2A // PRINT key - VK_EXECUTE = 0x2B // EXECUTE key - VK_SNAPSHOT = 0x2C // PRINT SCREEN key - VK_INSERT = 0x2D // INS key - VK_DELETE = 0x2E // DEL key - VK_HELP = 0x2F // HELP key - VK_F1 = 0x70 // F1 key - VK_F2 = 0x71 // F2 key - VK_F3 = 0x72 // F3 key - VK_F4 = 0x73 // F4 key - VK_F5 = 0x74 // F5 key - VK_F6 = 0x75 // F6 key - VK_F7 = 0x76 // F7 key - VK_F8 = 0x77 // F8 key - VK_F9 = 0x78 // F9 key - VK_F10 = 0x79 // F10 key - VK_F11 = 0x7A // F11 key - VK_F12 = 0x7B // F12 key - - RIGHT_ALT_PRESSED = 0x0001 - LEFT_ALT_PRESSED = 0x0002 - RIGHT_CTRL_PRESSED = 0x0004 - LEFT_CTRL_PRESSED = 0x0008 - SHIFT_PRESSED = 0x0010 - NUMLOCK_ON = 0x0020 - SCROLLLOCK_ON = 0x0040 - CAPSLOCK_ON = 0x0080 - ENHANCED_KEY = 0x0100 -) - -type ansiCommand struct { - CommandBytes []byte - Command string - Parameters []string - IsSpecial bool -} - -func newAnsiCommand(command []byte) *ansiCommand { - - if isCharacterSelectionCmdChar(command[1]) { - // Is Character Set Selection commands - return &ansiCommand{ - CommandBytes: command, - Command: string(command), - IsSpecial: true, - } - } - - // last char is command character - lastCharIndex := len(command) - 1 - - ac := &ansiCommand{ - CommandBytes: command, - Command: string(command[lastCharIndex]), - IsSpecial: false, - } - - // more than a single escape - if lastCharIndex != 0 { - start := 1 - // skip if double char escape sequence - if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY { - start++ - } - // convert this to GetNextParam method - ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP) - } - - return ac -} - -func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 { - if index < 0 || index >= len(ac.Parameters) { - return defaultValue - } - - param, 
err := strconv.ParseInt(ac.Parameters[index], 10, 16) - if err != nil { - return defaultValue - } - - return int16(param) -} - -func (ac *ansiCommand) String() string { - return fmt.Sprintf("0x%v \"%v\" (\"%v\")", - bytesToHex(ac.CommandBytes), - ac.Command, - strings.Join(ac.Parameters, "\",\"")) -} - -// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands. -// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html. -func isAnsiCommandChar(b byte) bool { - switch { - case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY: - return true - case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM: - // non-CSI escape sequence terminator - return true - case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL: - // String escape sequence terminator - return true - } - return false -} - -func isXtermOscSequence(command []byte, current byte) bool { - return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL) -} - -func isCharacterSelectionCmdChar(b byte) bool { - return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3) -} - -// bytesToHex converts a slice of bytes to a human-readable string. -func bytesToHex(b []byte) string { - hex := make([]string, len(b)) - for i, ch := range b { - hex[i] = fmt.Sprintf("%X", ch) - } - return strings.Join(hex, "") -} - -// ensureInRange adjusts the passed value, if necessary, to ensure it is within -// the passed min / max range. -func ensureInRange(n int16, min int16, max int16) int16 { - if n < min { - return min - } else if n > max { - return max - } else { - return n - } -} - -func GetStdFile(nFile int) (*os.File, uintptr) { - var file *os.File - - // syscall uses negative numbers - // windows package uses very big uint32 - // Keep these switches split so we don't have to convert ints too much. - switch uint32(nFile) { - case windows.STD_INPUT_HANDLE: - file = os.Stdin - case windows.STD_OUTPUT_HANDLE: - file = os.Stdout - case windows.STD_ERROR_HANDLE: - file = os.Stderr - default: - switch nFile { - case syscall.STD_INPUT_HANDLE: - file = os.Stdin - case syscall.STD_OUTPUT_HANDLE: - file = os.Stdout - case syscall.STD_ERROR_HANDLE: - file = os.Stderr - default: - panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile)) - } - } - - fd, err := syscall.GetStdHandle(nFile) - if err != nil { - panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err)) - } - - return file, uintptr(fd) -} diff --git a/src/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/src/vendor/github.com/Azure/go-ansiterm/winterm/api.go deleted file mode 100644 index 6055e33b9..000000000 --- a/src/vendor/github.com/Azure/go-ansiterm/winterm/api.go +++ /dev/null @@ -1,327 +0,0 @@ -// +build windows - -package winterm - -import ( - "fmt" - "syscall" - "unsafe" -) - -//=========================================================================================================== -// IMPORTANT NOTE: -// -// The methods below make extensive use of the "unsafe" package to obtain the required pointers. -// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack -// variables) the pointers reference *before* the API completes. 
-// -// As a result, in those cases, the code must hint that the variables remain in active by invoking the -// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer -// require unsafe pointers. -// -// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform -// the garbage collector the variables remain in use if: -// -// -- The value is not a pointer (e.g., int32, struct) -// -- The value is not referenced by the method after passing the pointer to Windows -// -// See http://golang.org/doc/go1.3. -//=========================================================================================================== - -var ( - kernel32DLL = syscall.NewLazyDLL("kernel32.dll") - - getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo") - setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo") - setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition") - setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") - getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") - setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize") - scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA") - setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") - setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo") - writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW") - readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW") - waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject") -) - -// Windows Console constants -const ( - // Console modes - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. - ENABLE_PROCESSED_INPUT = 0x0001 - ENABLE_LINE_INPUT = 0x0002 - ENABLE_ECHO_INPUT = 0x0004 - ENABLE_WINDOW_INPUT = 0x0008 - ENABLE_MOUSE_INPUT = 0x0010 - ENABLE_INSERT_MODE = 0x0020 - ENABLE_QUICK_EDIT_MODE = 0x0040 - ENABLE_EXTENDED_FLAGS = 0x0080 - ENABLE_AUTO_POSITION = 0x0100 - ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200 - - ENABLE_PROCESSED_OUTPUT = 0x0001 - ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 - ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 - DISABLE_NEWLINE_AUTO_RETURN = 0x0008 - ENABLE_LVB_GRID_WORLDWIDE = 0x0010 - - // Character attributes - // Note: - // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan). - // Clearing all foreground or background colors results in black; setting all creates white. - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes. - FOREGROUND_BLUE uint16 = 0x0001 - FOREGROUND_GREEN uint16 = 0x0002 - FOREGROUND_RED uint16 = 0x0004 - FOREGROUND_INTENSITY uint16 = 0x0008 - FOREGROUND_MASK uint16 = 0x000F - - BACKGROUND_BLUE uint16 = 0x0010 - BACKGROUND_GREEN uint16 = 0x0020 - BACKGROUND_RED uint16 = 0x0040 - BACKGROUND_INTENSITY uint16 = 0x0080 - BACKGROUND_MASK uint16 = 0x00F0 - - COMMON_LVB_MASK uint16 = 0xFF00 - COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000 - COMMON_LVB_UNDERSCORE uint16 = 0x8000 - - // Input event types - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. 
- KEY_EVENT = 0x0001 - MOUSE_EVENT = 0x0002 - WINDOW_BUFFER_SIZE_EVENT = 0x0004 - MENU_EVENT = 0x0008 - FOCUS_EVENT = 0x0010 - - // WaitForSingleObject return codes - WAIT_ABANDONED = 0x00000080 - WAIT_FAILED = 0xFFFFFFFF - WAIT_SIGNALED = 0x0000000 - WAIT_TIMEOUT = 0x00000102 - - // WaitForSingleObject wait duration - WAIT_INFINITE = 0xFFFFFFFF - WAIT_ONE_SECOND = 1000 - WAIT_HALF_SECOND = 500 - WAIT_QUARTER_SECOND = 250 -) - -// Windows API Console types -// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD) -// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment -type ( - CHAR_INFO struct { - UnicodeChar uint16 - Attributes uint16 - } - - CONSOLE_CURSOR_INFO struct { - Size uint32 - Visible int32 - } - - CONSOLE_SCREEN_BUFFER_INFO struct { - Size COORD - CursorPosition COORD - Attributes uint16 - Window SMALL_RECT - MaximumWindowSize COORD - } - - COORD struct { - X int16 - Y int16 - } - - SMALL_RECT struct { - Left int16 - Top int16 - Right int16 - Bottom int16 - } - - // INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. - INPUT_RECORD struct { - EventType uint16 - KeyEvent KEY_EVENT_RECORD - } - - KEY_EVENT_RECORD struct { - KeyDown int32 - RepeatCount uint16 - VirtualKeyCode uint16 - VirtualScanCode uint16 - UnicodeChar uint16 - ControlKeyState uint32 - } - - WINDOW_BUFFER_SIZE struct { - Size COORD - } -) - -// boolToBOOL converts a Go bool into a Windows int32. -func boolToBOOL(f bool) int32 { - if f { - return int32(1) - } else { - return int32(0) - } -} - -// GetConsoleCursorInfo retrieves information about the size and visiblity of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx. -func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { - r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) - return checkError(r1, r2, err) -} - -// SetConsoleCursorInfo sets the size and visiblity of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx. -func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { - r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) - return checkError(r1, r2, err) -} - -// SetConsoleCursorPosition location of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx. -func SetConsoleCursorPosition(handle uintptr, coord COORD) error { - r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord)) - use(coord) - return checkError(r1, r2, err) -} - -// GetConsoleMode gets the console mode for given file descriptor -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx. -func GetConsoleMode(handle uintptr) (mode uint32, err error) { - err = syscall.GetConsoleMode(syscall.Handle(handle), &mode) - return mode, err -} - -// SetConsoleMode sets the console mode for given file descriptor -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. 
-func SetConsoleMode(handle uintptr, mode uint32) error { - r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0) - use(mode) - return checkError(r1, r2, err) -} - -// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx. -func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { - info := CONSOLE_SCREEN_BUFFER_INFO{} - err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)) - if err != nil { - return nil, err - } - return &info, nil -} - -func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error { - r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char))) - use(scrollRect) - use(clipRect) - use(destOrigin) - use(char) - return checkError(r1, r2, err) -} - -// SetConsoleScreenBufferSize sets the size of the console screen buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx. -func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error { - r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord)) - use(coord) - return checkError(r1, r2, err) -} - -// SetConsoleTextAttribute sets the attributes of characters written to the -// console screen buffer by the WriteFile or WriteConsole function. -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. -func SetConsoleTextAttribute(handle uintptr, attribute uint16) error { - r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0) - use(attribute) - return checkError(r1, r2, err) -} - -// SetConsoleWindowInfo sets the size and position of the console screen buffer's window. -// Note that the size and location must be within and no larger than the backing console screen buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx. -func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error { - r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect))) - use(isAbsolute) - use(rect) - return checkError(r1, r2, err) -} - -// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx. -func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error { - r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion))) - use(buffer) - use(bufferSize) - use(bufferCoord) - return checkError(r1, r2, err) -} - -// ReadConsoleInput reads (and removes) data from the console input buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx. 
-func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error { - r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count))) - use(buffer) - return checkError(r1, r2, err) -} - -// WaitForSingleObject waits for the passed handle to be signaled. -// It returns true if the handle was signaled; false otherwise. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx. -func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) { - r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait))) - switch r1 { - case WAIT_ABANDONED, WAIT_TIMEOUT: - return false, nil - case WAIT_SIGNALED: - return true, nil - } - use(msWait) - return false, err -} - -// String helpers -func (info CONSOLE_SCREEN_BUFFER_INFO) String() string { - return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize) -} - -func (coord COORD) String() string { - return fmt.Sprintf("%v,%v", coord.X, coord.Y) -} - -func (rect SMALL_RECT) String() string { - return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom) -} - -// checkError evaluates the results of a Windows API call and returns the error if it failed. -func checkError(r1, r2 uintptr, err error) error { - // Windows APIs return non-zero to indicate success - if r1 != 0 { - return nil - } - - // Return the error if provided, otherwise default to EINVAL - if err != nil { - return err - } - return syscall.EINVAL -} - -// coordToPointer converts a COORD into a uintptr (by fooling the type system). -func coordToPointer(c COORD) uintptr { - // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass. - return uintptr(*((*uint32)(unsafe.Pointer(&c)))) -} - -// use is a no-op, but the compiler cannot see that it is. -// Calling use(p) ensures that p is kept live until that point. -func use(p interface{}) {} diff --git a/src/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/src/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go deleted file mode 100644 index cbec8f728..000000000 --- a/src/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go +++ /dev/null @@ -1,100 +0,0 @@ -// +build windows - -package winterm - -import "github.com/Azure/go-ansiterm" - -const ( - FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE - BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE -) - -// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the -// request represented by the passed ANSI mode. 
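The deleted winterm/api.go above documents a keep-alive convention: each wrapper passes raw uintptr arguments to a kernel32 proc and then calls the no-op use(...) so the garbage collector treats the Go value as live until the call has returned. A minimal, Windows-only sketch of that convention follows; the package name, COORD copy, and setCursorPosition wrapper are illustrative, and only the SetConsoleCursorPosition proc, the uintptr packing, and use itself mirror the deleted file:

    // +build windows

    // Illustrative sketch of the keep-alive convention described in the note of the
    // deleted winterm/api.go; this is not code added by the patch.
    package sketch

    import (
        "syscall"
        "unsafe"
    )

    var (
        kernel32DLL                  = syscall.NewLazyDLL("kernel32.dll")
        setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition")
    )

    // COORD matches the console API layout: two adjacent int16 fields.
    type COORD struct {
        X int16
        Y int16
    }

    // use is a no-op, but the compiler cannot see that it is; calling use(p)
    // keeps p live until that point (the deleted file's own wording).
    func use(p interface{}) {}

    // setCursorPosition shows the pattern: pack the struct into a uintptr, make
    // the call, then hint that the original value must stay live until the call
    // has completed.
    func setCursorPosition(handle uintptr, coord COORD) error {
        arg := uintptr(*(*uint32)(unsafe.Pointer(&coord))) // same trick as coordToPointer
        r1, _, err := setConsoleCursorPositionProc.Call(handle, arg)
        use(coord) // keep coord alive across the syscall, per the note in api.go
        if r1 != 0 {
            return nil // these console APIs return non-zero on success
        }
        return err
    }

Since Go 1.7 the same hint is normally written as runtime.KeepAlive(coord); the use() helper predates that API.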
-func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) { - switch ansiMode { - - // Mode styles - case ansiterm.ANSI_SGR_BOLD: - windowsMode = windowsMode | FOREGROUND_INTENSITY - - case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF: - windowsMode &^= FOREGROUND_INTENSITY - - case ansiterm.ANSI_SGR_UNDERLINE: - windowsMode = windowsMode | COMMON_LVB_UNDERSCORE - - case ansiterm.ANSI_SGR_REVERSE: - inverted = true - - case ansiterm.ANSI_SGR_REVERSE_OFF: - inverted = false - - case ansiterm.ANSI_SGR_UNDERLINE_OFF: - windowsMode &^= COMMON_LVB_UNDERSCORE - - // Foreground colors - case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT: - windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK) - - case ansiterm.ANSI_SGR_FOREGROUND_BLACK: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) - - case ansiterm.ANSI_SGR_FOREGROUND_RED: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED - - case ansiterm.ANSI_SGR_FOREGROUND_GREEN: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN - - case ansiterm.ANSI_SGR_FOREGROUND_YELLOW: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN - - case ansiterm.ANSI_SGR_FOREGROUND_BLUE: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_CYAN: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_WHITE: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE - - // Background colors - case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT: - // Black with no intensity - windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK) - - case ansiterm.ANSI_SGR_BACKGROUND_BLACK: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) - - case ansiterm.ANSI_SGR_BACKGROUND_RED: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED - - case ansiterm.ANSI_SGR_BACKGROUND_GREEN: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN - - case ansiterm.ANSI_SGR_BACKGROUND_YELLOW: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN - - case ansiterm.ANSI_SGR_BACKGROUND_BLUE: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_CYAN: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_WHITE: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE - } - - return windowsMode, inverted -} - -// invertAttributes inverts the foreground and background colors of a Windows attributes value -func invertAttributes(windowsMode uint16) uint16 { - return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4) -} diff --git a/src/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/src/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go deleted file mode 100644 index 3ee06ea72..000000000 --- 
a/src/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go +++ /dev/null @@ -1,101 +0,0 @@ -// +build windows - -package winterm - -const ( - horizontal = iota - vertical -) - -func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT { - if h.originMode { - sr := h.effectiveSr(info.Window) - return SMALL_RECT{ - Top: sr.top, - Bottom: sr.bottom, - Left: 0, - Right: info.Size.X - 1, - } - } else { - return SMALL_RECT{ - Top: info.Window.Top, - Bottom: info.Window.Bottom, - Left: 0, - Right: info.Size.X - 1, - } - } -} - -// setCursorPosition sets the cursor to the specified position, bounded to the screen size -func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error { - position.X = ensureInRange(position.X, window.Left, window.Right) - position.Y = ensureInRange(position.Y, window.Top, window.Bottom) - err := SetConsoleCursorPosition(h.fd, position) - if err != nil { - return err - } - h.logf("Cursor position set: (%d, %d)", position.X, position.Y) - return err -} - -func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error { - return h.moveCursor(vertical, param) -} - -func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error { - return h.moveCursor(horizontal, param) -} - -func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - switch moveMode { - case horizontal: - position.X += int16(param) - case vertical: - position.Y += int16(param) - } - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) moveCursorLine(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - position.X = 0 - position.Y += int16(param) - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - position.X = int16(param) - 1 - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} diff --git a/src/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/src/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go deleted file mode 100644 index 244b5fa25..000000000 --- a/src/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go +++ /dev/null @@ -1,84 +0,0 @@ -// +build windows - -package winterm - -import "github.com/Azure/go-ansiterm" - -func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error { - // Ignore an invalid (negative area) request - if toCoord.Y < fromCoord.Y { - return nil - } - - var err error - - var coordStart = COORD{} - var coordEnd = COORD{} - - xCurrent, yCurrent := fromCoord.X, fromCoord.Y - xEnd, yEnd := toCoord.X, toCoord.Y - - // Clear any partial initial line - if xCurrent > 0 { - coordStart.X, coordStart.Y = xCurrent, yCurrent - coordEnd.X, coordEnd.Y = xEnd, yCurrent - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - xCurrent = 0 - yCurrent += 1 - } - - // Clear intervening rectangular section - if yCurrent < yEnd { - coordStart.X, coordStart.Y = 
xCurrent, yCurrent - coordEnd.X, coordEnd.Y = xEnd, yEnd-1 - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - xCurrent = 0 - yCurrent = yEnd - } - - // Clear remaining partial ending line - coordStart.X, coordStart.Y = xCurrent, yCurrent - coordEnd.X, coordEnd.Y = xEnd, yEnd - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error { - region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X} - width := toCoord.X - fromCoord.X + 1 - height := toCoord.Y - fromCoord.Y + 1 - size := uint32(width) * uint32(height) - - if size <= 0 { - return nil - } - - buffer := make([]CHAR_INFO, size) - - char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes} - for i := 0; i < int(size); i++ { - buffer[i] = char - } - - err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, ®ion) - if err != nil { - return err - } - - return nil -} diff --git a/src/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/src/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go deleted file mode 100644 index 2d27fa1d0..000000000 --- a/src/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go +++ /dev/null @@ -1,118 +0,0 @@ -// +build windows - -package winterm - -// effectiveSr gets the current effective scroll region in buffer coordinates -func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion { - top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom) - bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom) - if top >= bottom { - top = window.Top - bottom = window.Bottom - } - return scrollRegion{top: top, bottom: bottom} -} - -func (h *windowsAnsiEventHandler) scrollUp(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - sr := h.effectiveSr(info.Window) - return h.scroll(param, sr, info) -} - -func (h *windowsAnsiEventHandler) scrollDown(param int) error { - return h.scrollUp(-param) -} - -func (h *windowsAnsiEventHandler) deleteLines(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - start := info.CursorPosition.Y - sr := h.effectiveSr(info.Window) - // Lines cannot be inserted or deleted outside the scrolling region. - if start >= sr.top && start <= sr.bottom { - sr.top = start - return h.scroll(param, sr, info) - } else { - return nil - } -} - -func (h *windowsAnsiEventHandler) insertLines(param int) error { - return h.deleteLines(-param) -} - -// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates. 
-func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error { - h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom) - h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom) - - // Copy from and clip to the scroll region (full buffer width) - scrollRect := SMALL_RECT{ - Top: sr.top, - Bottom: sr.bottom, - Left: 0, - Right: info.Size.X - 1, - } - - // Origin to which area should be copied - destOrigin := COORD{ - X: 0, - Y: sr.top - int16(param), - } - - char := CHAR_INFO{ - UnicodeChar: ' ', - Attributes: h.attributes, - } - - if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { - return err - } - return nil -} - -func (h *windowsAnsiEventHandler) deleteCharacters(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - return h.scrollLine(param, info.CursorPosition, info) -} - -func (h *windowsAnsiEventHandler) insertCharacters(param int) error { - return h.deleteCharacters(-param) -} - -// scrollLine scrolls a line horizontally starting at the provided position by a number of columns. -func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error { - // Copy from and clip to the scroll region (full buffer width) - scrollRect := SMALL_RECT{ - Top: position.Y, - Bottom: position.Y, - Left: position.X, - Right: info.Size.X - 1, - } - - // Origin to which area should be copied - destOrigin := COORD{ - X: position.X - int16(columns), - Y: position.Y, - } - - char := CHAR_INFO{ - UnicodeChar: ' ', - Attributes: h.attributes, - } - - if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { - return err - } - return nil -} diff --git a/src/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/src/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go deleted file mode 100644 index afa7635d7..000000000 --- a/src/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build windows - -package winterm - -// AddInRange increments a value by the passed quantity while ensuring the values -// always remain within the supplied min / max range. 
-func addInRange(n int16, increment int16, min int16, max int16) int16 { - return ensureInRange(n+increment, min, max) -} diff --git a/src/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/src/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go deleted file mode 100644 index 2d40fb75a..000000000 --- a/src/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go +++ /dev/null @@ -1,743 +0,0 @@ -// +build windows - -package winterm - -import ( - "bytes" - "log" - "os" - "strconv" - - "github.com/Azure/go-ansiterm" -) - -type windowsAnsiEventHandler struct { - fd uintptr - file *os.File - infoReset *CONSOLE_SCREEN_BUFFER_INFO - sr scrollRegion - buffer bytes.Buffer - attributes uint16 - inverted bool - wrapNext bool - drewMarginByte bool - originMode bool - marginByte byte - curInfo *CONSOLE_SCREEN_BUFFER_INFO - curPos COORD - logf func(string, ...interface{}) -} - -type Option func(*windowsAnsiEventHandler) - -func WithLogf(f func(string, ...interface{})) Option { - return func(w *windowsAnsiEventHandler) { - w.logf = f - } -} - -func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler { - infoReset, err := GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil - } - - h := &windowsAnsiEventHandler{ - fd: fd, - file: file, - infoReset: infoReset, - attributes: infoReset.Attributes, - } - for _, o := range opts { - o(h) - } - - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ := os.Create("winEventHandler.log") - logger := log.New(logFile, "", log.LstdFlags) - if h.logf != nil { - l := h.logf - h.logf = func(s string, v ...interface{}) { - l(s, v...) - logger.Printf(s, v...) - } - } else { - h.logf = logger.Printf - } - } - - if h.logf == nil { - h.logf = func(string, ...interface{}) {} - } - - return h -} - -type scrollRegion struct { - top int16 - bottom int16 -} - -// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the -// current cursor position and scroll region settings, in which case it returns -// true. If no special handling is necessary, then it does nothing and returns -// false. -// -// In the false case, the caller should ensure that a carriage return -// and line feed are inserted or that the text is otherwise wrapped. -func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { - if h.wrapNext { - if err := h.Flush(); err != nil { - return false, err - } - h.clearWrap() - } - pos, info, err := h.getCurrentInfo() - if err != nil { - return false, err - } - sr := h.effectiveSr(info.Window) - if pos.Y == sr.bottom { - // Scrolling is necessary. Let Windows automatically scroll if the scrolling region - // is the full window. - if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom { - if includeCR { - pos.X = 0 - h.updatePos(pos) - } - return false, nil - } - - // A custom scroll region is active. Scroll the window manually to simulate - // the LF. - if err := h.Flush(); err != nil { - return false, err - } - h.logf("Simulating LF inside scroll region") - if err := h.scrollUp(1); err != nil { - return false, err - } - if includeCR { - pos.X = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return false, err - } - } - return true, nil - - } else if pos.Y < info.Window.Bottom { - // Let Windows handle the LF. - pos.Y++ - if includeCR { - pos.X = 0 - } - h.updatePos(pos) - return false, nil - } else { - // The cursor is at the bottom of the screen but outside the scroll - // region. Skip the LF. 
- h.logf("Simulating LF outside scroll region") - if includeCR { - if err := h.Flush(); err != nil { - return false, err - } - pos.X = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return false, err - } - } - return true, nil - } -} - -// executeLF executes a LF without a CR. -func (h *windowsAnsiEventHandler) executeLF() error { - handled, err := h.simulateLF(false) - if err != nil { - return err - } - if !handled { - // Windows LF will reset the cursor column position. Write the LF - // and restore the cursor position. - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) - if pos.X != 0 { - if err := h.Flush(); err != nil { - return err - } - h.logf("Resetting cursor position for LF without CR") - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - } - } - return nil -} - -func (h *windowsAnsiEventHandler) Print(b byte) error { - if h.wrapNext { - h.buffer.WriteByte(h.marginByte) - h.clearWrap() - if _, err := h.simulateLF(true); err != nil { - return err - } - } - pos, info, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X == info.Size.X-1 { - h.wrapNext = true - h.marginByte = b - } else { - pos.X++ - h.updatePos(pos) - h.buffer.WriteByte(b) - } - return nil -} - -func (h *windowsAnsiEventHandler) Execute(b byte) error { - switch b { - case ansiterm.ANSI_TAB: - h.logf("Execute(TAB)") - // Move to the next tab stop, but preserve auto-wrap if already set. - if !h.wrapNext { - pos, info, err := h.getCurrentInfo() - if err != nil { - return err - } - pos.X = (pos.X + 8) - pos.X%8 - if pos.X >= info.Size.X { - pos.X = info.Size.X - 1 - } - if err := h.Flush(); err != nil { - return err - } - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - } - return nil - - case ansiterm.ANSI_BEL: - h.buffer.WriteByte(ansiterm.ANSI_BEL) - return nil - - case ansiterm.ANSI_BACKSPACE: - if h.wrapNext { - if err := h.Flush(); err != nil { - return err - } - h.clearWrap() - } - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X > 0 { - pos.X-- - h.updatePos(pos) - h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE) - } - return nil - - case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED: - // Treat as true LF. - return h.executeLF() - - case ansiterm.ANSI_LINE_FEED: - // Simulate a CR and LF for now since there is no way in go-ansiterm - // to tell if the LF should include CR (and more things break when it's - // missing than when it's incorrectly added). 
- handled, err := h.simulateLF(true) - if handled || err != nil { - return err - } - return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) - - case ansiterm.ANSI_CARRIAGE_RETURN: - if h.wrapNext { - if err := h.Flush(); err != nil { - return err - } - h.clearWrap() - } - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X != 0 { - pos.X = 0 - h.updatePos(pos) - h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN) - } - return nil - - default: - return nil - } -} - -func (h *windowsAnsiEventHandler) CUU(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUU: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorVertical(-param) -} - -func (h *windowsAnsiEventHandler) CUD(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUD: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorVertical(param) -} - -func (h *windowsAnsiEventHandler) CUF(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUF: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorHorizontal(param) -} - -func (h *windowsAnsiEventHandler) CUB(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUB: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorHorizontal(-param) -} - -func (h *windowsAnsiEventHandler) CNL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CNL: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorLine(param) -} - -func (h *windowsAnsiEventHandler) CPL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CPL: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorLine(-param) -} - -func (h *windowsAnsiEventHandler) CHA(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CHA: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorColumn(param) -} - -func (h *windowsAnsiEventHandler) VPA(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("VPA: [[%d]]", param) - h.clearWrap() - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - window := h.getCursorWindow(info) - position := info.CursorPosition - position.Y = window.Top + int16(param) - 1 - return h.setCursorPosition(position, window) -} - -func (h *windowsAnsiEventHandler) CUP(row int, col int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUP: [[%d %d]]", row, col) - h.clearWrap() - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - window := h.getCursorWindow(info) - position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1} - return h.setCursorPosition(position, window) -} - -func (h *windowsAnsiEventHandler) HVP(row int, col int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("HVP: [[%d %d]]", row, col) - h.clearWrap() - return h.CUP(row, col) -} - -func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) - h.clearWrap() - return nil -} - -func (h *windowsAnsiEventHandler) DECOM(enable bool) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)}) - h.clearWrap() - h.originMode = enable - return h.CUP(1, 1) -} - -func (h 
*windowsAnsiEventHandler) DECCOLM(use132 bool) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) - h.clearWrap() - if err := h.ED(2); err != nil { - return err - } - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - targetWidth := int16(80) - if use132 { - targetWidth = 132 - } - if info.Size.X < targetWidth { - if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { - h.logf("set buffer failed: %v", err) - return err - } - } - window := info.Window - window.Left = 0 - window.Right = targetWidth - 1 - if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { - h.logf("set window failed: %v", err) - return err - } - if info.Size.X > targetWidth { - if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { - h.logf("set buffer failed: %v", err) - return err - } - } - return SetConsoleCursorPosition(h.fd, COORD{0, 0}) -} - -func (h *windowsAnsiEventHandler) ED(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("ED: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - - // [J -- Erases from the cursor to the end of the screen, including the cursor position. - // [1J -- Erases from the beginning of the screen to the cursor, including the cursor position. - // [2J -- Erases the complete display. The cursor does not move. - // Notes: - // -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - var start COORD - var end COORD - - switch param { - case 0: - start = info.CursorPosition - end = COORD{info.Size.X - 1, info.Size.Y - 1} - - case 1: - start = COORD{0, 0} - end = info.CursorPosition - - case 2: - start = COORD{0, 0} - end = COORD{info.Size.X - 1, info.Size.Y - 1} - } - - err = h.clearRange(h.attributes, start, end) - if err != nil { - return err - } - - // If the whole buffer was cleared, move the window to the top while preserving - // the window-relative cursor position. - if param == 2 { - pos := info.CursorPosition - window := info.Window - pos.Y -= window.Top - window.Bottom -= window.Top - window.Top = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { - return err - } - } - - return nil -} - -func (h *windowsAnsiEventHandler) EL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("EL: [%v]", strconv.Itoa(param)) - h.clearWrap() - - // [K -- Erases from the cursor to the end of the line, including the cursor position. - // [1K -- Erases from the beginning of the line to the cursor, including the cursor position. - // [2K -- Erases the complete line. 
- - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - var start COORD - var end COORD - - switch param { - case 0: - start = info.CursorPosition - end = COORD{info.Size.X, info.CursorPosition.Y} - - case 1: - start = COORD{0, info.CursorPosition.Y} - end = info.CursorPosition - - case 2: - start = COORD{0, info.CursorPosition.Y} - end = COORD{info.Size.X, info.CursorPosition.Y} - } - - err = h.clearRange(h.attributes, start, end) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) IL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("IL: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.insertLines(param) -} - -func (h *windowsAnsiEventHandler) DL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DL: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.deleteLines(param) -} - -func (h *windowsAnsiEventHandler) ICH(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("ICH: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.insertCharacters(param) -} - -func (h *windowsAnsiEventHandler) DCH(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DCH: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.deleteCharacters(param) -} - -func (h *windowsAnsiEventHandler) SGR(params []int) error { - if err := h.Flush(); err != nil { - return err - } - strings := []string{} - for _, v := range params { - strings = append(strings, strconv.Itoa(v)) - } - - h.logf("SGR: [%v]", strings) - - if len(params) <= 0 { - h.attributes = h.infoReset.Attributes - h.inverted = false - } else { - for _, attr := range params { - - if attr == ansiterm.ANSI_SGR_RESET { - h.attributes = h.infoReset.Attributes - h.inverted = false - continue - } - - h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr)) - } - } - - attributes := h.attributes - if h.inverted { - attributes = invertAttributes(attributes) - } - err := SetConsoleTextAttribute(h.fd, attributes) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) SU(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("SU: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.scrollUp(param) -} - -func (h *windowsAnsiEventHandler) SD(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("SD: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.scrollDown(param) -} - -func (h *windowsAnsiEventHandler) DA(params []string) error { - h.logf("DA: [%v]", params) - // DA cannot be implemented because it must send data on the VT100 input stream, - // which is not available to go-ansiterm. - return nil -} - -func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DECSTBM: [%d, %d]", top, bottom) - - // Windows is 0 indexed, Linux is 1 indexed - h.sr.top = int16(top - 1) - h.sr.bottom = int16(bottom - 1) - - // This command also moves the cursor to the origin. 
- h.clearWrap() - return h.CUP(1, 1) -} - -func (h *windowsAnsiEventHandler) RI() error { - if err := h.Flush(); err != nil { - return err - } - h.logf("RI: []") - h.clearWrap() - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - sr := h.effectiveSr(info.Window) - if info.CursorPosition.Y == sr.top { - return h.scrollDown(1) - } - - return h.moveCursorVertical(-1) -} - -func (h *windowsAnsiEventHandler) IND() error { - h.logf("IND: []") - return h.executeLF() -} - -func (h *windowsAnsiEventHandler) Flush() error { - h.curInfo = nil - if h.buffer.Len() > 0 { - h.logf("Flush: [%s]", h.buffer.Bytes()) - if _, err := h.buffer.WriteTo(h.file); err != nil { - return err - } - } - - if h.wrapNext && !h.drewMarginByte { - h.logf("Flush: drawing margin byte '%c'", h.marginByte) - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}} - size := COORD{1, 1} - position := COORD{0, 0} - region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y} - if err := WriteConsoleOutput(h.fd, charInfo, size, position, ®ion); err != nil { - return err - } - h.drewMarginByte = true - } - return nil -} - -// cacheConsoleInfo ensures that the current console screen information has been queried -// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos. -func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) { - if h.curInfo == nil { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return COORD{}, nil, err - } - h.curInfo = info - h.curPos = info.CursorPosition - } - return h.curPos, h.curInfo, nil -} - -func (h *windowsAnsiEventHandler) updatePos(pos COORD) { - if h.curInfo == nil { - panic("failed to call getCurrentInfo before calling updatePos") - } - h.curPos = pos -} - -// clearWrap clears the state where the cursor is in the margin -// waiting for the next character before wrapping the line. This must -// be done before most operations that act on the cursor. -func (h *windowsAnsiEventHandler) clearWrap() { - h.wrapNext = false - h.drewMarginByte = false -} diff --git a/src/vendor/github.com/beego/beego/v2/client/cache/README.md b/src/vendor/github.com/beego/beego/v2/client/cache/README.md deleted file mode 100644 index df1ea0957..000000000 --- a/src/vendor/github.com/beego/beego/v2/client/cache/README.md +++ /dev/null @@ -1,54 +0,0 @@ -## cache - -cache is a Go cache manager. It can use many cache adapters. The repo is inspired by `database/sql` . - -## How to install? - - go get github.com/beego/beego/v2/client/cache - -## What adapters are supported? - -As of now this cache support memory, Memcache and Redis. - -## How to use it? - -First you must import it - - import ( - "github.com/beego/beego/v2/client/cache" - ) - -Then init a Cache (example with memory adapter) - - bm, err := cache.NewCache("memory", `{"interval":60}`) - -Use it like this: - - bm.Put("astaxie", 1, 10 * time.Second) - bm.Get("astaxie") - bm.IsExist("astaxie") - bm.Delete("astaxie") - -## Memory adapter - -Configure memory adapter like this: - - {"interval":60} - -interval means the gc time. The cache will check at each time interval, whether item has expired. - -## Memcache adapter - -Memcache adapter use the [gomemcache](http://github.com/bradfitz/gomemcache) client. 
- -Configure like this: - - {"conn":"127.0.0.1:11211"} - -## Redis adapter - -Redis adapter use the [redigo](http://github.com/gomodule/redigo) client. - -Configure like this: - - {"conn":":6039"} diff --git a/src/vendor/github.com/beego/beego/v2/client/cache/cache.go b/src/vendor/github.com/beego/beego/v2/client/cache/cache.go deleted file mode 100644 index 1eafccdc1..000000000 --- a/src/vendor/github.com/beego/beego/v2/client/cache/cache.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package cache provide a Cache interface and some implement engine -// Usage: -// -// import( -// "github.com/beego/beego/v2/client/cache" -// ) -// -// bm, err := cache.NewCache("memory", `{"interval":60}`) -// -// Use it like this: -// -// bm.Put("astaxie", 1, 10 * time.Second) -// bm.Get("astaxie") -// bm.IsExist("astaxie") -// bm.Delete("astaxie") -// -// more docs http://beego.vip/docs/module/cache.md -package cache - -import ( - "context" - "time" - - "github.com/beego/beego/v2/core/berror" -) - -// Cache interface contains all behaviors for cache adapter. -// usage: -// cache.Register("file",cache.NewFileCache) // this operation is run in init method of file.go. -// c,err := cache.NewCache("file","{....}") -// c.Put("key",value, 3600 * time.Second) -// v := c.Get("key") -// -// c.Incr("counter") // now is 1 -// c.Incr("counter") // now is 2 -// count := c.Get("counter").(int) -type Cache interface { - // Get a cached value by key. - Get(ctx context.Context, key string) (interface{}, error) - // GetMulti is a batch version of Get. - GetMulti(ctx context.Context, keys []string) ([]interface{}, error) - // Put Set a cached value with key and expire time. - Put(ctx context.Context, key string, val interface{}, timeout time.Duration) error - // Delete cached value by key. - // Should not return error if key not found - Delete(ctx context.Context, key string) error - // Incr Increment a cached int value by key, as a counter. - Incr(ctx context.Context, key string) error - // Decr Decrement a cached int value by key, as a counter. - Decr(ctx context.Context, key string) error - // IsExist Check if a cached value exists or not. - // if key is expired, return (false, nil) - IsExist(ctx context.Context, key string) (bool, error) - // ClearAll Clear all cache. - ClearAll(ctx context.Context) error - // StartAndGC Start gc routine based on config string settings. - StartAndGC(config string) error -} - -// Instance is a function create a new Cache Instance -type Instance func() Cache - -var adapters = make(map[string]Instance) - -// Register makes a cache adapter available by the adapter name. -// If Register is called twice with the same name or if driver is nil, -// it panics. 
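The package comment and Cache interface in the deleted cache.go above describe the adapter workflow: an adapter registers itself under a name, NewCache builds it from a JSON config string, and all operations take a context. A minimal sketch under exactly those signatures (it assumes the in-package memory adapter, shown elsewhere in this patch, registers itself as "memory"):

    // Illustrative use of the Cache API from the deleted beego cache.go; not code
    // added by the patch.
    package main

    import (
        "context"
        "fmt"
        "time"

        "github.com/beego/beego/v2/client/cache"
    )

    func main() {
        // The config is a JSON string; "interval" is the GC period in seconds,
        // per the README above.
        bm, err := cache.NewCache("memory", `{"interval":60}`)
        if err != nil {
            panic(err)
        }

        ctx := context.Background()
        if err := bm.Put(ctx, "astaxie", 1, 10*time.Second); err != nil {
            panic(err)
        }

        v, err := bm.Get(ctx, "astaxie")
        fmt.Println(v, err) // 1 <nil>

        ok, _ := bm.IsExist(ctx, "astaxie")
        fmt.Println(ok) // true

        _ = bm.Delete(ctx, "astaxie")
    }

NewCache calls StartAndGC for you, so a failed config surfaces as the returned error rather than at first use.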
-func Register(name string, adapter Instance) { - if adapter == nil { - panic(berror.Error(NilCacheAdapter, "cache: Register adapter is nil").Error()) - } - if _, ok := adapters[name]; ok { - panic("cache: Register called twice for adapter " + name) - } - adapters[name] = adapter -} - -// NewCache creates a new cache driver by adapter name and config string. -// config: must be in JSON format such as {"interval":360}. -// Starts gc automatically. -func NewCache(adapterName, config string) (adapter Cache, err error) { - instanceFunc, ok := adapters[adapterName] - if !ok { - err = berror.Errorf(UnknownAdapter, "cache: unknown adapter name %s (forgot to import?)", adapterName) - return - } - adapter = instanceFunc() - err = adapter.StartAndGC(config) - if err != nil { - adapter = nil - } - return -} diff --git a/src/vendor/github.com/beego/beego/v2/client/cache/calc_utils.go b/src/vendor/github.com/beego/beego/v2/client/cache/calc_utils.go deleted file mode 100644 index f8b7f24ac..000000000 --- a/src/vendor/github.com/beego/beego/v2/client/cache/calc_utils.go +++ /dev/null @@ -1,95 +0,0 @@ -package cache - -import ( - "math" - - "github.com/beego/beego/v2/core/berror" -) - -var ( - ErrIncrementOverflow = berror.Error(IncrementOverflow, "this incr invocation will overflow.") - ErrDecrementOverflow = berror.Error(DecrementOverflow, "this decr invocation will overflow.") - ErrNotIntegerType = berror.Error(NotIntegerType, "item val is not (u)int (u)int32 (u)int64") -) - -const ( - MinUint32 uint32 = 0 - MinUint64 uint64 = 0 -) - -func incr(originVal interface{}) (interface{}, error) { - switch val := originVal.(type) { - case int: - tmp := val + 1 - if val > 0 && tmp < 0 { - return nil, ErrIncrementOverflow - } - return tmp, nil - case int32: - if val == math.MaxInt32 { - return nil, ErrIncrementOverflow - } - return val + 1, nil - case int64: - if val == math.MaxInt64 { - return nil, ErrIncrementOverflow - } - return val + 1, nil - case uint: - tmp := val + 1 - if tmp < val { - return nil, ErrIncrementOverflow - } - return tmp, nil - case uint32: - if val == math.MaxUint32 { - return nil, ErrIncrementOverflow - } - return val + 1, nil - case uint64: - if val == math.MaxUint64 { - return nil, ErrIncrementOverflow - } - return val + 1, nil - default: - return nil, ErrNotIntegerType - } -} - -func decr(originVal interface{}) (interface{}, error) { - switch val := originVal.(type) { - case int: - tmp := val - 1 - if val < 0 && tmp > 0 { - return nil, ErrDecrementOverflow - } - return tmp, nil - case int32: - if val == math.MinInt32 { - return nil, ErrDecrementOverflow - } - return val - 1, nil - case int64: - if val == math.MinInt64 { - return nil, ErrDecrementOverflow - } - return val - 1, nil - case uint: - if val == 0 { - return nil, ErrDecrementOverflow - } - return val - 1, nil - case uint32: - if val == MinUint32 { - return nil, ErrDecrementOverflow - } - return val - 1, nil - case uint64: - if val == MinUint64 { - return nil, ErrDecrementOverflow - } - return val - 1, nil - default: - return nil, ErrNotIntegerType - } -} diff --git a/src/vendor/github.com/beego/beego/v2/client/cache/conv.go b/src/vendor/github.com/beego/beego/v2/client/cache/conv.go deleted file mode 100644 index 158f7f413..000000000 --- a/src/vendor/github.com/beego/beego/v2/client/cache/conv.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cache - -import ( - "fmt" - "strconv" -) - -// GetString converts interface to string. -func GetString(v interface{}) string { - switch result := v.(type) { - case string: - return result - case []byte: - return string(result) - default: - if v != nil { - return fmt.Sprint(result) - } - } - return "" -} - -// GetInt converts interface to int. -func GetInt(v interface{}) int { - switch result := v.(type) { - case int: - return result - case int32: - return int(result) - case int64: - return int(result) - default: - if d := GetString(v); d != "" { - value, _ := strconv.Atoi(d) - return value - } - } - return 0 -} - -// GetInt64 converts interface to int64. -func GetInt64(v interface{}) int64 { - switch result := v.(type) { - case int: - return int64(result) - case int32: - return int64(result) - case int64: - return result - default: - - if d := GetString(v); d != "" { - value, _ := strconv.ParseInt(d, 10, 64) - return value - } - } - return 0 -} - -// GetFloat64 converts interface to float64. -func GetFloat64(v interface{}) float64 { - switch result := v.(type) { - case float64: - return result - default: - if d := GetString(v); d != "" { - value, _ := strconv.ParseFloat(d, 64) - return value - } - } - return 0 -} - -// GetBool converts interface to bool. -func GetBool(v interface{}) bool { - switch result := v.(type) { - case bool: - return result - default: - if d := GetString(v); d != "" { - value, _ := strconv.ParseBool(d) - return value - } - } - return false -} diff --git a/src/vendor/github.com/beego/beego/v2/client/cache/error_code.go b/src/vendor/github.com/beego/beego/v2/client/cache/error_code.go deleted file mode 100644 index 5611f065f..000000000 --- a/src/vendor/github.com/beego/beego/v2/client/cache/error_code.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2021 beego -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cache - -import ( - "github.com/beego/beego/v2/core/berror" -) - -var NilCacheAdapter = berror.DefineCode(4002001, moduleName, "NilCacheAdapter", ` -It means that you register cache adapter by pass nil. -A cache adapter is an instance of Cache interface. -`) - -var DuplicateAdapter = berror.DefineCode(4002002, moduleName, "DuplicateAdapter", ` -You register two adapter with same name. In beego cache module, one name one adapter. -Once you got this error, please check the error stack, search adapter -`) - -var UnknownAdapter = berror.DefineCode(4002003, moduleName, "UnknownAdapter", ` -Unknown adapter, do you forget to register the adapter? -You must register adapter before use it. 
For example, if you want to use redis implementation, -you must import the cache/redis package. -`) - -var IncrementOverflow = berror.DefineCode(4002004, moduleName, "IncrementOverflow", ` -The increment operation will overflow. -`) - -var DecrementOverflow = berror.DefineCode(4002005, moduleName, "DecrementOverflow", ` -The decrement operation will overflow. -`) - -var NotIntegerType = berror.DefineCode(4002006, moduleName, "NotIntegerType", ` -The type of value is not (u)int (u)int32 (u)int64. -When you want to call Incr or Decr function of Cache API, you must confirm that the value's type is one of (u)int (u)int32 (u)int64. -`) - -var InvalidFileCacheDirectoryLevelCfg = berror.DefineCode(4002007, moduleName, "InvalidFileCacheDirectoryLevelCfg", ` -You pass invalid DirectoryLevel parameter when you try to StartAndGC file cache instance. -This parameter must be a integer, and please check your input. -`) - -var InvalidFileCacheEmbedExpiryCfg = berror.DefineCode(4002008, moduleName, "InvalidFileCacheEmbedExpiryCfg", ` -You pass invalid EmbedExpiry parameter when you try to StartAndGC file cache instance. -This parameter must be a integer, and please check your input. -`) - -var CreateFileCacheDirFailed = berror.DefineCode(4002009, moduleName, "CreateFileCacheDirFailed", ` -Beego failed to create file cache directory. There are two cases: -1. You pass invalid CachePath parameter. Please check your input. -2. Beego doesn't have the permission to create this directory. Please check your file mode. -`) - -var InvalidFileCachePath = berror.DefineCode(4002010, moduleName, "InvalidFilePath", ` -The file path of FileCache is invalid. Please correct the config. -`) - -var ReadFileCacheContentFailed = berror.DefineCode(4002011, moduleName, "ReadFileCacheContentFailed", ` -Usually you won't got this error. It means that Beego cannot read the data from the file. -You need to check whether the file exist. Sometimes it may be deleted by other processes. -If the file exists, please check the permission that Beego is able to read data from the file. -`) - -var InvalidGobEncodedData = berror.DefineCode(4002012, moduleName, "InvalidEncodedData", ` -The data is invalid. When you try to decode the invalid data, you got this error. -Please confirm that the data is encoded by GOB correctly. -`) - -var GobEncodeDataFailed = berror.DefineCode(4002013, moduleName, "GobEncodeDataFailed", ` -Beego could not encode the data to GOB byte array. In general, the data type is invalid. -For example, GOB doesn't support function type. -Basic types, string, structure, structure pointer are supported. -`) - -var KeyExpired = berror.DefineCode(4002014, moduleName, "KeyExpired", ` -Cache key is expired. -You should notice that, a key is expired and then it may be deleted by GC goroutine. -So when you query a key which may be expired, you may got this code, or KeyNotExist. -`) - -var KeyNotExist = berror.DefineCode(4002015, moduleName, "KeyNotExist", ` -Key not found. -`) - -var MultiGetFailed = berror.DefineCode(4002016, moduleName, "MultiGetFailed", ` -Get multiple keys failed. Please check the detail msg to find out the root cause. -`) - -var InvalidMemoryCacheCfg = berror.DefineCode(4002017, moduleName, "InvalidMemoryCacheCfg", ` -The config is invalid. Please check your input. It must be a json string. -`) - -var InvalidMemCacheCfg = berror.DefineCode(4002018, moduleName, "InvalidMemCacheCfg", ` -The config is invalid. Please check your input, it must be json string and contains "conn" field. 
-`) - -var InvalidMemCacheValue = berror.DefineCode(4002019, moduleName, "InvalidMemCacheValue", ` -The value must be string or byte[], please check your input. -`) - -var InvalidRedisCacheCfg = berror.DefineCode(4002020, moduleName, "InvalidRedisCacheCfg", ` -The config must be json string, and has "conn" field. -`) - -var InvalidSsdbCacheCfg = berror.DefineCode(4002021, moduleName, "InvalidSsdbCacheCfg", ` -The config must be json string, and has "conn" field. The value of "conn" field should be "host:port". -"port" must be a valid integer. -`) - -var InvalidSsdbCacheValue = berror.DefineCode(4002022, moduleName, "InvalidSsdbCacheValue", ` -SSDB cache only accept string value. Please check your input. -`) - -var DeleteFileCacheItemFailed = berror.DefineCode(5002001, moduleName, "DeleteFileCacheItemFailed", ` -Beego try to delete file cache item failed. -Please check whether Beego generated file correctly. -And then confirm whether this file is already deleted by other processes or other people. -`) - -var MemCacheCurdFailed = berror.DefineCode(5002002, moduleName, "MemCacheError", ` -When you want to get, put, delete key-value from remote memcache servers, you may get error: -1. You pass invalid servers address, so Beego could not connect to remote server; -2. The servers address is correct, but there is some net issue. Typically there is some firewalls between application and memcache server; -3. Key is invalid. The key's length should be less than 250 and must not contains special characters; -4. The response from memcache server is invalid; -`) - -var RedisCacheCurdFailed = berror.DefineCode(5002003, moduleName, "RedisCacheCurdFailed", ` -When Beego uses client to send request to redis server, it failed. -1. The server addresses is invalid; -2. Network issue, firewall issue or network is unstable; -3. Client failed to manage connection. In extreme cases, Beego's redis client didn't maintain connections correctly, for example, Beego try to send request via closed connection; -4. The request are huge and redis server spent too much time to process it, and client is timeout; - -In general, if you always got this error whatever you do, in most cases, it was caused by network issue. -You could check your network state, and confirm that firewall rules are correct. -`) - -var InvalidConnection = berror.DefineCode(5002004, moduleName, "InvalidConnection", ` -The connection is invalid. Please check your connection info, network, firewall. -You could simply uses ping, telnet or write some simple tests to test network. -`) - -var DialFailed = berror.DefineCode(5002005, moduleName, "DialFailed", ` -When Beego try to dial to remote servers, it failed. Please check your connection info and network state, server state. -`) - -var SsdbCacheCurdFailed = berror.DefineCode(5002006, moduleName, "SsdbCacheCurdFailed", ` -When you try to use SSDB cache, it failed. There are many cases: -1. servers unavailable; -2. network issue, including network unstable, firewall; -3. connection issue; -4. request are huge and servers spent too much time to process it, got timeout; -`) - -var SsdbBadResponse = berror.DefineCode(5002007, moduleName, "SsdbBadResponse", ` -The reponse from SSDB server is invalid. -Usually it indicates something wrong on server side. 
-`) - -var ( - ErrKeyExpired = berror.Error(KeyExpired, "the key is expired") - ErrKeyNotExist = berror.Error(KeyNotExist, "the key isn't exist") -) diff --git a/src/vendor/github.com/beego/beego/v2/client/cache/file.go b/src/vendor/github.com/beego/beego/v2/client/cache/file.go deleted file mode 100644 index ae2bc7cf6..000000000 --- a/src/vendor/github.com/beego/beego/v2/client/cache/file.go +++ /dev/null @@ -1,338 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cache - -import ( - "bytes" - "context" - "crypto/md5" - "encoding/gob" - "encoding/hex" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/beego/beego/v2/core/berror" -) - -// FileCacheItem is basic unit of file cache adapter which -// contains data and expire time. -type FileCacheItem struct { - Data interface{} - Lastaccess time.Time - Expired time.Time -} - -// FileCache Config -var ( - FileCachePath = "cache" // cache directory - FileCacheFileSuffix = ".bin" // cache file suffix - FileCacheDirectoryLevel = 2 // cache file deep level if auto generated cache files. - FileCacheEmbedExpiry time.Duration // cache expire time, default is no expire forever. -) - -// FileCache is cache adapter for file storage. -type FileCache struct { - CachePath string - FileSuffix string - DirectoryLevel int - EmbedExpiry int -} - -// NewFileCache creates a new file cache with no config. -// The level and expiry need to be set in the method StartAndGC as config string. -func NewFileCache() Cache { - // return &FileCache{CachePath:FileCachePath, FileSuffix:FileCacheFileSuffix} - return &FileCache{} -} - -// StartAndGC starts gc for file cache. 
-// config must be in the format {CachePath:"/cache","FileSuffix":".bin","DirectoryLevel":"2","EmbedExpiry":"0"} -func (fc *FileCache) StartAndGC(config string) error { - cfg := make(map[string]string) - err := json.Unmarshal([]byte(config), &cfg) - if err != nil { - return err - } - - const cpKey = "CachePath" - const fsKey = "FileSuffix" - const dlKey = "DirectoryLevel" - const eeKey = "EmbedExpiry" - - if _, ok := cfg[cpKey]; !ok { - cfg[cpKey] = FileCachePath - } - - if _, ok := cfg[fsKey]; !ok { - cfg[fsKey] = FileCacheFileSuffix - } - - if _, ok := cfg[dlKey]; !ok { - cfg[dlKey] = strconv.Itoa(FileCacheDirectoryLevel) - } - - if _, ok := cfg[eeKey]; !ok { - cfg[eeKey] = strconv.FormatInt(int64(FileCacheEmbedExpiry.Seconds()), 10) - } - fc.CachePath = cfg[cpKey] - fc.FileSuffix = cfg[fsKey] - fc.DirectoryLevel, err = strconv.Atoi(cfg[dlKey]) - if err != nil { - return berror.Wrapf(err, InvalidFileCacheDirectoryLevelCfg, - "invalid directory level config, please check your input, it must be integer: %s", cfg[dlKey]) - } - fc.EmbedExpiry, err = strconv.Atoi(cfg[eeKey]) - if err != nil { - return berror.Wrapf(err, InvalidFileCacheEmbedExpiryCfg, - "invalid embed expiry config, please check your input, it must be integer: %s", cfg[eeKey]) - } - return fc.Init() -} - -// Init makes new a dir for file cache if it does not already exist -func (fc *FileCache) Init() error { - ok, err := exists(fc.CachePath) - if err != nil || ok { - return err - } - err = os.MkdirAll(fc.CachePath, os.ModePerm) - if err != nil { - return berror.Wrapf(err, CreateFileCacheDirFailed, - "could not create directory, please check the config [%s] and file mode.", fc.CachePath) - } - return nil -} - -// getCachedFilename returns an md5 encoded file name. -func (fc *FileCache) getCacheFileName(key string) (string, error) { - m := md5.New() - _, _ = io.WriteString(m, key) - keyMd5 := hex.EncodeToString(m.Sum(nil)) - cachePath := fc.CachePath - switch fc.DirectoryLevel { - case 2: - cachePath = filepath.Join(cachePath, keyMd5[0:2], keyMd5[2:4]) - case 1: - cachePath = filepath.Join(cachePath, keyMd5[0:2]) - } - ok, err := exists(cachePath) - if err != nil { - return "", err - } - if !ok { - err = os.MkdirAll(cachePath, os.ModePerm) - if err != nil { - return "", berror.Wrapf(err, CreateFileCacheDirFailed, - "could not create the directory: %s", cachePath) - } - } - - return filepath.Join(cachePath, fmt.Sprintf("%s%s", keyMd5, fc.FileSuffix)), nil -} - -// Get value from file cache. -// if nonexistent or expired return an empty string. -func (fc *FileCache) Get(ctx context.Context, key string) (interface{}, error) { - fn, err := fc.getCacheFileName(key) - if err != nil { - return nil, err - } - fileData, err := FileGetContents(fn) - if err != nil { - return nil, err - } - - var to FileCacheItem - err = GobDecode(fileData, &to) - if err != nil { - return nil, err - } - - if to.Expired.Before(time.Now()) { - return nil, ErrKeyExpired - } - return to.Data, nil -} - -// GetMulti gets values from file cache. -// if nonexistent or expired return an empty string. 
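For reference, a sketch of constructing the file adapter through the same registry, using the JSON config keys parsed by StartAndGC above; the cache path and expiry values are illustrative only:

package main

import (
	"context"
	"time"

	"github.com/beego/beego/v2/client/cache"
)

func main() {
	// The file adapter registers itself under "file"; DirectoryLevel and
	// EmbedExpiry are passed as strings and converted with strconv.Atoi.
	fc, err := cache.NewCache("file",
		`{"CachePath":"/tmp/beego-cache","FileSuffix":".bin","DirectoryLevel":"2","EmbedExpiry":"120"}`)
	if err != nil {
		panic(err)
	}
	_ = fc.Put(context.Background(), "greeting", "hello", 30*time.Second)
}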
-func (fc *FileCache) GetMulti(ctx context.Context, keys []string) ([]interface{}, error) { - rc := make([]interface{}, len(keys)) - keysErr := make([]string, 0) - - for i, ki := range keys { - val, err := fc.Get(context.Background(), ki) - if err != nil { - keysErr = append(keysErr, fmt.Sprintf("key [%s] error: %s", ki, err.Error())) - continue - } - rc[i] = val - } - - if len(keysErr) == 0 { - return rc, nil - } - return rc, berror.Error(MultiGetFailed, strings.Join(keysErr, "; ")) -} - -// Put value into file cache. -// timeout: how long this file should be kept in ms -// if timeout equals fc.EmbedExpiry(default is 0), cache this item forever. -func (fc *FileCache) Put(ctx context.Context, key string, val interface{}, timeout time.Duration) error { - gob.Register(val) - - item := FileCacheItem{Data: val} - if timeout == time.Duration(fc.EmbedExpiry) { - item.Expired = time.Now().Add((86400 * 365 * 10) * time.Second) // ten years - } else { - item.Expired = time.Now().Add(timeout) - } - item.Lastaccess = time.Now() - data, err := GobEncode(item) - if err != nil { - return err - } - - fn, err := fc.getCacheFileName(key) - if err != nil { - return err - } - return FilePutContents(fn, data) -} - -// Delete file cache value. -func (fc *FileCache) Delete(ctx context.Context, key string) error { - filename, err := fc.getCacheFileName(key) - if err != nil { - return err - } - if ok, _ := exists(filename); ok { - err = os.Remove(filename) - if err != nil { - return berror.Wrapf(err, DeleteFileCacheItemFailed, - "can not delete this file cache key-value, key is %s and file name is %s", key, filename) - } - } - return nil -} - -// Incr increases cached int value. -// fc value is saved forever unless deleted. -func (fc *FileCache) Incr(ctx context.Context, key string) error { - data, err := fc.Get(context.Background(), key) - if err != nil { - return err - } - - val, err := incr(data) - if err != nil { - return err - } - - return fc.Put(context.Background(), key, val, time.Duration(fc.EmbedExpiry)) -} - -// Decr decreases cached int value. -func (fc *FileCache) Decr(ctx context.Context, key string) error { - data, err := fc.Get(context.Background(), key) - if err != nil { - return err - } - - val, err := decr(data) - if err != nil { - return err - } - - return fc.Put(context.Background(), key, val, time.Duration(fc.EmbedExpiry)) -} - -// IsExist checks if value exists. -func (fc *FileCache) IsExist(ctx context.Context, key string) (bool, error) { - fn, err := fc.getCacheFileName(key) - if err != nil { - return false, err - } - return exists(fn) -} - -// ClearAll cleans cached files (not implemented) -func (fc *FileCache) ClearAll(context.Context) error { - return nil -} - -// Check if a file exists -func exists(path string) (bool, error) { - _, err := os.Stat(path) - if err == nil { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, berror.Wrapf(err, InvalidFileCachePath, "file cache path is invalid: %s", path) -} - -// FileGetContents Reads bytes from a file. -// if non-existent, create this file. -func FileGetContents(filename string) ([]byte, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - return nil, berror.Wrapf(err, ReadFileCacheContentFailed, - "could not read the data from the file: %s, "+ - "please confirm that file exist and Beego has the permission to read the content.", filename) - } - return data, nil -} - -// FilePutContents puts bytes into a file. -// if non-existent, create this file. 
-func FilePutContents(filename string, content []byte) error { - return ioutil.WriteFile(filename, content, os.ModePerm) -} - -// GobEncode Gob encodes a file cache item. -func GobEncode(data interface{}) ([]byte, error) { - buf := bytes.NewBuffer(nil) - enc := gob.NewEncoder(buf) - err := enc.Encode(data) - if err != nil { - return nil, berror.Wrap(err, GobEncodeDataFailed, "could not encode this data") - } - return buf.Bytes(), nil -} - -// GobDecode Gob decodes a file cache item. -func GobDecode(data []byte, to *FileCacheItem) error { - buf := bytes.NewBuffer(data) - dec := gob.NewDecoder(buf) - err := dec.Decode(&to) - if err != nil { - return berror.Wrap(err, InvalidGobEncodedData, - "could not decode this data to FileCacheItem. Make sure that the data is encoded by GOB.") - } - return nil -} - -func init() { - Register("file", NewFileCache) -} diff --git a/src/vendor/github.com/beego/beego/v2/client/cache/memory.go b/src/vendor/github.com/beego/beego/v2/client/cache/memory.go deleted file mode 100644 index c1d1a2e50..000000000 --- a/src/vendor/github.com/beego/beego/v2/client/cache/memory.go +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cache - -import ( - "context" - "encoding/json" - "fmt" - "strings" - "sync" - "time" - - "github.com/beego/beego/v2/core/berror" -) - -// DefaultEvery sets a timer for how often to recycle the expired cache items in memory (in seconds) -var DefaultEvery = 60 // 1 minute - -// MemoryItem stores memory cache item. -type MemoryItem struct { - val interface{} - createdTime time.Time - lifespan time.Duration -} - -func (mi *MemoryItem) isExpire() bool { - // 0 means forever - if mi.lifespan == 0 { - return false - } - return time.Since(mi.createdTime) > mi.lifespan -} - -// MemoryCache is a memory cache adapter. -// Contains a RW locker for safe map storage. -type MemoryCache struct { - sync.RWMutex - dur time.Duration - items map[string]*MemoryItem - Every int // run an expiration check Every clock time -} - -// NewMemoryCache returns a new MemoryCache. -func NewMemoryCache() Cache { - cache := MemoryCache{items: make(map[string]*MemoryItem)} - return &cache -} - -// Get returns cache from memory. -// If non-existent or expired, return nil. -func (bc *MemoryCache) Get(ctx context.Context, key string) (interface{}, error) { - bc.RLock() - defer bc.RUnlock() - if itm, ok := - bc.items[key]; ok { - if itm.isExpire() { - return nil, ErrKeyExpired - } - return itm.val, nil - } - return nil, ErrKeyNotExist -} - -// GetMulti gets caches from memory. -// If non-existent or expired, return nil. 
-func (bc *MemoryCache) GetMulti(ctx context.Context, keys []string) ([]interface{}, error) { - rc := make([]interface{}, len(keys)) - keysErr := make([]string, 0) - - for i, ki := range keys { - val, err := bc.Get(context.Background(), ki) - if err != nil { - keysErr = append(keysErr, fmt.Sprintf("key [%s] error: %s", ki, err.Error())) - continue - } - rc[i] = val - } - - if len(keysErr) == 0 { - return rc, nil - } - return rc, berror.Error(MultiGetFailed, strings.Join(keysErr, "; ")) -} - -// Put puts cache into memory. -// If lifespan is 0, it will never overwrite this value unless restarted -func (bc *MemoryCache) Put(ctx context.Context, key string, val interface{}, timeout time.Duration) error { - bc.Lock() - defer bc.Unlock() - bc.items[key] = &MemoryItem{ - val: val, - createdTime: time.Now(), - lifespan: timeout, - } - return nil -} - -// Delete cache in memory. -// If the key is not found, it will not return error -func (bc *MemoryCache) Delete(ctx context.Context, key string) error { - bc.Lock() - defer bc.Unlock() - delete(bc.items, key) - return nil -} - -// Incr increases cache counter in memory. -// Supports int,int32,int64,uint,uint32,uint64. -func (bc *MemoryCache) Incr(ctx context.Context, key string) error { - bc.Lock() - defer bc.Unlock() - itm, ok := bc.items[key] - if !ok { - return ErrKeyNotExist - } - - val, err := incr(itm.val) - if err != nil { - return err - } - itm.val = val - return nil -} - -// Decr decreases counter in memory. -func (bc *MemoryCache) Decr(ctx context.Context, key string) error { - bc.Lock() - defer bc.Unlock() - itm, ok := bc.items[key] - if !ok { - return ErrKeyNotExist - } - - val, err := decr(itm.val) - if err != nil { - return err - } - itm.val = val - return nil -} - -// IsExist checks if cache exists in memory. -func (bc *MemoryCache) IsExist(ctx context.Context, key string) (bool, error) { - bc.RLock() - defer bc.RUnlock() - if v, ok := bc.items[key]; ok { - return !v.isExpire(), nil - } - return false, nil -} - -// ClearAll deletes all cache in memory. -func (bc *MemoryCache) ClearAll(context.Context) error { - bc.Lock() - defer bc.Unlock() - bc.items = make(map[string]*MemoryItem) - return nil -} - -// StartAndGC starts memory cache. Checks expiration in every clock time. -func (bc *MemoryCache) StartAndGC(config string) error { - var cf map[string]int - if err := json.Unmarshal([]byte(config), &cf); err != nil { - return berror.Wrapf(err, InvalidMemoryCacheCfg, "invalid config, please check your input: %s", config) - } - if _, ok := cf["interval"]; !ok { - cf = make(map[string]int) - cf["interval"] = DefaultEvery - } - dur := time.Duration(cf["interval"]) * time.Second - bc.Every = cf["interval"] - bc.dur = dur - go bc.vacuum() - return nil -} - -// check expiration. -func (bc *MemoryCache) vacuum() { - bc.RLock() - every := bc.Every - bc.RUnlock() - - if every < 1 { - return - } - for { - <-time.After(bc.dur) - bc.RLock() - if bc.items == nil { - bc.RUnlock() - return - } - bc.RUnlock() - if keys := bc.expiredKeys(); len(keys) != 0 { - bc.clearItems(keys) - } - } -} - -// expiredKeys returns keys list which are expired. 
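The Incr/Decr methods above back the counter-style usage shown in the package documentation; a small sketch, assuming the memory adapter:

package main

import (
	"context"
	"fmt"

	"github.com/beego/beego/v2/client/cache"
)

func main() {
	c, err := cache.NewCache("memory", `{"interval":60}`)
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	_ = c.Put(ctx, "counter", 0, 0) // lifespan 0 means the item never expires
	_ = c.Incr(ctx, "counter")      // 1
	_ = c.Incr(ctx, "counter")      // 2
	_ = c.Decr(ctx, "counter")      // 1

	v, _ := c.Get(ctx, "counter")
	fmt.Println(v.(int)) // 1
}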
-func (bc *MemoryCache) expiredKeys() (keys []string) { - bc.RLock() - defer bc.RUnlock() - for key, itm := range bc.items { - if itm.isExpire() { - keys = append(keys, key) - } - } - return -} - -// ClearItems removes all items who's key is in keys -func (bc *MemoryCache) clearItems(keys []string) { - bc.Lock() - defer bc.Unlock() - for _, key := range keys { - delete(bc.items, key) - } -} - -func init() { - Register("memory", NewMemoryCache) -} diff --git a/src/vendor/github.com/beego/beego/v2/client/cache/module.go b/src/vendor/github.com/beego/beego/v2/client/cache/module.go deleted file mode 100644 index 5a4e499ea..000000000 --- a/src/vendor/github.com/beego/beego/v2/client/cache/module.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2021 beego -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cache - -const moduleName = "cache" diff --git a/src/vendor/github.com/beego/beego/v2/client/cache/random_expired_cache.go b/src/vendor/github.com/beego/beego/v2/client/cache/random_expired_cache.go deleted file mode 100644 index 262001124..000000000 --- a/src/vendor/github.com/beego/beego/v2/client/cache/random_expired_cache.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package cache - -import ( - "context" - "math/rand" - "sync/atomic" - "time" -) - -// RandomExpireCacheOption implement genreate random time offset expired option -type RandomExpireCacheOption func(*RandomExpireCache) - -// WithOffsetFunc returns a RandomExpireCacheOption that configures the offset function -func WithOffsetFunc(fn func() time.Duration) RandomExpireCacheOption { - return func(cache *RandomExpireCache) { - cache.offset = fn - } -} - -// RandomExpireCache prevent cache batch invalidation -// Cache random time offset expired -type RandomExpireCache struct { - Cache - offset func() time.Duration -} - -// Put random time offset expired -func (rec *RandomExpireCache) Put(ctx context.Context, key string, val interface{}, timeout time.Duration) error { - timeout += rec.offset() - return rec.Cache.Put(ctx, key, val, timeout) -} - -// NewRandomExpireCache return random expire cache struct -func NewRandomExpireCache(adapter Cache, opts ...RandomExpireCacheOption) Cache { - rec := RandomExpireCache{ - Cache: adapter, - offset: defaultExpiredFunc(), - } - for _, fn := range opts { - fn(&rec) - } - return &rec -} - -// defaultExpiredFunc return a func that used to generate random time offset (range: [3s,8s)) expired -func defaultExpiredFunc() func() time.Duration { - const size = 5 - var randTimes [size]time.Duration - for i := range randTimes { - randTimes[i] = time.Duration(i+3) * time.Second - } - // shuffle values - for i := range randTimes { - n := rand.Intn(size) - randTimes[i], randTimes[n] = randTimes[n], randTimes[i] - } - var i uint64 - return func() time.Duration { - return randTimes[atomic.AddUint64(&i, 1)%size] - } -} diff --git a/src/vendor/github.com/beego/beego/v2/client/cache/redis/redis.go b/src/vendor/github.com/beego/beego/v2/client/cache/redis/redis.go deleted file mode 100644 index 9462bcd8e..000000000 --- a/src/vendor/github.com/beego/beego/v2/client/cache/redis/redis.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2014 beego Author. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package redis for cache provider -// -// depend on github.com/gomodule/redigo/redis -// -// go install github.com/gomodule/redigo/redis -// -// Usage: -// import( -// _ "github.com/beego/beego/v2/client/cache/redis" -// "github.com/beego/beego/v2/client/cache" -// ) -// -// bm, err := cache.NewCache("redis", `{"conn":"127.0.0.1:11211"}`) -// -// more docs http://beego.vip/docs/module/cache.md -package redis - -import ( - "context" - "encoding/json" - "fmt" - "strconv" - "strings" - "time" - - "github.com/gomodule/redigo/redis" - - "github.com/beego/beego/v2/client/cache" - "github.com/beego/beego/v2/core/berror" -) - -// DefaultKey defines the collection name of redis for the cache adapter. -var DefaultKey = "beecacheRedis" - -// Cache is Redis cache adapter. 
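A sketch of wiring up the redis adapter described here, assuming a local redis server at 127.0.0.1:6379; note the adapter lives in its own package, so it must be imported for its init() to register it:

package main

import (
	"context"
	"time"

	"github.com/beego/beego/v2/client/cache"
	_ "github.com/beego/beego/v2/client/cache/redis" // registers the "redis" adapter
)

func main() {
	// Config keys follow StartAndGC: "conn" is required, the rest default
	// (key=beecacheRedis, dbNum=0, maxIdle=3, timeout=180s).
	rc, err := cache.NewCache("redis", `{"conn":"127.0.0.1:6379","dbNum":"0"}`)
	if err != nil {
		panic(err)
	}
	_ = rc.Put(context.Background(), "greeting", "hello", 30*time.Second)
}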
-type Cache struct { - p *redis.Pool // redis connection pool - conninfo string - dbNum int - key string - password string - maxIdle int - - // Timeout value (less than the redis server's timeout value) - timeout time.Duration -} - -// NewRedisCache creates a new redis cache with default collection name. -func NewRedisCache() cache.Cache { - return &Cache{key: DefaultKey} -} - -// Execute the redis commands. args[0] must be the key name -func (rc *Cache) do(commandName string, args ...interface{}) (interface{}, error) { - args[0] = rc.associate(args[0]) - c := rc.p.Get() - defer func() { - _ = c.Close() - }() - - reply, err := c.Do(commandName, args...) - if err != nil { - return nil, berror.Wrapf(err, cache.RedisCacheCurdFailed, - "could not execute this command: %s", commandName) - } - - return reply, nil -} - -// associate with config key. -func (rc *Cache) associate(originKey interface{}) string { - return fmt.Sprintf("%s:%s", rc.key, originKey) -} - -// Get cache from redis. -func (rc *Cache) Get(ctx context.Context, key string) (interface{}, error) { - if v, err := rc.do("GET", key); err == nil { - return v, nil - } else { - return nil, err - } -} - -// GetMulti gets cache from redis. -func (rc *Cache) GetMulti(ctx context.Context, keys []string) ([]interface{}, error) { - c := rc.p.Get() - defer func() { - _ = c.Close() - }() - var args []interface{} - for _, key := range keys { - args = append(args, rc.associate(key)) - } - return redis.Values(c.Do("MGET", args...)) -} - -// Put puts cache into redis. -func (rc *Cache) Put(ctx context.Context, key string, val interface{}, timeout time.Duration) error { - _, err := rc.do("SETEX", key, int64(timeout/time.Second), val) - return err -} - -// Delete deletes a key's cache in redis. -func (rc *Cache) Delete(ctx context.Context, key string) error { - _, err := rc.do("DEL", key) - return err -} - -// IsExist checks cache's existence in redis. -func (rc *Cache) IsExist(ctx context.Context, key string) (bool, error) { - v, err := redis.Bool(rc.do("EXISTS", key)) - if err != nil { - return false, err - } - return v, nil -} - -// Incr increases a key's counter in redis. -func (rc *Cache) Incr(ctx context.Context, key string) error { - _, err := redis.Bool(rc.do("INCRBY", key, 1)) - return err -} - -// Decr decreases a key's counter in redis. -func (rc *Cache) Decr(ctx context.Context, key string) error { - _, err := redis.Bool(rc.do("INCRBY", key, -1)) - return err -} - -// ClearAll deletes all cache in the redis collection -// Be careful about this method, because it scans all keys and the delete them one by one -func (rc *Cache) ClearAll(context.Context) error { - cachedKeys, err := rc.Scan(rc.key + ":*") - if err != nil { - return err - } - c := rc.p.Get() - defer func() { - _ = c.Close() - }() - for _, str := range cachedKeys { - if _, err = c.Do("DEL", str); err != nil { - return err - } - } - return err -} - -// Scan scans all keys matching a given pattern. -func (rc *Cache) Scan(pattern string) (keys []string, err error) { - c := rc.p.Get() - defer func() { - _ = c.Close() - }() - var ( - cursor uint64 = 0 // start - result []interface{} - list []string - ) - for { - result, err = redis.Values(c.Do("SCAN", cursor, "MATCH", pattern, "COUNT", 1024)) - if err != nil { - return - } - list, err = redis.Strings(result[1], nil) - if err != nil { - return - } - keys = append(keys, list...) 
- cursor, err = redis.Uint64(result[0], nil) - if err != nil { - return - } - if cursor == 0 { // over - return - } - } -} - -// StartAndGC starts the redis cache adapter. -// config: must be in this format {"key":"collection key","conn":"connection info","dbNum":"0"} -// Cached items in redis are stored forever, no garbage collection happens -func (rc *Cache) StartAndGC(config string) error { - var cf map[string]string - err := json.Unmarshal([]byte(config), &cf) - if err != nil { - return berror.Wrapf(err, cache.InvalidRedisCacheCfg, "could not unmarshal the config: %s", config) - } - - if _, ok := cf["key"]; !ok { - cf["key"] = DefaultKey - } - if _, ok := cf["conn"]; !ok { - return berror.Wrapf(err, cache.InvalidRedisCacheCfg, "config missing conn field: %s", config) - } - - // Format redis://@: - cf["conn"] = strings.Replace(cf["conn"], "redis://", "", 1) - if i := strings.Index(cf["conn"], "@"); i > -1 { - cf["password"] = cf["conn"][0:i] - cf["conn"] = cf["conn"][i+1:] - } - - if _, ok := cf["dbNum"]; !ok { - cf["dbNum"] = "0" - } - if _, ok := cf["password"]; !ok { - cf["password"] = "" - } - if _, ok := cf["maxIdle"]; !ok { - cf["maxIdle"] = "3" - } - if _, ok := cf["timeout"]; !ok { - cf["timeout"] = "180s" - } - rc.key = cf["key"] - rc.conninfo = cf["conn"] - rc.dbNum, _ = strconv.Atoi(cf["dbNum"]) - rc.password = cf["password"] - rc.maxIdle, _ = strconv.Atoi(cf["maxIdle"]) - - if v, err := time.ParseDuration(cf["timeout"]); err == nil { - rc.timeout = v - } else { - rc.timeout = 180 * time.Second - } - - rc.connectInit() - - c := rc.p.Get() - defer func() { - _ = c.Close() - }() - - // test connection - if err = c.Err(); err != nil { - return berror.Wrapf(err, cache.InvalidConnection, - "can not connect to remote redis server, please check the connection info and network state: %s", config) - } - return nil -} - -// connect to redis. -func (rc *Cache) connectInit() { - dialFunc := func() (c redis.Conn, err error) { - c, err = redis.Dial("tcp", rc.conninfo) - if err != nil { - return nil, berror.Wrapf(err, cache.DialFailed, - "could not dial to remote server: %s ", rc.conninfo) - } - - if rc.password != "" { - if _, err = c.Do("AUTH", rc.password); err != nil { - _ = c.Close() - return nil, err - } - } - - _, selecterr := c.Do("SELECT", rc.dbNum) - if selecterr != nil { - _ = c.Close() - return nil, selecterr - } - return - } - // initialize a new pool - rc.p = &redis.Pool{ - MaxIdle: rc.maxIdle, - IdleTimeout: rc.timeout, - Dial: dialFunc, - } -} - -func init() { - cache.Register("redis", NewRedisCache) -} diff --git a/src/vendor/github.com/beego/beego/v2/core/berror/codes.go b/src/vendor/github.com/beego/beego/v2/core/berror/codes.go deleted file mode 100644 index b6712a847..000000000 --- a/src/vendor/github.com/beego/beego/v2/core/berror/codes.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2020 beego -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package berror - -import ( - "fmt" - "sync" -) - -// A Code is an unsigned 32-bit error code as defined in the beego spec. -type Code interface { - Code() uint32 - Module() string - Desc() string - Name() string -} - -var defaultCodeRegistry = &codeRegistry{ - codes: make(map[uint32]*codeDefinition, 127), -} - -// DefineCode defining a new Code -// Before defining a new code, please read Beego specification. -// desc could be markdown doc -func DefineCode(code uint32, module string, name string, desc string) Code { - res := &codeDefinition{ - code: code, - module: module, - desc: desc, - } - defaultCodeRegistry.lock.Lock() - defer defaultCodeRegistry.lock.Unlock() - - if _, ok := defaultCodeRegistry.codes[code]; ok { - panic(fmt.Sprintf("duplicate code, code %d has been registered", code)) - } - defaultCodeRegistry.codes[code] = res - return res -} - -type codeRegistry struct { - lock sync.RWMutex - codes map[uint32]*codeDefinition -} - -func (cr *codeRegistry) Get(code uint32) (Code, bool) { - cr.lock.RLock() - defer cr.lock.RUnlock() - c, ok := cr.codes[code] - return c, ok -} - -type codeDefinition struct { - code uint32 - module string - desc string - name string -} - -func (c *codeDefinition) Name() string { - return c.name -} - -func (c *codeDefinition) Code() uint32 { - return c.code -} - -func (c *codeDefinition) Module() string { - return c.module -} - -func (c *codeDefinition) Desc() string { - return c.desc -} diff --git a/src/vendor/github.com/beego/beego/v2/core/berror/error.go b/src/vendor/github.com/beego/beego/v2/core/berror/error.go deleted file mode 100644 index c40009c68..000000000 --- a/src/vendor/github.com/beego/beego/v2/core/berror/error.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2020 beego -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package berror - -import ( - "fmt" - "strconv" - "strings" - - "github.com/pkg/errors" -) - -// code, msg -const errFmt = "ERROR-%d, %s" - -// Error returns an error representing c and msg. If c is OK, returns nil. -func Error(c Code, msg string) error { - return fmt.Errorf(errFmt, c.Code(), msg) -} - -// Errorf returns error -func Errorf(c Code, format string, a ...interface{}) error { - return Error(c, fmt.Sprintf(format, a...)) -} - -func Wrap(err error, c Code, msg string) error { - if err == nil { - return nil - } - return errors.Wrap(err, fmt.Sprintf(errFmt, c.Code(), msg)) -} - -func Wrapf(err error, c Code, format string, a ...interface{}) error { - return Wrap(err, c, fmt.Sprintf(format, a...)) -} - -// FromError is very simple. 
It just parse error msg and check whether code has been register -// if code not being register, return unknown -// if err.Error() is not valid beego error code, return unknown -func FromError(err error) (Code, bool) { - msg := err.Error() - codeSeg := strings.SplitN(msg, ",", 2) - if strings.HasPrefix(codeSeg[0], "ERROR-") { - codeStr := strings.SplitN(codeSeg[0], "-", 2) - if len(codeStr) < 2 { - return Unknown, false - } - codeInt, e := strconv.ParseUint(codeStr[1], 10, 32) - if e != nil { - return Unknown, false - } - if code, ok := defaultCodeRegistry.Get(uint32(codeInt)); ok { - return code, true - } - } - return Unknown, false -} diff --git a/src/vendor/github.com/beego/beego/v2/core/berror/pre_define_code.go b/src/vendor/github.com/beego/beego/v2/core/berror/pre_define_code.go deleted file mode 100644 index ff8eb46b6..000000000 --- a/src/vendor/github.com/beego/beego/v2/core/berror/pre_define_code.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2021 beego -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package berror - -import ( - "fmt" -) - -// pre define code - -// Unknown indicates got some error which is not defined -var Unknown = DefineCode(5000001, "error", "Unknown", fmt.Sprintf(` -Unknown error code. Usually you will see this code in three cases: -1. You forget to define Code or function DefineCode not being executed; -2. This is not Beego's error but you call FromError(); -3. Beego got unexpected error and don't know how to handle it, and then return Unknown error - -A common practice to DefineCode looks like: -%s - -In this way, you may forget to import this package, and got Unknown error. - -Sometimes, you believe you got Beego error, but actually you don't, and then you call FromError(err) - -`, goCodeBlock(` -import your_package - -func init() { - DefineCode(5100100, "your_module", "detail") - // ... -} -`))) - -func goCodeBlock(code string) string { - return codeBlock("go", code) -} - -func codeBlock(lan string, code string) string { - return fmt.Sprintf("```%s\n%s\n```", lan, code) -} diff --git a/src/vendor/github.com/containerd/containerd/LICENSE b/src/vendor/github.com/containerd/containerd/LICENSE deleted file mode 100644 index 584149b6e..000000000 --- a/src/vendor/github.com/containerd/containerd/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright The containerd Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/src/vendor/github.com/containerd/containerd/NOTICE b/src/vendor/github.com/containerd/containerd/NOTICE deleted file mode 100644 index 8915f0277..000000000 --- a/src/vendor/github.com/containerd/containerd/NOTICE +++ /dev/null @@ -1,16 +0,0 @@ -Docker -Copyright 2012-2015 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. 
- -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/src/vendor/github.com/containerd/containerd/archive/compression/compression.go b/src/vendor/github.com/containerd/containerd/archive/compression/compression.go deleted file mode 100644 index ceceb21f5..000000000 --- a/src/vendor/github.com/containerd/containerd/archive/compression/compression.go +++ /dev/null @@ -1,323 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package compression - -import ( - "bufio" - "bytes" - "compress/gzip" - "context" - "encoding/binary" - "fmt" - "io" - "os" - "strconv" - "sync" - - "github.com/containerd/containerd/log" - "github.com/klauspost/compress/zstd" - exec "golang.org/x/sys/execabs" -) - -type ( - // Compression is the state represents if compressed or not. - Compression int -) - -const ( - // Uncompressed represents the uncompressed. - Uncompressed Compression = iota - // Gzip is gzip compression algorithm. - Gzip - // Zstd is zstd compression algorithm. - Zstd -) - -const disablePigzEnv = "CONTAINERD_DISABLE_PIGZ" - -var ( - initPigz sync.Once - unpigzPath string -) - -var ( - bufioReader32KPool = &sync.Pool{ - New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) }, - } -) - -// DecompressReadCloser include the stream after decompress and the compress method detected. 
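For context on how this compression API is typically consumed, a short sketch using DecompressStream (which relies on DetectCompression internally); the input file name is only an example:

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/containerd/containerd/archive/compression"
)

func main() {
	f, err := os.Open("layer.tar.gz") // any gzip, zstd, or uncompressed stream works
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// DecompressStream peeks at the magic bytes, picks gzip, zstd, or passthrough,
	// and reports the detected method via GetCompression.
	rc, err := compression.DecompressStream(f)
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	fmt.Println("gzip input:", rc.GetCompression() == compression.Gzip)
	n, _ := io.Copy(io.Discard, rc)
	fmt.Println("decompressed bytes:", n)
}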
-type DecompressReadCloser interface { - io.ReadCloser - // GetCompression returns the compress method which is used before decompressing - GetCompression() Compression -} - -type readCloserWrapper struct { - io.Reader - compression Compression - closer func() error -} - -func (r *readCloserWrapper) Close() error { - if r.closer != nil { - return r.closer() - } - return nil -} - -func (r *readCloserWrapper) GetCompression() Compression { - return r.compression -} - -type writeCloserWrapper struct { - io.Writer - closer func() error -} - -func (w *writeCloserWrapper) Close() error { - if w.closer != nil { - w.closer() - } - return nil -} - -type bufferedReader struct { - buf *bufio.Reader -} - -func newBufferedReader(r io.Reader) *bufferedReader { - buf := bufioReader32KPool.Get().(*bufio.Reader) - buf.Reset(r) - return &bufferedReader{buf} -} - -func (r *bufferedReader) Read(p []byte) (n int, err error) { - if r.buf == nil { - return 0, io.EOF - } - n, err = r.buf.Read(p) - if err == io.EOF { - r.buf.Reset(nil) - bufioReader32KPool.Put(r.buf) - r.buf = nil - } - return -} - -func (r *bufferedReader) Peek(n int) ([]byte, error) { - if r.buf == nil { - return nil, io.EOF - } - return r.buf.Peek(n) -} - -const ( - zstdMagicSkippableStart = 0x184D2A50 - zstdMagicSkippableMask = 0xFFFFFFF0 -) - -var ( - gzipMagic = []byte{0x1F, 0x8B, 0x08} - zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} -) - -type matcher = func([]byte) bool - -func magicNumberMatcher(m []byte) matcher { - return func(source []byte) bool { - return bytes.HasPrefix(source, m) - } -} - -// zstdMatcher detects zstd compression algorithm. -// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. -// See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. -func zstdMatcher() matcher { - return func(source []byte) bool { - if bytes.HasPrefix(source, zstdMagic) { - // Zstandard frame - return true - } - // skippable frame - if len(source) < 8 { - return false - } - // magic number from 0x184D2A50 to 0x184D2A5F. - if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart { - return true - } - return false - } -} - -// DetectCompression detects the compression algorithm of the source. -func DetectCompression(source []byte) Compression { - for compression, fn := range map[Compression]matcher{ - Gzip: magicNumberMatcher(gzipMagic), - Zstd: zstdMatcher(), - } { - if fn(source) { - return compression - } - } - return Uncompressed -} - -// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. -func DecompressStream(archive io.Reader) (DecompressReadCloser, error) { - buf := newBufferedReader(archive) - bs, err := buf.Peek(10) - if err != nil && err != io.EOF { - // Note: we'll ignore any io.EOF error because there are some odd - // cases where the layer.tar file will be empty (zero bytes) and - // that results in an io.EOF from the Peek() call. So, in those - // cases we'll just treat it as a non-compressed stream and - // that means just create an empty layer. 
- // See Issue docker/docker#18170 - return nil, err - } - - switch compression := DetectCompression(bs); compression { - case Uncompressed: - return &readCloserWrapper{ - Reader: buf, - compression: compression, - }, nil - case Gzip: - ctx, cancel := context.WithCancel(context.Background()) - gzReader, err := gzipDecompress(ctx, buf) - if err != nil { - cancel() - return nil, err - } - - return &readCloserWrapper{ - Reader: gzReader, - compression: compression, - closer: func() error { - cancel() - return gzReader.Close() - }, - }, nil - case Zstd: - zstdReader, err := zstd.NewReader(buf) - if err != nil { - return nil, err - } - return &readCloserWrapper{ - Reader: zstdReader, - compression: compression, - closer: func() error { - zstdReader.Close() - return nil - }, - }, nil - - default: - return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) - } -} - -// CompressStream compresses the dest with specified compression algorithm. -func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { - switch compression { - case Uncompressed: - return &writeCloserWrapper{dest, nil}, nil - case Gzip: - return gzip.NewWriter(dest), nil - case Zstd: - return zstd.NewWriter(dest) - default: - return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) - } -} - -// Extension returns the extension of a file that uses the specified compression algorithm. -func (compression *Compression) Extension() string { - switch *compression { - case Gzip: - return "gz" - case Zstd: - return "zst" - } - return "" -} - -func gzipDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { - initPigz.Do(func() { - if unpigzPath = detectPigz(); unpigzPath != "" { - log.L.Debug("using pigz for decompression") - } - }) - - if unpigzPath == "" { - return gzip.NewReader(buf) - } - - return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) -} - -func cmdStream(cmd *exec.Cmd, in io.Reader) (io.ReadCloser, error) { - reader, writer := io.Pipe() - - cmd.Stdin = in - cmd.Stdout = writer - - var errBuf bytes.Buffer - cmd.Stderr = &errBuf - - if err := cmd.Start(); err != nil { - return nil, err - } - - go func() { - if err := cmd.Wait(); err != nil { - writer.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) - } else { - writer.Close() - } - }() - - return reader, nil -} - -func detectPigz() string { - path, err := exec.LookPath("unpigz") - if err != nil { - log.L.WithError(err).Debug("unpigz not found, falling back to go gzip") - return "" - } - - // Check if pigz disabled via CONTAINERD_DISABLE_PIGZ env variable - value := os.Getenv(disablePigzEnv) - if value == "" { - return path - } - - disable, err := strconv.ParseBool(value) - if err != nil { - log.L.WithError(err).Warnf("could not parse %s: %s", disablePigzEnv, value) - return path - } - - if disable { - return "" - } - - return path -} diff --git a/src/vendor/github.com/containerd/containerd/content/adaptor.go b/src/vendor/github.com/containerd/containerd/content/adaptor.go deleted file mode 100644 index 88bad2610..000000000 --- a/src/vendor/github.com/containerd/containerd/content/adaptor.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
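The compression.go file removed above keys its format detection off the leading magic bytes of a stream (0x1F 0x8B 0x08 for gzip, 0x28 0xB5 0x2F 0xFD for a zstd frame, plus the skippable-frame magics 0x184D2A50 through 0x184D2A5F). A minimal stand-alone sketch of that detection idea, using only the standard library and the magic values shown in the deleted file; the helper names here are illustrative and not part of the vendored package:

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

// format mirrors the Compression constants in the vendored file.
type format int

const (
    uncompressed format = iota
    gzipFormat
    zstdFormat
)

var (
    gzipMagic = []byte{0x1f, 0x8b, 0x08}
    zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
)

// detect inspects the first bytes of a blob and reports its format,
// following the same magic-number checks as DetectCompression above.
func detect(header []byte) format {
    switch {
    case bytes.HasPrefix(header, gzipMagic):
        return gzipFormat
    case bytes.HasPrefix(header, zstdMagic):
        return zstdFormat
    case len(header) >= 8 && binary.LittleEndian.Uint32(header[:4])&0xFFFFFFF0 == 0x184D2A50:
        // zstd skippable frame: magic numbers 0x184D2A50 through 0x184D2A5F.
        return zstdFormat
    default:
        return uncompressed
    }
}

func main() {
    fmt.Println(detect([]byte{0x1f, 0x8b, 0x08, 0x00})) // 1 (gzip)
    fmt.Println(detect([]byte("plain tar data")))       // 0 (uncompressed)
}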
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package content - -import ( - "strings" - - "github.com/containerd/containerd/filters" -) - -// AdaptInfo returns `filters.Adaptor` that handles `content.Info`. -func AdaptInfo(info Info) filters.Adaptor { - return filters.AdapterFunc(func(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "digest": - return info.Digest.String(), true - case "size": - // TODO: support size based filtering - case "labels": - return checkMap(fieldpath[1:], info.Labels) - } - - return "", false - }) -} - -func checkMap(fieldpath []string, m map[string]string) (string, bool) { - if len(m) == 0 { - return "", false - } - - value, ok := m[strings.Join(fieldpath, ".")] - return value, ok -} diff --git a/src/vendor/github.com/containerd/containerd/content/content.go b/src/vendor/github.com/containerd/containerd/content/content.go deleted file mode 100644 index ff17a8417..000000000 --- a/src/vendor/github.com/containerd/containerd/content/content.go +++ /dev/null @@ -1,182 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package content - -import ( - "context" - "io" - "time" - - "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// ReaderAt extends the standard io.ReaderAt interface with reporting of Size and io.Closer -type ReaderAt interface { - io.ReaderAt - io.Closer - Size() int64 -} - -// Provider provides a reader interface for specific content -type Provider interface { - // ReaderAt only requires desc.Digest to be set. - // Other fields in the descriptor may be used internally for resolving - // the location of the actual data. - ReaderAt(ctx context.Context, desc ocispec.Descriptor) (ReaderAt, error) -} - -// Ingester writes content -type Ingester interface { - // Some implementations require WithRef to be included in opts. - Writer(ctx context.Context, opts ...WriterOpt) (Writer, error) -} - -// Info holds content specific information -// -// TODO(stevvooe): Consider a very different name for this struct. Info is way -// to general. It also reads very weird in certain context, like pluralization. -type Info struct { - Digest digest.Digest - Size int64 - CreatedAt time.Time - UpdatedAt time.Time - Labels map[string]string -} - -// Status of a content operation -type Status struct { - Ref string - Offset int64 - Total int64 - Expected digest.Digest - StartedAt time.Time - UpdatedAt time.Time -} - -// WalkFunc defines the callback for a blob walk. 
-type WalkFunc func(Info) error - -// Manager provides methods for inspecting, listing and removing content. -type Manager interface { - // Info will return metadata about content available in the content store. - // - // If the content is not present, ErrNotFound will be returned. - Info(ctx context.Context, dgst digest.Digest) (Info, error) - - // Update updates mutable information related to content. - // If one or more fieldpaths are provided, only those - // fields will be updated. - // Mutable fields: - // labels.* - Update(ctx context.Context, info Info, fieldpaths ...string) (Info, error) - - // Walk will call fn for each item in the content store which - // match the provided filters. If no filters are given all - // items will be walked. - Walk(ctx context.Context, fn WalkFunc, filters ...string) error - - // Delete removes the content from the store. - Delete(ctx context.Context, dgst digest.Digest) error -} - -// IngestManager provides methods for managing ingests. -type IngestManager interface { - // Status returns the status of the provided ref. - Status(ctx context.Context, ref string) (Status, error) - - // ListStatuses returns the status of any active ingestions whose ref match the - // provided regular expression. If empty, all active ingestions will be - // returned. - ListStatuses(ctx context.Context, filters ...string) ([]Status, error) - - // Abort completely cancels the ingest operation targeted by ref. - Abort(ctx context.Context, ref string) error -} - -// Writer handles the write of content into a content store -type Writer interface { - // Close closes the writer, if the writer has not been - // committed this allows resuming or aborting. - // Calling Close on a closed writer will not error. - io.WriteCloser - - // Digest may return empty digest or panics until committed. - Digest() digest.Digest - - // Commit commits the blob (but no roll-back is guaranteed on an error). - // size and expected can be zero-value when unknown. - // Commit always closes the writer, even on error. - // ErrAlreadyExists aborts the writer. - Commit(ctx context.Context, size int64, expected digest.Digest, opts ...Opt) error - - // Status returns the current state of write - Status() (Status, error) - - // Truncate updates the size of the target blob - Truncate(size int64) error -} - -// Store combines the methods of content-oriented interfaces into a set that -// are commonly provided by complete implementations. -type Store interface { - Manager - Provider - IngestManager - Ingester -} - -// Opt is used to alter the mutable properties of content -type Opt func(*Info) error - -// WithLabels allows labels to be set on content -func WithLabels(labels map[string]string) Opt { - return func(info *Info) error { - info.Labels = labels - return nil - } -} - -// WriterOpts is internally used by WriterOpt. -type WriterOpts struct { - Ref string - Desc ocispec.Descriptor -} - -// WriterOpt is used for passing options to Ingester.Writer. -type WriterOpt func(*WriterOpts) error - -// WithDescriptor specifies an OCI descriptor. -// Writer may optionally use the descriptor internally for resolving -// the location of the actual data. -// Write does not require any field of desc to be set. -// If the data size is unknown, desc.Size should be set to 0. -// Some implementations may also accept negative values as "unknown". -func WithDescriptor(desc ocispec.Descriptor) WriterOpt { - return func(opts *WriterOpts) error { - opts.Desc = desc - return nil - } -} - -// WithRef specifies a ref string. 
-func WithRef(ref string) WriterOpt { - return func(opts *WriterOpts) error { - opts.Ref = ref - return nil - } -} diff --git a/src/vendor/github.com/containerd/containerd/content/helpers.go b/src/vendor/github.com/containerd/containerd/content/helpers.go deleted file mode 100644 index 3ec1ffce0..000000000 --- a/src/vendor/github.com/containerd/containerd/content/helpers.go +++ /dev/null @@ -1,290 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package content - -import ( - "context" - "errors" - "fmt" - "io" - "math/rand" - "sync" - "time" - - "github.com/containerd/containerd/errdefs" - "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -var bufPool = sync.Pool{ - New: func() interface{} { - buffer := make([]byte, 1<<20) - return &buffer - }, -} - -// NewReader returns a io.Reader from a ReaderAt -func NewReader(ra ReaderAt) io.Reader { - rd := io.NewSectionReader(ra, 0, ra.Size()) - return rd -} - -// ReadBlob retrieves the entire contents of the blob from the provider. -// -// Avoid using this for large blobs, such as layers. -func ReadBlob(ctx context.Context, provider Provider, desc ocispec.Descriptor) ([]byte, error) { - ra, err := provider.ReaderAt(ctx, desc) - if err != nil { - return nil, err - } - defer ra.Close() - - p := make([]byte, ra.Size()) - - n, err := ra.ReadAt(p, 0) - if err == io.EOF { - if int64(n) != ra.Size() { - err = io.ErrUnexpectedEOF - } else { - err = nil - } - } - return p, err -} - -// WriteBlob writes data with the expected digest into the content store. If -// expected already exists, the method returns immediately and the reader will -// not be consumed. -// -// This is useful when the digest and size are known beforehand. -// -// Copy is buffered, so no need to wrap reader in buffered io. -func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, desc ocispec.Descriptor, opts ...Opt) error { - cw, err := OpenWriter(ctx, cs, WithRef(ref), WithDescriptor(desc)) - if err != nil { - if !errdefs.IsAlreadyExists(err) { - return fmt.Errorf("failed to open writer: %w", err) - } - - return nil // all ready present - } - defer cw.Close() - - return Copy(ctx, cw, r, desc.Size, desc.Digest, opts...) -} - -// OpenWriter opens a new writer for the given reference, retrying if the writer -// is locked until the reference is available or returns an error. -func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, error) { - var ( - cw Writer - err error - retry = 16 - ) - for { - cw, err = cs.Writer(ctx, opts...) - if err != nil { - if !errdefs.IsUnavailable(err) { - return nil, err - } - - // TODO: Check status to determine if the writer is active, - // continue waiting while active, otherwise return lock - // error or abort. 
Requires asserting for an ingest manager - - select { - case <-time.After(time.Millisecond * time.Duration(rand.Intn(retry))): - if retry < 2048 { - retry = retry << 1 - } - continue - case <-ctx.Done(): - // Propagate lock error - return nil, err - } - - } - break - } - - return cw, err -} - -// Copy copies data with the expected digest from the reader into the -// provided content store writer. This copy commits the writer. -// -// This is useful when the digest and size are known beforehand. When -// the size or digest is unknown, these values may be empty. -// -// Copy is buffered, so no need to wrap reader in buffered io. -func Copy(ctx context.Context, cw Writer, r io.Reader, size int64, expected digest.Digest, opts ...Opt) error { - ws, err := cw.Status() - if err != nil { - return fmt.Errorf("failed to get status: %w", err) - } - - if ws.Offset > 0 { - r, err = seekReader(r, ws.Offset, size) - if err != nil { - return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err) - } - } - - copied, err := copyWithBuffer(cw, r) - if err != nil { - return fmt.Errorf("failed to copy: %w", err) - } - if size != 0 && copied < size-ws.Offset { - // Short writes would return its own error, this indicates a read failure - return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF) - } - - if err := cw.Commit(ctx, size, expected, opts...); err != nil { - if !errdefs.IsAlreadyExists(err) { - return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err) - } - } - - return nil -} - -// CopyReaderAt copies to a writer from a given reader at for the given -// number of bytes. This copy does not commit the writer. -func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error { - ws, err := cw.Status() - if err != nil { - return err - } - - copied, err := copyWithBuffer(cw, io.NewSectionReader(ra, ws.Offset, n)) - if err != nil { - return fmt.Errorf("failed to copy: %w", err) - } - if copied < n { - // Short writes would return its own error, this indicates a read failure - return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF) - } - return nil -} - -// CopyReader copies to a writer from a given reader, returning -// the number of bytes copied. -// Note: if the writer has a non-zero offset, the total number -// of bytes read may be greater than those copied if the reader -// is not an io.Seeker. -// This copy does not commit the writer. -func CopyReader(cw Writer, r io.Reader) (int64, error) { - ws, err := cw.Status() - if err != nil { - return 0, fmt.Errorf("failed to get status: %w", err) - } - - if ws.Offset > 0 { - r, err = seekReader(r, ws.Offset, 0) - if err != nil { - return 0, fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err) - } - } - - return copyWithBuffer(cw, r) -} - -// seekReader attempts to seek the reader to the given offset, either by -// resolving `io.Seeker`, by detecting `io.ReaderAt`, or discarding -// up to the given offset. -func seekReader(r io.Reader, offset, size int64) (io.Reader, error) { - // attempt to resolve r as a seeker and setup the offset. - seeker, ok := r.(io.Seeker) - if ok { - nn, err := seeker.Seek(offset, io.SeekStart) - if nn != offset { - if err == nil { - err = fmt.Errorf("unexpected seek location without seek error") - } - return nil, fmt.Errorf("failed to seek to offset %v: %w", offset, err) - } - - if err != nil { - return nil, err - } - - return r, nil - } - - // ok, let's try io.ReaderAt! 
- readerAt, ok := r.(io.ReaderAt) - if ok && size > offset { - sr := io.NewSectionReader(readerAt, offset, size) - return sr, nil - } - - // well then, let's just discard up to the offset - n, err := copyWithBuffer(io.Discard, io.LimitReader(r, offset)) - if err != nil { - return nil, fmt.Errorf("failed to discard to offset: %w", err) - } - if n != offset { - return nil, errors.New("unable to discard to offset") - } - - return r, nil -} - -// copyWithBuffer is very similar to io.CopyBuffer https://golang.org/pkg/io/#CopyBuffer -// but instead of using Read to read from the src, we use ReadAtLeast to make sure we have -// a full buffer before we do a write operation to dst to reduce overheads associated -// with the write operations of small buffers. -func copyWithBuffer(dst io.Writer, src io.Reader) (written int64, err error) { - // If the reader has a WriteTo method, use it to do the copy. - // Avoids an allocation and a copy. - if wt, ok := src.(io.WriterTo); ok { - return wt.WriteTo(dst) - } - // Similarly, if the writer has a ReadFrom method, use it to do the copy. - if rt, ok := dst.(io.ReaderFrom); ok { - return rt.ReadFrom(src) - } - bufRef := bufPool.Get().(*[]byte) - defer bufPool.Put(bufRef) - buf := *bufRef - for { - nr, er := io.ReadAtLeast(src, buf, len(buf)) - if nr > 0 { - nw, ew := dst.Write(buf[0:nr]) - if nw > 0 { - written += int64(nw) - } - if ew != nil { - err = ew - break - } - if nr != nw { - err = io.ErrShortWrite - break - } - } - if er != nil { - // If an EOF happens after reading fewer than the requested bytes, - // ReadAtLeast returns ErrUnexpectedEOF. - if er != io.EOF && er != io.ErrUnexpectedEOF { - err = er - } - break - } - } - return -} diff --git a/src/vendor/github.com/containerd/containerd/content/local/locks.go b/src/vendor/github.com/containerd/containerd/content/local/locks.go deleted file mode 100644 index 1e59f39b3..000000000 --- a/src/vendor/github.com/containerd/containerd/content/local/locks.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "fmt" - "sync" - "time" - - "github.com/containerd/containerd/errdefs" -) - -// Handles locking references - -type lock struct { - since time.Time -} - -var ( - // locks lets us lock in process - locks = make(map[string]*lock) - locksMu sync.Mutex -) - -func tryLock(ref string) error { - locksMu.Lock() - defer locksMu.Unlock() - - if v, ok := locks[ref]; ok { - // Returning the duration may help developers distinguish dead locks (long duration) from - // lock contentions (short duration). 
- now := time.Now() - return fmt.Errorf( - "ref %s locked for %s (since %s): %w", ref, now.Sub(v.since), v.since, - errdefs.ErrUnavailable, - ) - } - - locks[ref] = &lock{time.Now()} - return nil -} - -func unlock(ref string) { - locksMu.Lock() - defer locksMu.Unlock() - - delete(locks, ref) -} diff --git a/src/vendor/github.com/containerd/containerd/content/local/readerat.go b/src/vendor/github.com/containerd/containerd/content/local/readerat.go deleted file mode 100644 index a83c171bb..000000000 --- a/src/vendor/github.com/containerd/containerd/content/local/readerat.go +++ /dev/null @@ -1,67 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "fmt" - "os" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" -) - -// readerat implements io.ReaderAt in a completely stateless manner by opening -// the referenced file for each call to ReadAt. -type sizeReaderAt struct { - size int64 - fp *os.File -} - -// OpenReader creates ReaderAt from a file -func OpenReader(p string) (content.ReaderAt, error) { - fi, err := os.Stat(p) - if err != nil { - if !os.IsNotExist(err) { - return nil, err - } - - return nil, fmt.Errorf("blob not found: %w", errdefs.ErrNotFound) - } - - fp, err := os.Open(p) - if err != nil { - if !os.IsNotExist(err) { - return nil, err - } - - return nil, fmt.Errorf("blob not found: %w", errdefs.ErrNotFound) - } - - return sizeReaderAt{size: fi.Size(), fp: fp}, nil -} - -func (ra sizeReaderAt) ReadAt(p []byte, offset int64) (int, error) { - return ra.fp.ReadAt(p, offset) -} - -func (ra sizeReaderAt) Size() int64 { - return ra.size -} - -func (ra sizeReaderAt) Close() error { - return ra.fp.Close() -} diff --git a/src/vendor/github.com/containerd/containerd/content/local/store.go b/src/vendor/github.com/containerd/containerd/content/local/store.go deleted file mode 100644 index 457bbcd0e..000000000 --- a/src/vendor/github.com/containerd/containerd/content/local/store.go +++ /dev/null @@ -1,704 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
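readerat.go above exposes each blob as a sized io.ReaderAt, and the content helpers turn that into a plain io.Reader with io.NewSectionReader. A small stand-alone illustration of the same pattern against an ordinary file; the path is a stand-in chosen only for the example:

package main

import (
    "fmt"
    "io"
    "os"
)

func main() {
    // Open a file and treat it the way the local store treats a blob:
    // an io.ReaderAt plus a known size.
    f, err := os.Open("/etc/hosts") // stand-in for a blob path
    if err != nil {
        panic(err)
    }
    defer f.Close()

    fi, err := f.Stat()
    if err != nil {
        panic(err)
    }

    // io.NewSectionReader turns (ReaderAt, offset, size) into an io.Reader,
    // which is what content.NewReader does with the store's ReaderAt.
    r := io.NewSectionReader(f, 0, fi.Size())

    data, err := io.ReadAll(r)
    if err != nil {
        panic(err)
    }
    fmt.Printf("read %d bytes\n", len(data))
}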
-*/ - -package local - -import ( - "context" - "fmt" - "io" - "math/rand" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "time" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/filters" - "github.com/containerd/containerd/log" - "github.com/sirupsen/logrus" - - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -var bufPool = sync.Pool{ - New: func() interface{} { - buffer := make([]byte, 1<<20) - return &buffer - }, -} - -// LabelStore is used to store mutable labels for digests -type LabelStore interface { - // Get returns all the labels for the given digest - Get(digest.Digest) (map[string]string, error) - - // Set sets all the labels for a given digest - Set(digest.Digest, map[string]string) error - - // Update replaces the given labels for a digest, - // a key with an empty value removes a label. - Update(digest.Digest, map[string]string) (map[string]string, error) -} - -// Store is digest-keyed store for content. All data written into the store is -// stored under a verifiable digest. -// -// Store can generally support multi-reader, single-writer ingest of data, -// including resumable ingest. -type store struct { - root string - ls LabelStore -} - -// NewStore returns a local content store -func NewStore(root string) (content.Store, error) { - return NewLabeledStore(root, nil) -} - -// NewLabeledStore returns a new content store using the provided label store -// -// Note: content stores which are used underneath a metadata store may not -// require labels and should use `NewStore`. `NewLabeledStore` is primarily -// useful for tests or standalone implementations. -func NewLabeledStore(root string, ls LabelStore) (content.Store, error) { - if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil { - return nil, err - } - - return &store{ - root: root, - ls: ls, - }, nil -} - -func (s *store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { - p, err := s.blobPath(dgst) - if err != nil { - return content.Info{}, fmt.Errorf("calculating blob info path: %w", err) - } - - fi, err := os.Stat(p) - if err != nil { - if os.IsNotExist(err) { - err = fmt.Errorf("content %v: %w", dgst, errdefs.ErrNotFound) - } - - return content.Info{}, err - } - var labels map[string]string - if s.ls != nil { - labels, err = s.ls.Get(dgst) - if err != nil { - return content.Info{}, err - } - } - return s.info(dgst, fi, labels), nil -} - -func (s *store) info(dgst digest.Digest, fi os.FileInfo, labels map[string]string) content.Info { - return content.Info{ - Digest: dgst, - Size: fi.Size(), - CreatedAt: fi.ModTime(), - UpdatedAt: getATime(fi), - Labels: labels, - } -} - -// ReaderAt returns an io.ReaderAt for the blob. -func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { - p, err := s.blobPath(desc.Digest) - if err != nil { - return nil, fmt.Errorf("calculating blob path for ReaderAt: %w", err) - } - - reader, err := OpenReader(p) - if err != nil { - return nil, fmt.Errorf("blob %s expected at %s: %w", desc.Digest, p, err) - } - - return reader, nil -} - -// Delete removes a blob by its digest. -// -// While this is safe to do concurrently, safe exist-removal logic must hold -// some global lock on the store. 
-func (s *store) Delete(ctx context.Context, dgst digest.Digest) error { - bp, err := s.blobPath(dgst) - if err != nil { - return fmt.Errorf("calculating blob path for delete: %w", err) - } - - if err := os.RemoveAll(bp); err != nil { - if !os.IsNotExist(err) { - return err - } - - return fmt.Errorf("content %v: %w", dgst, errdefs.ErrNotFound) - } - - return nil -} - -func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { - if s.ls == nil { - return content.Info{}, fmt.Errorf("update not supported on immutable content store: %w", errdefs.ErrFailedPrecondition) - } - - p, err := s.blobPath(info.Digest) - if err != nil { - return content.Info{}, fmt.Errorf("calculating blob path for update: %w", err) - } - - fi, err := os.Stat(p) - if err != nil { - if os.IsNotExist(err) { - err = fmt.Errorf("content %v: %w", info.Digest, errdefs.ErrNotFound) - } - - return content.Info{}, err - } - - var ( - all bool - labels map[string]string - ) - if len(fieldpaths) > 0 { - for _, path := range fieldpaths { - if strings.HasPrefix(path, "labels.") { - if labels == nil { - labels = map[string]string{} - } - - key := strings.TrimPrefix(path, "labels.") - labels[key] = info.Labels[key] - continue - } - - switch path { - case "labels": - all = true - labels = info.Labels - default: - return content.Info{}, fmt.Errorf("cannot update %q field on content info %q: %w", path, info.Digest, errdefs.ErrInvalidArgument) - } - } - } else { - all = true - labels = info.Labels - } - - if all { - err = s.ls.Set(info.Digest, labels) - } else { - labels, err = s.ls.Update(info.Digest, labels) - } - if err != nil { - return content.Info{}, err - } - - info = s.info(info.Digest, fi, labels) - info.UpdatedAt = time.Now() - - if err := os.Chtimes(p, info.UpdatedAt, info.CreatedAt); err != nil { - log.G(ctx).WithError(err).Warnf("could not change access time for %s", info.Digest) - } - - return info, nil -} - -func (s *store) Walk(ctx context.Context, fn content.WalkFunc, fs ...string) error { - root := filepath.Join(s.root, "blobs") - - filter, err := filters.ParseAll(fs...) - if err != nil { - return err - } - - var alg digest.Algorithm - return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - if !fi.IsDir() && !alg.Available() { - return nil - } - - // TODO(stevvooe): There are few more cases with subdirs that should be - // handled in case the layout gets corrupted. This isn't strict enough - // and may spew bad data. - - if path == root { - return nil - } - if filepath.Dir(path) == root { - alg = digest.Algorithm(filepath.Base(path)) - - if !alg.Available() { - alg = "" - return filepath.SkipDir - } - - // descending into a hash directory - return nil - } - - dgst := digest.NewDigestFromHex(alg.String(), filepath.Base(path)) - if err := dgst.Validate(); err != nil { - // log error but don't report - log.L.WithError(err).WithField("path", path).Error("invalid digest for blob path") - // if we see this, it could mean some sort of corruption of the - // store or extra paths not expected previously. 
- } - - var labels map[string]string - if s.ls != nil { - labels, err = s.ls.Get(dgst) - if err != nil { - return err - } - } - - info := s.info(dgst, fi, labels) - if !filter.Match(content.AdaptInfo(info)) { - return nil - } - return fn(info) - }) -} - -func (s *store) Status(ctx context.Context, ref string) (content.Status, error) { - return s.status(s.ingestRoot(ref)) -} - -func (s *store) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) { - fp, err := os.Open(filepath.Join(s.root, "ingest")) - if err != nil { - return nil, err - } - - defer fp.Close() - - fis, err := fp.Readdir(-1) - if err != nil { - return nil, err - } - - filter, err := filters.ParseAll(fs...) - if err != nil { - return nil, err - } - - var active []content.Status - for _, fi := range fis { - p := filepath.Join(s.root, "ingest", fi.Name()) - stat, err := s.status(p) - if err != nil { - if !os.IsNotExist(err) { - return nil, err - } - - // TODO(stevvooe): This is a common error if uploads are being - // completed while making this listing. Need to consider taking a - // lock on the whole store to coordinate this aspect. - // - // Another option is to cleanup downloads asynchronously and - // coordinate this method with the cleanup process. - // - // For now, we just skip them, as they really don't exist. - continue - } - - if filter.Match(adaptStatus(stat)) { - active = append(active, stat) - } - } - - return active, nil -} - -// WalkStatusRefs is used to walk all status references -// Failed status reads will be logged and ignored, if -// this function is called while references are being altered, -// these error messages may be produced. -func (s *store) WalkStatusRefs(ctx context.Context, fn func(string) error) error { - fp, err := os.Open(filepath.Join(s.root, "ingest")) - if err != nil { - return err - } - - defer fp.Close() - - fis, err := fp.Readdir(-1) - if err != nil { - return err - } - - for _, fi := range fis { - rf := filepath.Join(s.root, "ingest", fi.Name(), "ref") - - ref, err := readFileString(rf) - if err != nil { - log.G(ctx).WithError(err).WithField("path", rf).Error("failed to read ingest ref") - continue - } - - if err := fn(ref); err != nil { - return err - } - } - - return nil -} - -// status works like stat above except uses the path to the ingest. -func (s *store) status(ingestPath string) (content.Status, error) { - dp := filepath.Join(ingestPath, "data") - fi, err := os.Stat(dp) - if err != nil { - if os.IsNotExist(err) { - err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound) - } - return content.Status{}, err - } - - ref, err := readFileString(filepath.Join(ingestPath, "ref")) - if err != nil { - if os.IsNotExist(err) { - err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound) - } - return content.Status{}, err - } - - startedAt, err := readFileTimestamp(filepath.Join(ingestPath, "startedat")) - if err != nil { - return content.Status{}, fmt.Errorf("could not read startedat: %w", err) - } - - updatedAt, err := readFileTimestamp(filepath.Join(ingestPath, "updatedat")) - if err != nil { - return content.Status{}, fmt.Errorf("could not read updatedat: %w", err) - } - - // because we don't write updatedat on every write, the mod time may - // actually be more up to date. 
- if fi.ModTime().After(updatedAt) { - updatedAt = fi.ModTime() - } - - return content.Status{ - Ref: ref, - Offset: fi.Size(), - Total: s.total(ingestPath), - UpdatedAt: updatedAt, - StartedAt: startedAt, - }, nil -} - -func adaptStatus(status content.Status) filters.Adaptor { - return filters.AdapterFunc(func(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - switch fieldpath[0] { - case "ref": - return status.Ref, true - } - - return "", false - }) -} - -// total attempts to resolve the total expected size for the write. -func (s *store) total(ingestPath string) int64 { - totalS, err := readFileString(filepath.Join(ingestPath, "total")) - if err != nil { - return 0 - } - - total, err := strconv.ParseInt(totalS, 10, 64) - if err != nil { - // represents a corrupted file, should probably remove. - return 0 - } - - return total -} - -// Writer begins or resumes the active writer identified by ref. If the writer -// is already in use, an error is returned. Only one writer may be in use per -// ref at a time. -// -// The argument `ref` is used to uniquely identify a long-lived writer transaction. -func (s *store) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { - var wOpts content.WriterOpts - for _, opt := range opts { - if err := opt(&wOpts); err != nil { - return nil, err - } - } - // TODO(AkihiroSuda): we could create a random string or one calculated based on the context - // https://github.com/containerd/containerd/issues/2129#issuecomment-380255019 - if wOpts.Ref == "" { - return nil, fmt.Errorf("ref must not be empty: %w", errdefs.ErrInvalidArgument) - } - var lockErr error - for count := uint64(0); count < 10; count++ { - if err := tryLock(wOpts.Ref); err != nil { - if !errdefs.IsUnavailable(err) { - return nil, err - } - - lockErr = err - } else { - lockErr = nil - break - } - time.Sleep(time.Millisecond * time.Duration(rand.Intn(1< 0 && status.Total > 0 && total != status.Total { - return status, fmt.Errorf("provided total differs from status: %v != %v", total, status.Total) - } - - // TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes - fp, err := os.Open(data) - if err != nil { - return status, err - } - - p := bufPool.Get().(*[]byte) - status.Offset, err = io.CopyBuffer(digester.Hash(), fp, *p) - bufPool.Put(p) - fp.Close() - return status, err -} - -// writer provides the main implementation of the Writer method. The caller -// must hold the lock correctly and release on error if there is a problem. -func (s *store) writer(ctx context.Context, ref string, total int64, expected digest.Digest) (content.Writer, error) { - // TODO(stevvooe): Need to actually store expected here. We have - // code in the service that shouldn't be dealing with this. - if expected != "" { - p, err := s.blobPath(expected) - if err != nil { - return nil, fmt.Errorf("calculating expected blob path for writer: %w", err) - } - if _, err := os.Stat(p); err == nil { - return nil, fmt.Errorf("content %v: %w", expected, errdefs.ErrAlreadyExists) - } - } - - path, refp, data := s.ingestPaths(ref) - - var ( - digester = digest.Canonical.Digester() - offset int64 - startedAt time.Time - updatedAt time.Time - ) - - foundValidIngest := false - // ensure that the ingest path has been created. 
- if err := os.Mkdir(path, 0755); err != nil { - if !os.IsExist(err) { - return nil, err - } - status, err := s.resumeStatus(ref, total, digester) - if err == nil { - foundValidIngest = true - updatedAt = status.UpdatedAt - startedAt = status.StartedAt - total = status.Total - offset = status.Offset - } else { - logrus.Infof("failed to resume the status from path %s: %s. will recreate them", path, err.Error()) - } - } - - if !foundValidIngest { - startedAt = time.Now() - updatedAt = startedAt - - // the ingest is new, we need to setup the target location. - // write the ref to a file for later use - if err := os.WriteFile(refp, []byte(ref), 0666); err != nil { - return nil, err - } - - if err := writeTimestampFile(filepath.Join(path, "startedat"), startedAt); err != nil { - return nil, err - } - - if err := writeTimestampFile(filepath.Join(path, "updatedat"), startedAt); err != nil { - return nil, err - } - - if total > 0 { - if err := os.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil { - return nil, err - } - } - } - - fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - return nil, fmt.Errorf("failed to open data file: %w", err) - } - - if _, err := fp.Seek(offset, io.SeekStart); err != nil { - fp.Close() - return nil, fmt.Errorf("could not seek to current write offset: %w", err) - } - - return &writer{ - s: s, - fp: fp, - ref: ref, - path: path, - offset: offset, - total: total, - digester: digester, - startedAt: startedAt, - updatedAt: updatedAt, - }, nil -} - -// Abort an active transaction keyed by ref. If the ingest is active, it will -// be cancelled. Any resources associated with the ingest will be cleaned. -func (s *store) Abort(ctx context.Context, ref string) error { - root := s.ingestRoot(ref) - if err := os.RemoveAll(root); err != nil { - if os.IsNotExist(err) { - return fmt.Errorf("ingest ref %q: %w", ref, errdefs.ErrNotFound) - } - - return err - } - - return nil -} - -func (s *store) blobPath(dgst digest.Digest) (string, error) { - if err := dgst.Validate(); err != nil { - return "", fmt.Errorf("cannot calculate blob path from invalid digest: %v: %w", err, errdefs.ErrInvalidArgument) - } - - return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Hex()), nil -} - -func (s *store) ingestRoot(ref string) string { - // we take a digest of the ref to keep the ingest paths constant length. - // Note that this is not the current or potential digest of incoming content. - dgst := digest.FromString(ref) - return filepath.Join(s.root, "ingest", dgst.Hex()) -} - -// ingestPaths are returned. The paths are the following: -// -// - root: entire ingest directory -// - ref: name of the starting ref, must be unique -// - data: file where data is written -// -func (s *store) ingestPaths(ref string) (string, string, string) { - var ( - fp = s.ingestRoot(ref) - rp = filepath.Join(fp, "ref") - dp = filepath.Join(fp, "data") - ) - - return fp, rp, dp -} - -func readFileString(path string) (string, error) { - p, err := os.ReadFile(path) - return string(p), err -} - -// readFileTimestamp reads a file with just a timestamp present. 
-func readFileTimestamp(p string) (time.Time, error) { - b, err := os.ReadFile(p) - if err != nil { - if os.IsNotExist(err) { - err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound) - } - return time.Time{}, err - } - - var t time.Time - if err := t.UnmarshalText(b); err != nil { - return time.Time{}, fmt.Errorf("could not parse timestamp file %v: %w", p, err) - } - - return t, nil -} - -func writeTimestampFile(p string, t time.Time) error { - b, err := t.MarshalText() - if err != nil { - return err - } - return writeToCompletion(p, b, 0666) -} - -func writeToCompletion(path string, data []byte, mode os.FileMode) error { - tmp := fmt.Sprintf("%s.tmp", path) - f, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, mode) - if err != nil { - return fmt.Errorf("create tmp file: %w", err) - } - _, err = f.Write(data) - f.Close() - if err != nil { - return fmt.Errorf("write tmp file: %w", err) - } - err = os.Rename(tmp, path) - if err != nil { - return fmt.Errorf("rename tmp file: %w", err) - } - return nil -} diff --git a/src/vendor/github.com/containerd/containerd/content/local/store_bsd.go b/src/vendor/github.com/containerd/containerd/content/local/store_bsd.go deleted file mode 100644 index 42fddd341..000000000 --- a/src/vendor/github.com/containerd/containerd/content/local/store_bsd.go +++ /dev/null @@ -1,34 +0,0 @@ -//go:build darwin || freebsd || netbsd -// +build darwin freebsd netbsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "os" - "syscall" - "time" -) - -func getATime(fi os.FileInfo) time.Time { - if st, ok := fi.Sys().(*syscall.Stat_t); ok { - return time.Unix(st.Atimespec.Unix()) - } - - return fi.ModTime() -} diff --git a/src/vendor/github.com/containerd/containerd/content/local/store_openbsd.go b/src/vendor/github.com/containerd/containerd/content/local/store_openbsd.go deleted file mode 100644 index 2b58b617b..000000000 --- a/src/vendor/github.com/containerd/containerd/content/local/store_openbsd.go +++ /dev/null @@ -1,34 +0,0 @@ -//go:build openbsd -// +build openbsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
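Taken together, the local store and the content helpers shown in this diff give a digest-addressed blob store: blobs are ingested under a ref, verified against an expected digest, and then read back by descriptor. A minimal round-trip sketch, assuming the content and content/local packages as vendored here; the root path, ref name, and media type are illustrative only:

package main

import (
    "bytes"
    "context"
    "fmt"

    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/content/local"
    "github.com/opencontainers/go-digest"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
    ctx := context.Background()

    // NewStore creates the blobs/ and ingest/ layout under the given root.
    cs, err := local.NewStore("/tmp/content-demo") // illustrative path
    if err != nil {
        panic(err)
    }

    blob := []byte(`{"hello":"world"}`)
    desc := ocispec.Descriptor{
        MediaType: "application/vnd.example+json", // illustrative media type
        Digest:    digest.FromBytes(blob),
        Size:      int64(len(blob)),
    }

    // WriteBlob ingests under a ref, verifies the digest on commit, and is
    // a no-op if the blob already exists in the store.
    if err := content.WriteBlob(ctx, cs, "demo-ref", bytes.NewReader(blob), desc); err != nil {
        panic(err)
    }

    // ReadBlob reads the whole blob back by descriptor.
    out, err := content.ReadBlob(ctx, cs, desc)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out))
}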
-*/ - -package local - -import ( - "os" - "syscall" - "time" -) - -func getATime(fi os.FileInfo) time.Time { - if st, ok := fi.Sys().(*syscall.Stat_t); ok { - return time.Unix(st.Atim.Unix()) - } - - return fi.ModTime() -} diff --git a/src/vendor/github.com/containerd/containerd/content/local/store_unix.go b/src/vendor/github.com/containerd/containerd/content/local/store_unix.go deleted file mode 100644 index efa2eb943..000000000 --- a/src/vendor/github.com/containerd/containerd/content/local/store_unix.go +++ /dev/null @@ -1,34 +0,0 @@ -//go:build linux || solaris -// +build linux solaris - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "os" - "syscall" - "time" -) - -func getATime(fi os.FileInfo) time.Time { - if st, ok := fi.Sys().(*syscall.Stat_t); ok { - return time.Unix(st.Atim.Unix()) - } - - return fi.ModTime() -} diff --git a/src/vendor/github.com/containerd/containerd/content/local/store_windows.go b/src/vendor/github.com/containerd/containerd/content/local/store_windows.go deleted file mode 100644 index bce849979..000000000 --- a/src/vendor/github.com/containerd/containerd/content/local/store_windows.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "os" - "time" -) - -func getATime(fi os.FileInfo) time.Time { - return fi.ModTime() -} diff --git a/src/vendor/github.com/containerd/containerd/content/local/writer.go b/src/vendor/github.com/containerd/containerd/content/local/writer.go deleted file mode 100644 index b187e524c..000000000 --- a/src/vendor/github.com/containerd/containerd/content/local/writer.go +++ /dev/null @@ -1,208 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package local - -import ( - "context" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "time" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/log" - "github.com/opencontainers/go-digest" -) - -// writer represents a write transaction against the blob store. -type writer struct { - s *store - fp *os.File // opened data file - path string // path to writer dir - ref string // ref key - offset int64 - total int64 - digester digest.Digester - startedAt time.Time - updatedAt time.Time -} - -func (w *writer) Status() (content.Status, error) { - return content.Status{ - Ref: w.ref, - Offset: w.offset, - Total: w.total, - StartedAt: w.startedAt, - UpdatedAt: w.updatedAt, - }, nil -} - -// Digest returns the current digest of the content, up to the current write. -// -// Cannot be called concurrently with `Write`. -func (w *writer) Digest() digest.Digest { - return w.digester.Digest() -} - -// Write p to the transaction. -// -// Note that writes are unbuffered to the backing file. When writing, it is -// recommended to wrap in a bufio.Writer or, preferably, use io.CopyBuffer. -func (w *writer) Write(p []byte) (n int, err error) { - n, err = w.fp.Write(p) - w.digester.Hash().Write(p[:n]) - w.offset += int64(len(p)) - w.updatedAt = time.Now() - return n, err -} - -func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { - // Ensure even on error the writer is fully closed - defer unlock(w.ref) - - var base content.Info - for _, opt := range opts { - if err := opt(&base); err != nil { - return err - } - } - - fp := w.fp - w.fp = nil - - if fp == nil { - return fmt.Errorf("cannot commit on closed writer: %w", errdefs.ErrFailedPrecondition) - } - - if err := fp.Sync(); err != nil { - fp.Close() - return fmt.Errorf("sync failed: %w", err) - } - - fi, err := fp.Stat() - closeErr := fp.Close() - if err != nil { - return fmt.Errorf("stat on ingest file failed: %w", err) - } - if closeErr != nil { - return fmt.Errorf("failed to close ingest file: %w", closeErr) - } - - if size > 0 && size != fi.Size() { - return fmt.Errorf("unexpected commit size %d, expected %d: %w", fi.Size(), size, errdefs.ErrFailedPrecondition) - } - - dgst := w.digester.Digest() - if expected != "" && expected != dgst { - return fmt.Errorf("unexpected commit digest %s, expected %s: %w", dgst, expected, errdefs.ErrFailedPrecondition) - } - - var ( - ingest = filepath.Join(w.path, "data") - target, _ = w.s.blobPath(dgst) // ignore error because we calculated this dgst - ) - - // make sure parent directories of blob exist - if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil { - return err - } - - if _, err := os.Stat(target); err == nil { - // collision with the target file! - if err := os.RemoveAll(w.path); err != nil { - log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Error("failed to remove ingest directory") - } - return fmt.Errorf("content %v: %w", dgst, errdefs.ErrAlreadyExists) - } - - if err := os.Rename(ingest, target); err != nil { - return err - } - - // Ingest has now been made available in the content store, attempt to complete - // setting metadata but errors should only be logged and not returned since - // the content store cannot be cleanly rolled back. 
- - commitTime := time.Now() - if err := os.Chtimes(target, commitTime, commitTime); err != nil { - log.G(ctx).WithField("digest", dgst).Error("failed to change file time to commit time") - } - - // clean up!! - if err := os.RemoveAll(w.path); err != nil { - log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Error("failed to remove ingest directory") - } - - if w.s.ls != nil && base.Labels != nil { - if err := w.s.ls.Set(dgst, base.Labels); err != nil { - log.G(ctx).WithField("digest", dgst).Error("failed to set labels") - } - } - - // change to readonly, more important for read, but provides _some_ - // protection from this point on. We use the existing perms with a mask - // only allowing reads honoring the umask on creation. - // - // This removes write and exec, only allowing read per the creation umask. - // - // NOTE: Windows does not support this operation - if runtime.GOOS != "windows" { - if err := os.Chmod(target, (fi.Mode()&os.ModePerm)&^0333); err != nil { - log.G(ctx).WithField("ref", w.ref).Error("failed to make readonly") - } - } - - return nil -} - -// Close the writer, flushing any unwritten data and leaving the progress in -// tact. -// -// If one needs to resume the transaction, a new writer can be obtained from -// `Ingester.Writer` using the same key. The write can then be continued -// from it was left off. -// -// To abandon a transaction completely, first call close then `IngestManager.Abort` to -// clean up the associated resources. -func (w *writer) Close() (err error) { - if w.fp != nil { - w.fp.Sync() - err = w.fp.Close() - writeTimestampFile(filepath.Join(w.path, "updatedat"), w.updatedAt) - w.fp = nil - unlock(w.ref) - return - } - - return nil -} - -func (w *writer) Truncate(size int64) error { - if size != 0 { - return errors.New("Truncate: unsupported size") - } - w.offset = 0 - w.digester.Hash().Reset() - if _, err := w.fp.Seek(0, io.SeekStart); err != nil { - return err - } - return w.fp.Truncate(0) -} diff --git a/src/vendor/github.com/containerd/containerd/errdefs/errors.go b/src/vendor/github.com/containerd/containerd/errdefs/errors.go deleted file mode 100644 index 876225597..000000000 --- a/src/vendor/github.com/containerd/containerd/errdefs/errors.go +++ /dev/null @@ -1,92 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package errdefs defines the common errors used throughout containerd -// packages. -// -// Use with fmt.Errorf to add context to an error. -// -// To detect an error class, use the IsXXX functions to tell whether an error -// is of a certain type. -// -// The functions ToGRPC and FromGRPC can be used to map server-side and -// client-side errors to the correct types. -package errdefs - -import ( - "context" - "errors" -) - -// Definitions of common error types used throughout containerd. All containerd -// errors returned by most packages will map into one of these errors classes. 
-// Packages should return errors of these types when they want to instruct a -// client to take a particular action. -// -// For the most part, we just try to provide local grpc errors. Most conditions -// map very well to those defined by grpc. -var ( - ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping. - ErrInvalidArgument = errors.New("invalid argument") - ErrNotFound = errors.New("not found") - ErrAlreadyExists = errors.New("already exists") - ErrFailedPrecondition = errors.New("failed precondition") - ErrUnavailable = errors.New("unavailable") - ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented -) - -// IsInvalidArgument returns true if the error is due to an invalid argument -func IsInvalidArgument(err error) bool { - return errors.Is(err, ErrInvalidArgument) -} - -// IsNotFound returns true if the error is due to a missing object -func IsNotFound(err error) bool { - return errors.Is(err, ErrNotFound) -} - -// IsAlreadyExists returns true if the error is due to an already existing -// metadata item -func IsAlreadyExists(err error) bool { - return errors.Is(err, ErrAlreadyExists) -} - -// IsFailedPrecondition returns true if an operation could not proceed to the -// lack of a particular condition -func IsFailedPrecondition(err error) bool { - return errors.Is(err, ErrFailedPrecondition) -} - -// IsUnavailable returns true if the error is due to a resource being unavailable -func IsUnavailable(err error) bool { - return errors.Is(err, ErrUnavailable) -} - -// IsNotImplemented returns true if the error is due to not being implemented -func IsNotImplemented(err error) bool { - return errors.Is(err, ErrNotImplemented) -} - -// IsCanceled returns true if the error is due to `context.Canceled`. -func IsCanceled(err error) bool { - return errors.Is(err, context.Canceled) -} - -// IsDeadlineExceeded returns true if the error is due to -// `context.DeadlineExceeded`. -func IsDeadlineExceeded(err error) bool { - return errors.Is(err, context.DeadlineExceeded) -} diff --git a/src/vendor/github.com/containerd/containerd/errdefs/grpc.go b/src/vendor/github.com/containerd/containerd/errdefs/grpc.go deleted file mode 100644 index 7a9b33e05..000000000 --- a/src/vendor/github.com/containerd/containerd/errdefs/grpc.go +++ /dev/null @@ -1,147 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package errdefs - -import ( - "context" - "fmt" - "strings" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// ToGRPC will attempt to map the backend containerd error into a grpc error, -// using the original error message as a description. -// -// Further information may be extracted from certain errors depending on their -// type. -// -// If the error is unmapped, the original error will be returned to be handled -// by the regular grpc error handling stack. 
-func ToGRPC(err error) error { - if err == nil { - return nil - } - - if isGRPCError(err) { - // error has already been mapped to grpc - return err - } - - switch { - case IsInvalidArgument(err): - return status.Errorf(codes.InvalidArgument, err.Error()) - case IsNotFound(err): - return status.Errorf(codes.NotFound, err.Error()) - case IsAlreadyExists(err): - return status.Errorf(codes.AlreadyExists, err.Error()) - case IsFailedPrecondition(err): - return status.Errorf(codes.FailedPrecondition, err.Error()) - case IsUnavailable(err): - return status.Errorf(codes.Unavailable, err.Error()) - case IsNotImplemented(err): - return status.Errorf(codes.Unimplemented, err.Error()) - case IsCanceled(err): - return status.Errorf(codes.Canceled, err.Error()) - case IsDeadlineExceeded(err): - return status.Errorf(codes.DeadlineExceeded, err.Error()) - } - - return err -} - -// ToGRPCf maps the error to grpc error codes, assembling the formatting string -// and combining it with the target error string. -// -// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) -func ToGRPCf(err error, format string, args ...interface{}) error { - return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) -} - -// FromGRPC returns the underlying error from a grpc service based on the grpc error code -func FromGRPC(err error) error { - if err == nil { - return nil - } - - var cls error // divide these into error classes, becomes the cause - - switch code(err) { - case codes.InvalidArgument: - cls = ErrInvalidArgument - case codes.AlreadyExists: - cls = ErrAlreadyExists - case codes.NotFound: - cls = ErrNotFound - case codes.Unavailable: - cls = ErrUnavailable - case codes.FailedPrecondition: - cls = ErrFailedPrecondition - case codes.Unimplemented: - cls = ErrNotImplemented - case codes.Canceled: - cls = context.Canceled - case codes.DeadlineExceeded: - cls = context.DeadlineExceeded - default: - cls = ErrUnknown - } - - msg := rebaseMessage(cls, err) - if msg != "" { - err = fmt.Errorf("%s: %w", msg, cls) - } else { - err = cls - } - - return err -} - -// rebaseMessage removes the repeats for an error at the end of an error -// string. This will happen when taking an error over grpc then remapping it. -// -// Effectively, we just remove the string of cls from the end of err if it -// appears there. -func rebaseMessage(cls error, err error) string { - desc := errDesc(err) - clss := cls.Error() - if desc == clss { - return "" - } - - return strings.TrimSuffix(desc, ": "+clss) -} - -func isGRPCError(err error) bool { - _, ok := status.FromError(err) - return ok -} - -func code(err error) codes.Code { - if s, ok := status.FromError(err); ok { - return s.Code() - } - return codes.Unknown -} - -func errDesc(err error) string { - if s, ok := status.FromError(err); ok { - return s.Message() - } - return err.Error() -} diff --git a/src/vendor/github.com/containerd/containerd/filters/adaptor.go b/src/vendor/github.com/containerd/containerd/filters/adaptor.go deleted file mode 100644 index 5a9c559c1..000000000 --- a/src/vendor/github.com/containerd/containerd/filters/adaptor.go +++ /dev/null @@ -1,33 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
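errdefs/errors.go and errdefs/grpc.go above define sentinel error classes that are attached with %w, tested with the IsXXX helpers, and mapped to and from gRPC status codes. A short sketch of that flow, assuming the errdefs package as vendored here; the digest in the message is made up for the example:

package main

import (
    "fmt"

    "github.com/containerd/containerd/errdefs"
)

func main() {
    // Wrap a sentinel class with %w, the way the content store does for
    // missing blobs.
    err := fmt.Errorf("content sha256:1234: %w", errdefs.ErrNotFound)

    fmt.Println(errdefs.IsNotFound(err)) // true

    // ToGRPC maps the class to a gRPC status (codes.NotFound here), keeping
    // the original message as the status description.
    grpcErr := errdefs.ToGRPC(err)

    // FromGRPC reverses the mapping on the receiving side, so callers can
    // keep using errors.Is / errdefs.IsNotFound on the result.
    fmt.Println(errdefs.IsNotFound(errdefs.FromGRPC(grpcErr))) // true
}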
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package filters - -// Adaptor specifies the mapping of fieldpaths to a type. For the given field -// path, the value and whether it is present should be returned. The mapping of -// the fieldpath to a field is deferred to the adaptor implementation, but -// should generally follow protobuf field path/mask semantics. -type Adaptor interface { - Field(fieldpath []string) (value string, present bool) -} - -// AdapterFunc allows implementation specific matching of fieldpaths -type AdapterFunc func(fieldpath []string) (string, bool) - -// Field returns the field name and true if it exists -func (fn AdapterFunc) Field(fieldpath []string) (string, bool) { - return fn(fieldpath) -} diff --git a/src/vendor/github.com/containerd/containerd/filters/filter.go b/src/vendor/github.com/containerd/containerd/filters/filter.go deleted file mode 100644 index cf09d8d9e..000000000 --- a/src/vendor/github.com/containerd/containerd/filters/filter.go +++ /dev/null @@ -1,179 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package filters defines a syntax and parser that can be used for the -// filtration of items across the containerd API. The core is built on the -// concept of protobuf field paths, with quoting. Several operators allow the -// user to flexibly select items based on field presence, equality, inequality -// and regular expressions. Flexible adaptors support working with any type. -// -// The syntax is fairly familiar, if you've used container ecosystem -// projects. At the core, we base it on the concept of protobuf field -// paths, augmenting with the ability to quote portions of the field path -// to match arbitrary labels. These "selectors" come in the following -// syntax: -// -// ``` -// [] -// ``` -// -// A basic example is as follows: -// -// ``` -// name==foo -// ``` -// -// This would match all objects that have a field `name` with the value -// `foo`. If we only want to test if the field is present, we can omit the -// operator. This is most useful for matching labels in containerd. The -// following will match objects that have the field "labels" and have the -// label "foo" defined: -// -// ``` -// labels.foo -// ``` -// -// We also allow for quoting of parts of the field path to allow matching -// of arbitrary items: -// -// ``` -// labels."very complex label"==something -// ``` -// -// We also define `!=` and `~=` as operators. The `!=` will match all -// objects that don't match the value for a field and `~=` will compile the -// target value as a regular expression and match the field value against that. 
-// -// Selectors can be combined using a comma, such that the resulting -// selector will require all selectors are matched for the object to match. -// The following example will match objects that are named `foo` and have -// the label `bar`: -// -// ``` -// name==foo,labels.bar -// ``` -// -package filters - -import ( - "regexp" - - "github.com/containerd/containerd/log" -) - -// Filter matches specific resources based the provided filter -type Filter interface { - Match(adaptor Adaptor) bool -} - -// FilterFunc is a function that handles matching with an adaptor -type FilterFunc func(Adaptor) bool - -// Match matches the FilterFunc returning true if the object matches the filter -func (fn FilterFunc) Match(adaptor Adaptor) bool { - return fn(adaptor) -} - -// Always is a filter that always returns true for any type of object -var Always FilterFunc = func(adaptor Adaptor) bool { - return true -} - -// Any allows multiple filters to be matched against the object -type Any []Filter - -// Match returns true if any of the provided filters are true -func (m Any) Match(adaptor Adaptor) bool { - for _, m := range m { - if m.Match(adaptor) { - return true - } - } - - return false -} - -// All allows multiple filters to be matched against the object -type All []Filter - -// Match only returns true if all filters match the object -func (m All) Match(adaptor Adaptor) bool { - for _, m := range m { - if !m.Match(adaptor) { - return false - } - } - - return true -} - -type operator int - -const ( - operatorPresent = iota - operatorEqual - operatorNotEqual - operatorMatches -) - -func (op operator) String() string { - switch op { - case operatorPresent: - return "?" - case operatorEqual: - return "==" - case operatorNotEqual: - return "!=" - case operatorMatches: - return "~=" - } - - return "unknown" -} - -type selector struct { - fieldpath []string - operator operator - value string - re *regexp.Regexp -} - -func (m selector) Match(adaptor Adaptor) bool { - value, present := adaptor.Field(m.fieldpath) - - switch m.operator { - case operatorPresent: - return present - case operatorEqual: - return present && value == m.value - case operatorNotEqual: - return value != m.value - case operatorMatches: - if m.re == nil { - r, err := regexp.Compile(m.value) - if err != nil { - log.L.Errorf("error compiling regexp %q", m.value) - return false - } - - m.re = r - } - - return m.re.MatchString(value) - default: - return false - } -} diff --git a/src/vendor/github.com/containerd/containerd/filters/parser.go b/src/vendor/github.com/containerd/containerd/filters/parser.go deleted file mode 100644 index 49182d7b7..000000000 --- a/src/vendor/github.com/containerd/containerd/filters/parser.go +++ /dev/null @@ -1,291 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package filters - -import ( - "fmt" - "io" - - "github.com/containerd/containerd/errdefs" -) - -/* -Parse the strings into a filter that may be used with an adaptor. 
- -The filter is made up of zero or more selectors. - -The format is a comma separated list of expressions, in the form of -``, known as selectors. All selectors must match the -target object for the filter to be true. - -We define the operators "==" for equality, "!=" for not equal and "~=" for a -regular expression. If the operator and value are not present, the matcher will -test for the presence of a value, as defined by the target object. - -The formal grammar is as follows: - -selectors := selector ("," selector)* -selector := fieldpath (operator value) -fieldpath := field ('.' field)* -field := quoted | [A-Za-z] [A-Za-z0-9_]+ -operator := "==" | "!=" | "~=" -value := quoted | [^\s,]+ -quoted := - -*/ -func Parse(s string) (Filter, error) { - // special case empty to match all - if s == "" { - return Always, nil - } - - p := parser{input: s} - return p.parse() -} - -// ParseAll parses each filter in ss and returns a filter that will return true -// if any filter matches the expression. -// -// If no filters are provided, the filter will match anything. -func ParseAll(ss ...string) (Filter, error) { - if len(ss) == 0 { - return Always, nil - } - - var fs []Filter - for _, s := range ss { - f, err := Parse(s) - if err != nil { - return nil, fmt.Errorf("%s: %w", err.Error(), errdefs.ErrInvalidArgument) - } - - fs = append(fs, f) - } - - return Any(fs), nil -} - -type parser struct { - input string - scanner scanner -} - -func (p *parser) parse() (Filter, error) { - p.scanner.init(p.input) - - ss, err := p.selectors() - if err != nil { - return nil, fmt.Errorf("filters: %w", err) - } - - return ss, nil -} - -func (p *parser) selectors() (Filter, error) { - s, err := p.selector() - if err != nil { - return nil, err - } - - ss := All{s} - -loop: - for { - tok := p.scanner.peek() - switch tok { - case ',': - pos, tok, _ := p.scanner.scan() - if tok != tokenSeparator { - return nil, p.mkerr(pos, "expected a separator") - } - - s, err := p.selector() - if err != nil { - return nil, err - } - - ss = append(ss, s) - case tokenEOF: - break loop - default: - return nil, p.mkerr(p.scanner.ppos, "unexpected input: %v", string(tok)) - } - } - - return ss, nil -} - -func (p *parser) selector() (selector, error) { - fieldpath, err := p.fieldpath() - if err != nil { - return selector{}, err - } - - switch p.scanner.peek() { - case ',', tokenSeparator, tokenEOF: - return selector{ - fieldpath: fieldpath, - operator: operatorPresent, - }, nil - } - - op, err := p.operator() - if err != nil { - return selector{}, err - } - - var allowAltQuotes bool - if op == operatorMatches { - allowAltQuotes = true - } - - value, err := p.value(allowAltQuotes) - if err != nil { - if err == io.EOF { - return selector{}, io.ErrUnexpectedEOF - } - return selector{}, err - } - - return selector{ - fieldpath: fieldpath, - value: value, - operator: op, - }, nil -} - -func (p *parser) fieldpath() ([]string, error) { - f, err := p.field() - if err != nil { - return nil, err - } - - fs := []string{f} -loop: - for { - tok := p.scanner.peek() // lookahead to consume field separator - - switch tok { - case '.': - pos, tok, _ := p.scanner.scan() // consume separator - if tok != tokenSeparator { - return nil, p.mkerr(pos, "expected a field separator (`.`)") - } - - f, err := p.field() - if err != nil { - return nil, err - } - - fs = append(fs, f) - default: - // let the layer above handle the other bad cases. 
- break loop - } - } - - return fs, nil -} - -func (p *parser) field() (string, error) { - pos, tok, s := p.scanner.scan() - switch tok { - case tokenField: - return s, nil - case tokenQuoted: - return p.unquote(pos, s, false) - case tokenIllegal: - return "", p.mkerr(pos, p.scanner.err) - } - - return "", p.mkerr(pos, "expected field or quoted") -} - -func (p *parser) operator() (operator, error) { - pos, tok, s := p.scanner.scan() - switch tok { - case tokenOperator: - switch s { - case "==": - return operatorEqual, nil - case "!=": - return operatorNotEqual, nil - case "~=": - return operatorMatches, nil - default: - return 0, p.mkerr(pos, "unsupported operator %q", s) - } - case tokenIllegal: - return 0, p.mkerr(pos, p.scanner.err) - } - - return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`) -} - -func (p *parser) value(allowAltQuotes bool) (string, error) { - pos, tok, s := p.scanner.scan() - - switch tok { - case tokenValue, tokenField: - return s, nil - case tokenQuoted: - return p.unquote(pos, s, allowAltQuotes) - case tokenIllegal: - return "", p.mkerr(pos, p.scanner.err) - } - - return "", p.mkerr(pos, "expected value or quoted") -} - -func (p *parser) unquote(pos int, s string, allowAlts bool) (string, error) { - if !allowAlts && s[0] != '\'' && s[0] != '"' { - return "", p.mkerr(pos, "invalid quote encountered") - } - - uq, err := unquote(s) - if err != nil { - return "", p.mkerr(pos, "unquoting failed: %v", err) - } - - return uq, nil -} - -type parseError struct { - input string - pos int - msg string -} - -func (pe parseError) Error() string { - if pe.pos < len(pe.input) { - before := pe.input[:pe.pos] - location := pe.input[pe.pos : pe.pos+1] // need to handle end - after := pe.input[pe.pos+1:] - - return fmt.Sprintf("[%s >|%s|< %s]: %v", before, location, after, pe.msg) - } - - return fmt.Sprintf("[%s]: %v", pe.input, pe.msg) -} - -func (p *parser) mkerr(pos int, format string, args ...interface{}) error { - return fmt.Errorf("parse error: %w", parseError{ - input: p.input, - pos: pos, - msg: fmt.Sprintf(format, args...), - }) -} diff --git a/src/vendor/github.com/containerd/containerd/filters/quote.go b/src/vendor/github.com/containerd/containerd/filters/quote.go deleted file mode 100644 index b76aab9b4..000000000 --- a/src/vendor/github.com/containerd/containerd/filters/quote.go +++ /dev/null @@ -1,252 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package filters - -import ( - "errors" - "unicode/utf8" -) - -// NOTE(stevvooe): Most of this code in this file is copied from the stdlib -// strconv package and modified to be able to handle quoting with `/` and `|` -// as delimiters. The copyright is held by the Go authors. - -var errQuoteSyntax = errors.New("quote syntax error") - -// UnquoteChar decodes the first character or byte in the escaped string -// or character literal represented by the string s. 
-// It returns four values: -// -// 1) value, the decoded Unicode code point or byte value; -// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation; -// 3) tail, the remainder of the string after the character; and -// 4) an error that will be nil if the character is syntactically valid. -// -// The second argument, quote, specifies the type of literal being parsed -// and therefore which escaped quote character is permitted. -// If set to a single quote, it permits the sequence \' and disallows unescaped '. -// If set to a double quote, it permits \" and disallows unescaped ". -// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped. -// -// This is from Go strconv package, modified to support `|` and `/` as double -// quotes for use with regular expressions. -func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { - // easy cases - switch c := s[0]; { - case c == quote && (quote == '\'' || quote == '"' || quote == '/' || quote == '|'): - err = errQuoteSyntax - return - case c >= utf8.RuneSelf: - r, size := utf8.DecodeRuneInString(s) - return r, true, s[size:], nil - case c != '\\': - return rune(s[0]), false, s[1:], nil - } - - // hard case: c is backslash - if len(s) <= 1 { - err = errQuoteSyntax - return - } - c := s[1] - s = s[2:] - - switch c { - case 'a': - value = '\a' - case 'b': - value = '\b' - case 'f': - value = '\f' - case 'n': - value = '\n' - case 'r': - value = '\r' - case 't': - value = '\t' - case 'v': - value = '\v' - case 'x', 'u', 'U': - n := 0 - switch c { - case 'x': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - var v rune - if len(s) < n { - err = errQuoteSyntax - return - } - for j := 0; j < n; j++ { - x, ok := unhex(s[j]) - if !ok { - err = errQuoteSyntax - return - } - v = v<<4 | x - } - s = s[n:] - if c == 'x' { - // single-byte string, possibly not UTF-8 - value = v - break - } - if v > utf8.MaxRune { - err = errQuoteSyntax - return - } - value = v - multibyte = true - case '0', '1', '2', '3', '4', '5', '6', '7': - v := rune(c) - '0' - if len(s) < 2 { - err = errQuoteSyntax - return - } - for j := 0; j < 2; j++ { // one digit already; two more - x := rune(s[j]) - '0' - if x < 0 || x > 7 { - err = errQuoteSyntax - return - } - v = (v << 3) | x - } - s = s[2:] - if v > 255 { - err = errQuoteSyntax - return - } - value = v - case '\\': - value = '\\' - case '\'', '"', '|', '/': - if c != quote { - err = errQuoteSyntax - return - } - value = rune(c) - default: - err = errQuoteSyntax - return - } - tail = s - return -} - -// unquote interprets s as a single-quoted, double-quoted, -// or backquoted Go string literal, returning the string value -// that s quotes. (If s is single-quoted, it would be a Go -// character literal; Unquote returns the corresponding -// one-character string.) -// -// This is modified from the standard library to support `|` and `/` as quote -// characters for use with regular expressions. -func unquote(s string) (string, error) { - n := len(s) - if n < 2 { - return "", errQuoteSyntax - } - quote := s[0] - if quote != s[n-1] { - return "", errQuoteSyntax - } - s = s[1 : n-1] - - if quote == '`' { - if contains(s, '`') { - return "", errQuoteSyntax - } - if contains(s, '\r') { - // -1 because we know there is at least one \r to remove. 
- buf := make([]byte, 0, len(s)-1) - for i := 0; i < len(s); i++ { - if s[i] != '\r' { - buf = append(buf, s[i]) - } - } - return string(buf), nil - } - return s, nil - } - if quote != '"' && quote != '\'' && quote != '|' && quote != '/' { - return "", errQuoteSyntax - } - if contains(s, '\n') { - return "", errQuoteSyntax - } - - // Is it trivial? Avoid allocation. - if !contains(s, '\\') && !contains(s, quote) { - switch quote { - case '"', '/', '|': // pipe and slash are treated like double quote - return s, nil - case '\'': - r, size := utf8.DecodeRuneInString(s) - if size == len(s) && (r != utf8.RuneError || size != 1) { - return s, nil - } - } - } - - var runeTmp [utf8.UTFMax]byte - buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. - for len(s) > 0 { - c, multibyte, ss, err := unquoteChar(s, quote) - if err != nil { - return "", err - } - s = ss - if c < utf8.RuneSelf || !multibyte { - buf = append(buf, byte(c)) - } else { - n := utf8.EncodeRune(runeTmp[:], c) - buf = append(buf, runeTmp[:n]...) - } - if quote == '\'' && len(s) != 0 { - // single-quoted must be single character - return "", errQuoteSyntax - } - } - return string(buf), nil -} - -// contains reports whether the string contains the byte c. -func contains(s string, c byte) bool { - for i := 0; i < len(s); i++ { - if s[i] == c { - return true - } - } - return false -} - -func unhex(b byte) (v rune, ok bool) { - c := rune(b) - switch { - case '0' <= c && c <= '9': - return c - '0', true - case 'a' <= c && c <= 'f': - return c - 'a' + 10, true - case 'A' <= c && c <= 'F': - return c - 'A' + 10, true - } - return -} diff --git a/src/vendor/github.com/containerd/containerd/filters/scanner.go b/src/vendor/github.com/containerd/containerd/filters/scanner.go deleted file mode 100644 index 6a485467b..000000000 --- a/src/vendor/github.com/containerd/containerd/filters/scanner.go +++ /dev/null @@ -1,297 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package filters - -import ( - "unicode" - "unicode/utf8" -) - -const ( - tokenEOF = -(iota + 1) - tokenQuoted - tokenValue - tokenField - tokenSeparator - tokenOperator - tokenIllegal -) - -type token rune - -func (t token) String() string { - switch t { - case tokenEOF: - return "EOF" - case tokenQuoted: - return "Quoted" - case tokenValue: - return "Value" - case tokenField: - return "Field" - case tokenSeparator: - return "Separator" - case tokenOperator: - return "Operator" - case tokenIllegal: - return "Illegal" - } - - return string(t) -} - -func (t token) GoString() string { - return "token" + t.String() -} - -type scanner struct { - input string - pos int - ppos int // bounds the current rune in the string - value bool - err string -} - -func (s *scanner) init(input string) { - s.input = input - s.pos = 0 - s.ppos = 0 -} - -func (s *scanner) next() rune { - if s.pos >= len(s.input) { - return tokenEOF - } - s.pos = s.ppos - - r, w := utf8.DecodeRuneInString(s.input[s.ppos:]) - s.ppos += w - if r == utf8.RuneError { - if w > 0 { - s.error("rune error") - return tokenIllegal - } - return tokenEOF - } - - if r == 0 { - s.error("unexpected null") - return tokenIllegal - } - - return r -} - -func (s *scanner) peek() rune { - pos := s.pos - ppos := s.ppos - ch := s.next() - s.pos = pos - s.ppos = ppos - return ch -} - -func (s *scanner) scan() (nextp int, tk token, text string) { - var ( - ch = s.next() - pos = s.pos - ) - -chomp: - switch { - case ch == tokenEOF: - case ch == tokenIllegal: - case isQuoteRune(ch): - if !s.scanQuoted(ch) { - return pos, tokenIllegal, s.input[pos:s.ppos] - } - return pos, tokenQuoted, s.input[pos:s.ppos] - case isSeparatorRune(ch): - s.value = false - return pos, tokenSeparator, s.input[pos:s.ppos] - case isOperatorRune(ch): - s.scanOperator() - s.value = true - return pos, tokenOperator, s.input[pos:s.ppos] - case unicode.IsSpace(ch): - // chomp - ch = s.next() - pos = s.pos - goto chomp - case s.value: - s.scanValue() - s.value = false - return pos, tokenValue, s.input[pos:s.ppos] - case isFieldRune(ch): - s.scanField() - return pos, tokenField, s.input[pos:s.ppos] - } - - return s.pos, token(ch), "" -} - -func (s *scanner) scanField() { - for { - ch := s.peek() - if !isFieldRune(ch) { - break - } - s.next() - } -} - -func (s *scanner) scanOperator() { - for { - ch := s.peek() - switch ch { - case '=', '!', '~': - s.next() - default: - return - } - } -} - -func (s *scanner) scanValue() { - for { - ch := s.peek() - if !isValueRune(ch) { - break - } - s.next() - } -} - -func (s *scanner) scanQuoted(quote rune) bool { - var illegal bool - ch := s.next() // read character after quote - for ch != quote { - if ch == '\n' || ch < 0 { - s.error("quoted literal not terminated") - return false - } - if ch == '\\' { - var legal bool - ch, legal = s.scanEscape(quote) - if !legal { - illegal = true - } - } else { - ch = s.next() - } - } - return !illegal -} - -func (s *scanner) scanEscape(quote rune) (ch rune, legal bool) { - ch = s.next() // read character after '/' - switch ch { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote: - // nothing to do - ch = s.next() - legal = true - case '0', '1', '2', '3', '4', '5', '6', '7': - ch, legal = s.scanDigits(ch, 8, 3) - case 'x': - ch, legal = s.scanDigits(s.next(), 16, 2) - case 'u': - ch, legal = s.scanDigits(s.next(), 16, 4) - case 'U': - ch, legal = s.scanDigits(s.next(), 16, 8) - default: - s.error("illegal escape sequence") - } - return -} - -func (s *scanner) scanDigits(ch rune, base, n int) (rune, bool) 
{ - for n > 0 && digitVal(ch) < base { - ch = s.next() - n-- - } - if n > 0 { - s.error("illegal numeric escape sequence") - return ch, false - } - return ch, true -} - -func (s *scanner) error(msg string) { - if s.err == "" { - s.err = msg - } -} - -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // larger than any legal digit val -} - -func isFieldRune(r rune) bool { - return (r == '_' || isAlphaRune(r) || isDigitRune(r)) -} - -func isAlphaRune(r rune) bool { - return r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z' -} - -func isDigitRune(r rune) bool { - return r >= '0' && r <= '9' -} - -func isOperatorRune(r rune) bool { - switch r { - case '=', '!', '~': - return true - } - - return false -} - -func isQuoteRune(r rune) bool { - switch r { - case '/', '|', '"': // maybe add single quoting? - return true - } - - return false -} - -func isSeparatorRune(r rune) bool { - switch r { - case ',', '.': - return true - } - - return false -} - -func isValueRune(r rune) bool { - return r != ',' && !unicode.IsSpace(r) && - (unicode.IsLetter(r) || - unicode.IsDigit(r) || - unicode.IsNumber(r) || - unicode.IsGraphic(r) || - unicode.IsPunct(r)) -} diff --git a/src/vendor/github.com/containerd/containerd/images/annotations.go b/src/vendor/github.com/containerd/containerd/images/annotations.go deleted file mode 100644 index 47d92104c..000000000 --- a/src/vendor/github.com/containerd/containerd/images/annotations.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package images - -const ( - // AnnotationImageName is an annotation on a Descriptor in an index.json - // containing the `Name` value as used by an `Image` struct - AnnotationImageName = "io.containerd.image.name" -) diff --git a/src/vendor/github.com/containerd/containerd/images/diffid.go b/src/vendor/github.com/containerd/containerd/images/diffid.go deleted file mode 100644 index 56193cc28..000000000 --- a/src/vendor/github.com/containerd/containerd/images/diffid.go +++ /dev/null @@ -1,81 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package images - -import ( - "context" - "io" - - "github.com/containerd/containerd/archive/compression" - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/labels" - "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" -) - -// GetDiffID gets the diff ID of the layer blob descriptor. -func GetDiffID(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (digest.Digest, error) { - switch desc.MediaType { - case - // If the layer is already uncompressed, we can just return its digest - MediaTypeDockerSchema2Layer, - ocispec.MediaTypeImageLayer, - MediaTypeDockerSchema2LayerForeign, - ocispec.MediaTypeImageLayerNonDistributable: - return desc.Digest, nil - } - info, err := cs.Info(ctx, desc.Digest) - if err != nil { - return "", err - } - v, ok := info.Labels[labels.LabelUncompressed] - if ok { - // Fast path: if the image is already unpacked, we can use the label value - return digest.Parse(v) - } - // if the image is not unpacked, we may not have the label - ra, err := cs.ReaderAt(ctx, desc) - if err != nil { - return "", err - } - defer ra.Close() - r := content.NewReader(ra) - uR, err := compression.DecompressStream(r) - if err != nil { - return "", err - } - defer uR.Close() - digester := digest.Canonical.Digester() - hashW := digester.Hash() - if _, err := io.Copy(hashW, uR); err != nil { - return "", err - } - if err := ra.Close(); err != nil { - return "", err - } - digest := digester.Digest() - // memorize the computed value - if info.Labels == nil { - info.Labels = make(map[string]string) - } - info.Labels[labels.LabelUncompressed] = digest.String() - if _, err := cs.Update(ctx, info, "labels"); err != nil { - logrus.WithError(err).Warnf("failed to set %s label for %s", labels.LabelUncompressed, desc.Digest) - } - return digest, nil -} diff --git a/src/vendor/github.com/containerd/containerd/images/handlers.go b/src/vendor/github.com/containerd/containerd/images/handlers.go deleted file mode 100644 index 077d88e78..000000000 --- a/src/vendor/github.com/containerd/containerd/images/handlers.go +++ /dev/null @@ -1,322 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package images - -import ( - "context" - "errors" - "fmt" - "sort" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/platforms" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/sync/errgroup" - "golang.org/x/sync/semaphore" -) - -var ( - // ErrSkipDesc is used to skip processing of a descriptor and - // its descendants. - ErrSkipDesc = errors.New("skip descriptor") - - // ErrStopHandler is used to signify that the descriptor - // has been handled and should not be handled further. - // This applies only to a single descriptor in a handler - // chain and does not apply to descendant descriptors. 
- ErrStopHandler = errors.New("stop handler") - - // ErrEmptyWalk is used when the WalkNotEmpty handlers return no - // children (e.g.: they were filtered out). - ErrEmptyWalk = errors.New("image might be filtered out") -) - -// Handler handles image manifests -type Handler interface { - Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) -} - -// HandlerFunc function implementing the Handler interface -type HandlerFunc func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) - -// Handle image manifests -func (fn HandlerFunc) Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { - return fn(ctx, desc) -} - -// Handlers returns a handler that will run the handlers in sequence. -// -// A handler may return `ErrStopHandler` to stop calling additional handlers -func Handlers(handlers ...Handler) HandlerFunc { - return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { - var children []ocispec.Descriptor - for _, handler := range handlers { - ch, err := handler.Handle(ctx, desc) - if err != nil { - if errors.Is(err, ErrStopHandler) { - break - } - return nil, err - } - - children = append(children, ch...) - } - - return children, nil - } -} - -// Walk the resources of an image and call the handler for each. If the handler -// decodes the sub-resources for each image, -// -// This differs from dispatch in that each sibling resource is considered -// synchronously. -func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error { - for _, desc := range descs { - - children, err := handler.Handle(ctx, desc) - if err != nil { - if errors.Is(err, ErrSkipDesc) { - continue // don't traverse the children. - } - return err - } - - if len(children) > 0 { - if err := Walk(ctx, handler, children...); err != nil { - return err - } - } - } - return nil -} - -// WalkNotEmpty works the same way Walk does, with the exception that it ensures that -// some children are still found by Walking the descriptors (for example, not all of -// them have been filtered out by one of the handlers). If there are no children, -// then an ErrEmptyWalk error is returned. -func WalkNotEmpty(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error { - isEmpty := true - var notEmptyHandler HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - children, err := handler.Handle(ctx, desc) - if err != nil { - return children, err - } - - if len(children) > 0 { - isEmpty = false - } - - return children, nil - } - - err := Walk(ctx, notEmptyHandler, descs...) - if err != nil { - return err - } - - if isEmpty { - return ErrEmptyWalk - } - - return nil -} - -// Dispatch runs the provided handler for content specified by the descriptors. -// If the handler decode subresources, they will be visited, as well. -// -// Handlers for siblings are run in parallel on the provided descriptors. A -// handler may return `ErrSkipDesc` to signal to the dispatcher to not traverse -// any children. -// -// A concurrency limiter can be passed in to limit the number of concurrent -// handlers running. When limiter is nil, there is no limit. -// -// Typically, this function will be used with `FetchHandler`, often composed -// with other handlers. -// -// If any handler returns an error, the dispatch session will be canceled. 
-func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, descs ...ocispec.Descriptor) error { - eg, ctx2 := errgroup.WithContext(ctx) - for _, desc := range descs { - desc := desc - - if limiter != nil { - if err := limiter.Acquire(ctx, 1); err != nil { - return err - } - } - - eg.Go(func() error { - desc := desc - - children, err := handler.Handle(ctx2, desc) - if limiter != nil { - limiter.Release(1) - } - if err != nil { - if errors.Is(err, ErrSkipDesc) { - return nil // don't traverse the children. - } - return err - } - - if len(children) > 0 { - return Dispatch(ctx2, handler, limiter, children...) - } - - return nil - }) - } - - return eg.Wait() -} - -// ChildrenHandler decodes well-known manifest types and returns their children. -// -// This is useful for supporting recursive fetch and other use cases where you -// want to do a full walk of resources. -// -// One can also replace this with another implementation to allow descending of -// arbitrary types. -func ChildrenHandler(provider content.Provider) HandlerFunc { - return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - return Children(ctx, provider, desc) - } -} - -// SetChildrenLabels is a handler wrapper which sets labels for the content on -// the children returned by the handler and passes through the children. -// Must follow a handler that returns the children to be labeled. -func SetChildrenLabels(manager content.Manager, f HandlerFunc) HandlerFunc { - return SetChildrenMappedLabels(manager, f, nil) -} - -// SetChildrenMappedLabels is a handler wrapper which sets labels for the content on -// the children returned by the handler and passes through the children. -// Must follow a handler that returns the children to be labeled. -// The label map allows the caller to control the labels per child descriptor. -// For returned labels, the index of the child will be appended to the end -// except for the first index when the returned label does not end with '.'. -func SetChildrenMappedLabels(manager content.Manager, f HandlerFunc, labelMap func(ocispec.Descriptor) []string) HandlerFunc { - if labelMap == nil { - labelMap = ChildGCLabels - } - return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - children, err := f(ctx, desc) - if err != nil { - return children, err - } - - if len(children) > 0 { - var ( - info = content.Info{ - Digest: desc.Digest, - Labels: map[string]string{}, - } - fields = []string{} - keys = map[string]uint{} - ) - for _, ch := range children { - labelKeys := labelMap(ch) - for _, key := range labelKeys { - idx := keys[key] - keys[key] = idx + 1 - if idx > 0 || key[len(key)-1] == '.' { - key = fmt.Sprintf("%s%d", key, idx) - } - - info.Labels[key] = ch.Digest.String() - fields = append(fields, "labels."+key) - } - } - - _, err := manager.Update(ctx, info, fields...) - if err != nil { - return nil, err - } - } - - return children, err - } -} - -// FilterPlatforms is a handler wrapper which limits the descriptors returned -// based on matching the specified platform matcher. 
-func FilterPlatforms(f HandlerFunc, m platforms.Matcher) HandlerFunc { - return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - children, err := f(ctx, desc) - if err != nil { - return children, err - } - - var descs []ocispec.Descriptor - - if m == nil { - descs = children - } else { - for _, d := range children { - if d.Platform == nil || m.Match(*d.Platform) { - descs = append(descs, d) - } - } - } - - return descs, nil - } -} - -// LimitManifests is a handler wrapper which filters the manifest descriptors -// returned using the provided platform. -// The results will be ordered according to the comparison operator and -// use the ordering in the manifests for equal matches. -// A limit of 0 or less is considered no limit. -// A not found error is returned if no manifest is matched. -func LimitManifests(f HandlerFunc, m platforms.MatchComparer, n int) HandlerFunc { - return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - children, err := f(ctx, desc) - if err != nil { - return children, err - } - - switch desc.MediaType { - case ocispec.MediaTypeImageIndex, MediaTypeDockerSchema2ManifestList: - sort.SliceStable(children, func(i, j int) bool { - if children[i].Platform == nil { - return false - } - if children[j].Platform == nil { - return true - } - return m.Less(*children[i].Platform, *children[j].Platform) - }) - - if n > 0 { - if len(children) == 0 { - return children, fmt.Errorf("no match for platform in manifest: %w", errdefs.ErrNotFound) - } - if len(children) > n { - children = children[:n] - } - } - default: - // only limit manifests from an index - } - return children, nil - } -} diff --git a/src/vendor/github.com/containerd/containerd/images/image.go b/src/vendor/github.com/containerd/containerd/images/image.go deleted file mode 100644 index d45afe482..000000000 --- a/src/vendor/github.com/containerd/containerd/images/image.go +++ /dev/null @@ -1,440 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package images - -import ( - "context" - "encoding/json" - "fmt" - "sort" - "time" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/platforms" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// Image provides the model for how containerd views container images. -type Image struct { - // Name of the image. - // - // To be pulled, it must be a reference compatible with resolvers. - // - // This field is required. - Name string - - // Labels provide runtime decoration for the image record. - // - // There is no default behavior for how these labels are propagated. They - // only decorate the static metadata object. - // - // This field is optional. - Labels map[string]string - - // Target describes the root content for this image. Typically, this is - // a manifest, index or manifest list. 
- Target ocispec.Descriptor - - CreatedAt, UpdatedAt time.Time -} - -// DeleteOptions provide options on image delete -type DeleteOptions struct { - Synchronous bool -} - -// DeleteOpt allows configuring a delete operation -type DeleteOpt func(context.Context, *DeleteOptions) error - -// SynchronousDelete is used to indicate that an image deletion and removal of -// the image resources should occur synchronously before returning a result. -func SynchronousDelete() DeleteOpt { - return func(ctx context.Context, o *DeleteOptions) error { - o.Synchronous = true - return nil - } -} - -// Store and interact with images -type Store interface { - Get(ctx context.Context, name string) (Image, error) - List(ctx context.Context, filters ...string) ([]Image, error) - Create(ctx context.Context, image Image) (Image, error) - - // Update will replace the data in the store with the provided image. If - // one or more fieldpaths are provided, only those fields will be updated. - Update(ctx context.Context, image Image, fieldpaths ...string) (Image, error) - - Delete(ctx context.Context, name string, opts ...DeleteOpt) error -} - -// TODO(stevvooe): Many of these functions make strong platform assumptions, -// which are untrue in a lot of cases. More refactoring must be done here to -// make this work in all cases. - -// Config resolves the image configuration descriptor. -// -// The caller can then use the descriptor to resolve and process the -// configuration of the image. -func (image *Image) Config(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) (ocispec.Descriptor, error) { - return Config(ctx, provider, image.Target, platform) -} - -// RootFS returns the unpacked diffids that make up and images rootfs. -// -// These are used to verify that a set of layers unpacked to the expected -// values. -func (image *Image) RootFS(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) ([]digest.Digest, error) { - desc, err := image.Config(ctx, provider, platform) - if err != nil { - return nil, err - } - return RootFS(ctx, provider, desc) -} - -// Size returns the total size of an image's packed resources. -func (image *Image) Size(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) (int64, error) { - var size int64 - return size, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - if desc.Size < 0 { - return nil, fmt.Errorf("invalid size %v in %v (%v)", desc.Size, desc.Digest, desc.MediaType) - } - size += desc.Size - return nil, nil - }), LimitManifests(FilterPlatforms(ChildrenHandler(provider), platform), platform, 1)), image.Target) -} - -type platformManifest struct { - p *ocispec.Platform - m *ocispec.Manifest -} - -// Manifest resolves a manifest from the image for the given platform. -// -// When a manifest descriptor inside of a manifest index does not have -// a platform defined, the platform from the image config is considered. -// -// If the descriptor points to a non-index manifest, then the manifest is -// unmarshalled and returned without considering the platform inside of the -// config. -// -// TODO(stevvooe): This violates the current platform agnostic approach to this -// package by returning a specific manifest type. 
We'll need to refactor this -// to return a manifest descriptor or decide that we want to bring the API in -// this direction because this abstraction is not needed.` -func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Manifest, error) { - var ( - limit = 1 - m []platformManifest - wasIndex bool - ) - - if err := Walk(ctx, HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - switch desc.MediaType { - case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: - p, err := content.ReadBlob(ctx, provider, desc) - if err != nil { - return nil, err - } - - if err := validateMediaType(p, desc.MediaType); err != nil { - return nil, fmt.Errorf("manifest: invalid desc %s: %w", desc.Digest, err) - } - - var manifest ocispec.Manifest - if err := json.Unmarshal(p, &manifest); err != nil { - return nil, err - } - - if desc.Digest != image.Digest && platform != nil { - if desc.Platform != nil && !platform.Match(*desc.Platform) { - return nil, nil - } - - if desc.Platform == nil { - p, err := content.ReadBlob(ctx, provider, manifest.Config) - if err != nil { - return nil, err - } - - var image ocispec.Image - if err := json.Unmarshal(p, &image); err != nil { - return nil, err - } - - if !platform.Match(platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture})) { - return nil, nil - } - - } - } - - m = append(m, platformManifest{ - p: desc.Platform, - m: &manifest, - }) - - return nil, nil - case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: - p, err := content.ReadBlob(ctx, provider, desc) - if err != nil { - return nil, err - } - - if err := validateMediaType(p, desc.MediaType); err != nil { - return nil, fmt.Errorf("manifest: invalid desc %s: %w", desc.Digest, err) - } - - var idx ocispec.Index - if err := json.Unmarshal(p, &idx); err != nil { - return nil, err - } - - if platform == nil { - return idx.Manifests, nil - } - - var descs []ocispec.Descriptor - for _, d := range idx.Manifests { - if d.Platform == nil || platform.Match(*d.Platform) { - descs = append(descs, d) - } - } - - sort.SliceStable(descs, func(i, j int) bool { - if descs[i].Platform == nil { - return false - } - if descs[j].Platform == nil { - return true - } - return platform.Less(*descs[i].Platform, *descs[j].Platform) - }) - - wasIndex = true - - if len(descs) > limit { - return descs[:limit], nil - } - return descs, nil - } - return nil, fmt.Errorf("unexpected media type %v for %v: %w", desc.MediaType, desc.Digest, errdefs.ErrNotFound) - }), image); err != nil { - return ocispec.Manifest{}, err - } - - if len(m) == 0 { - err := fmt.Errorf("manifest %v: %w", image.Digest, errdefs.ErrNotFound) - if wasIndex { - err = fmt.Errorf("no match for platform in manifest %v: %w", image.Digest, errdefs.ErrNotFound) - } - return ocispec.Manifest{}, err - } - return *m[0].m, nil -} - -// Config resolves the image configuration descriptor using a content provided -// to resolve child resources on the image. -// -// The caller can then use the descriptor to resolve and process the -// configuration of the image. 
-func Config(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Descriptor, error) { - manifest, err := Manifest(ctx, provider, image, platform) - if err != nil { - return ocispec.Descriptor{}, err - } - return manifest.Config, err -} - -// Platforms returns one or more platforms supported by the image. -func Platforms(ctx context.Context, provider content.Provider, image ocispec.Descriptor) ([]ocispec.Platform, error) { - var platformSpecs []ocispec.Platform - return platformSpecs, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - if desc.Platform != nil { - platformSpecs = append(platformSpecs, *desc.Platform) - return nil, ErrSkipDesc - } - - switch desc.MediaType { - case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: - p, err := content.ReadBlob(ctx, provider, desc) - if err != nil { - return nil, err - } - - var image ocispec.Image - if err := json.Unmarshal(p, &image); err != nil { - return nil, err - } - - platformSpecs = append(platformSpecs, - platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture})) - } - return nil, nil - }), ChildrenHandler(provider)), image) -} - -// Check returns nil if the all components of an image are available in the -// provider for the specified platform. -// -// If available is true, the caller can assume that required represents the -// complete set of content required for the image. -// -// missing will have the components that are part of required but not available -// in the provider. -// -// If there is a problem resolving content, an error will be returned. -func Check(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (available bool, required, present, missing []ocispec.Descriptor, err error) { - mfst, err := Manifest(ctx, provider, image, platform) - if err != nil { - if errdefs.IsNotFound(err) { - return false, []ocispec.Descriptor{image}, nil, []ocispec.Descriptor{image}, nil - } - - return false, nil, nil, nil, fmt.Errorf("failed to check image %v: %w", image.Digest, err) - } - - // TODO(stevvooe): It is possible that referenced conponents could have - // children, but this is rare. For now, we ignore this and only verify - // that manifest components are present. - required = append([]ocispec.Descriptor{mfst.Config}, mfst.Layers...) - - for _, desc := range required { - ra, err := provider.ReaderAt(ctx, desc) - if err != nil { - if errdefs.IsNotFound(err) { - missing = append(missing, desc) - continue - } else { - return false, nil, nil, nil, fmt.Errorf("failed to check image %v: %w", desc.Digest, err) - } - } - ra.Close() - present = append(present, desc) - - } - - return true, required, present, missing, nil -} - -// Children returns the immediate children of content described by the descriptor. -func Children(ctx context.Context, provider content.Provider, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - var descs []ocispec.Descriptor - switch desc.MediaType { - case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: - p, err := content.ReadBlob(ctx, provider, desc) - if err != nil { - return nil, err - } - - if err := validateMediaType(p, desc.MediaType); err != nil { - return nil, fmt.Errorf("children: invalid desc %s: %w", desc.Digest, err) - } - - // TODO(stevvooe): We just assume oci manifest, for now. There may be - // subtle differences from the docker version. 
- var manifest ocispec.Manifest - if err := json.Unmarshal(p, &manifest); err != nil { - return nil, err - } - - descs = append(descs, manifest.Config) - descs = append(descs, manifest.Layers...) - case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: - p, err := content.ReadBlob(ctx, provider, desc) - if err != nil { - return nil, err - } - - if err := validateMediaType(p, desc.MediaType); err != nil { - return nil, fmt.Errorf("children: invalid desc %s: %w", desc.Digest, err) - } - - var index ocispec.Index - if err := json.Unmarshal(p, &index); err != nil { - return nil, err - } - - descs = append(descs, index.Manifests...) - default: - if IsLayerType(desc.MediaType) || IsKnownConfig(desc.MediaType) { - // childless data types. - return nil, nil - } - log.G(ctx).Debugf("encountered unknown type %v; children may not be fetched", desc.MediaType) - } - - return descs, nil -} - -// unknownDocument represents a manifest, manifest list, or index that has not -// yet been validated. -type unknownDocument struct { - MediaType string `json:"mediaType,omitempty"` - Config json.RawMessage `json:"config,omitempty"` - Layers json.RawMessage `json:"layers,omitempty"` - Manifests json.RawMessage `json:"manifests,omitempty"` - FSLayers json.RawMessage `json:"fsLayers,omitempty"` // schema 1 -} - -// validateMediaType returns an error if the byte slice is invalid JSON or if -// the media type identifies the blob as one format but it contains elements of -// another format. -func validateMediaType(b []byte, mt string) error { - var doc unknownDocument - if err := json.Unmarshal(b, &doc); err != nil { - return err - } - if len(doc.FSLayers) != 0 { - return fmt.Errorf("media-type: schema 1 not supported") - } - switch mt { - case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: - if len(doc.Manifests) != 0 || - doc.MediaType == MediaTypeDockerSchema2ManifestList || - doc.MediaType == ocispec.MediaTypeImageIndex { - return fmt.Errorf("media-type: expected manifest but found index (%s)", mt) - } - case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: - if len(doc.Config) != 0 || len(doc.Layers) != 0 || - doc.MediaType == MediaTypeDockerSchema2Manifest || - doc.MediaType == ocispec.MediaTypeImageManifest { - return fmt.Errorf("media-type: expected index but found manifest (%s)", mt) - } - } - return nil -} - -// RootFS returns the unpacked diffids that make up and images rootfs. -// -// These are used to verify that a set of layers unpacked to the expected -// values. -func RootFS(ctx context.Context, provider content.Provider, configDesc ocispec.Descriptor) ([]digest.Digest, error) { - p, err := content.ReadBlob(ctx, provider, configDesc) - if err != nil { - return nil, err - } - - var config ocispec.Image - if err := json.Unmarshal(p, &config); err != nil { - return nil, err - } - return config.RootFS.DiffIDs, nil -} diff --git a/src/vendor/github.com/containerd/containerd/images/importexport.go b/src/vendor/github.com/containerd/containerd/images/importexport.go deleted file mode 100644 index 843adcadc..000000000 --- a/src/vendor/github.com/containerd/containerd/images/importexport.go +++ /dev/null @@ -1,37 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package images - -import ( - "context" - "io" - - "github.com/containerd/containerd/content" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// Importer is the interface for image importer. -type Importer interface { - // Import imports an image from a tar stream. - Import(ctx context.Context, store content.Store, reader io.Reader) (ocispec.Descriptor, error) -} - -// Exporter is the interface for image exporter. -type Exporter interface { - // Export exports an image to a tar stream. - Export(ctx context.Context, store content.Provider, desc ocispec.Descriptor, writer io.Writer) error -} diff --git a/src/vendor/github.com/containerd/containerd/images/mediatypes.go b/src/vendor/github.com/containerd/containerd/images/mediatypes.go deleted file mode 100644 index 671e160e1..000000000 --- a/src/vendor/github.com/containerd/containerd/images/mediatypes.go +++ /dev/null @@ -1,204 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package images - -import ( - "context" - "fmt" - "sort" - "strings" - - "github.com/containerd/containerd/errdefs" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// mediatype definitions for image components handled in containerd. -// -// oci components are generally referenced directly, although we may centralize -// here for clarity. 
-const ( - MediaTypeDockerSchema2Layer = "application/vnd.docker.image.rootfs.diff.tar" - MediaTypeDockerSchema2LayerForeign = "application/vnd.docker.image.rootfs.foreign.diff.tar" - MediaTypeDockerSchema2LayerGzip = "application/vnd.docker.image.rootfs.diff.tar.gzip" - MediaTypeDockerSchema2LayerForeignGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" - MediaTypeDockerSchema2Config = "application/vnd.docker.container.image.v1+json" - MediaTypeDockerSchema2Manifest = "application/vnd.docker.distribution.manifest.v2+json" - MediaTypeDockerSchema2ManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" - // Checkpoint/Restore Media Types - MediaTypeContainerd1Checkpoint = "application/vnd.containerd.container.criu.checkpoint.criu.tar" - MediaTypeContainerd1CheckpointPreDump = "application/vnd.containerd.container.criu.checkpoint.predump.tar" - MediaTypeContainerd1Resource = "application/vnd.containerd.container.resource.tar" - MediaTypeContainerd1RW = "application/vnd.containerd.container.rw.tar" - MediaTypeContainerd1CheckpointConfig = "application/vnd.containerd.container.checkpoint.config.v1+proto" - MediaTypeContainerd1CheckpointOptions = "application/vnd.containerd.container.checkpoint.options.v1+proto" - MediaTypeContainerd1CheckpointRuntimeName = "application/vnd.containerd.container.checkpoint.runtime.name" - MediaTypeContainerd1CheckpointRuntimeOptions = "application/vnd.containerd.container.checkpoint.runtime.options+proto" - // Legacy Docker schema1 manifest - MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" - // Encypted media types - MediaTypeImageLayerEncrypted = ocispec.MediaTypeImageLayer + "+encrypted" - MediaTypeImageLayerGzipEncrypted = ocispec.MediaTypeImageLayerGzip + "+encrypted" -) - -// DiffCompression returns the compression as defined by the layer diff media -// type. For Docker media types without compression, "unknown" is returned to -// indicate that the media type may be compressed. If the media type is not -// recognized as a layer diff, then it returns errdefs.ErrNotImplemented -func DiffCompression(ctx context.Context, mediaType string) (string, error) { - base, ext := parseMediaTypes(mediaType) - switch base { - case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerForeign: - if len(ext) > 0 { - // Type is wrapped - return "", nil - } - // These media types may have been compressed but failed to - // use the correct media type. The decompression function - // should detect and handle this case. - return "unknown", nil - case MediaTypeDockerSchema2LayerGzip, MediaTypeDockerSchema2LayerForeignGzip: - if len(ext) > 0 { - // Type is wrapped - return "", nil - } - return "gzip", nil - case ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerNonDistributable: - if len(ext) > 0 { - switch ext[len(ext)-1] { - case "gzip": - return "gzip", nil - case "zstd": - return "zstd", nil - } - } - return "", nil - default: - return "", fmt.Errorf("unrecognised mediatype %s: %w", mediaType, errdefs.ErrNotImplemented) - } -} - -// parseMediaTypes splits the media type into the base type and -// an array of sorted extensions -func parseMediaTypes(mt string) (string, []string) { - if mt == "" { - return "", []string{} - } - - s := strings.Split(mt, "+") - ext := s[1:] - sort.Strings(ext) - - return s[0], ext -} - -// IsNonDistributable returns true if the media type is non-distributable. 
-func IsNonDistributable(mt string) bool { - return strings.HasPrefix(mt, "application/vnd.oci.image.layer.nondistributable.") || - strings.HasPrefix(mt, "application/vnd.docker.image.rootfs.foreign.") -} - -// IsLayerType returns true if the media type is a layer -func IsLayerType(mt string) bool { - if strings.HasPrefix(mt, "application/vnd.oci.image.layer.") { - return true - } - - // Parse Docker media types, strip off any + suffixes first - base, _ := parseMediaTypes(mt) - switch base { - case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip, - MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip: - return true - } - return false -} - -// IsDockerType returns true if the media type has "application/vnd.docker." prefix -func IsDockerType(mt string) bool { - return strings.HasPrefix(mt, "application/vnd.docker.") -} - -// IsManifestType returns true if the media type is an OCI-compatible manifest. -// No support for schema1 manifest. -func IsManifestType(mt string) bool { - switch mt { - case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: - return true - default: - return false - } -} - -// IsIndexType returns true if the media type is an OCI-compatible index. -func IsIndexType(mt string) bool { - switch mt { - case ocispec.MediaTypeImageIndex, MediaTypeDockerSchema2ManifestList: - return true - default: - return false - } -} - -// IsConfigType returns true if the media type is an OCI-compatible image config. -// No support for containerd checkpoint configs. -func IsConfigType(mt string) bool { - switch mt { - case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: - return true - default: - return false - } -} - -// IsKnownConfig returns true if the media type is a known config type, -// including containerd checkpoint configs -func IsKnownConfig(mt string) bool { - switch mt { - case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig, - MediaTypeContainerd1Checkpoint, MediaTypeContainerd1CheckpointConfig: - return true - } - return false -} - -// ChildGCLabels returns the label for a given descriptor to reference it -func ChildGCLabels(desc ocispec.Descriptor) []string { - mt := desc.MediaType - if IsKnownConfig(mt) { - return []string{"containerd.io/gc.ref.content.config"} - } - - switch mt { - case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: - return []string{"containerd.io/gc.ref.content.m."} - } - - if IsLayerType(mt) { - return []string{"containerd.io/gc.ref.content.l."} - } - - return []string{"containerd.io/gc.ref.content."} -} - -// ChildGCLabelsFilterLayers returns the labels for a given descriptor to -// reference it, skipping layer media types -func ChildGCLabelsFilterLayers(desc ocispec.Descriptor) []string { - if IsLayerType(desc.MediaType) { - return nil - } - return ChildGCLabels(desc) -} diff --git a/src/vendor/github.com/containerd/containerd/labels/labels.go b/src/vendor/github.com/containerd/containerd/labels/labels.go deleted file mode 100644 index d76ff2cf9..000000000 --- a/src/vendor/github.com/containerd/containerd/labels/labels.go +++ /dev/null @@ -1,21 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package labels - -// LabelUncompressed is added to compressed layer contents. -// The value is digest of the uncompressed content. -const LabelUncompressed = "containerd.io/uncompressed" diff --git a/src/vendor/github.com/containerd/containerd/labels/validate.go b/src/vendor/github.com/containerd/containerd/labels/validate.go deleted file mode 100644 index 1fd527adb..000000000 --- a/src/vendor/github.com/containerd/containerd/labels/validate.go +++ /dev/null @@ -1,38 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package labels - -import ( - "fmt" - - "github.com/containerd/containerd/errdefs" -) - -const ( - maxSize = 4096 -) - -// Validate a label's key and value are under 4096 bytes -func Validate(k, v string) error { - if (len(k) + len(v)) > maxSize { - if len(k) > 10 { - k = k[:10] - } - return fmt.Errorf("label key and value greater than maximum size (%d bytes), key: %s: %w", maxSize, k, errdefs.ErrInvalidArgument) - } - return nil -} diff --git a/src/vendor/github.com/containerd/containerd/log/context.go b/src/vendor/github.com/containerd/containerd/log/context.go deleted file mode 100644 index 0db9562b8..000000000 --- a/src/vendor/github.com/containerd/containerd/log/context.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package log - -import ( - "context" - - "github.com/sirupsen/logrus" -) - -var ( - // G is an alias for GetLogger. - // - // We may want to define this locally to a package to get package tagged log - // messages. - G = GetLogger - - // L is an alias for the standard logger. - L = logrus.NewEntry(logrus.StandardLogger()) -) - -type ( - loggerKey struct{} -) - -const ( - // RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to - // ensure the formatted time is always the same number of characters. 
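Validate above caps a label's key and value at 4096 bytes combined. A small usage sketch against the upstream containerd/labels package (the digest value is a made-up example):

package main

import (
	"fmt"

	"github.com/containerd/containerd/labels"
)

func main() {
	// Key plus value must stay within the 4096-byte limit enforced by Validate.
	if err := labels.Validate(labels.LabelUncompressed, "sha256:0123456789abcdef"); err != nil {
		fmt.Println("invalid label:", err)
		return
	}
	fmt.Println("label ok")
}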
- RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - - // TextFormat represents the text logging format - TextFormat = "text" - - // JSONFormat represents the JSON logging format - JSONFormat = "json" -) - -// WithLogger returns a new context with the provided logger. Use in -// combination with logger.WithField(s) for great effect. -func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context { - e := logger.WithContext(ctx) - return context.WithValue(ctx, loggerKey{}, e) -} - -// GetLogger retrieves the current logger from the context. If no logger is -// available, the default logger is returned. -func GetLogger(ctx context.Context) *logrus.Entry { - logger := ctx.Value(loggerKey{}) - - if logger == nil { - return L.WithContext(ctx) - } - - return logger.(*logrus.Entry) -} diff --git a/src/vendor/github.com/containerd/containerd/platforms/compare.go b/src/vendor/github.com/containerd/containerd/platforms/compare.go deleted file mode 100644 index 3913ef663..000000000 --- a/src/vendor/github.com/containerd/containerd/platforms/compare.go +++ /dev/null @@ -1,203 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "strconv" - "strings" - - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// MatchComparer is able to match and compare platforms to -// filter and sort platforms. -type MatchComparer interface { - Matcher - - Less(specs.Platform, specs.Platform) bool -} - -// platformVector returns an (ordered) vector of appropriate specs.Platform -// objects to try matching for the given platform object (see platforms.Only). 
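WithLogger and GetLogger above thread a logrus entry through a context, with G as the conventional shorthand. A usage sketch against the upstream containerd/log package (the field name is arbitrary):

package main

import (
	"context"

	"github.com/containerd/containerd/log"
	"github.com/sirupsen/logrus"
)

func main() {
	// Attach a field-scoped logger to the context, then retrieve it anywhere via log.G.
	entry := logrus.NewEntry(logrus.StandardLogger()).WithField("component", "example")
	ctx := log.WithLogger(context.Background(), entry)
	log.G(ctx).Info("hello from the context logger")
}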
-func platformVector(platform specs.Platform) []specs.Platform { - vector := []specs.Platform{platform} - - switch platform.Architecture { - case "amd64": - if amd64Version, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && amd64Version > 1 { - for amd64Version--; amd64Version >= 1; amd64Version-- { - vector = append(vector, specs.Platform{ - Architecture: platform.Architecture, - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - Variant: "v" + strconv.Itoa(amd64Version), - }) - } - } - vector = append(vector, specs.Platform{ - Architecture: "386", - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - }) - case "arm": - if armVersion, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && armVersion > 5 { - for armVersion--; armVersion >= 5; armVersion-- { - vector = append(vector, specs.Platform{ - Architecture: platform.Architecture, - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - Variant: "v" + strconv.Itoa(armVersion), - }) - } - } - case "arm64": - variant := platform.Variant - if variant == "" { - variant = "v8" - } - vector = append(vector, platformVector(specs.Platform{ - Architecture: "arm", - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - Variant: variant, - })...) - } - - return vector -} - -// Only returns a match comparer for a single platform -// using default resolution logic for the platform. -// -// For arm/v8, will also match arm/v7, arm/v6 and arm/v5 -// For arm/v7, will also match arm/v6 and arm/v5 -// For arm/v6, will also match arm/v5 -// For amd64, will also match 386 -func Only(platform specs.Platform) MatchComparer { - return Ordered(platformVector(Normalize(platform))...) -} - -// OnlyStrict returns a match comparer for a single platform. -// -// Unlike Only, OnlyStrict does not match sub platforms. -// So, "arm/vN" will not match "arm/vM" where M < N, -// and "amd64" will not also match "386". -// -// OnlyStrict matches non-canonical forms. -// So, "arm64" matches "arm/64/v8". -func OnlyStrict(platform specs.Platform) MatchComparer { - return Ordered(Normalize(platform)) -} - -// Ordered returns a platform MatchComparer which matches any of the platforms -// but orders them in order they are provided. -func Ordered(platforms ...specs.Platform) MatchComparer { - matchers := make([]Matcher, len(platforms)) - for i := range platforms { - matchers[i] = NewMatcher(platforms[i]) - } - return orderedPlatformComparer{ - matchers: matchers, - } -} - -// Any returns a platform MatchComparer which matches any of the platforms -// with no preference for ordering. -func Any(platforms ...specs.Platform) MatchComparer { - matchers := make([]Matcher, len(platforms)) - for i := range platforms { - matchers[i] = NewMatcher(platforms[i]) - } - return anyPlatformComparer{ - matchers: matchers, - } -} - -// All is a platform MatchComparer which matches all platforms -// with preference for ordering. 
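Only above expands a platform into its compatible variants (arm/v7 also matching v6 and v5, amd64 also matching 386) before building an ordered matcher. A usage sketch against the upstream containerd/platforms package:

package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Per the Only documentation above, linux/arm/v7 also matches the lower arm variants.
	m := platforms.Only(specs.Platform{OS: "linux", Architecture: "arm", Variant: "v7"})
	fmt.Println(m.Match(specs.Platform{OS: "linux", Architecture: "arm", Variant: "v6"})) // true
	fmt.Println(m.Match(specs.Platform{OS: "linux", Architecture: "arm64"}))              // false
}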
-var All MatchComparer = allPlatformComparer{} - -type orderedPlatformComparer struct { - matchers []Matcher -} - -func (c orderedPlatformComparer) Match(platform specs.Platform) bool { - for _, m := range c.matchers { - if m.Match(platform) { - return true - } - } - return false -} - -func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool { - for _, m := range c.matchers { - p1m := m.Match(p1) - p2m := m.Match(p2) - if p1m && !p2m { - return true - } - if p1m || p2m { - return false - } - } - return false -} - -type anyPlatformComparer struct { - matchers []Matcher -} - -func (c anyPlatformComparer) Match(platform specs.Platform) bool { - for _, m := range c.matchers { - if m.Match(platform) { - return true - } - } - return false -} - -func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool { - var p1m, p2m bool - for _, m := range c.matchers { - if !p1m && m.Match(p1) { - p1m = true - } - if !p2m && m.Match(p2) { - p2m = true - } - if p1m && p2m { - return false - } - } - // If one matches, and the other does, sort match first - return p1m && !p2m -} - -type allPlatformComparer struct{} - -func (allPlatformComparer) Match(specs.Platform) bool { - return true -} - -func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool { - return false -} diff --git a/src/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/src/vendor/github.com/containerd/containerd/platforms/cpuinfo.go deleted file mode 100644 index 046e0356d..000000000 --- a/src/vendor/github.com/containerd/containerd/platforms/cpuinfo.go +++ /dev/null @@ -1,131 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "bufio" - "fmt" - "os" - "runtime" - "strings" - "sync" - - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/log" -) - -// Present the ARM instruction set architecture, eg: v7, v8 -// Don't use this value directly; call cpuVariant() instead. -var cpuVariantValue string - -var cpuVariantOnce sync.Once - -func cpuVariant() string { - cpuVariantOnce.Do(func() { - if isArmArch(runtime.GOARCH) { - cpuVariantValue = getCPUVariant() - } - }) - return cpuVariantValue -} - -// For Linux, the kernel has already detected the ABI, ISA and Features. -// So we don't need to access the ARM registers to detect platform information -// by ourselves. We can just parse these information from /proc/cpuinfo -func getCPUInfo(pattern string) (info string, err error) { - if !isLinuxOS(runtime.GOOS) { - return "", fmt.Errorf("getCPUInfo for OS %s: %w", runtime.GOOS, errdefs.ErrNotImplemented) - } - - cpuinfo, err := os.Open("/proc/cpuinfo") - if err != nil { - return "", err - } - defer cpuinfo.Close() - - // Start to Parse the Cpuinfo line by line. For SMP SoC, we parse - // the first core is enough. 
- scanner := bufio.NewScanner(cpuinfo) - for scanner.Scan() { - newline := scanner.Text() - list := strings.Split(newline, ":") - - if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) { - return strings.TrimSpace(list[1]), nil - } - } - - // Check whether the scanner encountered errors - err = scanner.Err() - if err != nil { - return "", err - } - - return "", fmt.Errorf("getCPUInfo for pattern: %s: %w", pattern, errdefs.ErrNotFound) -} - -func getCPUVariant() string { - if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { - // Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use - // runtime.GOARCH to determine the variants - var variant string - switch runtime.GOARCH { - case "arm64": - variant = "v8" - case "arm": - variant = "v7" - default: - variant = "unknown" - } - - return variant - } - - variant, err := getCPUInfo("Cpu architecture") - if err != nil { - log.L.WithError(err).Error("failure getting variant") - return "" - } - - // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7") - // https://www.raspberrypi.org/forums/viewtopic.php?t=12614 - if runtime.GOARCH == "arm" && variant == "7" { - model, err := getCPUInfo("model name") - if err == nil && strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { - variant = "6" - } - } - - switch strings.ToLower(variant) { - case "8", "aarch64": - variant = "v8" - case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": - variant = "v7" - case "6", "6tej": - variant = "v6" - case "5", "5t", "5te", "5tej": - variant = "v5" - case "4", "4t": - variant = "v4" - case "3": - variant = "v3" - default: - variant = "unknown" - } - - return variant -} diff --git a/src/vendor/github.com/containerd/containerd/platforms/database.go b/src/vendor/github.com/containerd/containerd/platforms/database.go deleted file mode 100644 index dbe9957ca..000000000 --- a/src/vendor/github.com/containerd/containerd/platforms/database.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "runtime" - "strings" -) - -// isLinuxOS returns true if the operating system is Linux. -// -// The OS value should be normalized before calling this function. -func isLinuxOS(os string) bool { - return os == "linux" -} - -// These function are generated from https://golang.org/src/go/build/syslist.go. -// -// We use switch statements because they are slightly faster than map lookups -// and use a little less memory. - -// isKnownOS returns true if we know about the operating system. -// -// The OS value should be normalized before calling this function. -func isKnownOS(os string) bool { - switch os { - case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "ios", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": - return true - } - return false -} - -// isArmArch returns true if the architecture is ARM. 
-// -// The arch value should be normalized before being passed to this function. -func isArmArch(arch string) bool { - switch arch { - case "arm", "arm64": - return true - } - return false -} - -// isKnownArch returns true if we know about the architecture. -// -// The arch value should be normalized before being passed to this function. -func isKnownArch(arch string) bool { - switch arch { - case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "loong64", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": - return true - } - return false -} - -func normalizeOS(os string) string { - if os == "" { - return runtime.GOOS - } - os = strings.ToLower(os) - - switch os { - case "macos": - os = "darwin" - } - return os -} - -// normalizeArch normalizes the architecture. -func normalizeArch(arch, variant string) (string, string) { - arch, variant = strings.ToLower(arch), strings.ToLower(variant) - switch arch { - case "i386": - arch = "386" - variant = "" - case "x86_64", "x86-64", "amd64": - arch = "amd64" - if variant == "v1" { - variant = "" - } - case "aarch64", "arm64": - arch = "arm64" - switch variant { - case "8", "v8": - variant = "" - } - case "armhf": - arch = "arm" - variant = "v7" - case "armel": - arch = "arm" - variant = "v6" - case "arm": - switch variant { - case "", "7": - variant = "v7" - case "5", "6", "8": - variant = "v" + variant - } - } - - return arch, variant -} diff --git a/src/vendor/github.com/containerd/containerd/platforms/defaults.go b/src/vendor/github.com/containerd/containerd/platforms/defaults.go deleted file mode 100644 index cfa3ff34a..000000000 --- a/src/vendor/github.com/containerd/containerd/platforms/defaults.go +++ /dev/null @@ -1,27 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -// DefaultString returns the default string specifier for the platform. -func DefaultString() string { - return Format(DefaultSpec()) -} - -// DefaultStrict returns strict form of Default. -func DefaultStrict() MatchComparer { - return OnlyStrict(DefaultSpec()) -} diff --git a/src/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go b/src/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go deleted file mode 100644 index e249fe48d..000000000 --- a/src/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go +++ /dev/null @@ -1,45 +0,0 @@ -//go:build darwin -// +build darwin - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "runtime" - - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// DefaultSpec returns the current platform's default platform specification. -func DefaultSpec() specs.Platform { - return specs.Platform{ - OS: runtime.GOOS, - Architecture: runtime.GOARCH, - // The Variant field will be empty if arch != ARM. - Variant: cpuVariant(), - } -} - -// Default returns the default matcher for the platform. -func Default() MatchComparer { - return Ordered(DefaultSpec(), specs.Platform{ - // darwin runtime also supports Linux binary via runu/LKL - OS: "linux", - Architecture: runtime.GOARCH, - }) -} diff --git a/src/vendor/github.com/containerd/containerd/platforms/defaults_unix.go b/src/vendor/github.com/containerd/containerd/platforms/defaults_unix.go deleted file mode 100644 index 49690f1b3..000000000 --- a/src/vendor/github.com/containerd/containerd/platforms/defaults_unix.go +++ /dev/null @@ -1,41 +0,0 @@ -//go:build !windows && !darwin -// +build !windows,!darwin - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "runtime" - - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// DefaultSpec returns the current platform's default platform specification. -func DefaultSpec() specs.Platform { - return specs.Platform{ - OS: runtime.GOOS, - Architecture: runtime.GOARCH, - // The Variant field will be empty if arch != ARM. - Variant: cpuVariant(), - } -} - -// Default returns the default matcher for the platform. -func Default() MatchComparer { - return Only(DefaultSpec()) -} diff --git a/src/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/src/vendor/github.com/containerd/containerd/platforms/defaults_windows.go deleted file mode 100644 index c1aaf72ca..000000000 --- a/src/vendor/github.com/containerd/containerd/platforms/defaults_windows.go +++ /dev/null @@ -1,91 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "fmt" - "runtime" - "strconv" - "strings" - - imagespec "github.com/opencontainers/image-spec/specs-go/v1" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/sys/windows" -) - -// DefaultSpec returns the current platform's default platform specification. 
-func DefaultSpec() specs.Platform { - major, minor, build := windows.RtlGetNtVersionNumbers() - return specs.Platform{ - OS: runtime.GOOS, - Architecture: runtime.GOARCH, - OSVersion: fmt.Sprintf("%d.%d.%d", major, minor, build), - // The Variant field will be empty if arch != ARM. - Variant: cpuVariant(), - } -} - -type matchComparer struct { - defaults Matcher - osVersionPrefix string -} - -// Match matches platform with the same windows major, minor -// and build version. -func (m matchComparer) Match(p imagespec.Platform) bool { - if m.defaults.Match(p) { - // TODO(windows): Figure out whether OSVersion is deprecated. - return strings.HasPrefix(p.OSVersion, m.osVersionPrefix) - } - return false -} - -// Less sorts matched platforms in front of other platforms. -// For matched platforms, it puts platforms with larger revision -// number in front. -func (m matchComparer) Less(p1, p2 imagespec.Platform) bool { - m1, m2 := m.Match(p1), m.Match(p2) - if m1 && m2 { - r1, r2 := revision(p1.OSVersion), revision(p2.OSVersion) - return r1 > r2 - } - return m1 && !m2 -} - -func revision(v string) int { - parts := strings.Split(v, ".") - if len(parts) < 4 { - return 0 - } - r, err := strconv.Atoi(parts[3]) - if err != nil { - return 0 - } - return r -} - -// Default returns the current platform's default platform specification. -func Default() MatchComparer { - major, minor, build := windows.RtlGetNtVersionNumbers() - return matchComparer{ - defaults: Ordered(DefaultSpec(), specs.Platform{ - OS: "linux", - Architecture: runtime.GOARCH, - }), - osVersionPrefix: fmt.Sprintf("%d.%d.%d", major, minor, build), - } -} diff --git a/src/vendor/github.com/containerd/containerd/platforms/platforms.go b/src/vendor/github.com/containerd/containerd/platforms/platforms.go deleted file mode 100644 index 8f955d036..000000000 --- a/src/vendor/github.com/containerd/containerd/platforms/platforms.go +++ /dev/null @@ -1,261 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package platforms provides a toolkit for normalizing, matching and -// specifying container platforms. -// -// Centered around OCI platform specifications, we define a string-based -// specifier syntax that can be used for user input. With a specifier, users -// only need to specify the parts of the platform that are relevant to their -// context, providing an operating system or architecture or both. -// -// How do I use this package? -// -// The vast majority of use cases should simply use the match function with -// user input. The first step is to parse a specifier into a matcher: -// -// m, err := Parse("linux") -// if err != nil { ... } -// -// Once you have a matcher, use it to match against the platform declared by a -// component, typically from an image or runtime. 
Since extracting an images -// platform is a little more involved, we'll use an example against the -// platform default: -// -// if ok := m.Match(Default()); !ok { /* doesn't match */ } -// -// This can be composed in loops for resolving runtimes or used as a filter for -// fetch and select images. -// -// More details of the specifier syntax and platform spec follow. -// -// Declaring Platform Support -// -// Components that have strict platform requirements should use the OCI -// platform specification to declare their support. Typically, this will be -// images and runtimes that should make these declaring which platform they -// support specifically. This looks roughly as follows: -// -// type Platform struct { -// Architecture string -// OS string -// Variant string -// } -// -// Most images and runtimes should at least set Architecture and OS, according -// to their GOARCH and GOOS values, respectively (follow the OCI image -// specification when in doubt). ARM should set variant under certain -// discussions, which are outlined below. -// -// Platform Specifiers -// -// While the OCI platform specifications provide a tool for components to -// specify structured information, user input typically doesn't need the full -// context and much can be inferred. To solve this problem, we introduced -// "specifiers". A specifier has the format -// `||/[/]`. The user can provide either the -// operating system or the architecture or both. -// -// An example of a common specifier is `linux/amd64`. If the host has a default -// of runtime that matches this, the user can simply provide the component that -// matters. For example, if a image provides amd64 and arm64 support, the -// operating system, `linux` can be inferred, so they only have to provide -// `arm64` or `amd64`. Similar behavior is implemented for operating systems, -// where the architecture may be known but a runtime may support images from -// different operating systems. -// -// Normalization -// -// Because not all users are familiar with the way the Go runtime represents -// platforms, several normalizations have been provided to make this package -// easier to user. -// -// The following are performed for architectures: -// -// Value Normalized -// aarch64 arm64 -// armhf arm -// armel arm/v6 -// i386 386 -// x86_64 amd64 -// x86-64 amd64 -// -// We also normalize the operating system `macos` to `darwin`. -// -// ARM Support -// -// To qualify ARM architecture, the Variant field is used to qualify the arm -// version. The most common arm version, v7, is represented without the variant -// unless it is explicitly provided. This is treated as equivalent to armhf. A -// previous architecture, armel, will be normalized to arm/v6. -// -// While these normalizations are provided, their support on arm platforms has -// not yet been fully implemented and tested. -package platforms - -import ( - "fmt" - "path" - "regexp" - "runtime" - "strconv" - "strings" - - "github.com/containerd/containerd/errdefs" - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -var ( - specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`) -) - -// Matcher matches platforms specifications, provided by an image or runtime. -type Matcher interface { - Match(platform specs.Platform) bool -} - -// NewMatcher returns a simple matcher based on the provided platform -// specification. The returned matcher only looks for equality based on os, -// architecture and variant. 
-// -// One may implement their own matcher if this doesn't provide the required -// functionality. -// -// Applications should opt to use `Match` over directly parsing specifiers. -func NewMatcher(platform specs.Platform) Matcher { - return &matcher{ - Platform: Normalize(platform), - } -} - -type matcher struct { - specs.Platform -} - -func (m *matcher) Match(platform specs.Platform) bool { - normalized := Normalize(platform) - return m.OS == normalized.OS && - m.Architecture == normalized.Architecture && - m.Variant == normalized.Variant -} - -func (m *matcher) String() string { - return Format(m.Platform) -} - -// Parse parses the platform specifier syntax into a platform declaration. -// -// Platform specifiers are in the format `||/[/]`. -// The minimum required information for a platform specifier is the operating -// system or architecture. If there is only a single string (no slashes), the -// value will be matched against the known set of operating systems, then fall -// back to the known set of architectures. The missing component will be -// inferred based on the local environment. -func Parse(specifier string) (specs.Platform, error) { - if strings.Contains(specifier, "*") { - // TODO(stevvooe): need to work out exact wildcard handling - return specs.Platform{}, fmt.Errorf("%q: wildcards not yet supported: %w", specifier, errdefs.ErrInvalidArgument) - } - - parts := strings.Split(specifier, "/") - - for _, part := range parts { - if !specifierRe.MatchString(part) { - return specs.Platform{}, fmt.Errorf("%q is an invalid component of %q: platform specifier component must match %q: %w", part, specifier, specifierRe.String(), errdefs.ErrInvalidArgument) - } - } - - var p specs.Platform - switch len(parts) { - case 1: - // in this case, we will test that the value might be an OS, then look - // it up. If it is not known, we'll treat it as an architecture. Since - // we have very little information about the platform here, we are - // going to be a little more strict if we don't know about the argument - // value. - p.OS = normalizeOS(parts[0]) - if isKnownOS(p.OS) { - // picks a default architecture - p.Architecture = runtime.GOARCH - if p.Architecture == "arm" && cpuVariant() != "v7" { - p.Variant = cpuVariant() - } - - return p, nil - } - - p.Architecture, p.Variant = normalizeArch(parts[0], "") - if p.Architecture == "arm" && p.Variant == "v7" { - p.Variant = "" - } - if isKnownArch(p.Architecture) { - p.OS = runtime.GOOS - return p, nil - } - - return specs.Platform{}, fmt.Errorf("%q: unknown operating system or architecture: %w", specifier, errdefs.ErrInvalidArgument) - case 2: - // In this case, we treat as a regular os/arch pair. We don't care - // about whether or not we know of the platform. - p.OS = normalizeOS(parts[0]) - p.Architecture, p.Variant = normalizeArch(parts[1], "") - if p.Architecture == "arm" && p.Variant == "v7" { - p.Variant = "" - } - - return p, nil - case 3: - // we have a fully specified variant, this is rare - p.OS = normalizeOS(parts[0]) - p.Architecture, p.Variant = normalizeArch(parts[1], parts[2]) - if p.Architecture == "arm64" && p.Variant == "" { - p.Variant = "v8" - } - - return p, nil - } - - return specs.Platform{}, fmt.Errorf("%q: cannot parse platform specifier: %w", specifier, errdefs.ErrInvalidArgument) -} - -// MustParse is like Parses but panics if the specifier cannot be parsed. -// Simplifies initialization of global variables. 
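Parse above accepts the os, arch, or os/arch[/variant] specifier syntax described in the package documentation, inferring the missing component from the local environment. A usage sketch against the upstream containerd/platforms package:

package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
)

func main() {
	// A bare architecture is accepted; the OS is inferred from the running host.
	p, err := platforms.Parse("arm64")
	if err != nil {
		panic(err)
	}
	fmt.Println(platforms.Format(p)) // e.g. "linux/arm64" on a Linux host

	p, err = platforms.Parse("linux/amd64")
	if err != nil {
		panic(err)
	}
	fmt.Println(p.OS, p.Architecture) // linux amd64
}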
-func MustParse(specifier string) specs.Platform { - p, err := Parse(specifier) - if err != nil { - panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error()) - } - return p -} - -// Format returns a string specifier from the provided platform specification. -func Format(platform specs.Platform) string { - if platform.OS == "" { - return "unknown" - } - - return path.Join(platform.OS, platform.Architecture, platform.Variant) -} - -// Normalize validates and translate the platform to the canonical value. -// -// For example, if "Aarch64" is encountered, we change it to "arm64" or if -// "x86_64" is encountered, it becomes "amd64". -func Normalize(platform specs.Platform) specs.Platform { - platform.OS = normalizeOS(platform.OS) - platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant) - return platform -} diff --git a/src/vendor/github.com/containerd/containerd/reference/reference.go b/src/vendor/github.com/containerd/containerd/reference/reference.go deleted file mode 100644 index a4bf6da60..000000000 --- a/src/vendor/github.com/containerd/containerd/reference/reference.go +++ /dev/null @@ -1,166 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package reference - -import ( - "errors" - "fmt" - "net/url" - "path" - "regexp" - "strings" - - digest "github.com/opencontainers/go-digest" -) - -var ( - // ErrInvalid is returned when there is an invalid reference - ErrInvalid = errors.New("invalid reference") - // ErrObjectRequired is returned when the object is required - ErrObjectRequired = errors.New("object required") - // ErrHostnameRequired is returned when the hostname is required - ErrHostnameRequired = errors.New("hostname required") -) - -// Spec defines the main components of a reference specification. -// -// A reference specification is a schema-less URI parsed into common -// components. The two main components, locator and object, are required to be -// supported by remotes. It represents a superset of the naming define in -// docker's reference schema. It aims to be compatible but not prescriptive. -// -// While the interpretation of the components, locator and object, are up to -// the remote, we define a few common parts, accessible via helper methods. -// -// The first is the hostname, which is part of the locator. This doesn't need -// to map to a physical resource, but it must parse as a hostname. We refer to -// this as the namespace. -// -// The other component made accessible by helper method is the digest. This is -// part of the object identifier, always prefixed with an '@'. If present, the -// remote may use the digest portion directly or resolve it against a prefix. -// If the object does not include the `@` symbol, the return value for `Digest` -// will be empty. -type Spec struct { - // Locator is the host and path portion of the specification. The host - // portion may refer to an actual host or just a namespace of related - // images. 
- // - // Typically, the locator may used to resolve the remote to fetch specific - // resources. - Locator string - - // Object contains the identifier for the remote resource. Classically, - // this is a tag but can refer to anything in a remote. By convention, any - // portion that may be a partial or whole digest will be preceded by an - // `@`. Anything preceding the `@` will be referred to as the "tag". - // - // In practice, we will see this broken down into the following formats: - // - // 1. - // 2. @ - // 3. @ - // - // We define the tag to be anything except '@' and ':'. may - // be a full valid digest or shortened version, possibly with elided - // algorithm. - Object string -} - -var splitRe = regexp.MustCompile(`[:@]`) - -// Parse parses the string into a structured ref. -func Parse(s string) (Spec, error) { - if strings.Contains(s, "://") { - return Spec{}, ErrInvalid - } - - u, err := url.Parse("dummy://" + s) - if err != nil { - return Spec{}, err - } - - if u.Scheme != "dummy" { - return Spec{}, ErrInvalid - } - - if u.Host == "" { - return Spec{}, ErrHostnameRequired - } - - var object string - - if idx := splitRe.FindStringIndex(u.Path); idx != nil { - // This allows us to retain the @ to signify digests or shortened digests in - // the object. - object = u.Path[idx[0]:] - if object[:1] == ":" { - object = object[1:] - } - u.Path = u.Path[:idx[0]] - } - - return Spec{ - Locator: path.Join(u.Host, u.Path), - Object: object, - }, nil -} - -// Hostname returns the hostname portion of the locator. -// -// Remotes are not required to directly access the resources at this host. This -// method is provided for convenience. -func (r Spec) Hostname() string { - i := strings.Index(r.Locator, "/") - - if i < 0 { - return r.Locator - } - return r.Locator[:i] -} - -// Digest returns the digest portion of the reference spec. This may be a -// partial or invalid digest, which may be used to lookup a complete digest. -func (r Spec) Digest() digest.Digest { - _, dgst := SplitObject(r.Object) - return dgst -} - -// String returns the normalized string for the ref. -func (r Spec) String() string { - if r.Object == "" { - return r.Locator - } - if r.Object[:1] == "@" { - return fmt.Sprintf("%v%v", r.Locator, r.Object) - } - - return fmt.Sprintf("%v:%v", r.Locator, r.Object) -} - -// SplitObject provides two parts of the object spec, delimited by an `@` -// symbol. -// -// Either may be empty and it is the callers job to validate them -// appropriately. -func SplitObject(obj string) (tag string, dgst digest.Digest) { - parts := strings.SplitAfterN(obj, "@", 2) - if len(parts) < 2 { - return parts[0], "" - } - return parts[0], digest.Digest(parts[1]) -} diff --git a/src/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go b/src/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go deleted file mode 100644 index c259873d2..000000000 --- a/src/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go +++ /dev/null @@ -1,226 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
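Parse in reference.go above splits a schema-less reference into its locator and object, with Hostname and Digest as convenience accessors. A usage sketch against the upstream containerd/reference package (registry.example.com is a placeholder):

package main

import (
	"fmt"

	"github.com/containerd/containerd/reference"
)

func main() {
	spec, err := reference.Parse("registry.example.com/library/ubuntu:22.04")
	if err != nil {
		panic(err)
	}
	fmt.Println(spec.Hostname()) // registry.example.com
	fmt.Println(spec.Locator)    // registry.example.com/library/ubuntu
	fmt.Println(spec.Object)     // 22.04
}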
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package auth - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "strings" - "time" - - "github.com/containerd/containerd/log" - remoteserrors "github.com/containerd/containerd/remotes/errors" - "github.com/containerd/containerd/version" - "golang.org/x/net/context/ctxhttp" -) - -var ( - // ErrNoToken is returned if a request is successful but the body does not - // contain an authorization token. - ErrNoToken = errors.New("authorization server did not include a token in the response") -) - -// GenerateTokenOptions generates options for fetching a token based on a challenge -func GenerateTokenOptions(ctx context.Context, host, username, secret string, c Challenge) (TokenOptions, error) { - realm, ok := c.Parameters["realm"] - if !ok { - return TokenOptions{}, errors.New("no realm specified for token auth challenge") - } - - realmURL, err := url.Parse(realm) - if err != nil { - return TokenOptions{}, fmt.Errorf("invalid token auth challenge realm: %w", err) - } - - to := TokenOptions{ - Realm: realmURL.String(), - Service: c.Parameters["service"], - Username: username, - Secret: secret, - } - - scope, ok := c.Parameters["scope"] - if ok { - to.Scopes = append(to.Scopes, strings.Split(scope, " ")...) - } else { - log.G(ctx).WithField("host", host).Debug("no scope specified for token auth challenge") - } - - return to, nil -} - -// TokenOptions are options for requesting a token -type TokenOptions struct { - Realm string - Service string - Scopes []string - Username string - Secret string - - // FetchRefreshToken enables fetching a refresh token (aka "identity token", "offline token") along with the bearer token. - // - // For HTTP GET mode (FetchToken), FetchRefreshToken sets `offline_token=true` in the request. - // https://docs.docker.com/registry/spec/auth/token/#requesting-a-token - // - // For HTTP POST mode (FetchTokenWithOAuth), FetchRefreshToken sets `access_type=offline` in the request. - // https://docs.docker.com/registry/spec/auth/oauth/#getting-a-token - FetchRefreshToken bool -} - -// OAuthTokenResponse is response from fetching token with a OAuth POST request -type OAuthTokenResponse struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - Scope string `json:"scope"` -} - -// FetchTokenWithOAuth fetches a token using a POST request -func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http.Header, clientID string, to TokenOptions) (*OAuthTokenResponse, error) { - form := url.Values{} - if len(to.Scopes) > 0 { - form.Set("scope", strings.Join(to.Scopes, " ")) - } - form.Set("service", to.Service) - form.Set("client_id", clientID) - - if to.Username == "" { - form.Set("grant_type", "refresh_token") - form.Set("refresh_token", to.Secret) - } else { - form.Set("grant_type", "password") - form.Set("username", to.Username) - form.Set("password", to.Secret) - } - if to.FetchRefreshToken { - form.Set("access_type", "offline") - } - - req, err := http.NewRequest("POST", to.Realm, strings.NewReader(form.Encode())) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") - for k, v := range headers { - req.Header[k] = append(req.Header[k], v...) 
- } - if len(req.Header.Get("User-Agent")) == 0 { - req.Header.Set("User-Agent", "containerd/"+version.Version) - } - - resp, err := ctxhttp.Do(ctx, client, req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - return nil, remoteserrors.NewUnexpectedStatusErr(resp) - } - - decoder := json.NewDecoder(resp.Body) - - var tr OAuthTokenResponse - if err = decoder.Decode(&tr); err != nil { - return nil, fmt.Errorf("unable to decode token response: %w", err) - } - - if tr.AccessToken == "" { - return nil, ErrNoToken - } - - return &tr, nil -} - -// FetchTokenResponse is response from fetching token with GET request -type FetchTokenResponse struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - RefreshToken string `json:"refresh_token"` -} - -// FetchToken fetches a token using a GET request -func FetchToken(ctx context.Context, client *http.Client, headers http.Header, to TokenOptions) (*FetchTokenResponse, error) { - req, err := http.NewRequest("GET", to.Realm, nil) - if err != nil { - return nil, err - } - - for k, v := range headers { - req.Header[k] = append(req.Header[k], v...) - } - if len(req.Header.Get("User-Agent")) == 0 { - req.Header.Set("User-Agent", "containerd/"+version.Version) - } - - reqParams := req.URL.Query() - - if to.Service != "" { - reqParams.Add("service", to.Service) - } - - for _, scope := range to.Scopes { - reqParams.Add("scope", scope) - } - - if to.Secret != "" { - req.SetBasicAuth(to.Username, to.Secret) - } - - if to.FetchRefreshToken { - reqParams.Add("offline_token", "true") - } - - req.URL.RawQuery = reqParams.Encode() - - resp, err := ctxhttp.Do(ctx, client, req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - return nil, remoteserrors.NewUnexpectedStatusErr(resp) - } - - decoder := json.NewDecoder(resp.Body) - - var tr FetchTokenResponse - if err = decoder.Decode(&tr); err != nil { - return nil, fmt.Errorf("unable to decode token response: %w", err) - } - - // `access_token` is equivalent to `token` and if both are specified - // the choice is undefined. Canonicalize `access_token` by sticking - // things in `token`. - if tr.AccessToken != "" { - tr.Token = tr.AccessToken - } - - if tr.Token == "" { - return nil, ErrNoToken - } - - return &tr, nil -} diff --git a/src/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go b/src/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go deleted file mode 100644 index 223fa2d05..000000000 --- a/src/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go +++ /dev/null @@ -1,203 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package auth - -import ( - "net/http" - "sort" - "strings" -) - -// AuthenticationScheme defines scheme of the authentication method -type AuthenticationScheme byte - -const ( - // BasicAuth is scheme for Basic HTTP Authentication RFC 7617 - BasicAuth AuthenticationScheme = 1 << iota - // DigestAuth is scheme for HTTP Digest Access Authentication RFC 7616 - DigestAuth - // BearerAuth is scheme for OAuth 2.0 Bearer Tokens RFC 6750 - BearerAuth -) - -// Challenge carries information from a WWW-Authenticate response header. -// See RFC 2617. -type Challenge struct { - // scheme is the auth-scheme according to RFC 2617 - Scheme AuthenticationScheme - - // parameters are the auth-params according to RFC 2617 - Parameters map[string]string -} - -type byScheme []Challenge - -func (bs byScheme) Len() int { return len(bs) } -func (bs byScheme) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] } - -// Sort in priority order: token > digest > basic -func (bs byScheme) Less(i, j int) bool { return bs[i].Scheme > bs[j].Scheme } - -// Octet types from RFC 2616. -type octetType byte - -var octetTypes [256]octetType - -const ( - isToken octetType = 1 << iota - isSpace -) - -func init() { - // OCTET = - // CHAR = - // CTL = - // CR = - // LF = - // SP = - // HT = - // <"> = - // CRLF = CR LF - // LWS = [CRLF] 1*( SP | HT ) - // TEXT = - // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> - // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT - // token = 1* - // qdtext = > - - for c := 0; c < 256; c++ { - var t octetType - isCtl := c <= 31 || c == 127 - isChar := 0 <= c && c <= 127 - isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) - if strings.ContainsRune(" \t\r\n", rune(c)) { - t |= isSpace - } - if isChar && !isCtl && !isSeparator { - t |= isToken - } - octetTypes[c] = t - } -} - -// ParseAuthHeader parses challenges from WWW-Authenticate header -func ParseAuthHeader(header http.Header) []Challenge { - challenges := []Challenge{} - for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { - v, p := parseValueAndParams(h) - var s AuthenticationScheme - switch v { - case "basic": - s = BasicAuth - case "digest": - s = DigestAuth - case "bearer": - s = BearerAuth - default: - continue - } - challenges = append(challenges, Challenge{Scheme: s, Parameters: p}) - } - sort.Stable(byScheme(challenges)) - return challenges -} - -func parseValueAndParams(header string) (value string, params map[string]string) { - params = make(map[string]string) - value, s := expectToken(header) - if value == "" { - return - } - value = strings.ToLower(value) - for { - var pkey string - pkey, s = expectToken(skipSpace(s)) - if pkey == "" { - return - } - if !strings.HasPrefix(s, "=") { - return - } - var pvalue string - pvalue, s = expectTokenOrQuoted(s[1:]) - if pvalue == "" { - return - } - pkey = strings.ToLower(pkey) - params[pkey] = pvalue - s = skipSpace(s) - if !strings.HasPrefix(s, ",") { - return - } - s = s[1:] - } -} - -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpace == 0 { - break - } - } - return s[i:] -} - -func expectToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isToken == 0 { - break - } - } - return s[:i], s[i:] -} - -func expectTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return expectToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case 
'\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + 1; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} diff --git a/src/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go b/src/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go deleted file mode 100644 index eaa0e5dbd..000000000 --- a/src/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go +++ /dev/null @@ -1,361 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package docker - -import ( - "context" - "encoding/base64" - "errors" - "fmt" - "net/http" - "strings" - "sync" - - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/remotes/docker/auth" - remoteerrors "github.com/containerd/containerd/remotes/errors" - "github.com/sirupsen/logrus" -) - -type dockerAuthorizer struct { - credentials func(string) (string, string, error) - - client *http.Client - header http.Header - mu sync.RWMutex - - // indexed by host name - handlers map[string]*authHandler - - onFetchRefreshToken OnFetchRefreshToken -} - -// NewAuthorizer creates a Docker authorizer using the provided function to -// get credentials for the token server or basic auth. -// Deprecated: Use NewDockerAuthorizer -func NewAuthorizer(client *http.Client, f func(string) (string, string, error)) Authorizer { - return NewDockerAuthorizer(WithAuthClient(client), WithAuthCreds(f)) -} - -type authorizerConfig struct { - credentials func(string) (string, string, error) - client *http.Client - header http.Header - onFetchRefreshToken OnFetchRefreshToken -} - -// AuthorizerOpt configures an authorizer -type AuthorizerOpt func(*authorizerConfig) - -// WithAuthClient provides the HTTP client for the authorizer -func WithAuthClient(client *http.Client) AuthorizerOpt { - return func(opt *authorizerConfig) { - opt.client = client - } -} - -// WithAuthCreds provides a credential function to the authorizer -func WithAuthCreds(creds func(string) (string, string, error)) AuthorizerOpt { - return func(opt *authorizerConfig) { - opt.credentials = creds - } -} - -// WithAuthHeader provides HTTP headers for authorization -func WithAuthHeader(hdr http.Header) AuthorizerOpt { - return func(opt *authorizerConfig) { - opt.header = hdr - } -} - -// OnFetchRefreshToken is called on fetching request token. -type OnFetchRefreshToken func(ctx context.Context, refreshToken string, req *http.Request) - -// WithFetchRefreshToken enables fetching "refresh token" (aka "identity token", "offline token"). 
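ParseAuthHeader in auth/parse.go above turns WWW-Authenticate challenges into structured Challenge values, sorted bearer > digest > basic; the authorizer below then feeds them to GenerateTokenOptions. A usage sketch against the upstream package (the realm, service, and scope are placeholders):

package main

import (
	"fmt"
	"net/http"

	"github.com/containerd/containerd/remotes/docker/auth"
)

func main() {
	hdr := http.Header{}
	hdr.Add("WWW-Authenticate",
		`Bearer realm="https://auth.example.com/token",service="registry.example.com",scope="repository:library/hello:pull"`)

	// Challenges come back in priority order with their auth-params parsed into a map.
	for _, c := range auth.ParseAuthHeader(hdr) {
		fmt.Println(c.Scheme == auth.BearerAuth, c.Parameters["realm"], c.Parameters["scope"])
	}
}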
-func WithFetchRefreshToken(f OnFetchRefreshToken) AuthorizerOpt { - return func(opt *authorizerConfig) { - opt.onFetchRefreshToken = f - } -} - -// NewDockerAuthorizer creates an authorizer using Docker's registry -// authentication spec. -// See https://docs.docker.com/registry/spec/auth/ -func NewDockerAuthorizer(opts ...AuthorizerOpt) Authorizer { - var ao authorizerConfig - for _, opt := range opts { - opt(&ao) - } - - if ao.client == nil { - ao.client = http.DefaultClient - } - - return &dockerAuthorizer{ - credentials: ao.credentials, - client: ao.client, - header: ao.header, - handlers: make(map[string]*authHandler), - onFetchRefreshToken: ao.onFetchRefreshToken, - } -} - -// Authorize handles auth request. -func (a *dockerAuthorizer) Authorize(ctx context.Context, req *http.Request) error { - // skip if there is no auth handler - ah := a.getAuthHandler(req.URL.Host) - if ah == nil { - return nil - } - - auth, refreshToken, err := ah.authorize(ctx) - if err != nil { - return err - } - - req.Header.Set("Authorization", auth) - - if refreshToken != "" { - a.mu.RLock() - onFetchRefreshToken := a.onFetchRefreshToken - a.mu.RUnlock() - if onFetchRefreshToken != nil { - onFetchRefreshToken(ctx, refreshToken, req) - } - } - return nil -} - -func (a *dockerAuthorizer) getAuthHandler(host string) *authHandler { - a.mu.Lock() - defer a.mu.Unlock() - - return a.handlers[host] -} - -func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.Response) error { - last := responses[len(responses)-1] - host := last.Request.URL.Host - - a.mu.Lock() - defer a.mu.Unlock() - for _, c := range auth.ParseAuthHeader(last.Header) { - if c.Scheme == auth.BearerAuth { - if err := invalidAuthorization(c, responses); err != nil { - delete(a.handlers, host) - return err - } - - // reuse existing handler - // - // assume that one registry will return the common - // challenge information, including realm and service. - // and the resource scope is only different part - // which can be provided by each request. - if _, ok := a.handlers[host]; ok { - return nil - } - - var username, secret string - if a.credentials != nil { - var err error - username, secret, err = a.credentials(host) - if err != nil { - return err - } - } - - common, err := auth.GenerateTokenOptions(ctx, host, username, secret, c) - if err != nil { - return err - } - common.FetchRefreshToken = a.onFetchRefreshToken != nil - - a.handlers[host] = newAuthHandler(a.client, a.header, c.Scheme, common) - return nil - } else if c.Scheme == auth.BasicAuth && a.credentials != nil { - username, secret, err := a.credentials(host) - if err != nil { - return err - } - - if username != "" && secret != "" { - common := auth.TokenOptions{ - Username: username, - Secret: secret, - } - - a.handlers[host] = newAuthHandler(a.client, a.header, c.Scheme, common) - return nil - } - } - } - return fmt.Errorf("failed to find supported auth scheme: %w", errdefs.ErrNotImplemented) -} - -// authResult is used to control limit rate. -type authResult struct { - sync.WaitGroup - token string - refreshToken string - err error -} - -// authHandler is used to handle auth request per registry server. 
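NewDockerAuthorizer above assembles per-host auth handlers from functional options. A construction sketch against the upstream containerd remotes/docker package (the credentials callback is a stand-in for real credential lookup):

package main

import (
	"net/http"

	"github.com/containerd/containerd/remotes/docker"
)

func main() {
	// Build an authorizer that resolves credentials per registry host.
	authorizer := docker.NewDockerAuthorizer(
		docker.WithAuthClient(http.DefaultClient),
		docker.WithAuthCreds(func(host string) (string, string, error) {
			// Assumption: static demo credentials; real callers consult config or a credential helper.
			return "user", "secret", nil
		}),
	)
	_ = authorizer // typically handed to the docker resolver when pulling or pushing
}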
-type authHandler struct { - sync.Mutex - - header http.Header - - client *http.Client - - // only support basic and bearer schemes - scheme auth.AuthenticationScheme - - // common contains common challenge answer - common auth.TokenOptions - - // scopedTokens caches token indexed by scopes, which used in - // bearer auth case - scopedTokens map[string]*authResult -} - -func newAuthHandler(client *http.Client, hdr http.Header, scheme auth.AuthenticationScheme, opts auth.TokenOptions) *authHandler { - return &authHandler{ - header: hdr, - client: client, - scheme: scheme, - common: opts, - scopedTokens: map[string]*authResult{}, - } -} - -func (ah *authHandler) authorize(ctx context.Context) (string, string, error) { - switch ah.scheme { - case auth.BasicAuth: - return ah.doBasicAuth(ctx) - case auth.BearerAuth: - return ah.doBearerAuth(ctx) - default: - return "", "", fmt.Errorf("failed to find supported auth scheme: %s: %w", string(ah.scheme), errdefs.ErrNotImplemented) - } -} - -func (ah *authHandler) doBasicAuth(ctx context.Context) (string, string, error) { - username, secret := ah.common.Username, ah.common.Secret - - if username == "" || secret == "" { - return "", "", fmt.Errorf("failed to handle basic auth because missing username or secret") - } - - auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + secret)) - return fmt.Sprintf("Basic %s", auth), "", nil -} - -func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken string, err error) { - // copy common tokenOptions - to := ah.common - - to.Scopes = GetTokenScopes(ctx, to.Scopes) - - // Docs: https://docs.docker.com/registry/spec/auth/scope - scoped := strings.Join(to.Scopes, " ") - - ah.Lock() - if r, exist := ah.scopedTokens[scoped]; exist { - ah.Unlock() - r.Wait() - return r.token, r.refreshToken, r.err - } - - // only one fetch token job - r := new(authResult) - r.Add(1) - ah.scopedTokens[scoped] = r - ah.Unlock() - - defer func() { - token = fmt.Sprintf("Bearer %s", token) - r.token, r.refreshToken, r.err = token, refreshToken, err - r.Done() - }() - - // fetch token for the resource scope - if to.Secret != "" { - defer func() { - if err != nil { - err = fmt.Errorf("failed to fetch oauth token: %w", err) - } - }() - // credential information is provided, use oauth POST endpoint - // TODO: Allow setting client_id - resp, err := auth.FetchTokenWithOAuth(ctx, ah.client, ah.header, "containerd-client", to) - if err != nil { - var errStatus remoteerrors.ErrUnexpectedStatus - if errors.As(err, &errStatus) { - // Registries without support for POST may return 404 for POST /v2/token. - // As of September 2017, GCR is known to return 404. - // As of February 2018, JFrog Artifactory is known to return 401. - // As of January 2022, ACR is known to return 400. 
- if (errStatus.StatusCode == 405 && to.Username != "") || errStatus.StatusCode == 404 || errStatus.StatusCode == 401 || errStatus.StatusCode == 400 { - resp, err := auth.FetchToken(ctx, ah.client, ah.header, to) - if err != nil { - return "", "", err - } - return resp.Token, resp.RefreshToken, nil - } - log.G(ctx).WithFields(logrus.Fields{ - "status": errStatus.Status, - "body": string(errStatus.Body), - }).Debugf("token request failed") - } - return "", "", err - } - return resp.AccessToken, resp.RefreshToken, nil - } - // do request anonymously - resp, err := auth.FetchToken(ctx, ah.client, ah.header, to) - if err != nil { - return "", "", fmt.Errorf("failed to fetch anonymous token: %w", err) - } - return resp.Token, resp.RefreshToken, nil -} - -func invalidAuthorization(c auth.Challenge, responses []*http.Response) error { - errStr := c.Parameters["error"] - if errStr == "" { - return nil - } - - n := len(responses) - if n == 1 || (n > 1 && !sameRequest(responses[n-2].Request, responses[n-1].Request)) { - return nil - } - - return fmt.Errorf("server message: %s: %w", errStr, ErrInvalidAuthorization) -} - -func sameRequest(r1, r2 *http.Request) bool { - if r1.Method != r2.Method { - return false - } - if *r1.URL != *r2.URL { - return false - } - return true -} diff --git a/src/vendor/github.com/containerd/containerd/remotes/docker/converter.go b/src/vendor/github.com/containerd/containerd/remotes/docker/converter.go deleted file mode 100644 index d7dca0d36..000000000 --- a/src/vendor/github.com/containerd/containerd/remotes/docker/converter.go +++ /dev/null @@ -1,87 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package docker - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/remotes" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// LegacyConfigMediaType should be replaced by OCI image spec. -// -// More detail: docker/distribution#1622 -const LegacyConfigMediaType = "application/octet-stream" - -// ConvertManifest changes application/octet-stream to schema2 config media type if need. -// -// NOTE: -// 1. original manifest will be deleted by next gc round. -// 2. don't cover manifest list. 
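- // 3. on success the config media type is rewritten and the manifest is re-stored under a new digest, so callers should use the returned descriptor rather than the original one.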
-func ConvertManifest(ctx context.Context, store content.Store, desc ocispec.Descriptor) (ocispec.Descriptor, error) { - if !(desc.MediaType == images.MediaTypeDockerSchema2Manifest || - desc.MediaType == ocispec.MediaTypeImageManifest) { - - log.G(ctx).Warnf("do nothing for media type: %s", desc.MediaType) - return desc, nil - } - - // read manifest data - mb, err := content.ReadBlob(ctx, store, desc) - if err != nil { - return ocispec.Descriptor{}, fmt.Errorf("failed to read index data: %w", err) - } - - var manifest ocispec.Manifest - if err := json.Unmarshal(mb, &manifest); err != nil { - return ocispec.Descriptor{}, fmt.Errorf("failed to unmarshal data into manifest: %w", err) - } - - // check config media type - if manifest.Config.MediaType != LegacyConfigMediaType { - return desc, nil - } - - manifest.Config.MediaType = images.MediaTypeDockerSchema2Config - data, err := json.MarshalIndent(manifest, "", " ") - if err != nil { - return ocispec.Descriptor{}, fmt.Errorf("failed to marshal manifest: %w", err) - } - - // update manifest with gc labels - desc.Digest = digest.Canonical.FromBytes(data) - desc.Size = int64(len(data)) - - labels := map[string]string{} - for i, c := range append([]ocispec.Descriptor{manifest.Config}, manifest.Layers...) { - labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = c.Digest.String() - } - - ref := remotes.MakeRefKey(ctx, desc) - if err := content.WriteBlob(ctx, store, ref, bytes.NewReader(data), desc, content.WithLabels(labels)); err != nil { - return ocispec.Descriptor{}, fmt.Errorf("failed to update content: %w", err) - } - return desc, nil -} diff --git a/src/vendor/github.com/containerd/containerd/remotes/docker/errcode.go b/src/vendor/github.com/containerd/containerd/remotes/docker/errcode.go deleted file mode 100644 index 8c623bcbe..000000000 --- a/src/vendor/github.com/containerd/containerd/remotes/docker/errcode.go +++ /dev/null @@ -1,283 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package docker - -import ( - "encoding/json" - "fmt" - "strings" -) - -// ErrorCoder is the base interface for ErrorCode and Error allowing -// users of each to just call ErrorCode to get the real ID of each -type ErrorCoder interface { - ErrorCode() ErrorCode -} - -// ErrorCode represents the error type. The errors are serialized via strings -// and the integer format may change and should *never* be exported. -type ErrorCode int - -var _ error = ErrorCode(0) - -// ErrorCode just returns itself -func (ec ErrorCode) ErrorCode() ErrorCode { - return ec -} - -// Error returns the ID/Value -func (ec ErrorCode) Error() string { - // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. - return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) -} - -// Descriptor returns the descriptor for the error code. 
-func (ec ErrorCode) Descriptor() ErrorDescriptor { - d, ok := errorCodeToDescriptors[ec] - - if !ok { - return ErrorCodeUnknown.Descriptor() - } - - return d -} - -// String returns the canonical identifier for this error code. -func (ec ErrorCode) String() string { - return ec.Descriptor().Value -} - -// Message returned the human-readable error message for this error code. -func (ec ErrorCode) Message() string { - return ec.Descriptor().Message -} - -// MarshalText encodes the receiver into UTF-8-encoded text and returns the -// result. -func (ec ErrorCode) MarshalText() (text []byte, err error) { - return []byte(ec.String()), nil -} - -// UnmarshalText decodes the form generated by MarshalText. -func (ec *ErrorCode) UnmarshalText(text []byte) error { - desc, ok := idToDescriptors[string(text)] - - if !ok { - desc = ErrorCodeUnknown.Descriptor() - } - - *ec = desc.Code - - return nil -} - -// WithMessage creates a new Error struct based on the passed-in info and -// overrides the Message property. -func (ec ErrorCode) WithMessage(message string) Error { - return Error{ - Code: ec, - Message: message, - } -} - -// WithDetail creates a new Error struct based on the passed-in info and -// set the Detail property appropriately -func (ec ErrorCode) WithDetail(detail interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithDetail(detail) -} - -// WithArgs creates a new Error struct and sets the Args slice -func (ec ErrorCode) WithArgs(args ...interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithArgs(args...) -} - -// Error provides a wrapper around ErrorCode with extra Details provided. -type Error struct { - Code ErrorCode `json:"code"` - Message string `json:"message"` - Detail interface{} `json:"detail,omitempty"` - - // TODO(duglin): See if we need an "args" property so we can do the - // variable substitution right before showing the message to the user -} - -var _ error = Error{} - -// ErrorCode returns the ID/Value of this Error -func (e Error) ErrorCode() ErrorCode { - return e.Code -} - -// Error returns a human readable representation of the error. -func (e Error) Error() string { - return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) -} - -// WithDetail will return a new Error, based on the current one, but with -// some Detail info added -func (e Error) WithDetail(detail interface{}) Error { - return Error{ - Code: e.Code, - Message: e.Message, - Detail: detail, - } -} - -// WithArgs uses the passed-in list of interface{} as the substitution -// variables in the Error's Message string, but returns a new Error -func (e Error) WithArgs(args ...interface{}) Error { - return Error{ - Code: e.Code, - Message: fmt.Sprintf(e.Code.Message(), args...), - Detail: e.Detail, - } -} - -// ErrorDescriptor provides relevant information about a given error code. -type ErrorDescriptor struct { - // Code is the error code that this descriptor describes. - Code ErrorCode - - // Value provides a unique, string key, often captilized with - // underscores, to identify the error code. This value is used as the - // keyed value when serializing api errors. - Value string - - // Message is a short, human readable description of the error condition - // included in API responses. - Message string - - // Description provides a complete account of the errors purpose, suitable - // for use in documentation. - Description string - - // HTTPStatusCode provides the http status code that is associated with - // this error condition. 
- HTTPStatusCode int -} - -// ParseErrorCode returns the value by the string error code. -// `ErrorCodeUnknown` will be returned if the error is not known. -func ParseErrorCode(value string) ErrorCode { - ed, ok := idToDescriptors[value] - if ok { - return ed.Code - } - - return ErrorCodeUnknown -} - -// Errors provides the envelope for multiple errors and a few sugar methods -// for use within the application. -type Errors []error - -var _ error = Errors{} - -func (errs Errors) Error() string { - switch len(errs) { - case 0: - return "" - case 1: - return errs[0].Error() - default: - msg := "errors:\n" - for _, err := range errs { - msg += err.Error() + "\n" - } - return msg - } -} - -// Len returns the current number of errors. -func (errs Errors) Len() int { - return len(errs) -} - -// MarshalJSON converts slice of error, ErrorCode or Error into a -// slice of Error - then serializes -func (errs Errors) MarshalJSON() ([]byte, error) { - var tmpErrs struct { - Errors []Error `json:"errors,omitempty"` - } - - for _, daErr := range errs { - var err Error - - switch daErr := daErr.(type) { - case ErrorCode: - err = daErr.WithDetail(nil) - case Error: - err = daErr - default: - err = ErrorCodeUnknown.WithDetail(daErr) - - } - - // If the Error struct was setup and they forgot to set the - // Message field (meaning its "") then grab it from the ErrCode - msg := err.Message - if msg == "" { - msg = err.Code.Message() - } - - tmpErrs.Errors = append(tmpErrs.Errors, Error{ - Code: err.Code, - Message: msg, - Detail: err.Detail, - }) - } - - return json.Marshal(tmpErrs) -} - -// UnmarshalJSON deserializes []Error and then converts it into slice of -// Error or ErrorCode -func (errs *Errors) UnmarshalJSON(data []byte) error { - var tmpErrs struct { - Errors []Error - } - - if err := json.Unmarshal(data, &tmpErrs); err != nil { - return err - } - - var newErrs Errors - for _, daErr := range tmpErrs.Errors { - // If Message is empty or exactly matches the Code's message string - // then just use the Code, no need for a full Error struct - if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { - // Error's w/o details get converted to ErrorCode - newErrs = append(newErrs, daErr.Code) - } else { - // Error's w/ details are untouched - newErrs = append(newErrs, Error{ - Code: daErr.Code, - Message: daErr.Message, - Detail: daErr.Detail, - }) - } - } - - *errs = newErrs - return nil -} diff --git a/src/vendor/github.com/containerd/containerd/remotes/docker/errdesc.go b/src/vendor/github.com/containerd/containerd/remotes/docker/errdesc.go deleted file mode 100644 index b2bd4d82b..000000000 --- a/src/vendor/github.com/containerd/containerd/remotes/docker/errdesc.go +++ /dev/null @@ -1,154 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package docker - -import ( - "fmt" - "net/http" - "sort" - "sync" -) - -var ( - errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} - idToDescriptors = map[string]ErrorDescriptor{} - groupToDescriptors = map[string][]ErrorDescriptor{} -) - -var ( - // ErrorCodeUnknown is a generic error that can be used as a last - // resort if there is no situation-specific error message that can be used - ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ - Value: "UNKNOWN", - Message: "unknown error", - Description: `Generic error returned when the error does not have an - API classification.`, - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeUnsupported is returned when an operation is not supported. - ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ - Value: "UNSUPPORTED", - Message: "The operation is unsupported.", - Description: `The operation was unsupported due to a missing - implementation or invalid set of parameters.`, - HTTPStatusCode: http.StatusMethodNotAllowed, - }) - - // ErrorCodeUnauthorized is returned if a request requires - // authentication. - ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ - Value: "UNAUTHORIZED", - Message: "authentication required", - Description: `The access controller was unable to authenticate - the client. Often this will be accompanied by a - Www-Authenticate HTTP response header indicating how to - authenticate.`, - HTTPStatusCode: http.StatusUnauthorized, - }) - - // ErrorCodeDenied is returned if a client does not have sufficient - // permission to perform an action. - ErrorCodeDenied = Register("errcode", ErrorDescriptor{ - Value: "DENIED", - Message: "requested access to the resource is denied", - Description: `The access controller denied access for the - operation on a resource.`, - HTTPStatusCode: http.StatusForbidden, - }) - - // ErrorCodeUnavailable provides a common error to report unavailability - // of a service or endpoint. - ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ - Value: "UNAVAILABLE", - Message: "service unavailable", - Description: "Returned when a service is not available", - HTTPStatusCode: http.StatusServiceUnavailable, - }) - - // ErrorCodeTooManyRequests is returned if a client attempts too many - // times to contact a service endpoint. 
- ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ - Value: "TOOMANYREQUESTS", - Message: "too many requests", - Description: `Returned when a client attempts to contact a - service too many times`, - HTTPStatusCode: http.StatusTooManyRequests, - }) -) - -var nextCode = 1000 -var registerLock sync.Mutex - -// Register will make the passed-in error known to the environment and -// return a new ErrorCode -func Register(group string, descriptor ErrorDescriptor) ErrorCode { - registerLock.Lock() - defer registerLock.Unlock() - - descriptor.Code = ErrorCode(nextCode) - - if _, ok := idToDescriptors[descriptor.Value]; ok { - panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) - } - if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { - panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) - } - - groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) - errorCodeToDescriptors[descriptor.Code] = descriptor - idToDescriptors[descriptor.Value] = descriptor - - nextCode++ - return descriptor.Code -} - -type byValue []ErrorDescriptor - -func (a byValue) Len() int { return len(a) } -func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } - -// GetGroupNames returns the list of Error group names that are registered -func GetGroupNames() []string { - keys := []string{} - - for k := range groupToDescriptors { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// GetErrorCodeGroup returns the named group of error descriptors -func GetErrorCodeGroup(name string) []ErrorDescriptor { - desc := groupToDescriptors[name] - sort.Sort(byValue(desc)) - return desc -} - -// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are -// registered, irrespective of what group they're in -func GetErrorAllDescriptors() []ErrorDescriptor { - result := []ErrorDescriptor{} - - for _, group := range GetGroupNames() { - result = append(result, GetErrorCodeGroup(group)...) - } - sort.Sort(byValue(result)) - return result -} diff --git a/src/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go b/src/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go deleted file mode 100644 index 11a75356e..000000000 --- a/src/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go +++ /dev/null @@ -1,215 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package docker - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strings" - - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/log" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -type dockerFetcher struct { - *dockerBase -} - -func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { - ctx = log.WithLogger(ctx, log.G(ctx).WithField("digest", desc.Digest)) - - hosts := r.filterHosts(HostCapabilityPull) - if len(hosts) == 0 { - return nil, fmt.Errorf("no pull hosts: %w", errdefs.ErrNotFound) - } - - ctx, err := ContextWithRepositoryScope(ctx, r.refspec, false) - if err != nil { - return nil, err - } - - return newHTTPReadSeeker(desc.Size, func(offset int64) (io.ReadCloser, error) { - // firstly try fetch via external urls - for _, us := range desc.URLs { - ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", us)) - - u, err := url.Parse(us) - if err != nil { - log.G(ctx).WithError(err).Debug("failed to parse") - continue - } - if u.Scheme != "http" && u.Scheme != "https" { - log.G(ctx).Debug("non-http(s) alternative url is unsupported") - continue - } - log.G(ctx).Debug("trying alternative url") - - // Try this first, parse it - host := RegistryHost{ - Client: http.DefaultClient, - Host: u.Host, - Scheme: u.Scheme, - Path: u.Path, - Capabilities: HostCapabilityPull, - } - req := r.request(host, http.MethodGet) - // Strip namespace from base - req.path = u.Path - if u.RawQuery != "" { - req.path = req.path + "?" + u.RawQuery - } - - rc, err := r.open(ctx, req, desc.MediaType, offset) - if err != nil { - if errdefs.IsNotFound(err) { - continue // try one of the other urls. 
- } - - return nil, err - } - - return rc, nil - } - - // Try manifests endpoints for manifests types - switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, - images.MediaTypeDockerSchema1Manifest, - ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: - - var firstErr error - for _, host := range r.hosts { - req := r.request(host, http.MethodGet, "manifests", desc.Digest.String()) - if err := req.addNamespace(r.refspec.Hostname()); err != nil { - return nil, err - } - - rc, err := r.open(ctx, req, desc.MediaType, offset) - if err != nil { - // Store the error for referencing later - if firstErr == nil { - firstErr = err - } - continue // try another host - } - - return rc, nil - } - - return nil, firstErr - } - - // Finally use blobs endpoints - var firstErr error - for _, host := range r.hosts { - req := r.request(host, http.MethodGet, "blobs", desc.Digest.String()) - if err := req.addNamespace(r.refspec.Hostname()); err != nil { - return nil, err - } - - rc, err := r.open(ctx, req, desc.MediaType, offset) - if err != nil { - // Store the error for referencing later - if firstErr == nil { - firstErr = err - } - continue // try another host - } - - return rc, nil - } - - if errdefs.IsNotFound(firstErr) { - firstErr = fmt.Errorf("could not fetch content descriptor %v (%v) from remote: %w", - desc.Digest, desc.MediaType, errdefs.ErrNotFound, - ) - } - - return nil, firstErr - - }) -} - -func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string, offset int64) (_ io.ReadCloser, retErr error) { - req.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", ")) - - if offset > 0 { - // Note: "Accept-Ranges: bytes" cannot be trusted as some endpoints - // will return the header without supporting the range. The content - // range must always be checked. - req.header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) - } - - resp, err := req.doWithRetries(ctx, nil) - if err != nil { - return nil, err - } - defer func() { - if retErr != nil { - resp.Body.Close() - } - }() - - if resp.StatusCode > 299 { - // TODO(stevvooe): When doing a offset specific request, we should - // really distinguish between a 206 and a 200. In the case of 200, we - // can discard the bytes, hiding the seek behavior from the - // implementation. - - if resp.StatusCode == http.StatusNotFound { - return nil, fmt.Errorf("content at %v not found: %w", req.String(), errdefs.ErrNotFound) - } - var registryErr Errors - if err := json.NewDecoder(resp.Body).Decode(®istryErr); err != nil || registryErr.Len() < 1 { - return nil, fmt.Errorf("unexpected status code %v: %v", req.String(), resp.Status) - } - return nil, fmt.Errorf("unexpected status code %v: %s - Server message: %s", req.String(), resp.Status, registryErr.Error()) - } - if offset > 0 { - cr := resp.Header.Get("content-range") - if cr != "" { - if !strings.HasPrefix(cr, fmt.Sprintf("bytes %d-", offset)) { - return nil, fmt.Errorf("unhandled content range in response: %v", cr) - - } - } else { - // TODO: Should any cases where use of content range - // without the proper header be considered? - // 206 responses? 
- - // Discard up to offset - // Could use buffer pool here but this case should be rare - n, err := io.Copy(io.Discard, io.LimitReader(resp.Body, offset)) - if err != nil { - return nil, fmt.Errorf("failed to discard to offset: %w", err) - } - if n != offset { - return nil, errors.New("unable to discard to offset") - } - - } - } - - return resp.Body, nil -} diff --git a/src/vendor/github.com/containerd/containerd/remotes/docker/handler.go b/src/vendor/github.com/containerd/containerd/remotes/docker/handler.go deleted file mode 100644 index 529cfbc27..000000000 --- a/src/vendor/github.com/containerd/containerd/remotes/docker/handler.go +++ /dev/null @@ -1,154 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package docker - -import ( - "context" - "fmt" - "net/url" - "strings" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/labels" - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/reference" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -var ( - // labelDistributionSource describes the source blob comes from. - labelDistributionSource = "containerd.io/distribution.source" -) - -// AppendDistributionSourceLabel updates the label of blob with distribution source. -func AppendDistributionSourceLabel(manager content.Manager, ref string) (images.HandlerFunc, error) { - refspec, err := reference.Parse(ref) - if err != nil { - return nil, err - } - - u, err := url.Parse("dummy://" + refspec.Locator) - if err != nil { - return nil, err - } - - source, repo := u.Hostname(), strings.TrimPrefix(u.Path, "/") - return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - info, err := manager.Info(ctx, desc.Digest) - if err != nil { - return nil, err - } - - key := distributionSourceLabelKey(source) - - originLabel := "" - if info.Labels != nil { - originLabel = info.Labels[key] - } - value := appendDistributionSourceLabel(originLabel, repo) - - // The repo name has been limited under 256 and the distribution - // label might hit the limitation of label size, when blob data - // is used as the very, very common layer. 
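- // Appending the source label is best-effort: if the accumulated value exceeds the label size limit, the validation below fails and the handler skips the update instead of failing the operation.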
- if err := labels.Validate(key, value); err != nil { - log.G(ctx).Warnf("skip to append distribution label: %s", err) - return nil, nil - } - - info = content.Info{ - Digest: desc.Digest, - Labels: map[string]string{ - key: value, - }, - } - _, err = manager.Update(ctx, info, fmt.Sprintf("labels.%s", key)) - return nil, err - }, nil -} - -func appendDistributionSourceLabel(originLabel, repo string) string { - repos := []string{} - if originLabel != "" { - repos = strings.Split(originLabel, ",") - } - repos = append(repos, repo) - - // use empty string to present duplicate items - for i := 1; i < len(repos); i++ { - tmp, j := repos[i], i-1 - for ; j >= 0 && repos[j] >= tmp; j-- { - if repos[j] == tmp { - tmp = "" - } - repos[j+1] = repos[j] - } - repos[j+1] = tmp - } - - i := 0 - for ; i < len(repos) && repos[i] == ""; i++ { - } - - return strings.Join(repos[i:], ",") -} - -func distributionSourceLabelKey(source string) string { - return fmt.Sprintf("%s.%s", labelDistributionSource, source) -} - -// selectRepositoryMountCandidate will select the repo which has longest -// common prefix components as the candidate. -func selectRepositoryMountCandidate(refspec reference.Spec, sources map[string]string) string { - u, err := url.Parse("dummy://" + refspec.Locator) - if err != nil { - // NOTE: basically, it won't be error here - return "" - } - - source, target := u.Hostname(), strings.TrimPrefix(u.Path, "/") - repoLabel, ok := sources[distributionSourceLabelKey(source)] - if !ok || repoLabel == "" { - return "" - } - - n, match := 0, "" - components := strings.Split(target, "/") - for _, repo := range strings.Split(repoLabel, ",") { - // the target repo is not a candidate - if repo == target { - continue - } - - if l := commonPrefixComponents(components, repo); l >= n { - n, match = l, repo - } - } - return match -} - -func commonPrefixComponents(components []string, target string) int { - targetComponents := strings.Split(target, "/") - - i := 0 - for ; i < len(components) && i < len(targetComponents); i++ { - if components[i] != targetComponents[i] { - break - } - } - return i -} diff --git a/src/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go b/src/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go deleted file mode 100644 index 9a827ef04..000000000 --- a/src/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go +++ /dev/null @@ -1,168 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package docker - -import ( - "bytes" - "fmt" - "io" - - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/log" -) - -const maxRetry = 3 - -type httpReadSeeker struct { - size int64 - offset int64 - rc io.ReadCloser - open func(offset int64) (io.ReadCloser, error) - closed bool - - errsWithNoProgress int -} - -func newHTTPReadSeeker(size int64, open func(offset int64) (io.ReadCloser, error)) (io.ReadCloser, error) { - return &httpReadSeeker{ - size: size, - open: open, - }, nil -} - -func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { - if hrs.closed { - return 0, io.EOF - } - - rd, err := hrs.reader() - if err != nil { - return 0, err - } - - n, err = rd.Read(p) - hrs.offset += int64(n) - if n > 0 || err == nil { - hrs.errsWithNoProgress = 0 - } - if err == io.ErrUnexpectedEOF { - // connection closed unexpectedly. try reconnecting. - if n == 0 { - hrs.errsWithNoProgress++ - if hrs.errsWithNoProgress > maxRetry { - return // too many retries for this offset with no progress - } - } - if hrs.rc != nil { - if clsErr := hrs.rc.Close(); clsErr != nil { - log.L.WithError(clsErr).Error("httpReadSeeker: failed to close ReadCloser") - } - hrs.rc = nil - } - if _, err2 := hrs.reader(); err2 == nil { - return n, nil - } - } - return -} - -func (hrs *httpReadSeeker) Close() error { - if hrs.closed { - return nil - } - hrs.closed = true - if hrs.rc != nil { - return hrs.rc.Close() - } - - return nil -} - -func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { - if hrs.closed { - return 0, fmt.Errorf("Fetcher.Seek: closed: %w", errdefs.ErrUnavailable) - } - - abs := hrs.offset - switch whence { - case io.SeekStart: - abs = offset - case io.SeekCurrent: - abs += offset - case io.SeekEnd: - if hrs.size == -1 { - return 0, fmt.Errorf("Fetcher.Seek: unknown size, cannot seek from end: %w", errdefs.ErrUnavailable) - } - abs = hrs.size + offset - default: - return 0, fmt.Errorf("Fetcher.Seek: invalid whence: %w", errdefs.ErrInvalidArgument) - } - - if abs < 0 { - return 0, fmt.Errorf("Fetcher.Seek: negative offset: %w", errdefs.ErrInvalidArgument) - } - - if abs != hrs.offset { - if hrs.rc != nil { - if err := hrs.rc.Close(); err != nil { - log.L.WithError(err).Error("Fetcher.Seek: failed to close ReadCloser") - } - - hrs.rc = nil - } - - hrs.offset = abs - } - - return hrs.offset, nil -} - -func (hrs *httpReadSeeker) reader() (io.Reader, error) { - if hrs.rc != nil { - return hrs.rc, nil - } - - if hrs.size == -1 || hrs.offset < hrs.size { - // only try to reopen the body request if we are seeking to a value - // less than the actual size. - if hrs.open == nil { - return nil, fmt.Errorf("cannot open: %w", errdefs.ErrNotImplemented) - } - - rc, err := hrs.open(hrs.offset) - if err != nil { - return nil, fmt.Errorf("httpReadSeeker: failed open: %w", err) - } - - if hrs.rc != nil { - if err := hrs.rc.Close(); err != nil { - log.L.WithError(err).Error("httpReadSeeker: failed to close ReadCloser") - } - } - hrs.rc = rc - } else { - // There is an edge case here where offset == size of the content. If - // we seek, we will probably get an error for content that cannot be - // sought (?). In that case, we should err on committing the content, - // as the length is already satisfied but we just return the empty - // reader instead. 
- - hrs.rc = io.NopCloser(bytes.NewReader([]byte{})) - } - - return hrs.rc, nil -} diff --git a/src/vendor/github.com/containerd/containerd/remotes/docker/pusher.go b/src/vendor/github.com/containerd/containerd/remotes/docker/pusher.go deleted file mode 100644 index c786ad215..000000000 --- a/src/vendor/github.com/containerd/containerd/remotes/docker/pusher.go +++ /dev/null @@ -1,451 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package docker - -import ( - "context" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strings" - "time" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/remotes" - remoteserrors "github.com/containerd/containerd/remotes/errors" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -type dockerPusher struct { - *dockerBase - object string - - // TODO: namespace tracker - tracker StatusTracker -} - -// Writer implements Ingester API of content store. This allows the client -// to receive ErrUnavailable when there is already an on-going upload. -// Note that the tracker MUST implement StatusTrackLocker interface to avoid -// race condition on StatusTracker. -func (p dockerPusher) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { - var wOpts content.WriterOpts - for _, opt := range opts { - if err := opt(&wOpts); err != nil { - return nil, err - } - } - if wOpts.Ref == "" { - return nil, fmt.Errorf("ref must not be empty: %w", errdefs.ErrInvalidArgument) - } - return p.push(ctx, wOpts.Desc, wOpts.Ref, true) -} - -func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) { - return p.push(ctx, desc, remotes.MakeRefKey(ctx, desc), false) -} - -func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref string, unavailableOnFail bool) (content.Writer, error) { - if l, ok := p.tracker.(StatusTrackLocker); ok { - l.Lock(ref) - defer l.Unlock(ref) - } - ctx, err := ContextWithRepositoryScope(ctx, p.refspec, true) - if err != nil { - return nil, err - } - status, err := p.tracker.GetStatus(ref) - if err == nil { - if status.Committed && status.Offset == status.Total { - return nil, fmt.Errorf("ref %v: %w", ref, errdefs.ErrAlreadyExists) - } - if unavailableOnFail && status.ErrClosed == nil { - // Another push of this ref is happening elsewhere. The rest of function - // will continue only when `errdefs.IsNotFound(err) == true` (i.e. there - // is no actively-tracked ref already). 
- return nil, fmt.Errorf("push is on-going: %w", errdefs.ErrUnavailable) - } - // TODO: Handle incomplete status - } else if !errdefs.IsNotFound(err) { - return nil, fmt.Errorf("failed to get status: %w", err) - } - - hosts := p.filterHosts(HostCapabilityPush) - if len(hosts) == 0 { - return nil, fmt.Errorf("no push hosts: %w", errdefs.ErrNotFound) - } - - var ( - isManifest bool - existCheck []string - host = hosts[0] - ) - - switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, - ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: - isManifest = true - existCheck = getManifestPath(p.object, desc.Digest) - default: - existCheck = []string{"blobs", desc.Digest.String()} - } - - req := p.request(host, http.MethodHead, existCheck...) - req.header.Set("Accept", strings.Join([]string{desc.MediaType, `*/*`}, ", ")) - - log.G(ctx).WithField("url", req.String()).Debugf("checking and pushing to") - - resp, err := req.doWithRetries(ctx, nil) - if err != nil { - if !errors.Is(err, ErrInvalidAuthorization) { - return nil, err - } - log.G(ctx).WithError(err).Debugf("Unable to check existence, continuing with push") - } else { - if resp.StatusCode == http.StatusOK { - var exists bool - if isManifest && existCheck[1] != desc.Digest.String() { - dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest")) - if dgstHeader == desc.Digest { - exists = true - } - } else { - exists = true - } - - if exists { - p.tracker.SetStatus(ref, Status{ - Committed: true, - Status: content.Status{ - Ref: ref, - Total: desc.Size, - Offset: desc.Size, - // TODO: Set updated time? - }, - }) - resp.Body.Close() - return nil, fmt.Errorf("content %v on remote: %w", desc.Digest, errdefs.ErrAlreadyExists) - } - } else if resp.StatusCode != http.StatusNotFound { - err := remoteserrors.NewUnexpectedStatusErr(resp) - log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response") - resp.Body.Close() - return nil, err - } - resp.Body.Close() - } - - if isManifest { - putPath := getManifestPath(p.object, desc.Digest) - req = p.request(host, http.MethodPut, putPath...) - req.header.Add("Content-Type", desc.MediaType) - } else { - // Start upload request - req = p.request(host, http.MethodPost, "blobs", "uploads/") - - var resp *http.Response - if fromRepo := selectRepositoryMountCandidate(p.refspec, desc.Annotations); fromRepo != "" { - preq := requestWithMountFrom(req, desc.Digest.String(), fromRepo) - pctx := ContextWithAppendPullRepositoryScope(ctx, fromRepo) - - // NOTE: the fromRepo might be private repo and - // auth service still can grant token without error. - // but the post request will fail because of 401. - // - // for the private repo, we should remove mount-from - // query and send the request again. 
- resp, err = preq.doWithRetries(pctx, nil) - if err != nil { - return nil, err - } - - if resp.StatusCode == http.StatusUnauthorized { - log.G(ctx).Debugf("failed to mount from repository %s", fromRepo) - - resp.Body.Close() - resp = nil - } - } - - if resp == nil { - resp, err = req.doWithRetries(ctx, nil) - if err != nil { - return nil, err - } - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusOK, http.StatusAccepted, http.StatusNoContent: - case http.StatusCreated: - p.tracker.SetStatus(ref, Status{ - Committed: true, - Status: content.Status{ - Ref: ref, - Total: desc.Size, - Offset: desc.Size, - }, - }) - return nil, fmt.Errorf("content %v on remote: %w", desc.Digest, errdefs.ErrAlreadyExists) - default: - err := remoteserrors.NewUnexpectedStatusErr(resp) - log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response") - return nil, err - } - - var ( - location = resp.Header.Get("Location") - lurl *url.URL - lhost = host - ) - // Support paths without host in location - if strings.HasPrefix(location, "/") { - lurl, err = url.Parse(lhost.Scheme + "://" + lhost.Host + location) - if err != nil { - return nil, fmt.Errorf("unable to parse location %v: %w", location, err) - } - } else { - if !strings.Contains(location, "://") { - location = lhost.Scheme + "://" + location - } - lurl, err = url.Parse(location) - if err != nil { - return nil, fmt.Errorf("unable to parse location %v: %w", location, err) - } - - if lurl.Host != lhost.Host || lhost.Scheme != lurl.Scheme { - - lhost.Scheme = lurl.Scheme - lhost.Host = lurl.Host - log.G(ctx).WithField("host", lhost.Host).WithField("scheme", lhost.Scheme).Debug("upload changed destination") - - // Strip authorizer if change to host or scheme - lhost.Authorizer = nil - } - } - q := lurl.Query() - q.Add("digest", desc.Digest.String()) - - req = p.request(lhost, http.MethodPut) - req.header.Set("Content-Type", "application/octet-stream") - req.path = lurl.Path + "?" 
+ q.Encode() - } - p.tracker.SetStatus(ref, Status{ - Status: content.Status{ - Ref: ref, - Total: desc.Size, - Expected: desc.Digest, - StartedAt: time.Now(), - }, - }) - - // TODO: Support chunked upload - - pr, pw := io.Pipe() - respC := make(chan response, 1) - body := io.NopCloser(pr) - - req.body = func() (io.ReadCloser, error) { - if body == nil { - return nil, errors.New("cannot reuse body, request must be retried") - } - // Only use the body once since pipe cannot be seeked - ob := body - body = nil - return ob, nil - } - req.size = desc.Size - - go func() { - defer close(respC) - resp, err := req.doWithRetries(ctx, nil) - if err != nil { - respC <- response{err: err} - pr.CloseWithError(err) - return - } - - switch resp.StatusCode { - case http.StatusOK, http.StatusCreated, http.StatusNoContent: - default: - err := remoteserrors.NewUnexpectedStatusErr(resp) - log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response") - pr.CloseWithError(err) - } - respC <- response{Response: resp} - }() - - return &pushWriter{ - base: p.dockerBase, - ref: ref, - pipe: pw, - responseC: respC, - isManifest: isManifest, - expected: desc.Digest, - tracker: p.tracker, - }, nil -} - -func getManifestPath(object string, dgst digest.Digest) []string { - if i := strings.IndexByte(object, '@'); i >= 0 { - if object[i+1:] != dgst.String() { - // use digest, not tag - object = "" - } else { - // strip @ for registry path to make tag - object = object[:i] - } - - } - - if object == "" { - return []string{"manifests", dgst.String()} - } - - return []string{"manifests", object} -} - -type response struct { - *http.Response - err error -} - -type pushWriter struct { - base *dockerBase - ref string - - pipe *io.PipeWriter - responseC <-chan response - isManifest bool - - expected digest.Digest - tracker StatusTracker -} - -func (pw *pushWriter) Write(p []byte) (n int, err error) { - status, err := pw.tracker.GetStatus(pw.ref) - if err != nil { - return n, err - } - n, err = pw.pipe.Write(p) - status.Offset += int64(n) - status.UpdatedAt = time.Now() - pw.tracker.SetStatus(pw.ref, status) - return -} - -func (pw *pushWriter) Close() error { - status, err := pw.tracker.GetStatus(pw.ref) - if err == nil && !status.Committed { - // Closing an incomplete writer. Record this as an error so that following write can retry it. - status.ErrClosed = errors.New("closed incomplete writer") - pw.tracker.SetStatus(pw.ref, status) - } - return pw.pipe.Close() -} - -func (pw *pushWriter) Status() (content.Status, error) { - status, err := pw.tracker.GetStatus(pw.ref) - if err != nil { - return content.Status{}, err - } - return status.Status, nil - -} - -func (pw *pushWriter) Digest() digest.Digest { - // TODO: Get rid of this function? - return pw.expected -} - -func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { - // Check whether read has already thrown an error - if _, err := pw.pipe.Write([]byte{}); err != nil && err != io.ErrClosedPipe { - return fmt.Errorf("pipe error before commit: %w", err) - } - - if err := pw.pipe.Close(); err != nil { - return err - } - // TODO: timeout waiting for response - resp := <-pw.responseC - if resp.err != nil { - return resp.err - } - defer resp.Response.Body.Close() - - // 201 is specified return status, some registries return - // 200, 202 or 204. 
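- // Any of the success codes noted above is accepted by the switch below; the Docker-Content-Digest comparison that follows still verifies that the registry stored the expected content.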
- switch resp.StatusCode { - case http.StatusOK, http.StatusCreated, http.StatusNoContent, http.StatusAccepted: - default: - return remoteserrors.NewUnexpectedStatusErr(resp.Response) - } - - status, err := pw.tracker.GetStatus(pw.ref) - if err != nil { - return fmt.Errorf("failed to get status: %w", err) - } - - if size > 0 && size != status.Offset { - return fmt.Errorf("unexpected size %d, expected %d", status.Offset, size) - } - - if expected == "" { - expected = status.Expected - } - - actual, err := digest.Parse(resp.Header.Get("Docker-Content-Digest")) - if err != nil { - return fmt.Errorf("invalid content digest in response: %w", err) - } - - if actual != expected { - return fmt.Errorf("got digest %s, expected %s", actual, expected) - } - - status.Committed = true - status.UpdatedAt = time.Now() - pw.tracker.SetStatus(pw.ref, status) - - return nil -} - -func (pw *pushWriter) Truncate(size int64) error { - // TODO: if blob close request and start new request at offset - // TODO: always error on manifest - return errors.New("cannot truncate remote upload") -} - -func requestWithMountFrom(req *request, mount, from string) *request { - creq := *req - - sep := "?" - if strings.Contains(creq.path, sep) { - sep = "&" - } - - creq.path = creq.path + sep + "mount=" + mount + "&from=" + from - - return &creq -} diff --git a/src/vendor/github.com/containerd/containerd/remotes/docker/registry.go b/src/vendor/github.com/containerd/containerd/remotes/docker/registry.go deleted file mode 100644 index 98cafcd06..000000000 --- a/src/vendor/github.com/containerd/containerd/remotes/docker/registry.go +++ /dev/null @@ -1,244 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package docker - -import ( - "errors" - "net" - "net/http" -) - -// HostCapabilities represent the capabilities of the registry -// host. This also represents the set of operations for which -// the registry host may be trusted to perform. -// -// For example pushing is a capability which should only be -// performed on an upstream source, not a mirror. -// Resolving (the process of converting a name into a digest) -// must be considered a trusted operation and only done by -// a host which is trusted (or more preferably by secure process -// which can prove the provenance of the mapping). A public -// mirror should never be trusted to do a resolve action. 
-// -// | Registry Type | Pull | Resolve | Push | -// |------------------|------|---------|------| -// | Public Registry | yes | yes | yes | -// | Private Registry | yes | yes | yes | -// | Public Mirror | yes | no | no | -// | Private Mirror | yes | yes | no | -type HostCapabilities uint8 - -const ( - // HostCapabilityPull represents the capability to fetch manifests - // and blobs by digest - HostCapabilityPull HostCapabilities = 1 << iota - - // HostCapabilityResolve represents the capability to fetch manifests - // by name - HostCapabilityResolve - - // HostCapabilityPush represents the capability to push blobs and - // manifests - HostCapabilityPush - - // Reserved for future capabilities (i.e. search, catalog, remove) -) - -// Has checks whether the capabilities list has the provide capability -func (c HostCapabilities) Has(t HostCapabilities) bool { - return c&t == t -} - -// RegistryHost represents a complete configuration for a registry -// host, representing the capabilities, authorizations, connection -// configuration, and location. -type RegistryHost struct { - Client *http.Client - Authorizer Authorizer - Host string - Scheme string - Path string - Capabilities HostCapabilities - Header http.Header -} - -func (h RegistryHost) isProxy(refhost string) bool { - if refhost != h.Host { - if refhost != "docker.io" || h.Host != "registry-1.docker.io" { - return true - } - } - return false -} - -// RegistryHosts fetches the registry hosts for a given namespace, -// provided by the host component of an distribution image reference. -type RegistryHosts func(string) ([]RegistryHost, error) - -// Registries joins multiple registry configuration functions, using the same -// order as provided within the arguments. When an empty registry configuration -// is returned with a nil error, the next function will be called. -// NOTE: This function will not join configurations, as soon as a non-empty -// configuration is returned from a configuration function, it will be returned -// to the caller. -func Registries(registries ...RegistryHosts) RegistryHosts { - return func(host string) ([]RegistryHost, error) { - for _, registry := range registries { - config, err := registry(host) - if err != nil { - return config, err - } - if len(config) > 0 { - return config, nil - } - } - return nil, nil - } -} - -type registryOpts struct { - authorizer Authorizer - plainHTTP func(string) (bool, error) - host func(string) (string, error) - client *http.Client -} - -// RegistryOpt defines a registry default option -type RegistryOpt func(*registryOpts) - -// WithPlainHTTP configures registries to use plaintext http scheme -// for the provided host match function. -func WithPlainHTTP(f func(string) (bool, error)) RegistryOpt { - return func(opts *registryOpts) { - opts.plainHTTP = f - } -} - -// WithAuthorizer configures the default authorizer for a registry -func WithAuthorizer(a Authorizer) RegistryOpt { - return func(opts *registryOpts) { - opts.authorizer = a - } -} - -// WithHostTranslator defines the default translator to use for registry hosts -func WithHostTranslator(h func(string) (string, error)) RegistryOpt { - return func(opts *registryOpts) { - opts.host = h - } -} - -// WithClient configures the default http client for a registry -func WithClient(c *http.Client) RegistryOpt { - return func(opts *registryOpts) { - opts.client = c - } -} - -// ConfigureDefaultRegistries is used to create a default configuration for -// registries. 
For more advanced configurations or per-domain setups, -// the RegistryHosts interface should be used directly. -// NOTE: This function will always return a non-empty value or error -func ConfigureDefaultRegistries(ropts ...RegistryOpt) RegistryHosts { - var opts registryOpts - for _, opt := range ropts { - opt(&opts) - } - - return func(host string) ([]RegistryHost, error) { - config := RegistryHost{ - Client: opts.client, - Authorizer: opts.authorizer, - Host: host, - Scheme: "https", - Path: "/v2", - Capabilities: HostCapabilityPull | HostCapabilityResolve | HostCapabilityPush, - } - - if config.Client == nil { - config.Client = http.DefaultClient - } - - if opts.plainHTTP != nil { - match, err := opts.plainHTTP(host) - if err != nil { - return nil, err - } - if match { - config.Scheme = "http" - } - } - - if opts.host != nil { - var err error - config.Host, err = opts.host(config.Host) - if err != nil { - return nil, err - } - } else if host == "docker.io" { - config.Host = "registry-1.docker.io" - } - - return []RegistryHost{config}, nil - } -} - -// MatchAllHosts is a host match function which is always true. -func MatchAllHosts(string) (bool, error) { - return true, nil -} - -// MatchLocalhost is a host match function which returns true for -// localhost. -// -// Note: this does not handle matching of ip addresses in octal, -// decimal or hex form. -func MatchLocalhost(host string) (bool, error) { - switch { - case host == "::1": - return true, nil - case host == "[::1]": - return true, nil - } - h, p, err := net.SplitHostPort(host) - - // addrError helps distinguish between errors of form - // "no colon in address" and "too many colons in address". - // The former is fine as the host string need not have a - // port. Latter needs to be handled. - addrError := &net.AddrError{ - Err: "missing port in address", - Addr: host, - } - if err != nil { - if err.Error() != addrError.Error() { - return false, err - } - // host string without any port specified - h = host - } else if len(p) == 0 { - return false, errors.New("invalid host name format") - } - - // use ipv4 dotted decimal for further checking - if h == "localhost" { - h = "127.0.0.1" - } - ip := net.ParseIP(h) - - return ip.IsLoopback(), nil -} diff --git a/src/vendor/github.com/containerd/containerd/remotes/docker/resolver.go b/src/vendor/github.com/containerd/containerd/remotes/docker/resolver.go deleted file mode 100644 index 9bbbc2622..000000000 --- a/src/vendor/github.com/containerd/containerd/remotes/docker/resolver.go +++ /dev/null @@ -1,669 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package docker - -import ( - "context" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "path" - "strings" - - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/reference" - "github.com/containerd/containerd/remotes" - "github.com/containerd/containerd/remotes/docker/schema1" - "github.com/containerd/containerd/version" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" - "golang.org/x/net/context/ctxhttp" -) - -var ( - // ErrInvalidAuthorization is used when credentials are passed to a server but - // those credentials are rejected. - ErrInvalidAuthorization = errors.New("authorization failed") - - // MaxManifestSize represents the largest size accepted from a registry - // during resolution. Larger manifests may be accepted using a - // resolution method other than the registry. - // - // NOTE: The max supported layers by some runtimes is 128 and individual - // layers will not contribute more than 256 bytes, making a - // reasonable limit for a large image manifests of 32K bytes. - // 4M bytes represents a much larger upper bound for images which may - // contain large annotations or be non-images. A proper manifest - // design puts large metadata in subobjects, as is consistent the - // intent of the manifest design. - MaxManifestSize int64 = 4 * 1048 * 1048 -) - -// Authorizer is used to authorize HTTP requests based on 401 HTTP responses. -// An Authorizer is responsible for caching tokens or credentials used by -// requests. -type Authorizer interface { - // Authorize sets the appropriate `Authorization` header on the given - // request. - // - // If no authorization is found for the request, the request remains - // unmodified. It may also add an `Authorization` header as - // "bearer " - // "basic " - Authorize(context.Context, *http.Request) error - - // AddResponses adds a 401 response for the authorizer to consider when - // authorizing requests. The last response should be unauthorized and - // the previous requests are used to consider redirects and retries - // that may have led to the 401. - // - // If response is not handled, returns `ErrNotImplemented` - AddResponses(context.Context, []*http.Response) error -} - -// ResolverOptions are used to configured a new Docker register resolver -type ResolverOptions struct { - // Hosts returns registry host configurations for a namespace. - Hosts RegistryHosts - - // Headers are the HTTP request header fields sent by the resolver - Headers http.Header - - // Tracker is used to track uploads to the registry. This is used - // since the registry does not have upload tracking and the existing - // mechanism for getting blob upload status is expensive. - Tracker StatusTracker - - // Authorizer is used to authorize registry requests - // Deprecated: use Hosts - Authorizer Authorizer - - // Credentials provides username and secret given a host. - // If username is empty but a secret is given, that secret - // is interpreted as a long lived token. - // Deprecated: use Hosts - Credentials func(string) (string, string, error) - - // Host provides the hostname given a namespace. 
- // Deprecated: use Hosts - Host func(string) (string, error) - - // PlainHTTP specifies to use plain http and not https - // Deprecated: use Hosts - PlainHTTP bool - - // Client is the http client to used when making registry requests - // Deprecated: use Hosts - Client *http.Client -} - -// DefaultHost is the default host function. -func DefaultHost(ns string) (string, error) { - if ns == "docker.io" { - return "registry-1.docker.io", nil - } - return ns, nil -} - -type dockerResolver struct { - hosts RegistryHosts - header http.Header - resolveHeader http.Header - tracker StatusTracker -} - -// NewResolver returns a new resolver to a Docker registry -func NewResolver(options ResolverOptions) remotes.Resolver { - if options.Tracker == nil { - options.Tracker = NewInMemoryTracker() - } - - if options.Headers == nil { - options.Headers = make(http.Header) - } - if _, ok := options.Headers["User-Agent"]; !ok { - options.Headers.Set("User-Agent", "containerd/"+version.Version) - } - - resolveHeader := http.Header{} - if _, ok := options.Headers["Accept"]; !ok { - // set headers for all the types we support for resolution. - resolveHeader.Set("Accept", strings.Join([]string{ - images.MediaTypeDockerSchema2Manifest, - images.MediaTypeDockerSchema2ManifestList, - ocispec.MediaTypeImageManifest, - ocispec.MediaTypeImageIndex, "*/*"}, ", ")) - } else { - resolveHeader["Accept"] = options.Headers["Accept"] - delete(options.Headers, "Accept") - } - - if options.Hosts == nil { - opts := []RegistryOpt{} - if options.Host != nil { - opts = append(opts, WithHostTranslator(options.Host)) - } - - if options.Authorizer == nil { - options.Authorizer = NewDockerAuthorizer( - WithAuthClient(options.Client), - WithAuthHeader(options.Headers), - WithAuthCreds(options.Credentials)) - } - opts = append(opts, WithAuthorizer(options.Authorizer)) - - if options.Client != nil { - opts = append(opts, WithClient(options.Client)) - } - if options.PlainHTTP { - opts = append(opts, WithPlainHTTP(MatchAllHosts)) - } else { - opts = append(opts, WithPlainHTTP(MatchLocalhost)) - } - options.Hosts = ConfigureDefaultRegistries(opts...) - } - return &dockerResolver{ - hosts: options.Hosts, - header: options.Headers, - resolveHeader: resolveHeader, - tracker: options.Tracker, - } -} - -func getManifestMediaType(resp *http.Response) string { - // Strip encoding data (manifests should always be ascii JSON) - contentType := resp.Header.Get("Content-Type") - if sp := strings.IndexByte(contentType, ';'); sp != -1 { - contentType = contentType[0:sp] - } - - // As of Apr 30 2019 the registry.access.redhat.com registry does not specify - // the content type of any data but uses schema1 manifests. 
- if contentType == "text/plain" { - contentType = images.MediaTypeDockerSchema1Manifest - } - return contentType -} - -type countingReader struct { - reader io.Reader - bytesRead int64 -} - -func (r *countingReader) Read(p []byte) (int, error) { - n, err := r.reader.Read(p) - r.bytesRead += int64(n) - return n, err -} - -var _ remotes.Resolver = &dockerResolver{} - -func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocispec.Descriptor, error) { - base, err := r.resolveDockerBase(ref) - if err != nil { - return "", ocispec.Descriptor{}, err - } - refspec := base.refspec - if refspec.Object == "" { - return "", ocispec.Descriptor{}, reference.ErrObjectRequired - } - - var ( - firstErr error - paths [][]string - dgst = refspec.Digest() - caps = HostCapabilityPull - ) - - if dgst != "" { - if err := dgst.Validate(); err != nil { - // need to fail here, since we can't actually resolve the invalid - // digest. - return "", ocispec.Descriptor{}, err - } - - // turns out, we have a valid digest, make a url. - paths = append(paths, []string{"manifests", dgst.String()}) - - // fallback to blobs on not found. - paths = append(paths, []string{"blobs", dgst.String()}) - } else { - // Add - paths = append(paths, []string{"manifests", refspec.Object}) - caps |= HostCapabilityResolve - } - - hosts := base.filterHosts(caps) - if len(hosts) == 0 { - return "", ocispec.Descriptor{}, fmt.Errorf("no resolve hosts: %w", errdefs.ErrNotFound) - } - - ctx, err = ContextWithRepositoryScope(ctx, refspec, false) - if err != nil { - return "", ocispec.Descriptor{}, err - } - - for _, u := range paths { - for _, host := range hosts { - ctx := log.WithLogger(ctx, log.G(ctx).WithField("host", host.Host)) - - req := base.request(host, http.MethodHead, u...) - if err := req.addNamespace(base.refspec.Hostname()); err != nil { - return "", ocispec.Descriptor{}, err - } - - for key, value := range r.resolveHeader { - req.header[key] = append(req.header[key], value...) - } - - log.G(ctx).Debug("resolving") - resp, err := req.doWithRetries(ctx, nil) - if err != nil { - if errors.Is(err, ErrInvalidAuthorization) { - err = fmt.Errorf("pull access denied, repository does not exist or may require authorization: %w", err) - } - // Store the error for referencing later - if firstErr == nil { - firstErr = err - } - log.G(ctx).WithError(err).Info("trying next host") - continue // try another host - } - resp.Body.Close() // don't care about body contents. - - if resp.StatusCode > 299 { - if resp.StatusCode == http.StatusNotFound { - log.G(ctx).Info("trying next host - response was http.StatusNotFound") - continue - } - if resp.StatusCode > 399 { - // Set firstErr when encountering the first non-404 status code. - if firstErr == nil { - firstErr = fmt.Errorf("pulling from host %s failed with status code %v: %v", host.Host, u, resp.Status) - } - continue // try another host - } - return "", ocispec.Descriptor{}, fmt.Errorf("pulling from host %s failed with unexpected status code %v: %v", host.Host, u, resp.Status) - } - size := resp.ContentLength - contentType := getManifestMediaType(resp) - - // if no digest was provided, then only a resolve - // trusted registry was contacted, in this case use - // the digest header (or content from GET) - if dgst == "" { - // this is the only point at which we trust the registry. we use the - // content headers to assemble a descriptor for the name. when this becomes - // more robust, we mostly get this information from a secure trust store. 
- dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest")) - - if dgstHeader != "" && size != -1 { - if err := dgstHeader.Validate(); err != nil { - return "", ocispec.Descriptor{}, fmt.Errorf("%q in header not a valid digest: %w", dgstHeader, err) - } - dgst = dgstHeader - } - } - if dgst == "" || size == -1 { - log.G(ctx).Debug("no Docker-Content-Digest header, fetching manifest instead") - - req = base.request(host, http.MethodGet, u...) - if err := req.addNamespace(base.refspec.Hostname()); err != nil { - return "", ocispec.Descriptor{}, err - } - - for key, value := range r.resolveHeader { - req.header[key] = append(req.header[key], value...) - } - - resp, err := req.doWithRetries(ctx, nil) - if err != nil { - return "", ocispec.Descriptor{}, err - } - defer resp.Body.Close() - - bodyReader := countingReader{reader: resp.Body} - - contentType = getManifestMediaType(resp) - if dgst == "" { - if contentType == images.MediaTypeDockerSchema1Manifest { - b, err := schema1.ReadStripSignature(&bodyReader) - if err != nil { - return "", ocispec.Descriptor{}, err - } - - dgst = digest.FromBytes(b) - } else { - dgst, err = digest.FromReader(&bodyReader) - if err != nil { - return "", ocispec.Descriptor{}, err - } - } - } else if _, err := io.Copy(io.Discard, &bodyReader); err != nil { - return "", ocispec.Descriptor{}, err - } - size = bodyReader.bytesRead - } - // Prevent resolving to excessively large manifests - if size > MaxManifestSize { - if firstErr == nil { - firstErr = fmt.Errorf("rejecting %d byte manifest for %s: %w", size, ref, errdefs.ErrNotFound) - } - continue - } - - desc := ocispec.Descriptor{ - Digest: dgst, - MediaType: contentType, - Size: size, - } - - log.G(ctx).WithField("desc.digest", desc.Digest).Debug("resolved") - return ref, desc, nil - } - } - - // If above loop terminates without return, then there was an error. - // "firstErr" contains the first non-404 error. That is, "firstErr == nil" - // means that either no registries were given or each registry returned 404. 
- - if firstErr == nil { - firstErr = fmt.Errorf("%s: %w", ref, errdefs.ErrNotFound) - } - - return "", ocispec.Descriptor{}, firstErr -} - -func (r *dockerResolver) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) { - base, err := r.resolveDockerBase(ref) - if err != nil { - return nil, err - } - - return dockerFetcher{ - dockerBase: base, - }, nil -} - -func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) { - base, err := r.resolveDockerBase(ref) - if err != nil { - return nil, err - } - - return dockerPusher{ - dockerBase: base, - object: base.refspec.Object, - tracker: r.tracker, - }, nil -} - -func (r *dockerResolver) resolveDockerBase(ref string) (*dockerBase, error) { - refspec, err := reference.Parse(ref) - if err != nil { - return nil, err - } - - return r.base(refspec) -} - -type dockerBase struct { - refspec reference.Spec - repository string - hosts []RegistryHost - header http.Header -} - -func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) { - host := refspec.Hostname() - hosts, err := r.hosts(host) - if err != nil { - return nil, err - } - return &dockerBase{ - refspec: refspec, - repository: strings.TrimPrefix(refspec.Locator, host+"/"), - hosts: hosts, - header: r.header, - }, nil -} - -func (r *dockerBase) filterHosts(caps HostCapabilities) (hosts []RegistryHost) { - for _, host := range r.hosts { - if host.Capabilities.Has(caps) { - hosts = append(hosts, host) - } - } - return -} - -func (r *dockerBase) request(host RegistryHost, method string, ps ...string) *request { - header := r.header.Clone() - if header == nil { - header = http.Header{} - } - - for key, value := range host.Header { - header[key] = append(header[key], value...) - } - parts := append([]string{"/", host.Path, r.repository}, ps...) - p := path.Join(parts...) - // Join strips trailing slash, re-add ending "/" if included - if len(parts) > 0 && strings.HasSuffix(parts[len(parts)-1], "/") { - p = p + "/" - } - return &request{ - method: method, - path: p, - header: header, - host: host, - } -} - -func (r *request) authorize(ctx context.Context, req *http.Request) error { - // Check if has header for host - if r.host.Authorizer != nil { - if err := r.host.Authorizer.Authorize(ctx, req); err != nil { - return err - } - } - - return nil -} - -func (r *request) addNamespace(ns string) (err error) { - if !r.host.isProxy(ns) { - return nil - } - var q url.Values - // Parse query - if i := strings.IndexByte(r.path, '?'); i > 0 { - r.path = r.path[:i+1] - q, err = url.ParseQuery(r.path[i+1:]) - if err != nil { - return - } - } else { - r.path = r.path + "?" 
- q = url.Values{} - } - q.Add("ns", ns) - - r.path = r.path + q.Encode() - - return -} - -type request struct { - method string - path string - header http.Header - host RegistryHost - body func() (io.ReadCloser, error) - size int64 -} - -func (r *request) do(ctx context.Context) (*http.Response, error) { - u := r.host.Scheme + "://" + r.host.Host + r.path - req, err := http.NewRequest(r.method, u, nil) - if err != nil { - return nil, err - } - req.Header = http.Header{} // headers need to be copied to avoid concurrent map access - for k, v := range r.header { - req.Header[k] = v - } - if r.body != nil { - body, err := r.body() - if err != nil { - return nil, err - } - req.Body = body - req.GetBody = r.body - if r.size > 0 { - req.ContentLength = r.size - } - } - - ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", u)) - log.G(ctx).WithFields(requestFields(req)).Debug("do request") - if err := r.authorize(ctx, req); err != nil { - return nil, fmt.Errorf("failed to authorize: %w", err) - } - - var client = &http.Client{} - if r.host.Client != nil { - *client = *r.host.Client - } - if client.CheckRedirect == nil { - client.CheckRedirect = func(req *http.Request, via []*http.Request) error { - if len(via) >= 10 { - return errors.New("stopped after 10 redirects") - } - if err := r.authorize(ctx, req); err != nil { - return fmt.Errorf("failed to authorize redirect: %w", err) - } - return nil - } - } - - resp, err := ctxhttp.Do(ctx, client, req) - if err != nil { - return nil, fmt.Errorf("failed to do request: %w", err) - } - log.G(ctx).WithFields(responseFields(resp)).Debug("fetch response received") - return resp, nil -} - -func (r *request) doWithRetries(ctx context.Context, responses []*http.Response) (*http.Response, error) { - resp, err := r.do(ctx) - if err != nil { - return nil, err - } - - responses = append(responses, resp) - retry, err := r.retryRequest(ctx, responses) - if err != nil { - resp.Body.Close() - return nil, err - } - if retry { - resp.Body.Close() - return r.doWithRetries(ctx, responses) - } - return resp, err -} - -func (r *request) retryRequest(ctx context.Context, responses []*http.Response) (bool, error) { - if len(responses) > 5 { - return false, nil - } - last := responses[len(responses)-1] - switch last.StatusCode { - case http.StatusUnauthorized: - log.G(ctx).WithField("header", last.Header.Get("WWW-Authenticate")).Debug("Unauthorized") - if r.host.Authorizer != nil { - if err := r.host.Authorizer.AddResponses(ctx, responses); err == nil { - return true, nil - } else if !errdefs.IsNotImplemented(err) { - return false, err - } - } - - return false, nil - case http.StatusMethodNotAllowed: - // Support registries which have not properly implemented the HEAD method for - // manifests endpoint - if r.method == http.MethodHead && strings.Contains(r.path, "/manifests/") { - r.method = http.MethodGet - return true, nil - } - case http.StatusRequestTimeout, http.StatusTooManyRequests: - return true, nil - } - - // TODO: Handle 50x errors accounting for attempt history - return false, nil -} - -func (r *request) String() string { - return r.host.Scheme + "://" + r.host.Host + r.path -} - -func requestFields(req *http.Request) logrus.Fields { - fields := map[string]interface{}{ - "request.method": req.Method, - } - for k, vals := range req.Header { - k = strings.ToLower(k) - if k == "authorization" { - continue - } - for i, v := range vals { - field := "request.header." 
+ k - if i > 0 { - field = fmt.Sprintf("%s.%d", field, i) - } - fields[field] = v - } - } - - return logrus.Fields(fields) -} - -func responseFields(resp *http.Response) logrus.Fields { - fields := map[string]interface{}{ - "response.status": resp.Status, - } - for k, vals := range resp.Header { - k = strings.ToLower(k) - for i, v := range vals { - field := "response.header." + k - if i > 0 { - field = fmt.Sprintf("%s.%d", field, i) - } - fields[field] = v - } - } - - return logrus.Fields(fields) -} diff --git a/src/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go b/src/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go deleted file mode 100644 index efa4e8d6e..000000000 --- a/src/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go +++ /dev/null @@ -1,604 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package schema1 - -import ( - "bytes" - "context" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" - "strings" - "sync" - "time" - - "github.com/containerd/containerd/archive/compression" - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/remotes" - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/sync/errgroup" -) - -const ( - manifestSizeLimit = 8e6 // 8MB - labelDockerSchema1EmptyLayer = "containerd.io/docker.schema1.empty-layer" -) - -type blobState struct { - diffID digest.Digest - empty bool -} - -// Converter converts schema1 manifests to schema2 on fetch -type Converter struct { - contentStore content.Store - fetcher remotes.Fetcher - - pulledManifest *manifest - - mu sync.Mutex - blobMap map[digest.Digest]blobState - layerBlobs map[digest.Digest]ocispec.Descriptor -} - -// NewConverter returns a new converter -func NewConverter(contentStore content.Store, fetcher remotes.Fetcher) *Converter { - return &Converter{ - contentStore: contentStore, - fetcher: fetcher, - blobMap: map[digest.Digest]blobState{}, - layerBlobs: map[digest.Digest]ocispec.Descriptor{}, - } -} - -// Handle fetching descriptors for a docker media type -func (c *Converter) Handle(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - switch desc.MediaType { - case images.MediaTypeDockerSchema1Manifest: - if err := c.fetchManifest(ctx, desc); err != nil { - return nil, err - } - - m := c.pulledManifest - if len(m.FSLayers) != len(m.History) { - return nil, errors.New("invalid schema 1 manifest, history and layer mismatch") - } - descs := make([]ocispec.Descriptor, 0, len(c.pulledManifest.FSLayers)) - - for i := range m.FSLayers { - if _, ok := c.blobMap[c.pulledManifest.FSLayers[i].BlobSum]; !ok { - empty, err := isEmptyLayer([]byte(m.History[i].V1Compatibility)) - if 
err != nil { - return nil, err - } - - // Do no attempt to download a known empty blob - if !empty { - descs = append([]ocispec.Descriptor{ - { - MediaType: images.MediaTypeDockerSchema2LayerGzip, - Digest: c.pulledManifest.FSLayers[i].BlobSum, - Size: -1, - }, - }, descs...) - } - c.blobMap[c.pulledManifest.FSLayers[i].BlobSum] = blobState{ - empty: empty, - } - } - } - return descs, nil - case images.MediaTypeDockerSchema2LayerGzip: - if c.pulledManifest == nil { - return nil, errors.New("manifest required for schema 1 blob pull") - } - return nil, c.fetchBlob(ctx, desc) - default: - return nil, fmt.Errorf("%v not support for schema 1 manifests", desc.MediaType) - } -} - -// ConvertOptions provides options on converting a docker schema1 manifest. -type ConvertOptions struct { - // ManifestMediaType specifies the media type of the manifest OCI descriptor. - ManifestMediaType string - - // ConfigMediaType specifies the media type of the manifest config OCI - // descriptor. - ConfigMediaType string -} - -// ConvertOpt allows configuring a convert operation. -type ConvertOpt func(context.Context, *ConvertOptions) error - -// UseDockerSchema2 is used to indicate that a schema1 manifest should be -// converted into the media types for a docker schema2 manifest. -func UseDockerSchema2() ConvertOpt { - return func(ctx context.Context, o *ConvertOptions) error { - o.ManifestMediaType = images.MediaTypeDockerSchema2Manifest - o.ConfigMediaType = images.MediaTypeDockerSchema2Config - return nil - } -} - -// Convert a docker manifest to an OCI descriptor -func (c *Converter) Convert(ctx context.Context, opts ...ConvertOpt) (ocispec.Descriptor, error) { - co := ConvertOptions{ - ManifestMediaType: ocispec.MediaTypeImageManifest, - ConfigMediaType: ocispec.MediaTypeImageConfig, - } - for _, opt := range opts { - if err := opt(ctx, &co); err != nil { - return ocispec.Descriptor{}, err - } - } - - history, diffIDs, err := c.schema1ManifestHistory() - if err != nil { - return ocispec.Descriptor{}, fmt.Errorf("schema 1 conversion failed: %w", err) - } - - var img ocispec.Image - if err := json.Unmarshal([]byte(c.pulledManifest.History[0].V1Compatibility), &img); err != nil { - return ocispec.Descriptor{}, fmt.Errorf("failed to unmarshal image from schema 1 history: %w", err) - } - - img.History = history - img.RootFS = ocispec.RootFS{ - Type: "layers", - DiffIDs: diffIDs, - } - - b, err := json.MarshalIndent(img, "", " ") - if err != nil { - return ocispec.Descriptor{}, fmt.Errorf("failed to marshal image: %w", err) - } - - config := ocispec.Descriptor{ - MediaType: co.ConfigMediaType, - Digest: digest.Canonical.FromBytes(b), - Size: int64(len(b)), - } - - layers := make([]ocispec.Descriptor, len(diffIDs)) - for i, diffID := range diffIDs { - layers[i] = c.layerBlobs[diffID] - } - - manifest := ocispec.Manifest{ - Versioned: specs.Versioned{ - SchemaVersion: 2, - }, - Config: config, - Layers: layers, - } - - mb, err := json.MarshalIndent(manifest, "", " ") - if err != nil { - return ocispec.Descriptor{}, fmt.Errorf("failed to marshal image: %w", err) - } - - desc := ocispec.Descriptor{ - MediaType: co.ManifestMediaType, - Digest: digest.Canonical.FromBytes(mb), - Size: int64(len(mb)), - } - - labels := map[string]string{} - labels["containerd.io/gc.ref.content.0"] = manifest.Config.Digest.String() - for i, ch := range manifest.Layers { - labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = ch.Digest.String() - } - - ref := remotes.MakeRefKey(ctx, desc) - if err := content.WriteBlob(ctx, 
c.contentStore, ref, bytes.NewReader(mb), desc, content.WithLabels(labels)); err != nil { - return ocispec.Descriptor{}, fmt.Errorf("failed to write image manifest: %w", err) - } - - ref = remotes.MakeRefKey(ctx, config) - if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(b), config); err != nil { - return ocispec.Descriptor{}, fmt.Errorf("failed to write image config: %w", err) - } - - return desc, nil -} - -// ReadStripSignature reads in a schema1 manifest and returns a byte array -// with the "signatures" field stripped -func ReadStripSignature(schema1Blob io.Reader) ([]byte, error) { - b, err := io.ReadAll(io.LimitReader(schema1Blob, manifestSizeLimit)) // limit to 8MB - if err != nil { - return nil, err - } - - return stripSignature(b) -} - -func (c *Converter) fetchManifest(ctx context.Context, desc ocispec.Descriptor) error { - log.G(ctx).Debug("fetch schema 1") - - rc, err := c.fetcher.Fetch(ctx, desc) - if err != nil { - return err - } - - b, err := ReadStripSignature(rc) - rc.Close() - if err != nil { - return err - } - - var m manifest - if err := json.Unmarshal(b, &m); err != nil { - return err - } - if len(m.Manifests) != 0 || len(m.Layers) != 0 { - return errors.New("converter: expected schema1 document but found extra keys") - } - c.pulledManifest = &m - - return nil -} - -func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) error { - log.G(ctx).Debug("fetch blob") - - var ( - ref = remotes.MakeRefKey(ctx, desc) - calc = newBlobStateCalculator() - compressMethod = compression.Gzip - ) - - // size may be unknown, set to zero for content ingest - ingestDesc := desc - if ingestDesc.Size == -1 { - ingestDesc.Size = 0 - } - - cw, err := content.OpenWriter(ctx, c.contentStore, content.WithRef(ref), content.WithDescriptor(ingestDesc)) - if err != nil { - if !errdefs.IsAlreadyExists(err) { - return err - } - - reuse, err := c.reuseLabelBlobState(ctx, desc) - if err != nil { - return err - } - - if reuse { - return nil - } - - ra, err := c.contentStore.ReaderAt(ctx, desc) - if err != nil { - return err - } - defer ra.Close() - - r, err := compression.DecompressStream(content.NewReader(ra)) - if err != nil { - return err - } - - compressMethod = r.GetCompression() - _, err = io.Copy(calc, r) - r.Close() - if err != nil { - return err - } - } else { - defer cw.Close() - - rc, err := c.fetcher.Fetch(ctx, desc) - if err != nil { - return err - } - defer rc.Close() - - eg, _ := errgroup.WithContext(ctx) - pr, pw := io.Pipe() - - eg.Go(func() error { - r, err := compression.DecompressStream(pr) - if err != nil { - return err - } - - compressMethod = r.GetCompression() - _, err = io.Copy(calc, r) - r.Close() - pr.CloseWithError(err) - return err - }) - - eg.Go(func() error { - defer pw.Close() - - return content.Copy(ctx, cw, io.TeeReader(rc, pw), ingestDesc.Size, ingestDesc.Digest) - }) - - if err := eg.Wait(); err != nil { - return err - } - } - - if desc.Size == -1 { - info, err := c.contentStore.Info(ctx, desc.Digest) - if err != nil { - return fmt.Errorf("failed to get blob info: %w", err) - } - desc.Size = info.Size - } - - if compressMethod == compression.Uncompressed { - log.G(ctx).WithField("id", desc.Digest).Debugf("changed media type for uncompressed schema1 layer blob") - desc.MediaType = images.MediaTypeDockerSchema2Layer - } - - state := calc.State() - - cinfo := content.Info{ - Digest: desc.Digest, - Labels: map[string]string{ - "containerd.io/uncompressed": state.diffID.String(), - labelDockerSchema1EmptyLayer: 
strconv.FormatBool(state.empty), - }, - } - - if _, err := c.contentStore.Update(ctx, cinfo, "labels.containerd.io/uncompressed", fmt.Sprintf("labels.%s", labelDockerSchema1EmptyLayer)); err != nil { - return fmt.Errorf("failed to update uncompressed label: %w", err) - } - - c.mu.Lock() - c.blobMap[desc.Digest] = state - c.layerBlobs[state.diffID] = desc - c.mu.Unlock() - - return nil -} - -func (c *Converter) reuseLabelBlobState(ctx context.Context, desc ocispec.Descriptor) (bool, error) { - cinfo, err := c.contentStore.Info(ctx, desc.Digest) - if err != nil { - return false, fmt.Errorf("failed to get blob info: %w", err) - } - desc.Size = cinfo.Size - - diffID, ok := cinfo.Labels["containerd.io/uncompressed"] - if !ok { - return false, nil - } - - emptyVal, ok := cinfo.Labels[labelDockerSchema1EmptyLayer] - if !ok { - return false, nil - } - - isEmpty, err := strconv.ParseBool(emptyVal) - if err != nil { - log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse bool from label %s: %v", labelDockerSchema1EmptyLayer, isEmpty) - return false, nil - } - - bState := blobState{empty: isEmpty} - - if bState.diffID, err = digest.Parse(diffID); err != nil { - log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse digest from label containerd.io/uncompressed: %v", diffID) - return false, nil - } - - // NOTE: there is no need to read header to get compression method - // because there are only two kinds of methods. - if bState.diffID == desc.Digest { - desc.MediaType = images.MediaTypeDockerSchema2Layer - } else { - desc.MediaType = images.MediaTypeDockerSchema2LayerGzip - } - - c.mu.Lock() - c.blobMap[desc.Digest] = bState - c.layerBlobs[bState.diffID] = desc - c.mu.Unlock() - return true, nil -} - -func (c *Converter) schema1ManifestHistory() ([]ocispec.History, []digest.Digest, error) { - if c.pulledManifest == nil { - return nil, nil, errors.New("missing schema 1 manifest for conversion") - } - m := *c.pulledManifest - - if len(m.History) == 0 { - return nil, nil, errors.New("no history") - } - - history := make([]ocispec.History, len(m.History)) - diffIDs := []digest.Digest{} - for i := range m.History { - var h v1History - if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), &h); err != nil { - return nil, nil, fmt.Errorf("failed to unmarshal history: %w", err) - } - - blobSum := m.FSLayers[i].BlobSum - - state := c.blobMap[blobSum] - - history[len(history)-i-1] = ocispec.History{ - Author: h.Author, - Comment: h.Comment, - Created: &h.Created, - CreatedBy: strings.Join(h.ContainerConfig.Cmd, " "), - EmptyLayer: state.empty, - } - - if !state.empty { - diffIDs = append([]digest.Digest{state.diffID}, diffIDs...) 
- - } - } - - return history, diffIDs, nil -} - -type fsLayer struct { - BlobSum digest.Digest `json:"blobSum"` -} - -type history struct { - V1Compatibility string `json:"v1Compatibility"` -} - -type manifest struct { - FSLayers []fsLayer `json:"fsLayers"` - History []history `json:"history"` - Layers json.RawMessage `json:"layers,omitempty"` // OCI manifest - Manifests json.RawMessage `json:"manifests,omitempty"` // OCI index -} - -type v1History struct { - Author string `json:"author,omitempty"` - Created time.Time `json:"created"` - Comment string `json:"comment,omitempty"` - ThrowAway *bool `json:"throwaway,omitempty"` - Size *int `json:"Size,omitempty"` // used before ThrowAway field - ContainerConfig struct { - Cmd []string `json:"Cmd,omitempty"` - } `json:"container_config,omitempty"` -} - -// isEmptyLayer returns whether the v1 compatibility history describes an -// empty layer. A return value of true indicates the layer is empty, -// however false does not indicate non-empty. -func isEmptyLayer(compatHistory []byte) (bool, error) { - var h v1History - if err := json.Unmarshal(compatHistory, &h); err != nil { - return false, err - } - - if h.ThrowAway != nil { - return *h.ThrowAway, nil - } - if h.Size != nil { - return *h.Size == 0, nil - } - - // If no `Size` or `throwaway` field is given, then - // it cannot be determined whether the layer is empty - // from the history, return false - return false, nil -} - -type signature struct { - Signatures []jsParsedSignature `json:"signatures"` -} - -type jsParsedSignature struct { - Protected string `json:"protected"` -} - -type protectedBlock struct { - Length int `json:"formatLength"` - Tail string `json:"formatTail"` -} - -// joseBase64UrlDecode decodes the given string using the standard base64 url -// decoder but first adds the appropriate number of trailing '=' characters in -// accordance with the jose specification. 
-// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlDecode(s string) ([]byte, error) { - switch len(s) % 4 { - case 0: - case 2: - s += "==" - case 3: - s += "=" - default: - return nil, errors.New("illegal base64url string") - } - return base64.URLEncoding.DecodeString(s) -} - -func stripSignature(b []byte) ([]byte, error) { - var sig signature - if err := json.Unmarshal(b, &sig); err != nil { - return nil, err - } - if len(sig.Signatures) == 0 { - return nil, errors.New("no signatures") - } - pb, err := joseBase64UrlDecode(sig.Signatures[0].Protected) - if err != nil { - return nil, fmt.Errorf("could not decode %s: %w", sig.Signatures[0].Protected, err) - } - - var protected protectedBlock - if err := json.Unmarshal(pb, &protected); err != nil { - return nil, err - } - - if protected.Length > len(b) { - return nil, errors.New("invalid protected length block") - } - - tail, err := joseBase64UrlDecode(protected.Tail) - if err != nil { - return nil, fmt.Errorf("invalid tail base 64 value: %w", err) - } - - return append(b[:protected.Length], tail...), nil -} - -type blobStateCalculator struct { - empty bool - digester digest.Digester -} - -func newBlobStateCalculator() *blobStateCalculator { - return &blobStateCalculator{ - empty: true, - digester: digest.Canonical.Digester(), - } -} - -func (c *blobStateCalculator) Write(p []byte) (int, error) { - if c.empty { - for _, b := range p { - if b != 0x00 { - c.empty = false - break - } - } - } - return c.digester.Hash().Write(p) -} - -func (c *blobStateCalculator) State() blobState { - return blobState{ - empty: c.empty, - diffID: c.digester.Digest(), - } -} diff --git a/src/vendor/github.com/containerd/containerd/remotes/docker/scope.go b/src/vendor/github.com/containerd/containerd/remotes/docker/scope.go deleted file mode 100644 index 95b4810ab..000000000 --- a/src/vendor/github.com/containerd/containerd/remotes/docker/scope.go +++ /dev/null @@ -1,101 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package docker - -import ( - "context" - "fmt" - "net/url" - "sort" - "strings" - - "github.com/containerd/containerd/reference" -) - -// RepositoryScope returns a repository scope string such as "repository:foo/bar:pull" -// for "host/foo/bar:baz". -// When push is true, both pull and push are added to the scope. -func RepositoryScope(refspec reference.Spec, push bool) (string, error) { - u, err := url.Parse("dummy://" + refspec.Locator) - if err != nil { - return "", err - } - s := "repository:" + strings.TrimPrefix(u.Path, "/") + ":pull" - if push { - s += ",push" - } - return s, nil -} - -// tokenScopesKey is used for the key for context.WithValue(). -// value: []string (e.g. {"registry:foo/bar:pull"}) -type tokenScopesKey struct{} - -// ContextWithRepositoryScope returns a context with tokenScopesKey{} and the repository scope value. 
-func ContextWithRepositoryScope(ctx context.Context, refspec reference.Spec, push bool) (context.Context, error) { - s, err := RepositoryScope(refspec, push) - if err != nil { - return nil, err - } - return WithScope(ctx, s), nil -} - -// WithScope appends a custom registry auth scope to the context. -func WithScope(ctx context.Context, scope string) context.Context { - var scopes []string - if v := ctx.Value(tokenScopesKey{}); v != nil { - scopes = v.([]string) - scopes = append(scopes, scope) - } else { - scopes = []string{scope} - } - return context.WithValue(ctx, tokenScopesKey{}, scopes) -} - -// ContextWithAppendPullRepositoryScope is used to append repository pull -// scope into existing scopes indexed by the tokenScopesKey{}. -func ContextWithAppendPullRepositoryScope(ctx context.Context, repo string) context.Context { - return WithScope(ctx, fmt.Sprintf("repository:%s:pull", repo)) -} - -// GetTokenScopes returns deduplicated and sorted scopes from ctx.Value(tokenScopesKey{}) and common scopes. -func GetTokenScopes(ctx context.Context, common []string) []string { - scopes := []string{} - if x := ctx.Value(tokenScopesKey{}); x != nil { - scopes = append(scopes, x.([]string)...) - } - - scopes = append(scopes, common...) - sort.Strings(scopes) - - if len(scopes) == 0 { - return scopes - } - - l := 0 - for idx := 1; idx < len(scopes); idx++ { - // Note: this comparison is unaware of the scope grammar (https://docs.docker.com/registry/spec/auth/scope/) - // So, "repository:foo/bar:pull,push" != "repository:foo/bar:push,pull", although semantically they are equal. - if scopes[l] == scopes[idx] { - continue - } - - l++ - scopes[l] = scopes[idx] - } - return scopes[:l+1] -} diff --git a/src/vendor/github.com/containerd/containerd/remotes/docker/status.go b/src/vendor/github.com/containerd/containerd/remotes/docker/status.go deleted file mode 100644 index 1f7b278ae..000000000 --- a/src/vendor/github.com/containerd/containerd/remotes/docker/status.go +++ /dev/null @@ -1,90 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package docker - -import ( - "fmt" - "sync" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/moby/locker" -) - -// Status of a content operation -type Status struct { - content.Status - - Committed bool - - // ErrClosed contains error encountered on close. 
- ErrClosed error - - // UploadUUID is used by the Docker registry to reference blob uploads - UploadUUID string -} - -// StatusTracker to track status of operations -type StatusTracker interface { - GetStatus(string) (Status, error) - SetStatus(string, Status) -} - -// StatusTrackLocker to track status of operations with lock -type StatusTrackLocker interface { - StatusTracker - Lock(string) - Unlock(string) -} - -type memoryStatusTracker struct { - statuses map[string]Status - m sync.Mutex - locker *locker.Locker -} - -// NewInMemoryTracker returns a StatusTracker that tracks content status in-memory -func NewInMemoryTracker() StatusTrackLocker { - return &memoryStatusTracker{ - statuses: map[string]Status{}, - locker: locker.New(), - } -} - -func (t *memoryStatusTracker) GetStatus(ref string) (Status, error) { - t.m.Lock() - defer t.m.Unlock() - status, ok := t.statuses[ref] - if !ok { - return Status{}, fmt.Errorf("status for ref %v: %w", ref, errdefs.ErrNotFound) - } - return status, nil -} - -func (t *memoryStatusTracker) SetStatus(ref string, status Status) { - t.m.Lock() - t.statuses[ref] = status - t.m.Unlock() -} - -func (t *memoryStatusTracker) Lock(ref string) { - t.locker.Lock(ref) -} - -func (t *memoryStatusTracker) Unlock(ref string) { - t.locker.Unlock(ref) -} diff --git a/src/vendor/github.com/containerd/containerd/remotes/errors/errors.go b/src/vendor/github.com/containerd/containerd/remotes/errors/errors.go deleted file mode 100644 index 67ccb23df..000000000 --- a/src/vendor/github.com/containerd/containerd/remotes/errors/errors.go +++ /dev/null @@ -1,55 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package errors - -import ( - "fmt" - "io" - "net/http" -) - -var _ error = ErrUnexpectedStatus{} - -// ErrUnexpectedStatus is returned if a registry API request returned with unexpected HTTP status -type ErrUnexpectedStatus struct { - Status string - StatusCode int - Body []byte - RequestURL, RequestMethod string -} - -func (e ErrUnexpectedStatus) Error() string { - return fmt.Sprintf("unexpected status: %s", e.Status) -} - -// NewUnexpectedStatusErr creates an ErrUnexpectedStatus from HTTP response -func NewUnexpectedStatusErr(resp *http.Response) error { - var b []byte - if resp.Body != nil { - b, _ = io.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB - } - err := ErrUnexpectedStatus{ - Body: b, - Status: resp.Status, - StatusCode: resp.StatusCode, - RequestMethod: resp.Request.Method, - } - if resp.Request.URL != nil { - err.RequestURL = resp.Request.URL.String() - } - return err -} diff --git a/src/vendor/github.com/containerd/containerd/remotes/handlers.go b/src/vendor/github.com/containerd/containerd/remotes/handlers.go deleted file mode 100644 index 8bcafb22a..000000000 --- a/src/vendor/github.com/containerd/containerd/remotes/handlers.go +++ /dev/null @@ -1,368 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package remotes - -import ( - "context" - "errors" - "fmt" - "io" - "strings" - "sync" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/platforms" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" - "golang.org/x/sync/semaphore" -) - -type refKeyPrefix struct{} - -// WithMediaTypeKeyPrefix adds a custom key prefix for a media type which is used when storing -// data in the content store from the FetchHandler. -// -// Used in `MakeRefKey` to determine what the key prefix should be. -func WithMediaTypeKeyPrefix(ctx context.Context, mediaType, prefix string) context.Context { - var values map[string]string - if v := ctx.Value(refKeyPrefix{}); v != nil { - values = v.(map[string]string) - } else { - values = make(map[string]string) - } - - values[mediaType] = prefix - return context.WithValue(ctx, refKeyPrefix{}, values) -} - -// MakeRefKey returns a unique reference for the descriptor. This reference can be -// used to lookup ongoing processes related to the descriptor. This function -// may look to the context to namespace the reference appropriately. -func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string { - key := desc.Digest.String() - if desc.Annotations != nil { - if name, ok := desc.Annotations[ocispec.AnnotationRefName]; ok { - key = fmt.Sprintf("%s@%s", name, desc.Digest.String()) - } - } - - if v := ctx.Value(refKeyPrefix{}); v != nil { - values := v.(map[string]string) - if prefix := values[desc.MediaType]; prefix != "" { - return prefix + "-" + key - } - } - - switch mt := desc.MediaType; { - case mt == images.MediaTypeDockerSchema2Manifest || mt == ocispec.MediaTypeImageManifest: - return "manifest-" + key - case mt == images.MediaTypeDockerSchema2ManifestList || mt == ocispec.MediaTypeImageIndex: - return "index-" + key - case images.IsLayerType(mt): - return "layer-" + key - case images.IsKnownConfig(mt): - return "config-" + key - default: - log.G(ctx).Warnf("reference for unknown type: %s", mt) - return "unknown-" + key - } -} - -// FetchHandler returns a handler that will fetch all content into the ingester -// discovered in a call to Dispatch. Use with ChildrenHandler to do a full -// recursive fetch. 
-func FetchHandler(ingester content.Ingester, fetcher Fetcher) images.HandlerFunc { - return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { - ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{ - "digest": desc.Digest, - "mediatype": desc.MediaType, - "size": desc.Size, - })) - - switch desc.MediaType { - case images.MediaTypeDockerSchema1Manifest: - return nil, fmt.Errorf("%v not supported", desc.MediaType) - default: - err := fetch(ctx, ingester, fetcher, desc) - return nil, err - } - } -} - -func fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc ocispec.Descriptor) error { - log.G(ctx).Debug("fetch") - - cw, err := content.OpenWriter(ctx, ingester, content.WithRef(MakeRefKey(ctx, desc)), content.WithDescriptor(desc)) - if err != nil { - if errdefs.IsAlreadyExists(err) { - return nil - } - return err - } - defer cw.Close() - - ws, err := cw.Status() - if err != nil { - return err - } - - if desc.Size == 0 { - // most likely a poorly configured registry/web front end which responded with no - // Content-Length header; unable (not to mention useless) to commit a 0-length entry - // into the content store. Error out here otherwise the error sent back is confusing - return fmt.Errorf("unable to fetch descriptor (%s) which reports content size of zero: %w", desc.Digest, errdefs.ErrInvalidArgument) - } - if ws.Offset == desc.Size { - // If writer is already complete, commit and return - err := cw.Commit(ctx, desc.Size, desc.Digest) - if err != nil && !errdefs.IsAlreadyExists(err) { - return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err) - } - return nil - } - - rc, err := fetcher.Fetch(ctx, desc) - if err != nil { - return err - } - defer rc.Close() - - return content.Copy(ctx, cw, rc, desc.Size, desc.Digest) -} - -// PushHandler returns a handler that will push all content from the provider -// using a writer from the pusher. -func PushHandler(pusher Pusher, provider content.Provider) images.HandlerFunc { - return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{ - "digest": desc.Digest, - "mediatype": desc.MediaType, - "size": desc.Size, - })) - - err := push(ctx, provider, pusher, desc) - return nil, err - } -} - -func push(ctx context.Context, provider content.Provider, pusher Pusher, desc ocispec.Descriptor) error { - log.G(ctx).Debug("push") - - var ( - cw content.Writer - err error - ) - if cs, ok := pusher.(content.Ingester); ok { - cw, err = content.OpenWriter(ctx, cs, content.WithRef(MakeRefKey(ctx, desc)), content.WithDescriptor(desc)) - } else { - cw, err = pusher.Push(ctx, desc) - } - if err != nil { - if !errdefs.IsAlreadyExists(err) { - return err - } - - return nil - } - defer cw.Close() - - ra, err := provider.ReaderAt(ctx, desc) - if err != nil { - return err - } - defer ra.Close() - - rd := io.NewSectionReader(ra, 0, desc.Size) - return content.Copy(ctx, cw, rd, desc.Size, desc.Digest) -} - -// PushContent pushes content specified by the descriptor from the provider. -// -// Base handlers can be provided which will be called before any push specific -// handlers. 
-func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, store content.Store, limiter *semaphore.Weighted, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error { - - var m sync.Mutex - manifestStack := []ocispec.Descriptor{} - - filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest, - images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: - m.Lock() - manifestStack = append(manifestStack, desc) - m.Unlock() - return nil, images.ErrStopHandler - default: - return nil, nil - } - }) - - pushHandler := PushHandler(pusher, store) - - platformFilterhandler := images.FilterPlatforms(images.ChildrenHandler(store), platform) - - annotateHandler := annotateDistributionSourceHandler(platformFilterhandler, store) - - var handler images.Handler = images.Handlers( - annotateHandler, - filterHandler, - pushHandler, - ) - if wrapper != nil { - handler = wrapper(handler) - } - - if err := images.Dispatch(ctx, handler, limiter, desc); err != nil { - return err - } - - // Iterate in reverse order as seen, parent always uploaded after child - for i := len(manifestStack) - 1; i >= 0; i-- { - _, err := pushHandler(ctx, manifestStack[i]) - if err != nil { - // TODO(estesp): until we have a more complete method for index push, we need to report - // missing dependencies in an index/manifest list by sensing the "400 Bad Request" - // as a marker for this problem - if (manifestStack[i].MediaType == ocispec.MediaTypeImageIndex || - manifestStack[i].MediaType == images.MediaTypeDockerSchema2ManifestList) && - errors.Unwrap(err) != nil && strings.Contains(errors.Unwrap(err).Error(), "400 Bad Request") { - return fmt.Errorf("manifest list/index references to blobs and/or manifests are missing in your target registry: %w", err) - } - return err - } - } - - return nil -} - -// SkipNonDistributableBlobs returns a handler that skips blobs that have a media type that is "non-distributeable". -// An example of this kind of content would be a Windows base layer, which is not supposed to be redistributed. -// -// This is based on the media type of the content: -// - application/vnd.oci.image.layer.nondistributable -// - application/vnd.docker.image.rootfs.foreign -func SkipNonDistributableBlobs(f images.HandlerFunc) images.HandlerFunc { - return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - if images.IsNonDistributable(desc.MediaType) { - log.G(ctx).WithField("digest", desc.Digest).WithField("mediatype", desc.MediaType).Debug("Skipping non-distributable blob") - return nil, images.ErrSkipDesc - } - - if images.IsLayerType(desc.MediaType) { - return nil, nil - } - - children, err := f(ctx, desc) - if err != nil { - return nil, err - } - if len(children) == 0 { - return nil, nil - } - - out := make([]ocispec.Descriptor, 0, len(children)) - for _, child := range children { - if !images.IsNonDistributable(child.MediaType) { - out = append(out, child) - } else { - log.G(ctx).WithField("digest", child.Digest).WithField("mediatype", child.MediaType).Debug("Skipping non-distributable blob") - } - } - return out, nil - } -} - -// FilterManifestByPlatformHandler allows Handler to handle non-target -// platform's manifest and configuration data. 
-func FilterManifestByPlatformHandler(f images.HandlerFunc, m platforms.Matcher) images.HandlerFunc { - return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - children, err := f(ctx, desc) - if err != nil { - return nil, err - } - - // no platform information - if desc.Platform == nil || m == nil { - return children, nil - } - - var descs []ocispec.Descriptor - switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: - if m.Match(*desc.Platform) { - descs = children - } else { - for _, child := range children { - if child.MediaType == images.MediaTypeDockerSchema2Config || - child.MediaType == ocispec.MediaTypeImageConfig { - - descs = append(descs, child) - } - } - } - default: - descs = children - } - return descs, nil - } -} - -// annotateDistributionSourceHandler add distribution source label into -// annotation of config or blob descriptor. -func annotateDistributionSourceHandler(f images.HandlerFunc, manager content.Manager) images.HandlerFunc { - return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - children, err := f(ctx, desc) - if err != nil { - return nil, err - } - - // only add distribution source for the config or blob data descriptor - switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest, - images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: - default: - return children, nil - } - - for i := range children { - child := children[i] - - info, err := manager.Info(ctx, child.Digest) - if err != nil { - return nil, err - } - - for k, v := range info.Labels { - if !strings.HasPrefix(k, "containerd.io/distribution.source.") { - continue - } - - if child.Annotations == nil { - child.Annotations = map[string]string{} - } - child.Annotations[k] = v - } - - children[i] = child - } - return children, nil - } -} diff --git a/src/vendor/github.com/containerd/containerd/remotes/resolver.go b/src/vendor/github.com/containerd/containerd/remotes/resolver.go deleted file mode 100644 index 624b14f05..000000000 --- a/src/vendor/github.com/containerd/containerd/remotes/resolver.go +++ /dev/null @@ -1,82 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package remotes - -import ( - "context" - "io" - - "github.com/containerd/containerd/content" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// Resolver provides remotes based on a locator. -type Resolver interface { - // Resolve attempts to resolve the reference into a name and descriptor. - // - // The argument `ref` should be a scheme-less URI representing the remote. - // Structurally, it has a host and path. The "host" can be used to directly - // reference a specific host or be matched against a specific handler. - // - // The returned name should be used to identify the referenced entity. - // Dependending on the remote namespace, this may be immutable or mutable. 
- // While the name may differ from ref, it should itself be a valid ref. - // - // If the resolution fails, an error will be returned. - Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) - - // Fetcher returns a new fetcher for the provided reference. - // All content fetched from the returned fetcher will be - // from the namespace referred to by ref. - Fetcher(ctx context.Context, ref string) (Fetcher, error) - - // Pusher returns a new pusher for the provided reference - // The returned Pusher should satisfy content.Ingester and concurrent attempts - // to push the same blob using the Ingester API should result in ErrUnavailable. - Pusher(ctx context.Context, ref string) (Pusher, error) -} - -// Fetcher fetches content -type Fetcher interface { - // Fetch the resource identified by the descriptor. - Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) -} - -// Pusher pushes content -type Pusher interface { - // Push returns a content writer for the given resource identified - // by the descriptor. - Push(ctx context.Context, d ocispec.Descriptor) (content.Writer, error) -} - -// FetcherFunc allows package users to implement a Fetcher with just a -// function. -type FetcherFunc func(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) - -// Fetch content -func (fn FetcherFunc) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { - return fn(ctx, desc) -} - -// PusherFunc allows package users to implement a Pusher with just a -// function. -type PusherFunc func(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) - -// Push content -func (fn PusherFunc) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) { - return fn(ctx, desc) -} diff --git a/src/vendor/github.com/containerd/containerd/version/version.go b/src/vendor/github.com/containerd/containerd/version/version.go deleted file mode 100644 index cef635bb9..000000000 --- a/src/vendor/github.com/containerd/containerd/version/version.go +++ /dev/null @@ -1,34 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package version - -import "runtime" - -var ( - // Package is filled at linking time - Package = "github.com/containerd/containerd" - - // Version holds the complete version number. Filled in at linking time. - Version = "1.6.6+unknown" - - // Revision is filled with the VCS (e.g. git) revision being used to build - // the program at linking time. - Revision = "" - - // GoVersion is Go tree's version. - GoVersion = runtime.Version() -) diff --git a/src/vendor/github.com/cyphar/filepath-securejoin/.travis.yml b/src/vendor/github.com/cyphar/filepath-securejoin/.travis.yml deleted file mode 100644 index b94ff8cf9..000000000 --- a/src/vendor/github.com/cyphar/filepath-securejoin/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (C) 2017 SUSE LLC. All rights reserved. 
-# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -language: go -go: - - 1.13.x - - 1.16.x - - tip -arch: - - AMD64 - - ppc64le -os: - - linux - - osx - -script: - - go test -cover -v ./... - -notifications: - email: false diff --git a/src/vendor/github.com/cyphar/filepath-securejoin/LICENSE b/src/vendor/github.com/cyphar/filepath-securejoin/LICENSE deleted file mode 100644 index bec842f29..000000000 --- a/src/vendor/github.com/cyphar/filepath-securejoin/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. -Copyright (C) 2017 SUSE LLC. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/vendor/github.com/cyphar/filepath-securejoin/README.md b/src/vendor/github.com/cyphar/filepath-securejoin/README.md deleted file mode 100644 index 3624617c8..000000000 --- a/src/vendor/github.com/cyphar/filepath-securejoin/README.md +++ /dev/null @@ -1,79 +0,0 @@ -## `filepath-securejoin` ## - -[![Build Status](https://travis-ci.org/cyphar/filepath-securejoin.svg?branch=master)](https://travis-ci.org/cyphar/filepath-securejoin) - -An implementation of `SecureJoin`, a [candidate for inclusion in the Go -standard library][go#20126]. The purpose of this function is to be a "secure" -alternative to `filepath.Join`, and in particular it provides certain -guarantees that are not provided by `filepath.Join`. - -> **NOTE**: This code is *only* safe if you are not at risk of other processes -> modifying path components after you've used `SecureJoin`. If it is possible -> for a malicious process to modify path components of the resolved path, then -> you will be vulnerable to some fairly trivial TOCTOU race conditions. [There -> are some Linux kernel patches I'm working on which might allow for a better -> solution.][lwn-obeneath] -> -> In addition, with a slightly modified API it might be possible to use -> `O_PATH` and verify that the opened path is actually the resolved one -- but -> I have not done that yet. 
I might add it in the future as a helper function -> to help users verify the path (we can't just return `/proc/self/fd/` -> because that doesn't always work transparently for all users). - -This is the function prototype: - -```go -func SecureJoin(root, unsafePath string) (string, error) -``` - -This library **guarantees** the following: - -* If no error is set, the resulting string **must** be a child path of - `root` and will not contain any symlink path components (they will all be - expanded). - -* When expanding symlinks, all symlink path components **must** be resolved - relative to the provided root. In particular, this can be considered a - userspace implementation of how `chroot(2)` operates on file paths. Note that - these symlinks will **not** be expanded lexically (`filepath.Clean` is not - called on the input before processing). - -* Non-existent path components are unaffected by `SecureJoin` (similar to - `filepath.EvalSymlinks`'s semantics). - -* The returned path will always be `filepath.Clean`ed and thus not contain any - `..` components. - -A (trivial) implementation of this function on GNU/Linux systems could be done -with the following (note that this requires root privileges and is far more -opaque than the implementation in this library, and also requires that -`readlink` is inside the `root` path): - -```go -package securejoin - -import ( - "os/exec" - "path/filepath" -) - -func SecureJoin(root, unsafePath string) (string, error) { - unsafePath = string(filepath.Separator) + unsafePath - cmd := exec.Command("chroot", root, - "readlink", "--canonicalize-missing", "--no-newline", unsafePath) - output, err := cmd.CombinedOutput() - if err != nil { - return "", err - } - expanded := string(output) - return filepath.Join(root, expanded), nil -} -``` - -[lwn-obeneath]: https://lwn.net/Articles/767547/ -[go#20126]: https://github.com/golang/go/issues/20126 - -### License ### - -The license of this project is the same as Go, which is a BSD 3-clause license -available in the `LICENSE` file. diff --git a/src/vendor/github.com/cyphar/filepath-securejoin/VERSION b/src/vendor/github.com/cyphar/filepath-securejoin/VERSION deleted file mode 100644 index 717903969..000000000 --- a/src/vendor/github.com/cyphar/filepath-securejoin/VERSION +++ /dev/null @@ -1 +0,0 @@ -0.2.3 diff --git a/src/vendor/github.com/cyphar/filepath-securejoin/join.go b/src/vendor/github.com/cyphar/filepath-securejoin/join.go deleted file mode 100644 index 7dd08dbbd..000000000 --- a/src/vendor/github.com/cyphar/filepath-securejoin/join.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. -// Copyright (C) 2017 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package securejoin is an implementation of the hopefully-soon-to-be-included -// SecureJoin helper that is meant to be part of the "path/filepath" package. -// The purpose of this project is to provide a PoC implementation to make the -// SecureJoin proposal (https://github.com/golang/go/issues/20126) more -// tangible. -package securejoin - -import ( - "bytes" - "errors" - "os" - "path/filepath" - "strings" - "syscall" -) - -// IsNotExist tells you if err is an error that implies that either the path -// accessed does not exist (or path components don't exist). This is -// effectively a more broad version of os.IsNotExist. 
-func IsNotExist(err error) bool { - // Check that it's not actually an ENOTDIR, which in some cases is a more - // convoluted case of ENOENT (usually involving weird paths). - return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT) -} - -// SecureJoinVFS joins the two given path components (similar to Join) except -// that the returned path is guaranteed to be scoped inside the provided root -// path (when evaluated). Any symbolic links in the path are evaluated with the -// given root treated as the root of the filesystem, similar to a chroot. The -// filesystem state is evaluated through the given VFS interface (if nil, the -// standard os.* family of functions are used). -// -// Note that the guarantees provided by this function only apply if the path -// components in the returned string are not modified (in other words are not -// replaced with symlinks on the filesystem) after this function has returned. -// Such a symlink race is necessarily out-of-scope of SecureJoin. -func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { - // Use the os.* VFS implementation if none was specified. - if vfs == nil { - vfs = osVFS{} - } - - var path bytes.Buffer - n := 0 - for unsafePath != "" { - if n > 255 { - return "", &os.PathError{Op: "SecureJoin", Path: root + "/" + unsafePath, Err: syscall.ELOOP} - } - - // Next path component, p. - i := strings.IndexRune(unsafePath, filepath.Separator) - var p string - if i == -1 { - p, unsafePath = unsafePath, "" - } else { - p, unsafePath = unsafePath[:i], unsafePath[i+1:] - } - - // Create a cleaned path, using the lexical semantics of /../a, to - // create a "scoped" path component which can safely be joined to fullP - // for evaluation. At this point, path.String() doesn't contain any - // symlink components. - cleanP := filepath.Clean(string(filepath.Separator) + path.String() + p) - if cleanP == string(filepath.Separator) { - path.Reset() - continue - } - fullP := filepath.Clean(root + cleanP) - - // Figure out whether the path is a symlink. - fi, err := vfs.Lstat(fullP) - if err != nil && !IsNotExist(err) { - return "", err - } - // Treat non-existent path components the same as non-symlinks (we - // can't do any better here). - if IsNotExist(err) || fi.Mode()&os.ModeSymlink == 0 { - path.WriteString(p) - path.WriteRune(filepath.Separator) - continue - } - - // Only increment when we actually dereference a link. - n++ - - // It's a symlink, expand it by prepending it to the yet-unparsed path. - dest, err := vfs.Readlink(fullP) - if err != nil { - return "", err - } - // Absolute symlinks reset any work we've already done. - if filepath.IsAbs(dest) { - path.Reset() - } - unsafePath = dest + string(filepath.Separator) + unsafePath - } - - // We have to clean path.String() here because it may contain '..' - // components that are entirely lexical, but would be misleading otherwise. - // And finally do a final clean to ensure that root is also lexically - // clean. - fullP := filepath.Clean(string(filepath.Separator) + path.String()) - return filepath.Clean(root + fullP), nil -} - -// SecureJoin is a wrapper around SecureJoinVFS that just uses the os.* library -// of functions as the VFS. If in doubt, use this function over SecureJoinVFS. 
-func SecureJoin(root, unsafePath string) (string, error) { - return SecureJoinVFS(root, unsafePath, nil) -} diff --git a/src/vendor/github.com/cyphar/filepath-securejoin/vfs.go b/src/vendor/github.com/cyphar/filepath-securejoin/vfs.go deleted file mode 100644 index a82a5eae1..000000000 --- a/src/vendor/github.com/cyphar/filepath-securejoin/vfs.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (C) 2017 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securejoin - -import "os" - -// In future this should be moved into a separate package, because now there -// are several projects (umoci and go-mtree) that are using this sort of -// interface. - -// VFS is the minimal interface necessary to use SecureJoinVFS. A nil VFS is -// equivalent to using the standard os.* family of functions. This is mainly -// used for the purposes of mock testing, but also can be used to otherwise use -// SecureJoin with VFS-like system. -type VFS interface { - // Lstat returns a FileInfo describing the named file. If the file is a - // symbolic link, the returned FileInfo describes the symbolic link. Lstat - // makes no attempt to follow the link. These semantics are identical to - // os.Lstat. - Lstat(name string) (os.FileInfo, error) - - // Readlink returns the destination of the named symbolic link. These - // semantics are identical to os.Readlink. - Readlink(name string) (string, error) -} - -// osVFS is the "nil" VFS, in that it just passes everything through to the os -// module. -type osVFS struct{} - -// Lstat returns a FileInfo describing the named file. If the file is a -// symbolic link, the returned FileInfo describes the symbolic link. Lstat -// makes no attempt to follow the link. These semantics are identical to -// os.Lstat. -func (o osVFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) } - -// Readlink returns the destination of the named symbolic link. These -// semantics are identical to os.Readlink. -func (o osVFS) Readlink(name string) (string, error) { return os.Readlink(name) } diff --git a/src/vendor/github.com/docker/cli/AUTHORS b/src/vendor/github.com/docker/cli/AUTHORS deleted file mode 100644 index 8990f85b5..000000000 --- a/src/vendor/github.com/docker/cli/AUTHORS +++ /dev/null @@ -1,771 +0,0 @@ -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `scripts/docs/generate-authors.sh`. - -Aanand Prasad -Aaron L. 
Xu -Aaron Lehmann -Aaron.L.Xu -Abdur Rehman -Abhinandan Prativadi -Abin Shahab -Abreto FU -Ace Tang -Addam Hardy -Adolfo Ochagavía -Adrian Plata -Adrien Duermael -Adrien Folie -Ahmet Alp Balkan -Aidan Feldman -Aidan Hobson Sayers -AJ Bowen -Akhil Mohan -Akihiro Suda -Akim Demaille -Alan Thompson -Albert Callarisa -Albin Kerouanton -Aleksa Sarai -Aleksander Piotrowski -Alessandro Boch -Alex Mavrogiannis -Alex Mayer -Alexander Boyd -Alexander Larsson -Alexander Morozov -Alexander Ryabov -Alexandre González -Alfred Landrum -Alicia Lauerman -Allen Sun -Alvin Deng -Amen Belayneh -Amir Goldstein -Amit Krishnan -Amit Shukla -Amy Lindburg -Anca Iordache -Anda Xu -Andrea Luzzardi -Andreas Köhler -Andrew France -Andrew Hsu -Andrew Macpherson -Andrew McDonnell -Andrew Po -Andrey Petrov -Andrii Berehuliak -André Martins -Andy Goldstein -Andy Rothfusz -Anil Madhavapeddy -Ankush Agarwal -Anne Henmi -Anton Polonskiy -Antonio Murdaca -Antonis Kalipetis -Anusha Ragunathan -Ao Li -Arash Deshmeh -Arko Dasgupta -Arnaud Porterie -Arthur Peka -Ashwini Oruganti -Azat Khuyiyakhmetov -Bardia Keyoumarsi -Barnaby Gray -Bastiaan Bakker -BastianHofmann -Ben Bonnefoy -Ben Creasy -Ben Firshman -Benjamin Boudreau -Benoit Sigoure -Bhumika Bayani -Bill Wang -Bin Liu -Bingshen Wang -Boaz Shuster -Bogdan Anton -Boris Pruessmann -Bradley Cicenas -Brandon Mitchell -Brandon Philips -Brent Salisbury -Bret Fisher -Brian (bex) Exelbierd -Brian Goff -Brian Wieder -Bryan Bess -Bryan Boreham -Bryan Murphy -bryfry -Cameron Spear -Cao Weiwei -Carlo Mion -Carlos Alexandro Becker -Carlos de Paula -Ce Gao -Cedric Davies -Cezar Sa Espinola -Chad Faragher -Chao Wang -Charles Chan -Charles Law -Charles Smith -Charlie Drage -ChaYoung You -Chen Chuanliang -Chen Hanxiao -Chen Mingjie -Chen Qiu -Chris Gavin -Chris Gibson -Chris McKinnel -Chris Snow -Chris Weyl -Christian Persson -Christian Stefanescu -Christophe Robin -Christophe Vidal -Christopher Biscardi -Christopher Crone -Christopher Jones -Christy Norman -Chun Chen -Clinton Kitson -Coenraad Loubser -Colin Hebert -Collin Guarino -Colm Hally -Comical Derskeal <27731088+derskeal@users.noreply.github.com> -Corey Farrell -Corey Quon -Craig Wilhite -Cristian Staretu -Daehyeok Mun -Dafydd Crosby -Daisuke Ito -dalanlan -Damien Nadé -Dan Cotora -Daniel Artine -Daniel Cassidy -Daniel Dao -Daniel Farrell -Daniel Gasienica -Daniel Goosen -Daniel Helfand -Daniel Hiltgen -Daniel J Walsh -Daniel Nephin -Daniel Norberg -Daniel Watkins -Daniel Zhang -Daniil Nikolenko -Danny Berger -Darren Shepherd -Darren Stahl -Dattatraya Kumbhar -Dave Goodchild -Dave Henderson -Dave Tucker -David Beitey -David Calavera -David Cramer -David Dooling -David Gageot -David Lechner -David Scott -David Sheets -David Williamson -David Xia -David Young -Deng Guangxing -Denis Defreyne -Denis Gladkikh -Denis Ollier -Dennis Docter -Derek McGowan -Deshi Xiao -Dharmit Shah -Dhawal Yogesh Bhanushali -Dieter Reuter -Dima Stopel -Dimitry Andric -Ding Fei -Diogo Monica -Djordje Lukic -Dmitry Gusev -Dmitry Smirnov -Dmitry V. Krivenok -Dominik Braun -Don Kjer -Dong Chen -Doug Davis -Drew Erny -Ed Costello -Elango Sivanandam -Eli Uriegas -Eli Uriegas -Elias Faxö -Elliot Luo <956941328@qq.com> -Eric Curtin -Eric G. Noriega -Eric Rosenberg -Eric Sage -Eric-Olivier Lamey -Erica Windisch -Erik Hollensbe -Erik St. Martin -Essam A. 
Hassan -Ethan Haynes -Euan Kemp -Eugene Yakubovich -Evan Allrich -Evan Hazlett -Evan Krall -Evelyn Xu -Everett Toews -Fabio Falci -Fabrizio Soppelsa -Felix Hupfeld -Felix Rabe -Filip JareÅ¡ -Flavio Crisciani -Florian Klein -Forest Johnson -Foysal Iqbal -François Scala -Fred Lifton -Frederic Hemberger -Frederick F. Kautz IV -Frederik Nordahl Jul Sabroe -Frieder Bluemle -Gabriel Nicolas Avellaneda -Gaetan de Villele -Gang Qiao -Gary Schaetz -Genki Takiuchi -George MacRorie -George Xie -Gianluca Borello -Gildas Cuisinier -Goksu Toprak -Gou Rao -Grant Reaber -Greg Pflaum -Guilhem Lettron -Guillaume J. Charmes -Guillaume Le Floch -gwx296173 -Günther Jungbluth -Hakan Özler -Hao Zhang <21521210@zju.edu.cn> -Harald Albers -Harold Cooper -Harry Zhang -He Simei -Hector S -Helen Xie -Henning Sprang -Henry N -Hernan Garcia -Hongbin Lu -Hu Keping -Huayi Zhang -Hugo Gabriel Eyherabide -huqun -Huu Nguyen -Hyzhou Zhy -Ian Campbell -Ian Philpot -Ignacio Capurro -Ilya Dmitrichenko -Ilya Khlopotov -Ilya Sotkov -Ioan Eugen Stan -Isabel Jimenez -Ivan Grcic -Ivan Markin -Jacob Atzen -Jacob Tomlinson -Jaivish Kothari -Jake Lambert -Jake Sanders -James Nesbitt -James Turnbull -Jamie Hannaford -Jan Koprowski -Jan Pazdziora -Jan-Jaap Driessen -Jana Radhakrishnan -Jared Hocutt -Jasmine Hegman -Jason Heiss -Jason Plum -Jay Kamat -Jean Rouge -Jean-Christophe Sirot -Jean-Pierre Huynh -Jeff Lindsay -Jeff Nickoloff -Jeff Silberman -Jeremy Chambers -Jeremy Unruh -Jeremy Yallop -Jeroen Franse -Jesse Adametz -Jessica Frazelle -Jezeniel Zapanta -Jian Zhang -Jie Luo -Jilles Oldenbeuving -Jim Galasyn -Jimmy Leger -Jimmy Song -jimmyxian -Jintao Zhang -Joao Fernandes -Joe Abbey -Joe Doliner -Joe Gordon -Joel Handwell -Joey Geiger -Joffrey F -Johan Euphrosine -Johannes 'fish' Ziemke -John Feminella -John Harris -John Howard -John Laswell -John Maguire -John Mulhausen -John Starks -John Stephens -John Tims -John V. Martinez -John Willis -Jon Johnson -Jonatas Baldin -Jonathan Boulle -Jonathan Lee -Jonathan Lomas -Jonathan McCrohan -Jonh Wendell -Jordan Jennings -Jose J. 
Escobar <53836904+jescobar-docker@users.noreply.github.com> -Joseph Kern -Josh Bodah -Josh Chorlton -Josh Hawn -Josh Horwitz -Josh Soref -Julien Barbier -Julien Kassar -Julien Maitrehenry -Justas Brazauskas -Justin Cormack -Justin Simonelis -Justyn Temme -Jyrki Puttonen -Jérémie Drouet -Jérôme Petazzoni -Jörg Thalheim -Kai Blin -Kai Qiang Wu (Kennan) -Kara Alexandra -Kareem Khazem -Karthik Nayak -Kat Samperi -Kathryn Spiers -Katie McLaughlin -Ke Xu -Kei Ohmura -Keith Hudgins -Ken Cochrane -Ken ICHIKAWA -Kenfe-Mickaël Laventure -Kevin Burke -Kevin Feyrer -Kevin Kern -Kevin Kirsche -Kevin Meredith -Kevin Richardson -Kevin Woblick -khaled souf -Kim Eik -Kir Kolyshkin -Kotaro Yoshimatsu -Krasi Georgiev -Kris-Mikael Krister -Kun Zhang -Kunal Kushwaha -Lachlan Cooper -Lai Jiangshan -Lars Kellogg-Stedman -Laura Frank -Laurent Erignoux -Lee Gaines -Lei Jitang -Lennie -Leo Gallucci -Lewis Daly -Li Yi -Li Yi -Liang-Chi Hsieh -Lifubang -Lihua Tang -Lily Guo -Lin Lu -Linus Heckemann -Liping Xue -Liron Levin -liwenqi -lixiaobing10051267 -Lloyd Dewolf -Lorenzo Fontana -Louis Opter -Luca Favatella -Luca Marturana -Lucas Chan -Luka Hartwig -Lukas Heeren -Lukasz Zajaczkowski -Lydell Manganti -Lénaïc Huard -Ma Shimiao -Mabin -Maciej Kalisz -Madhav Puri -Madhu Venugopal -Madhur Batra -Malte Janduda -Manjunath A Kumatagi -Mansi Nahar -mapk0y -Marc Bihlmaier -Marco Mariani -Marco Vedovati -Marcus Martins -Marianna Tessel -Marius Ileana -Marius Sturm -Mark Oates -Marsh Macy -Martin Mosegaard Amdisen -Mary Anthony -Mason Fish -Mason Malone -Mateusz Major -Mathieu Champlon -Matt Gucci -Matt Robenolt -Matteo Orefice -Matthew Heon -Matthieu Hauglustaine -Mauro Porras P -Max Shytikov -Maxime Petazzoni -Mei ChunTao -Micah Zoltu -Michael A. Smith -Michael Bridgen -Michael Crosby -Michael Friis -Michael Irwin -Michael Käufl -Michael Prokop -Michael Scharf -Michael Spetsiotis -Michael Steinert -Michael West -Michal Minář -MichaÅ‚ Czeraszkiewicz -Miguel Angel Alvarez Cabrerizo -Mihai Borobocea -Mihuleacc Sergiu -Mike Brown -Mike Casas -Mike Danese -Mike Dillon -Mike Goelzer -Mike MacCana -mikelinjie <294893458@qq.com> -Mikhail Vasin -Milind Chawre -Mindaugas Rukas -Miroslav Gula -Misty Stanley-Jones -Mohammad Banikazemi -Mohammed Aaqib Ansari -Mohini Anne Dsouza -Moorthy RS -Morgan Bauer -Morten Hekkvang -Moysés Borges -Mrunal Patel -muicoder -Muthukumar R -Máximo Cuadros -MÃ¥rten Cassel -Nace Oroz -Nahum Shalman -Nalin Dahyabhai -Nao YONASHIRO -Nassim 'Nass' Eddequiouaq -Natalie Parker -Nate Brennand -Nathan Hsieh -Nathan LeClaire -Nathan McCauley -Neil Peterson -Nick Adcock -Nico Stapelbroek -Nicola Kabar -Nicolas Borboën -Nicolas De Loof -Nikhil Chawla -Nikolas Garofil -Nikolay Milovanov -Nir Soffer -Nishant Totla -NIWA Hideyuki -Noah Treuhaft -O.S. 
Tezer -Odin Ugedal -ohmystack -Olle Jonsson -Olli Janatuinen -Oscar Wieman -Otto Kekäläinen -Ovidio Mallo -Pascal Borreli -Patrick Böänziger -Patrick Hemmer -Patrick Lang -Paul -Paul Kehrer -Paul Lietar -Paul Mulders -Paul Weaver -Pavel Pospisil -PaweÅ‚ Szczekutowicz -Peeyush Gupta -Per Lundberg -Peter Edge -Peter Hsu -Peter Jaffe -Peter Kehl -Peter Nagy -Peter Salvatore -Peter Waller -Phil Estes -Philip Alexander Etling -Philipp Gillé -Philipp Schmied -pidster -pixelistik -Pratik Karki -Prayag Verma -Preston Cowley -Pure White -Qiang Huang -Qinglan Peng -qudongfang -Raghavendra K T -Rahul Zoldyck -Ravi Shekhar Jethani -Ray Tsang -Reficul -Remy Suen -Renaud Gaubert -Ricardo N Feliciano -Rich Moyse -Richard Mathie -Richard Scothern -Rick Wieman -Ritesh H Shukla -Riyaz Faizullabhoy -Rob Gulewich -Robert Wallis -Robin Naundorf -Robin Speekenbrink -Rodolfo Ortiz -Rogelio Canedo -Rohan Verma -Roland Kammerer -Roman Dudin -Rory Hunter -Ross Boucher -Rubens Figueiredo -Rui Cao -Ryan Belgrave -Ryan Detzel -Ryan Stelly -Ryan Wilson-Perkin -Ryan Zhang -Sainath Grandhi -Sakeven Jiang -Sally O'Malley -Sam Neirinck -Samarth Shah -Sambuddha Basu -Sami Tabet -Samuel Cochran -Samuel Karp -Santhosh Manohar -Sargun Dhillon -Saswat Bhattacharya -Scott Brenner -Scott Collier -Sean Christopherson -Sean Rodman -Sebastiaan van Stijn -Sergey Tryuber -Serhat Gülçiçek -Sevki Hasirci -Shaun Kaasten -Sheng Yang -Shijiang Wei -Shishir Mahajan -Shoubhik Bose -Shukui Yang -Sian Lerk Lau -Sidhartha Mani -sidharthamani -Silvin Lubecki -Simei He -Simon Ferquel -Simon Heimberg -Sindhu S -Slava Semushin -Solomon Hykes -Song Gao -Spencer Brown -squeegels <1674195+squeegels@users.noreply.github.com> -Srini Brahmaroutu -Stefan S. -Stefan Scherer -Stefan Weil -Stephane Jeandeaux -Stephen Day -Stephen Rust -Steve Durrheimer -Steve Richards -Steven Burgess -Subhajit Ghosh -Sun Jianbo -Sune Keller -Sungwon Han -Sunny Gogoi -Sven Dowideit -Sylvain Baubeau -Sébastien HOUZÉ -T K Sourabh -TAGOMORI Satoshi -taiji-tech -Taylor Jones -Tejaswini Duggaraju -Tengfei Wang -Teppei Fukuda -Thatcher Peskens -Thibault Coupin -Thomas Gazagnaire -Thomas Krzero -Thomas Leonard -Thomas Léveil -Thomas Riccardi -Thomas Swift -Tianon Gravi -Tianyi Wang -Tibor Vass -Tim Dettrick -Tim Hockin -Tim Sampson -Tim Smith -Tim Waugh -Tim Wraight -timfeirg -Timothy Hobbs -Tobias Bradtke -Tobias Gesellchen -Todd Whiteman -Tom Denham -Tom Fotherby -Tom Klingenberg -Tom Milligan -Tom X. 
Tobin -Tomas Tomecek -Tomasz Kopczynski -Tomáš HrÄka -Tony Abboud -Tõnis Tiigi -Trapier Marshall -Travis Cline -Tristan Carel -Tycho Andersen -Tycho Andersen -uhayate -Ulrich Bareth -Ulysses Souza -Umesh Yadav -Valentin Lorentz -Venkateswara Reddy Bukkasamudram -Veres Lajos -Victor Vieux -Victoria Bialas -Viktor Stanchev -Vimal Raghubir -Vincent Batts -Vincent Bernat -Vincent Demeester -Vincent Woo -Vishnu Kannan -Vivek Goyal -Wang Jie -Wang Lei -Wang Long -Wang Ping -Wang Xing -Wang Yuexiao -Wang Yumu <37442693@qq.com> -Wataru Ishida -Wayne Song -Wen Cheng Ma -Wenzhi Liang -Wes Morgan -Wewang Xiaorenfine -William Henry -Xianglin Gao -Xiaodong Liu -Xiaodong Zhang -Xiaoxi He -Xinbo Weng -Xuecong Liao -Yan Feng -Yanqiang Miao -Yassine Tijani -Yi EungJun -Ying Li -Yong Tang -Yosef Fertel -Yu Peng -Yuan Sun -Yue Zhang -Yunxiang Huang -Zachary Romero -Zander Mackie -zebrilee -Zhang Kun -Zhang Wei -Zhang Wentao -ZhangHang -zhenghenghuo -Zhou Hao -Zhoulin Xie -Zhu Guihua -Ãlex González -Ãlvaro Lázaro -Ãtila Camurça Alves -å¾ä¿Šæ° diff --git a/src/vendor/github.com/docker/cli/LICENSE b/src/vendor/github.com/docker/cli/LICENSE deleted file mode 100644 index 9c8e20ab8..000000000 --- a/src/vendor/github.com/docker/cli/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2017 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/src/vendor/github.com/docker/cli/NOTICE b/src/vendor/github.com/docker/cli/NOTICE deleted file mode 100644 index 58b19b6d1..000000000 --- a/src/vendor/github.com/docker/cli/NOTICE +++ /dev/null @@ -1,19 +0,0 @@ -Docker -Copyright 2012-2017 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -This product contains software (https://github.com/creack/pty) developed -by Keith Rarick, licensed under the MIT License. - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
diff --git a/src/vendor/github.com/docker/cli/cli/config/config.go b/src/vendor/github.com/docker/cli/cli/config/config.go deleted file mode 100644 index 31ad117d4..000000000 --- a/src/vendor/github.com/docker/cli/cli/config/config.go +++ /dev/null @@ -1,167 +0,0 @@ -package config - -import ( - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - - "github.com/docker/cli/cli/config/configfile" - "github.com/docker/cli/cli/config/credentials" - "github.com/docker/cli/cli/config/types" - "github.com/docker/docker/pkg/homedir" - "github.com/pkg/errors" -) - -const ( - // ConfigFileName is the name of config file - ConfigFileName = "config.json" - configFileDir = ".docker" - oldConfigfile = ".dockercfg" - contextsDir = "contexts" -) - -var ( - initConfigDir = new(sync.Once) - configDir string - homeDir string -) - -// resetHomeDir is used in testing to reset the "homeDir" package variable to -// force re-lookup of the home directory between tests. -func resetHomeDir() { - homeDir = "" -} - -func getHomeDir() string { - if homeDir == "" { - homeDir = homedir.Get() - } - return homeDir -} - -// resetConfigDir is used in testing to reset the "configDir" package variable -// and its sync.Once to force re-lookup between tests. -func resetConfigDir() { - configDir = "" - initConfigDir = new(sync.Once) -} - -func setConfigDir() { - if configDir != "" { - return - } - configDir = os.Getenv("DOCKER_CONFIG") - if configDir == "" { - configDir = filepath.Join(getHomeDir(), configFileDir) - } -} - -// Dir returns the directory the configuration file is stored in -func Dir() string { - initConfigDir.Do(setConfigDir) - return configDir -} - -// ContextStoreDir returns the directory the docker contexts are stored in -func ContextStoreDir() string { - return filepath.Join(Dir(), contextsDir) -} - -// SetDir sets the directory the configuration file is stored in -func SetDir(dir string) { - configDir = filepath.Clean(dir) -} - -// Path returns the path to a file relative to the config dir -func Path(p ...string) (string, error) { - path := filepath.Join(append([]string{Dir()}, p...)...) - if !strings.HasPrefix(path, Dir()+string(filepath.Separator)) { - return "", errors.Errorf("path %q is outside of root config directory %q", path, Dir()) - } - return path, nil -} - -// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from -// a non-nested reader -func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { - configFile := configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - } - err := configFile.LegacyLoadFromReader(configData) - return &configFile, err -} - -// LoadFromReader is a convenience function that creates a ConfigFile object from -// a reader -func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { - configFile := configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - } - err := configFile.LoadFromReader(configData) - return &configFile, err -} - -// Load reads the configuration files in the given directory, and sets up -// the auth config information and returns values. 
-// FIXME: use the internal golang config parser -func Load(configDir string) (*configfile.ConfigFile, error) { - cfg, _, err := load(configDir) - return cfg, err -} - -// TODO remove this temporary hack, which is used to warn about the deprecated ~/.dockercfg file -// so we can remove the bool return value and collapse this back into `Load` -func load(configDir string) (*configfile.ConfigFile, bool, error) { - printLegacyFileWarning := false - - if configDir == "" { - configDir = Dir() - } - - filename := filepath.Join(configDir, ConfigFileName) - configFile := configfile.New(filename) - - // Try happy path first - latest config file - if file, err := os.Open(filename); err == nil { - defer file.Close() - err = configFile.LoadFromReader(file) - if err != nil { - err = errors.Wrap(err, filename) - } - return configFile, printLegacyFileWarning, err - } else if !os.IsNotExist(err) { - // if file is there but we can't stat it for any reason other - // than it doesn't exist then stop - return configFile, printLegacyFileWarning, errors.Wrap(err, filename) - } - - // Can't find latest config file so check for the old one - filename = filepath.Join(getHomeDir(), oldConfigfile) - if file, err := os.Open(filename); err == nil { - printLegacyFileWarning = true - defer file.Close() - if err := configFile.LegacyLoadFromReader(file); err != nil { - return configFile, printLegacyFileWarning, errors.Wrap(err, filename) - } - } - return configFile, printLegacyFileWarning, nil -} - -// LoadDefaultConfigFile attempts to load the default config file and returns -// an initialized ConfigFile struct if none is found. -func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile { - configFile, printLegacyFileWarning, err := load(Dir()) - if err != nil { - fmt.Fprintf(stderr, "WARNING: Error loading config file: %v\n", err) - } - if printLegacyFileWarning { - _, _ = fmt.Fprintln(stderr, "WARNING: Support for the legacy ~/.dockercfg configuration file and file-format is deprecated and will be removed in an upcoming release") - } - if !configFile.ContainsAuth() { - configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore) - } - return configFile -} diff --git a/src/vendor/github.com/docker/cli/cli/config/configfile/file.go b/src/vendor/github.com/docker/cli/cli/config/configfile/file.go deleted file mode 100644 index d6f710817..000000000 --- a/src/vendor/github.com/docker/cli/cli/config/configfile/file.go +++ /dev/null @@ -1,415 +0,0 @@ -package configfile - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/docker/cli/cli/config/credentials" - "github.com/docker/cli/cli/config/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const ( - // This constant is only used for really old config files when the - // URL wasn't saved as part of the config file and it was just - // assumed to be this value. 
- defaultIndexServer = "https://index.docker.io/v1/" -) - -// ConfigFile ~/.docker/config.json file info -type ConfigFile struct { - AuthConfigs map[string]types.AuthConfig `json:"auths"` - HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` - PsFormat string `json:"psFormat,omitempty"` - ImagesFormat string `json:"imagesFormat,omitempty"` - NetworksFormat string `json:"networksFormat,omitempty"` - PluginsFormat string `json:"pluginsFormat,omitempty"` - VolumesFormat string `json:"volumesFormat,omitempty"` - StatsFormat string `json:"statsFormat,omitempty"` - DetachKeys string `json:"detachKeys,omitempty"` - CredentialsStore string `json:"credsStore,omitempty"` - CredentialHelpers map[string]string `json:"credHelpers,omitempty"` - Filename string `json:"-"` // Note: for internal use only - ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"` - ServicesFormat string `json:"servicesFormat,omitempty"` - TasksFormat string `json:"tasksFormat,omitempty"` - SecretFormat string `json:"secretFormat,omitempty"` - ConfigFormat string `json:"configFormat,omitempty"` - NodesFormat string `json:"nodesFormat,omitempty"` - PruneFilters []string `json:"pruneFilters,omitempty"` - Proxies map[string]ProxyConfig `json:"proxies,omitempty"` - Experimental string `json:"experimental,omitempty"` - StackOrchestrator string `json:"stackOrchestrator,omitempty"` - Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"` - CurrentContext string `json:"currentContext,omitempty"` - CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` - Plugins map[string]map[string]string `json:"plugins,omitempty"` - Aliases map[string]string `json:"aliases,omitempty"` -} - -// ProxyConfig contains proxy configuration settings -type ProxyConfig struct { - HTTPProxy string `json:"httpProxy,omitempty"` - HTTPSProxy string `json:"httpsProxy,omitempty"` - NoProxy string `json:"noProxy,omitempty"` - FTPProxy string `json:"ftpProxy,omitempty"` -} - -// KubernetesConfig contains Kubernetes orchestrator settings -type KubernetesConfig struct { - AllNamespaces string `json:"allNamespaces,omitempty"` -} - -// New initializes an empty configuration file for the given filename 'fn' -func New(fn string) *ConfigFile { - return &ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - HTTPHeaders: make(map[string]string), - Filename: fn, - Plugins: make(map[string]map[string]string), - Aliases: make(map[string]string), - } -} - -// LegacyLoadFromReader reads the non-nested configuration data given and sets up the -// auth config information with given directory and populates the receiver object -func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error { - b, err := ioutil.ReadAll(configData) - if err != nil { - return err - } - - if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { - arr := strings.Split(string(b), "\n") - if len(arr) < 2 { - return errors.Errorf("The Auth config file is empty") - } - authConfig := types.AuthConfig{} - origAuth := strings.Split(arr[0], " = ") - if len(origAuth) != 2 { - return errors.Errorf("Invalid Auth config file") - } - authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) - if err != nil { - return err - } - authConfig.ServerAddress = defaultIndexServer - configFile.AuthConfigs[defaultIndexServer] = authConfig - } else { - for k, authConfig := range configFile.AuthConfigs { - authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) - if err != nil { - return err - } - authConfig.Auth = "" - 
authConfig.ServerAddress = k - configFile.AuthConfigs[k] = authConfig - } - } - return nil -} - -// LoadFromReader reads the configuration data given and sets up the auth config -// information with given directory and populates the receiver object -func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { - if err := json.NewDecoder(configData).Decode(configFile); err != nil && !errors.Is(err, io.EOF) { - return err - } - var err error - for addr, ac := range configFile.AuthConfigs { - if ac.Auth != "" { - ac.Username, ac.Password, err = decodeAuth(ac.Auth) - if err != nil { - return err - } - } - ac.Auth = "" - ac.ServerAddress = addr - configFile.AuthConfigs[addr] = ac - } - return checkKubernetesConfiguration(configFile.Kubernetes) -} - -// ContainsAuth returns whether there is authentication configured -// in this file or not. -func (configFile *ConfigFile) ContainsAuth() bool { - return configFile.CredentialsStore != "" || - len(configFile.CredentialHelpers) > 0 || - len(configFile.AuthConfigs) > 0 -} - -// GetAuthConfigs returns the mapping of repo to auth configuration -func (configFile *ConfigFile) GetAuthConfigs() map[string]types.AuthConfig { - return configFile.AuthConfigs -} - -// SaveToWriter encodes and writes out all the authorization information to -// the given writer -func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error { - // Encode sensitive data into a new/temp struct - tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs)) - for k, authConfig := range configFile.AuthConfigs { - authCopy := authConfig - // encode and save the authstring, while blanking out the original fields - authCopy.Auth = encodeAuth(&authCopy) - authCopy.Username = "" - authCopy.Password = "" - authCopy.ServerAddress = "" - tmpAuthConfigs[k] = authCopy - } - - saveAuthConfigs := configFile.AuthConfigs - configFile.AuthConfigs = tmpAuthConfigs - defer func() { configFile.AuthConfigs = saveAuthConfigs }() - - // User-Agent header is automatically set, and should not be stored in the configuration - for v := range configFile.HTTPHeaders { - if strings.EqualFold(v, "User-Agent") { - delete(configFile.HTTPHeaders, v) - } - } - - data, err := json.MarshalIndent(configFile, "", "\t") - if err != nil { - return err - } - _, err = writer.Write(data) - return err -} - -// Save encodes and writes out all the authorization information -func (configFile *ConfigFile) Save() (retErr error) { - if configFile.Filename == "" { - return errors.Errorf("Can't save config with empty filename") - } - - dir := filepath.Dir(configFile.Filename) - if err := os.MkdirAll(dir, 0700); err != nil { - return err - } - temp, err := ioutil.TempFile(dir, filepath.Base(configFile.Filename)) - if err != nil { - return err - } - defer func() { - temp.Close() - if retErr != nil { - if err := os.Remove(temp.Name()); err != nil { - logrus.WithError(err).WithField("file", temp.Name()).Debug("Error cleaning up temp file") - } - } - }() - - err = configFile.SaveToWriter(temp) - if err != nil { - return err - } - - if err := temp.Close(); err != nil { - return errors.Wrap(err, "error closing temp file") - } - - // Handle situation where the configfile is a symlink - cfgFile := configFile.Filename - if f, err := os.Readlink(cfgFile); err == nil { - cfgFile = f - } - - // Try copying the current config file (if any) ownership and permissions - copyFilePermissions(cfgFile, temp.Name()) - return os.Rename(temp.Name(), cfgFile) -} - -// ParseProxyConfig computes proxy configuration by 
retrieving the config for the provided host and -// then checking this against any environment variables provided to the container -func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts map[string]*string) map[string]*string { - var cfgKey string - - if _, ok := configFile.Proxies[host]; !ok { - cfgKey = "default" - } else { - cfgKey = host - } - - config := configFile.Proxies[cfgKey] - permitted := map[string]*string{ - "HTTP_PROXY": &config.HTTPProxy, - "HTTPS_PROXY": &config.HTTPSProxy, - "NO_PROXY": &config.NoProxy, - "FTP_PROXY": &config.FTPProxy, - } - m := runOpts - if m == nil { - m = make(map[string]*string) - } - for k := range permitted { - if *permitted[k] == "" { - continue - } - if _, ok := m[k]; !ok { - m[k] = permitted[k] - } - if _, ok := m[strings.ToLower(k)]; !ok { - m[strings.ToLower(k)] = permitted[k] - } - } - return m -} - -// encodeAuth creates a base64 encoded string to containing authorization information -func encodeAuth(authConfig *types.AuthConfig) string { - if authConfig.Username == "" && authConfig.Password == "" { - return "" - } - - authStr := authConfig.Username + ":" + authConfig.Password - msg := []byte(authStr) - encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) - base64.StdEncoding.Encode(encoded, msg) - return string(encoded) -} - -// decodeAuth decodes a base64 encoded string and returns username and password -func decodeAuth(authStr string) (string, string, error) { - if authStr == "" { - return "", "", nil - } - - decLen := base64.StdEncoding.DecodedLen(len(authStr)) - decoded := make([]byte, decLen) - authByte := []byte(authStr) - n, err := base64.StdEncoding.Decode(decoded, authByte) - if err != nil { - return "", "", err - } - if n > decLen { - return "", "", errors.Errorf("Something went wrong decoding auth config") - } - arr := strings.SplitN(string(decoded), ":", 2) - if len(arr) != 2 { - return "", "", errors.Errorf("Invalid auth configuration file") - } - password := strings.Trim(arr[1], "\x00") - return arr[0], password, nil -} - -// GetCredentialsStore returns a new credentials store from the settings in the -// configuration file -func (configFile *ConfigFile) GetCredentialsStore(registryHostname string) credentials.Store { - if helper := getConfiguredCredentialStore(configFile, registryHostname); helper != "" { - return newNativeStore(configFile, helper) - } - return credentials.NewFileStore(configFile) -} - -// var for unit testing. -var newNativeStore = func(configFile *ConfigFile, helperSuffix string) credentials.Store { - return credentials.NewNativeStore(configFile, helperSuffix) -} - -// GetAuthConfig for a repository from the credential store -func (configFile *ConfigFile) GetAuthConfig(registryHostname string) (types.AuthConfig, error) { - return configFile.GetCredentialsStore(registryHostname).Get(registryHostname) -} - -// getConfiguredCredentialStore returns the credential helper configured for the -// given registry, the default credsStore, or the empty string if neither are -// configured. -func getConfiguredCredentialStore(c *ConfigFile, registryHostname string) string { - if c.CredentialHelpers != nil && registryHostname != "" { - if helper, exists := c.CredentialHelpers[registryHostname]; exists { - return helper - } - } - return c.CredentialsStore -} - -// GetAllCredentials returns all of the credentials stored in all of the -// configured credential stores. 
-func (configFile *ConfigFile) GetAllCredentials() (map[string]types.AuthConfig, error) { - auths := make(map[string]types.AuthConfig) - addAll := func(from map[string]types.AuthConfig) { - for reg, ac := range from { - auths[reg] = ac - } - } - - defaultStore := configFile.GetCredentialsStore("") - newAuths, err := defaultStore.GetAll() - if err != nil { - return nil, err - } - addAll(newAuths) - - // Auth configs from a registry-specific helper should override those from the default store. - for registryHostname := range configFile.CredentialHelpers { - newAuth, err := configFile.GetAuthConfig(registryHostname) - if err != nil { - return nil, err - } - auths[registryHostname] = newAuth - } - return auths, nil -} - -// GetFilename returns the file name that this config file is based on. -func (configFile *ConfigFile) GetFilename() string { - return configFile.Filename -} - -// PluginConfig retrieves the requested option for the given plugin. -func (configFile *ConfigFile) PluginConfig(pluginname, option string) (string, bool) { - if configFile.Plugins == nil { - return "", false - } - pluginConfig, ok := configFile.Plugins[pluginname] - if !ok { - return "", false - } - value, ok := pluginConfig[option] - return value, ok -} - -// SetPluginConfig sets the option to the given value for the given -// plugin. Passing a value of "" will remove the option. If removing -// the final config item for a given plugin then also cleans up the -// overall plugin entry. -func (configFile *ConfigFile) SetPluginConfig(pluginname, option, value string) { - if configFile.Plugins == nil { - configFile.Plugins = make(map[string]map[string]string) - } - pluginConfig, ok := configFile.Plugins[pluginname] - if !ok { - pluginConfig = make(map[string]string) - configFile.Plugins[pluginname] = pluginConfig - } - if value != "" { - pluginConfig[option] = value - } else { - delete(pluginConfig, option) - } - if len(pluginConfig) == 0 { - delete(configFile.Plugins, pluginname) - } -} - -func checkKubernetesConfiguration(kubeConfig *KubernetesConfig) error { - if kubeConfig == nil { - return nil - } - switch kubeConfig.AllNamespaces { - case "": - case "enabled": - case "disabled": - default: - return fmt.Errorf("invalid 'kubernetes.allNamespaces' value, should be 'enabled' or 'disabled': %s", kubeConfig.AllNamespaces) - } - return nil -} diff --git a/src/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go b/src/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go deleted file mode 100644 index 6af671812..000000000 --- a/src/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go +++ /dev/null @@ -1,36 +0,0 @@ -//go:build !windows -// +build !windows - -package configfile - -import ( - "os" - "syscall" -) - -// copyFilePermissions copies file ownership and permissions from "src" to "dst", -// ignoring any error during the process. 
-func copyFilePermissions(src, dst string) { - var ( - mode os.FileMode = 0600 - uid, gid int - ) - - fi, err := os.Stat(src) - if err != nil { - return - } - if fi.Mode().IsRegular() { - mode = fi.Mode() - } - if err := os.Chmod(dst, mode); err != nil { - return - } - - uid = int(fi.Sys().(*syscall.Stat_t).Uid) - gid = int(fi.Sys().(*syscall.Stat_t).Gid) - - if uid > 0 && gid > 0 { - _ = os.Chown(dst, uid, gid) - } -} diff --git a/src/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go b/src/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go deleted file mode 100644 index 42fffc39a..000000000 --- a/src/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package configfile - -func copyFilePermissions(src, dst string) { - // TODO implement for Windows -} diff --git a/src/vendor/github.com/docker/cli/cli/config/credentials/credentials.go b/src/vendor/github.com/docker/cli/cli/config/credentials/credentials.go deleted file mode 100644 index 28d58ec48..000000000 --- a/src/vendor/github.com/docker/cli/cli/config/credentials/credentials.go +++ /dev/null @@ -1,17 +0,0 @@ -package credentials - -import ( - "github.com/docker/cli/cli/config/types" -) - -// Store is the interface that any credentials store must implement. -type Store interface { - // Erase removes credentials from the store for a given server. - Erase(serverAddress string) error - // Get retrieves credentials from the store for a given server. - Get(serverAddress string) (types.AuthConfig, error) - // GetAll retrieves all the credentials from the store. - GetAll() (map[string]types.AuthConfig, error) - // Store saves credentials in the store. - Store(authConfig types.AuthConfig) error -} diff --git a/src/vendor/github.com/docker/cli/cli/config/credentials/default_store.go b/src/vendor/github.com/docker/cli/cli/config/credentials/default_store.go deleted file mode 100644 index 402235bff..000000000 --- a/src/vendor/github.com/docker/cli/cli/config/credentials/default_store.go +++ /dev/null @@ -1,21 +0,0 @@ -package credentials - -import ( - exec "golang.org/x/sys/execabs" -) - -// DetectDefaultStore return the default credentials store for the platform if -// the store executable is available. 
-func DetectDefaultStore(store string) string { - platformDefault := defaultCredentialsStore() - - // user defined or no default for platform - if store != "" || platformDefault == "" { - return store - } - - if _, err := exec.LookPath(remoteCredentialsPrefix + platformDefault); err == nil { - return platformDefault - } - return "" -} diff --git a/src/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go b/src/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go deleted file mode 100644 index 5d42dec62..000000000 --- a/src/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go +++ /dev/null @@ -1,5 +0,0 @@ -package credentials - -func defaultCredentialsStore() string { - return "osxkeychain" -} diff --git a/src/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go b/src/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go deleted file mode 100644 index a9012c6d4..000000000 --- a/src/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go +++ /dev/null @@ -1,13 +0,0 @@ -package credentials - -import ( - "os/exec" -) - -func defaultCredentialsStore() string { - if _, err := exec.LookPath("pass"); err == nil { - return "pass" - } - - return "secretservice" -} diff --git a/src/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go b/src/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go deleted file mode 100644 index c9630ea51..000000000 --- a/src/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build !windows && !darwin && !linux -// +build !windows,!darwin,!linux - -package credentials - -func defaultCredentialsStore() string { - return "" -} diff --git a/src/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go b/src/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go deleted file mode 100644 index bb799ca61..000000000 --- a/src/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package credentials - -func defaultCredentialsStore() string { - return "wincred" -} diff --git a/src/vendor/github.com/docker/cli/cli/config/credentials/file_store.go b/src/vendor/github.com/docker/cli/cli/config/credentials/file_store.go deleted file mode 100644 index e509820b7..000000000 --- a/src/vendor/github.com/docker/cli/cli/config/credentials/file_store.go +++ /dev/null @@ -1,81 +0,0 @@ -package credentials - -import ( - "strings" - - "github.com/docker/cli/cli/config/types" -) - -type store interface { - Save() error - GetAuthConfigs() map[string]types.AuthConfig - GetFilename() string -} - -// fileStore implements a credentials store using -// the docker configuration file to keep the credentials in plain text. -type fileStore struct { - file store -} - -// NewFileStore creates a new file credentials store. -func NewFileStore(file store) Store { - return &fileStore{file: file} -} - -// Erase removes the given credentials from the file store. -func (c *fileStore) Erase(serverAddress string) error { - delete(c.file.GetAuthConfigs(), serverAddress) - return c.file.Save() -} - -// Get retrieves credentials for a specific server from the file store. 
-func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) { - authConfig, ok := c.file.GetAuthConfigs()[serverAddress] - if !ok { - // Maybe they have a legacy config file, we will iterate the keys converting - // them to the new format and testing - for r, ac := range c.file.GetAuthConfigs() { - if serverAddress == ConvertToHostname(r) { - return ac, nil - } - } - - authConfig = types.AuthConfig{} - } - return authConfig, nil -} - -func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) { - return c.file.GetAuthConfigs(), nil -} - -// Store saves the given credentials in the file store. -func (c *fileStore) Store(authConfig types.AuthConfig) error { - c.file.GetAuthConfigs()[authConfig.ServerAddress] = authConfig - return c.file.Save() -} - -func (c *fileStore) GetFilename() string { - return c.file.GetFilename() -} - -func (c *fileStore) IsFileStore() bool { - return true -} - -// ConvertToHostname converts a registry url which has http|https prepended -// to just an hostname. -// Copied from github.com/docker/docker/registry.ConvertToHostname to reduce dependencies. -func ConvertToHostname(url string) string { - stripped := url - if strings.HasPrefix(url, "http://") { - stripped = strings.TrimPrefix(url, "http://") - } else if strings.HasPrefix(url, "https://") { - stripped = strings.TrimPrefix(url, "https://") - } - - nameParts := strings.SplitN(stripped, "/", 2) - - return nameParts[0] -} diff --git a/src/vendor/github.com/docker/cli/cli/config/credentials/native_store.go b/src/vendor/github.com/docker/cli/cli/config/credentials/native_store.go deleted file mode 100644 index afe542cc3..000000000 --- a/src/vendor/github.com/docker/cli/cli/config/credentials/native_store.go +++ /dev/null @@ -1,143 +0,0 @@ -package credentials - -import ( - "github.com/docker/cli/cli/config/types" - "github.com/docker/docker-credential-helpers/client" - "github.com/docker/docker-credential-helpers/credentials" -) - -const ( - remoteCredentialsPrefix = "docker-credential-" - tokenUsername = "" -) - -// nativeStore implements a credentials store -// using native keychain to keep credentials secure. -// It piggybacks into a file store to keep users' emails. -type nativeStore struct { - programFunc client.ProgramFunc - fileStore Store -} - -// NewNativeStore creates a new native store that -// uses a remote helper program to manage credentials. -func NewNativeStore(file store, helperSuffix string) Store { - name := remoteCredentialsPrefix + helperSuffix - return &nativeStore{ - programFunc: client.NewShellProgramFunc(name), - fileStore: NewFileStore(file), - } -} - -// Erase removes the given credentials from the native store. -func (c *nativeStore) Erase(serverAddress string) error { - if err := client.Erase(c.programFunc, serverAddress); err != nil { - return err - } - - // Fallback to plain text store to remove email - return c.fileStore.Erase(serverAddress) -} - -// Get retrieves credentials for a specific server from the native store. -func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) { - // load user email if it exist or an empty auth config. - auth, _ := c.fileStore.Get(serverAddress) - - creds, err := c.getCredentialsFromStore(serverAddress) - if err != nil { - return auth, err - } - auth.Username = creds.Username - auth.IdentityToken = creds.IdentityToken - auth.Password = creds.Password - - return auth, nil -} - -// GetAll retrieves all the credentials from the native store. 
-func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) { - auths, err := c.listCredentialsInStore() - if err != nil { - return nil, err - } - - // Emails are only stored in the file store. - // This call can be safely eliminated when emails are removed. - fileConfigs, _ := c.fileStore.GetAll() - - authConfigs := make(map[string]types.AuthConfig) - for registry := range auths { - creds, err := c.getCredentialsFromStore(registry) - if err != nil { - return nil, err - } - ac := fileConfigs[registry] // might contain Email - ac.Username = creds.Username - ac.Password = creds.Password - ac.IdentityToken = creds.IdentityToken - authConfigs[registry] = ac - } - - return authConfigs, nil -} - -// Store saves the given credentials in the file store. -func (c *nativeStore) Store(authConfig types.AuthConfig) error { - if err := c.storeCredentialsInStore(authConfig); err != nil { - return err - } - authConfig.Username = "" - authConfig.Password = "" - authConfig.IdentityToken = "" - - // Fallback to old credential in plain text to save only the email - return c.fileStore.Store(authConfig) -} - -// storeCredentialsInStore executes the command to store the credentials in the native store. -func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error { - creds := &credentials.Credentials{ - ServerURL: config.ServerAddress, - Username: config.Username, - Secret: config.Password, - } - - if config.IdentityToken != "" { - creds.Username = tokenUsername - creds.Secret = config.IdentityToken - } - - return client.Store(c.programFunc, creds) -} - -// getCredentialsFromStore executes the command to get the credentials from the native store. -func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) { - var ret types.AuthConfig - - creds, err := client.Get(c.programFunc, serverAddress) - if err != nil { - if credentials.IsErrCredentialsNotFound(err) { - // do not return an error if the credentials are not - // in the keychain. Let docker ask for new credentials. - return ret, nil - } - return ret, err - } - - if creds.Username == tokenUsername { - ret.IdentityToken = creds.Secret - } else { - ret.Password = creds.Secret - ret.Username = creds.Username - } - - ret.ServerAddress = serverAddress - return ret, nil -} - -// listCredentialsInStore returns a listing of stored credentials as a map of -// URL -> username. -func (c *nativeStore) listCredentialsInStore() (map[string]string, error) { - return client.List(c.programFunc) -} diff --git a/src/vendor/github.com/docker/cli/cli/config/types/authconfig.go b/src/vendor/github.com/docker/cli/cli/config/types/authconfig.go deleted file mode 100644 index 056af6b84..000000000 --- a/src/vendor/github.com/docker/cli/cli/config/types/authconfig.go +++ /dev/null @@ -1,22 +0,0 @@ -package types - -// AuthConfig contains authorization information for connecting to a Registry -type AuthConfig struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth,omitempty"` - - // Email is an optional value associated with the username. - // This field is deprecated and will be removed in a later - // version of docker. - Email string `json:"email,omitempty"` - - ServerAddress string `json:"serveraddress,omitempty"` - - // IdentityToken is used to authenticate the user and get - // an access token for the registry. 
- IdentityToken string `json:"identitytoken,omitempty"` - - // RegistryToken is a bearer token to be sent to a registry - RegistryToken string `json:"registrytoken,omitempty"` -} diff --git a/src/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go b/src/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go deleted file mode 100644 index a9616c58a..000000000 --- a/src/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go +++ /dev/null @@ -1,1596 +0,0 @@ -package v2 - -import ( - "net/http" - "regexp" - - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/errcode" - "github.com/opencontainers/go-digest" -) - -var ( - nameParameterDescriptor = ParameterDescriptor{ - Name: "name", - Type: "string", - Format: reference.NameRegexp.String(), - Required: true, - Description: `Name of the target repository.`, - } - - referenceParameterDescriptor = ParameterDescriptor{ - Name: "reference", - Type: "string", - Format: reference.TagRegexp.String(), - Required: true, - Description: `Tag or digest of the target manifest.`, - } - - uuidParameterDescriptor = ParameterDescriptor{ - Name: "uuid", - Type: "opaque", - Required: true, - Description: "A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", - } - - digestPathParameter = ParameterDescriptor{ - Name: "digest", - Type: "path", - Required: true, - Format: digest.DigestRegexp.String(), - Description: `Digest of desired blob.`, - } - - hostHeader = ParameterDescriptor{ - Name: "Host", - Type: "string", - Description: "Standard HTTP Host Header. Should be set to the registry host.", - Format: "", - Examples: []string{"registry-1.docker.io"}, - } - - authHeader = ParameterDescriptor{ - Name: "Authorization", - Type: "string", - Description: "An RFC7235 compliant authorization header.", - Format: " ", - Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="}, - } - - authChallengeHeader = ParameterDescriptor{ - Name: "WWW-Authenticate", - Type: "string", - Description: "An RFC7235 compliant authentication challenge header.", - Format: ` realm="", ..."`, - Examples: []string{ - `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`, - }, - } - - contentLengthZeroHeader = ParameterDescriptor{ - Name: "Content-Length", - Description: "The `Content-Length` header must be zero and the body must be empty.", - Type: "integer", - Format: "0", - } - - dockerUploadUUIDHeader = ParameterDescriptor{ - Name: "Docker-Upload-UUID", - Description: "Identifies the docker upload uuid for the current request.", - Type: "uuid", - Format: "", - } - - digestHeader = ParameterDescriptor{ - Name: "Docker-Content-Digest", - Description: "Digest of the targeted content for the request.", - Type: "digest", - Format: "", - } - - linkHeader = ParameterDescriptor{ - Name: "Link", - Type: "link", - Description: "RFC5988 compliant rel='next' with URL to next result set, if available", - Format: `<?n=&last=>; rel="next"`, - } - - paginationParameters = []ParameterDescriptor{ - { - Name: "n", - Type: "integer", - Description: "Limit the number of entries in each response. 
It not present, all entries will be returned.", - Format: "", - Required: false, - }, - { - Name: "last", - Type: "string", - Description: "Result set will include values lexically after last.", - Format: "", - Required: false, - }, - } - - unauthorizedResponseDescriptor = ResponseDescriptor{ - Name: "Authentication Required", - StatusCode: http.StatusUnauthorized, - Description: "The client is not authenticated.", - Headers: []ParameterDescriptor{ - authChallengeHeader, - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnauthorized, - }, - } - - repositoryNotFoundResponseDescriptor = ResponseDescriptor{ - Name: "No Such Repository Error", - StatusCode: http.StatusNotFound, - Description: "The repository is not known to the registry.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - }, - } - - deniedResponseDescriptor = ResponseDescriptor{ - Name: "Access Denied", - StatusCode: http.StatusForbidden, - Description: "The client does not have required access to the repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeDenied, - }, - } - - tooManyRequestsDescriptor = ResponseDescriptor{ - Name: "Too Many Requests", - StatusCode: http.StatusTooManyRequests, - Description: "The client made too many requests within a time interval.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeTooManyRequests, - }, - } -) - -const ( - manifestBody = `{ - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": "" - }, - ... - ] - ], - "history": , - "signature": -}` - - errorsBody = `{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -}` -) - -// APIDescriptor exports descriptions of the layout of the v2 registry API. -var APIDescriptor = struct { - // RouteDescriptors provides a list of the routes available in the API. - RouteDescriptors []RouteDescriptor -}{ - RouteDescriptors: routeDescriptors, -} - -// RouteDescriptor describes a route specified by name. -type RouteDescriptor struct { - // Name is the name of the route, as specified in RouteNameXXX exports. - // These names a should be considered a unique reference for a route. If - // the route is registered with gorilla, this is the name that will be - // used. - Name string - - // Path is a gorilla/mux-compatible regexp that can be used to match the - // route. For any incoming method and path, only one route descriptor - // should match. - Path string - - // Entity should be a short, human-readalbe description of the object - // targeted by the endpoint. 
- Entity string - - // Description should provide an accurate overview of the functionality - // provided by the route. - Description string - - // Methods should describe the various HTTP methods that may be used on - // this route, including request and response formats. - Methods []MethodDescriptor -} - -// MethodDescriptor provides a description of the requests that may be -// conducted with the target method. -type MethodDescriptor struct { - - // Method is an HTTP method, such as GET, PUT or POST. - Method string - - // Description should provide an overview of the functionality provided by - // the covered method, suitable for use in documentation. Use of markdown - // here is encouraged. - Description string - - // Requests is a slice of request descriptors enumerating how this - // endpoint may be used. - Requests []RequestDescriptor -} - -// RequestDescriptor covers a particular set of headers and parameters that -// can be carried out with the parent method. Its most helpful to have one -// RequestDescriptor per API use case. -type RequestDescriptor struct { - // Name provides a short identifier for the request, usable as a title or - // to provide quick context for the particular request. - Name string - - // Description should cover the requests purpose, covering any details for - // this particular use case. - Description string - - // Headers describes headers that must be used with the HTTP request. - Headers []ParameterDescriptor - - // PathParameters enumerate the parameterized path components for the - // given request, as defined in the route's regular expression. - PathParameters []ParameterDescriptor - - // QueryParameters provides a list of query parameters for the given - // request. - QueryParameters []ParameterDescriptor - - // Body describes the format of the request body. - Body BodyDescriptor - - // Successes enumerates the possible responses that are considered to be - // the result of a successful request. - Successes []ResponseDescriptor - - // Failures covers the possible failures from this particular request. - Failures []ResponseDescriptor -} - -// ResponseDescriptor describes the components of an API response. -type ResponseDescriptor struct { - // Name provides a short identifier for the response, usable as a title or - // to provide quick context for the particular response. - Name string - - // Description should provide a brief overview of the role of the - // response. - Description string - - // StatusCode specifies the status received by this particular response. - StatusCode int - - // Headers covers any headers that may be returned from the response. - Headers []ParameterDescriptor - - // Fields describes any fields that may be present in the response. - Fields []ParameterDescriptor - - // ErrorCodes enumerates the error codes that may be returned along with - // the response. - ErrorCodes []errcode.ErrorCode - - // Body describes the body of the response, if any. - Body BodyDescriptor -} - -// BodyDescriptor describes a request body and its expected content type. For -// the most part, it should be example json or some placeholder for body -// data in documentation. -type BodyDescriptor struct { - ContentType string - Format string -} - -// ParameterDescriptor describes the format of a request parameter, which may -// be a header, path parameter or query parameter. -type ParameterDescriptor struct { - // Name is the name of the parameter, either of the path component or - // query parameter. 
- Name string - - // Type specifies the type of the parameter, such as string, integer, etc. - Type string - - // Description provides a human-readable description of the parameter. - Description string - - // Required means the field is required when set. - Required bool - - // Format is a specifying the string format accepted by this parameter. - Format string - - // Regexp is a compiled regular expression that can be used to validate - // the contents of the parameter. - Regexp *regexp.Regexp - - // Examples provides multiple examples for the values that might be valid - // for this parameter. - Examples []string -} - -var routeDescriptors = []RouteDescriptor{ - { - Name: RouteNameBase, - Path: "/v2/", - Entity: "Base", - Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication.`, - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Check that the endpoint implements Docker Registry API V2.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - Successes: []ResponseDescriptor{ - { - Description: "The API implements V2 protocol and is accessible.", - StatusCode: http.StatusOK, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "The registry does not implement the V2 API.", - StatusCode: http.StatusNotFound, - }, - unauthorizedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameTags, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list", - Entity: "Tags", - Description: "Retrieve information about tags.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Fetch the tags under the repository identified by `name`.", - Requests: []RequestDescriptor{ - { - Name: "Tags", - Description: "Return all tags for the repository", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Description: "A list of tags for the named repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "name": , - "tags": [ - , - ... - ] -}`, - }, - }, - }, - Failures: []ResponseDescriptor{ - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Tags Paginated", - Description: "Return a portion of the tags for the specified repository.", - PathParameters: []ParameterDescriptor{nameParameterDescriptor}, - QueryParameters: paginationParameters, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Description: "A list of tags for the named repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - linkHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "name": , - "tags": [ - , - ... 
- ], -}`, - }, - }, - }, - Failures: []ResponseDescriptor{ - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameManifest, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}", - Entity: "Manifest", - Description: "Create, update, delete and retrieve manifests.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - digestHeader, - }, - Body: BodyDescriptor{ - ContentType: "", - Format: manifestBody, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "The name or reference was invalid.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "PUT", - Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Body: BodyDescriptor{ - ContentType: "", - Format: manifestBody, - }, - Successes: []ResponseDescriptor{ - { - Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Description: "The canonical location url of the uploaded manifest.", - Format: "", - }, - contentLengthZeroHeader, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Manifest", - Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.", - StatusCode: http.StatusBadRequest, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - ErrorCodeManifestInvalid, - ErrorCodeManifestUnverified, - ErrorCodeBlobUnknown, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - { - Name: "Missing Layer(s)", - Description: "One or more layers may be missing during a manifest upload. 
If so, the missing layers will be enumerated in the error response.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "errors:" [{ - "code": "BLOB_UNKNOWN", - "message": "blob unknown to registry", - "detail": { - "digest": "" - } - }, - ... - ] -}`, - }, - }, - { - Name: "Not allowed", - Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusAccepted, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Reference", - Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - { - Name: "Unknown Manifest", - Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeManifestUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Name: "Not allowed", - Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - }, - }, - }, - }, - }, - }, - - { - Name: RouteNameBlob, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", - Entity: "Blob", - Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", - Requests: []RequestDescriptor{ - { - Name: "Fetch Blob", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob identified by `digest` is available. 
The blob content will be present in the body of the request.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "The length of the requested blob content.", - Format: "", - }, - digestHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - }, - { - Description: "The blob identified by `digest` is available at the provided location.", - StatusCode: http.StatusTemporaryRedirect, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Description: "The location where the layer should be accessible.", - Format: "", - }, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeDigestInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", - StatusCode: http.StatusNotFound, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Fetch Blob Part", - Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Range", - Type: "string", - Description: "HTTP Range header specifying blob chunk.", - Format: "bytes=-", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request.", - StatusCode: http.StatusPartialContent, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "The length of the requested blob chunk.", - Format: "", - }, - { - Name: "Content-Range", - Type: "byte range", - Description: "Content range of blob chunk.", - Format: "bytes -/", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeDigestInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The range specification cannot be satisfied for the requested content. 
This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", - StatusCode: http.StatusRequestedRangeNotSatisfiable, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Delete the blob identified by `name` and `digest`", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusAccepted, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "0", - Format: "0", - }, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", - StatusCode: http.StatusNotFound, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - }, - { - Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled", - StatusCode: http.StatusMethodNotAllowed, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - - // TODO(stevvooe): We may want to add a PUT request here to - // kickoff an upload of a blob, integrated with the blob upload - // API. - }, - }, - - { - Name: RouteNameBlobUpload, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/", - Entity: "Initiate Blob Upload", - Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", - Methods: []MethodDescriptor{ - { - Method: "POST", - Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.", - Requests: []RequestDescriptor{ - { - Name: "Initiate Monolithic Blob Upload", - Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Length", - Type: "integer", - Format: "", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "digest", - Type: "query", - Format: "", - Regexp: digest.DigestRegexp, - Description: `Digest of uploaded blob. 
If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`, - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octect-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob has been created in the registry and is available at the provided location.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Name: "Not allowed", - Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Initiate Resumable Blob Upload", - Description: "Initiate a resumable blob upload with an empty request body.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.", - StatusCode: http.StatusAccepted, - Headers: []ParameterDescriptor{ - contentLengthZeroHeader, - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Format: "0-0", - Description: "Range header indicating the progress of the upload. 
When starting an upload, it will return an empty range, since no content has been received.", - }, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Mount Blob", - Description: "Mount a blob identified by the `mount` parameter from another repository.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "mount", - Type: "query", - Format: "", - Regexp: digest.DigestRegexp, - Description: `Digest of blob to mount from the source repository.`, - }, - { - Name: "from", - Type: "query", - Format: "", - Regexp: reference.NameRegexp, - Description: `Name of the source repository.`, - }, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob has been mounted in the repository and is available at the provided location.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Name: "Not allowed", - Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - - { - Name: RouteNameBlobUploadChunk, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", - Entity: "Blob Upload", - Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.", - Requests: []RequestDescriptor{ - { - Description: "Retrieve the progress of the current upload, as reported by the `Range` header.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Progress", - Description: "The upload is known and in progress. 
The last received offset is available in the `Range` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "PATCH", - Description: "Upload a chunk of data for the specified upload.", - Requests: []RequestDescriptor{ - { - Name: "Stream upload", - Description: "Upload a stream of data to upload without completing the upload.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Data Accepted", - Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Chunked upload", - Description: "Upload a chunk of data to specified upload without completing the upload. 
The data will be uploaded to the specified Content Range.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Range", - Type: "header", - Format: "-", - Required: true, - Description: "Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.", - }, - { - Name: "Content-Length", - Type: "integer", - Format: "", - Description: "Length of the chunk being uploaded, corresponding the length of the request body.", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Chunk Accepted", - Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.", - StatusCode: http.StatusRequestedRangeNotSatisfiable, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "PUT", - Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.", - Requests: []RequestDescriptor{ - { - Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Length", - Type: "integer", - Format: "", - Description: "Length of the data being uploaded, corresponding to the length of the request body. 
May be zero if no data is provided.", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "digest", - Type: "string", - Format: "", - Regexp: digest.DigestRegexp, - Required: true, - Description: `Digest of uploaded blob.`, - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Complete", - Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - Description: "The canonical location of the blob for retrieval", - }, - { - Name: "Content-Range", - Type: "header", - Format: "-", - Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.", - }, - contentLengthZeroHeader, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - errcode.ErrorCodeUnsupported, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout.", - Requests: []RequestDescriptor{ - { - Description: "Cancel the upload specified by `uuid`.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Deleted", - Description: "The upload has been successfully deleted.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - contentLengthZeroHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "An error was encountered processing the delete. The client may ignore this error.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. 
The client may ignore this error and assume the upload has been deleted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameCatalog, - Path: "/v2/_catalog", - Entity: "Catalog", - Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve a sorted, json list of repositories available in the registry.", - Requests: []RequestDescriptor{ - { - Name: "Catalog Fetch", - Description: "Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links.", - Successes: []ResponseDescriptor{ - { - Description: "Returns the unabridged list of repositories as a json response.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "repositories": [ - , - ... - ] -}`, - }, - }, - }, - }, - { - Name: "Catalog Fetch Paginated", - Description: "Return the specified portion of repositories.", - QueryParameters: paginationParameters, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "repositories": [ - , - ... - ] - "next": "?last=&n=" -}`, - }, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - linkHeader, - }, - }, - }, - }, - }, - }, - }, - }, -} - -var routeDescriptorsMap map[string]RouteDescriptor - -func init() { - routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) - - for _, descriptor := range routeDescriptors { - routeDescriptorsMap[descriptor.Name] = descriptor - } -} diff --git a/src/vendor/github.com/docker/distribution/registry/api/v2/doc.go b/src/vendor/github.com/docker/distribution/registry/api/v2/doc.go deleted file mode 100644 index cde011959..000000000 --- a/src/vendor/github.com/docker/distribution/registry/api/v2/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Package v2 describes routes, urls and the error codes used in the Docker -// Registry JSON HTTP API V2. In addition to declarations, descriptors are -// provided for routes and error codes that can be used for implementation and -// automatically generating documentation. -// -// Definitions here are considered to be locked down for the V2 registry api. -// Any changes must be considered carefully and should not proceed without a -// change proposal in docker core. 
-package v2 diff --git a/src/vendor/github.com/docker/distribution/registry/api/v2/errors.go b/src/vendor/github.com/docker/distribution/registry/api/v2/errors.go deleted file mode 100644 index 97d6923aa..000000000 --- a/src/vendor/github.com/docker/distribution/registry/api/v2/errors.go +++ /dev/null @@ -1,136 +0,0 @@ -package v2 - -import ( - "net/http" - - "github.com/docker/distribution/registry/api/errcode" -) - -const errGroup = "registry.api.v2" - -var ( - // ErrorCodeDigestInvalid is returned when uploading a blob if the - // provided digest does not match the blob contents. - ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "DIGEST_INVALID", - Message: "provided digest did not match uploaded content", - Description: `When a blob is uploaded, the registry will check that - the content matches the digest provided by the client. The error may - include a detail structure with the key "digest", including the - invalid digest string. This error may also be returned when a manifest - includes an invalid layer digest.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeSizeInvalid is returned when uploading a blob if the provided - ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "SIZE_INVALID", - Message: "provided length did not match content length", - Description: `When a layer is uploaded, the provided size will be - checked against the uploaded content. If they do not match, this error - will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeNameInvalid is returned when the name in the manifest does not - // match the provided name. - ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NAME_INVALID", - Message: "invalid repository name", - Description: `Invalid repository name encountered either during - manifest validation or any API operation.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeTagInvalid is returned when the tag in the manifest does not - // match the provided tag. - ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "TAG_INVALID", - Message: "manifest tag did not match URI", - Description: `During a manifest upload, if the tag in the manifest - does not match the uri tag, this error will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeNameUnknown when the repository name is not known. - ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NAME_UNKNOWN", - Message: "repository name not known to registry", - Description: `This is returned if the name used during an operation is - unknown to the registry.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeManifestUnknown returned when image manifest is unknown. - ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_UNKNOWN", - Message: "manifest unknown", - Description: `This error is returned when the manifest, identified by - name and tag is unknown to the repository.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeManifestInvalid returned when an image manifest is invalid, - // typically during a PUT operation. This error encompasses all errors - // encountered during manifest validation that aren't signature errors. 
- ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_INVALID", - Message: "manifest invalid", - Description: `During upload, manifests undergo several checks ensuring - validity. If those checks fail, this error may be returned, unless a - more specific error is included. The detail will contain information - the failed validation.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeManifestUnverified is returned when the manifest fails - // signature verification. - ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_UNVERIFIED", - Message: "manifest failed signature verification", - Description: `During manifest upload, if the manifest fails signature - verification, this error will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeManifestBlobUnknown is returned when a manifest blob is - // unknown to the registry. - ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_BLOB_UNKNOWN", - Message: "blob unknown to registry", - Description: `This error may be returned when a manifest blob is - unknown to the registry.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeBlobUnknown is returned when a blob is unknown to the - // registry. This can happen when the manifest references a nonexistent - // layer or the result is not found by a blob fetch. - ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UNKNOWN", - Message: "blob unknown to registry", - Description: `This error may be returned when a blob is unknown to the - registry in a specified repository. This can be returned with a - standard get or if a manifest references an unknown layer during - upload.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. - ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UPLOAD_UNKNOWN", - Message: "blob upload unknown to registry", - Description: `If a blob upload has been cancelled or was never - started, this error code may be returned.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. - ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UPLOAD_INVALID", - Message: "blob upload invalid", - Description: `The blob upload encountered an error and can no - longer proceed.`, - HTTPStatusCode: http.StatusNotFound, - }) -) diff --git a/src/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go b/src/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go deleted file mode 100644 index 9bc41a3a6..000000000 --- a/src/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go +++ /dev/null @@ -1,161 +0,0 @@ -package v2 - -import ( - "fmt" - "regexp" - "strings" - "unicode" -) - -var ( - // according to rfc7230 - reToken = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`) - reQuotedValue = regexp.MustCompile(`^[^\\"]+`) - reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`) -) - -// parseForwardedHeader is a benevolent parser of Forwarded header defined in rfc7239. The header contains -// a comma-separated list of forwarding key-value pairs. Each list element is set by single proxy. The -// function parses only the first element of the list, which is set by the very first proxy. 
It returns a map -// of corresponding key-value pairs and an unparsed slice of the input string. -// -// Examples of Forwarded header values: -// -// 1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown -// 2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80" -// -// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into -// {"for": "192.0.2.43:443", "host": "registry.example.org"}. -func parseForwardedHeader(forwarded string) (map[string]string, string, error) { - // Following are states of forwarded header parser. Any state could transition to a failure. - const ( - // terminating state; can transition to Parameter - stateElement = iota - // terminating state; can transition to KeyValueDelimiter - stateParameter - // can transition to Value - stateKeyValueDelimiter - // can transition to one of { QuotedValue, PairEnd } - stateValue - // can transition to one of { EscapedCharacter, PairEnd } - stateQuotedValue - // can transition to one of { QuotedValue } - stateEscapedCharacter - // terminating state; can transition to one of { Parameter, Element } - statePairEnd - ) - - var ( - parameter string - value string - parse = forwarded[:] - res = map[string]string{} - state = stateElement - ) - -Loop: - for { - // skip spaces unless in quoted value - if state != stateQuotedValue && state != stateEscapedCharacter { - parse = strings.TrimLeftFunc(parse, unicode.IsSpace) - } - - if len(parse) == 0 { - if state != stateElement && state != statePairEnd && state != stateParameter { - return nil, parse, fmt.Errorf("unexpected end of input") - } - // terminating - break - } - - switch state { - // terminate at list element delimiter - case stateElement: - if parse[0] == ',' { - parse = parse[1:] - break Loop - } - state = stateParameter - - // parse parameter (the key of key-value pair) - case stateParameter: - match := reToken.FindString(parse) - if len(match) == 0 { - return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse)) - } - parameter = strings.ToLower(match) - parse = parse[len(match):] - state = stateKeyValueDelimiter - - // parse '=' - case stateKeyValueDelimiter: - if parse[0] != '=' { - return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse)) - } - parse = parse[1:] - state = stateValue - - // parse value or quoted value - case stateValue: - if parse[0] == '"' { - parse = parse[1:] - state = stateQuotedValue - } else { - value = reToken.FindString(parse) - if len(value) == 0 { - return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse)) - } - if _, exists := res[parameter]; exists { - return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse)) - } - res[parameter] = value - parse = parse[len(value):] - value = "" - state = statePairEnd - } - - // parse a part of quoted value until the first backslash - case stateQuotedValue: - match := reQuotedValue.FindString(parse) - value += match - parse = parse[len(match):] - switch { - case len(parse) == 0: - return nil, parse, fmt.Errorf("unterminated quoted string") - case parse[0] == '"': - res[parameter] = value - value = "" - parse = parse[1:] - state = statePairEnd - case parse[0] == '\\': - parse = parse[1:] - state = stateEscapedCharacter - } - - // parse escaped character in a quoted string, ignore the backslash - // transition back to QuotedValue state - case 
stateEscapedCharacter: - c := reEscapedCharacter.FindString(parse) - if len(c) == 0 { - return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1) - } - value += c - parse = parse[1:] - state = stateQuotedValue - - // expect either a new key-value pair, new list or end of input - case statePairEnd: - switch parse[0] { - case ';': - parse = parse[1:] - state = stateParameter - case ',': - state = stateElement - default: - return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse)) - } - } - } - - return res, parse, nil -} diff --git a/src/vendor/github.com/docker/distribution/registry/api/v2/routes.go b/src/vendor/github.com/docker/distribution/registry/api/v2/routes.go deleted file mode 100644 index 9612ac2e5..000000000 --- a/src/vendor/github.com/docker/distribution/registry/api/v2/routes.go +++ /dev/null @@ -1,40 +0,0 @@ -package v2 - -import "github.com/gorilla/mux" - -// The following are definitions of the name under which all V2 routes are -// registered. These symbols can be used to look up a route based on the name. -const ( - RouteNameBase = "base" - RouteNameManifest = "manifest" - RouteNameTags = "tags" - RouteNameBlob = "blob" - RouteNameBlobUpload = "blob-upload" - RouteNameBlobUploadChunk = "blob-upload-chunk" - RouteNameCatalog = "catalog" -) - -// Router builds a gorilla router with named routes for the various API -// methods. This can be used directly by both server implementations and -// clients. -func Router() *mux.Router { - return RouterWithPrefix("") -} - -// RouterWithPrefix builds a gorilla router with a configured prefix -// on all routes. -func RouterWithPrefix(prefix string) *mux.Router { - rootRouter := mux.NewRouter() - router := rootRouter - if prefix != "" { - router = router.PathPrefix(prefix).Subrouter() - } - - router.StrictSlash(true) - - for _, descriptor := range routeDescriptors { - router.Path(descriptor.Path).Name(descriptor.Name) - } - - return rootRouter -} diff --git a/src/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/src/vendor/github.com/docker/distribution/registry/api/v2/urls.go deleted file mode 100644 index 3c3ec9893..000000000 --- a/src/vendor/github.com/docker/distribution/registry/api/v2/urls.go +++ /dev/null @@ -1,254 +0,0 @@ -package v2 - -import ( - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/docker/distribution/reference" - "github.com/gorilla/mux" -) - -// URLBuilder creates registry API urls from a single base endpoint. It can be -// used to create urls for use in a registry client or server. -// -// All urls will be created from the given base, including the api version. -// For example, if a root of "/foo/" is provided, urls generated will be fall -// under "/foo/v2/...". Most application will only provide a schema, host and -// port, such as "https://localhost:5000/". -type URLBuilder struct { - root *url.URL // url root (ie http://localhost/) - router *mux.Router - relative bool -} - -// NewURLBuilder creates a URLBuilder with provided root url object. -func NewURLBuilder(root *url.URL, relative bool) *URLBuilder { - return &URLBuilder{ - root: root, - router: Router(), - relative: relative, - } -} - -// NewURLBuilderFromString workes identically to NewURLBuilder except it takes -// a string argument for the root, returning an error if it is not a valid -// url. 
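For context on the Forwarded-header parser removed above, a minimal, hypothetical in-package test sketch; the input string and the expected key-value pairs are taken directly from the doc comment on parseForwardedHeader:

package v2

import "testing"

// Hypothetical sketch: exercises the first documented example of the
// Forwarded header. Only the first list element (set by the first proxy)
// is parsed, and parameter names are lower-cased.
func TestParseForwardedHeaderExample(t *testing.T) {
	params, _, err := parseForwardedHeader(`For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown`)
	if err != nil {
		t.Fatal(err)
	}
	if params["for"] != "192.0.2.43" || params["proto"] != "https" {
		t.Fatalf("unexpected params: %v", params)
	}
}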
-func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) { - u, err := url.Parse(root) - if err != nil { - return nil, err - } - - return NewURLBuilder(u, relative), nil -} - -// NewURLBuilderFromRequest uses information from an *http.Request to -// construct the root url. -func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder { - var ( - scheme = "http" - host = r.Host - ) - - if r.TLS != nil { - scheme = "https" - } else if len(r.URL.Scheme) > 0 { - scheme = r.URL.Scheme - } - - // Handle fowarded headers - // Prefer "Forwarded" header as defined by rfc7239 if given - // see https://tools.ietf.org/html/rfc7239 - if forwarded := r.Header.Get("Forwarded"); len(forwarded) > 0 { - forwardedHeader, _, err := parseForwardedHeader(forwarded) - if err == nil { - if fproto := forwardedHeader["proto"]; len(fproto) > 0 { - scheme = fproto - } - if fhost := forwardedHeader["host"]; len(fhost) > 0 { - host = fhost - } - } - } else { - if forwardedProto := r.Header.Get("X-Forwarded-Proto"); len(forwardedProto) > 0 { - scheme = forwardedProto - } - if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 { - // According to the Apache mod_proxy docs, X-Forwarded-Host can be a - // comma-separated list of hosts, to which each proxy appends the - // requested host. We want to grab the first from this comma-separated - // list. - hosts := strings.SplitN(forwardedHost, ",", 2) - host = strings.TrimSpace(hosts[0]) - } - } - - basePath := routeDescriptorsMap[RouteNameBase].Path - - requestPath := r.URL.Path - index := strings.Index(requestPath, basePath) - - u := &url.URL{ - Scheme: scheme, - Host: host, - } - - if index > 0 { - // N.B. index+1 is important because we want to include the trailing / - u.Path = requestPath[0 : index+1] - } - - return NewURLBuilder(u, relative) -} - -// BuildBaseURL constructs a base url for the API, typically just "/v2/". -func (ub *URLBuilder) BuildBaseURL() (string, error) { - route := ub.cloneRoute(RouteNameBase) - - baseURL, err := route.URL() - if err != nil { - return "", err - } - - return baseURL.String(), nil -} - -// BuildCatalogURL constructs a url get a catalog of repositories -func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameCatalog) - - catalogURL, err := route.URL() - if err != nil { - return "", err - } - - return appendValuesURL(catalogURL, values...).String(), nil -} - -// BuildTagsURL constructs a url to list the tags in the named repository. -func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) { - route := ub.cloneRoute(RouteNameTags) - - tagsURL, err := route.URL("name", name.Name()) - if err != nil { - return "", err - } - - return tagsURL.String(), nil -} - -// BuildManifestURL constructs a url for the manifest identified by name and -// reference. The argument reference may be either a tag or digest. -func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) { - route := ub.cloneRoute(RouteNameManifest) - - tagOrDigest := "" - switch v := ref.(type) { - case reference.Tagged: - tagOrDigest = v.Tag() - case reference.Digested: - tagOrDigest = v.Digest().String() - default: - return "", fmt.Errorf("reference must have a tag or digest") - } - - manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest) - if err != nil { - return "", err - } - - return manifestURL.String(), nil -} - -// BuildBlobURL constructs the url for the blob identified by name and dgst. 
-func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) { - route := ub.cloneRoute(RouteNameBlob) - - layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String()) - if err != nil { - return "", err - } - - return layerURL.String(), nil -} - -// BuildBlobUploadURL constructs a url to begin a blob upload in the -// repository identified by name. -func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameBlobUpload) - - uploadURL, err := route.URL("name", name.Name()) - if err != nil { - return "", err - } - - return appendValuesURL(uploadURL, values...).String(), nil -} - -// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid, -// including any url values. This should generally not be used by clients, as -// this url is provided by server implementations during the blob upload -// process. -func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameBlobUploadChunk) - - uploadURL, err := route.URL("name", name.Name(), "uuid", uuid) - if err != nil { - return "", err - } - - return appendValuesURL(uploadURL, values...).String(), nil -} - -// clondedRoute returns a clone of the named route from the router. Routes -// must be cloned to avoid modifying them during url generation. -func (ub *URLBuilder) cloneRoute(name string) clonedRoute { - route := new(mux.Route) - root := new(url.URL) - - *route = *ub.router.GetRoute(name) // clone the route - *root = *ub.root - - return clonedRoute{Route: route, root: root, relative: ub.relative} -} - -type clonedRoute struct { - *mux.Route - root *url.URL - relative bool -} - -func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { - routeURL, err := cr.Route.URL(pairs...) - if err != nil { - return nil, err - } - - if cr.relative { - return routeURL, nil - } - - if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" { - routeURL.Path = routeURL.Path[1:] - } - - url := cr.root.ResolveReference(routeURL) - url.Scheme = cr.root.Scheme - return url, nil -} - -// appendValuesURL appends the parameters to the url. -func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { - merged := u.Query() - - for _, v := range values { - for k, vv := range v { - merged[k] = append(merged[k], vv...) - } - } - - u.RawQuery = merged.Encode() - return u -} diff --git a/src/vendor/github.com/docker/distribution/registry/client/auth/api_version.go b/src/vendor/github.com/docker/distribution/registry/client/auth/api_version.go deleted file mode 100644 index 7d8f1d957..000000000 --- a/src/vendor/github.com/docker/distribution/registry/client/auth/api_version.go +++ /dev/null @@ -1,58 +0,0 @@ -package auth - -import ( - "net/http" - "strings" -) - -// APIVersion represents a version of an API including its -// type and version number. -type APIVersion struct { - // Type refers to the name of a specific API specification - // such as "registry" - Type string - - // Version is the version of the API specification implemented, - // This may omit the revision number and only include - // the major and minor version, such as "2.0" - Version string -} - -// String returns the string formatted API Version -func (v APIVersion) String() string { - return v.Type + "/" + v.Version -} - -// APIVersions gets the API versions out of an HTTP response using the provided -// version header as the key for the HTTP header. 
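As a usage sketch for the URLBuilder removed above: the registry host and repository name are invented, and reference.WithName is assumed from the vendored reference package.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
	v2 "github.com/docker/distribution/registry/api/v2"
)

func main() {
	// Build API URLs against a hypothetical registry root; the builder
	// adds the /v2/ prefix itself.
	ub, err := v2.NewURLBuilderFromString("https://registry.example.com/", false)
	if err != nil {
		panic(err)
	}

	named, err := reference.WithName("library/alpine")
	if err != nil {
		panic(err)
	}
	tagged, err := reference.WithTag(named, "3.17")
	if err != nil {
		panic(err)
	}

	// Roughly https://registry.example.com/v2/library/alpine/manifests/3.17
	manifestURL, err := ub.BuildManifestURL(tagged)
	if err != nil {
		panic(err)
	}
	fmt.Println(manifestURL)
}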
-func APIVersions(resp *http.Response, versionHeader string) []APIVersion { - versions := []APIVersion{} - if versionHeader != "" { - for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] { - for _, version := range strings.Fields(supportedVersions) { - versions = append(versions, ParseAPIVersion(version)) - } - } - } - return versions -} - -// ParseAPIVersion parses an API version string into an APIVersion -// Format (Expected, not enforced): -// API version string = '/' -// API type = [a-z][a-z0-9]* -// API version = [0-9]+(\.[0-9]+)? -// TODO(dmcgowan): Enforce format, add error condition, remove unknown type -func ParseAPIVersion(versionStr string) APIVersion { - idx := strings.IndexRune(versionStr, '/') - if idx == -1 { - return APIVersion{ - Type: "unknown", - Version: versionStr, - } - } - return APIVersion{ - Type: strings.ToLower(versionStr[:idx]), - Version: versionStr[idx+1:], - } -} diff --git a/src/vendor/github.com/docker/distribution/registry/client/auth/session.go b/src/vendor/github.com/docker/distribution/registry/client/auth/session.go deleted file mode 100644 index aad8a0e6f..000000000 --- a/src/vendor/github.com/docker/distribution/registry/client/auth/session.go +++ /dev/null @@ -1,530 +0,0 @@ -package auth - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "strings" - "sync" - "time" - - "github.com/docker/distribution/registry/client" - "github.com/docker/distribution/registry/client/auth/challenge" - "github.com/docker/distribution/registry/client/transport" -) - -var ( - // ErrNoBasicAuthCredentials is returned if a request can't be authorized with - // basic auth due to lack of credentials. - ErrNoBasicAuthCredentials = errors.New("no basic auth credentials") - - // ErrNoToken is returned if a request is successful but the body does not - // contain an authorization token. - ErrNoToken = errors.New("authorization server did not include a token in the response") -) - -const defaultClientID = "registry-client" - -// AuthenticationHandler is an interface for authorizing a request from -// params from a "WWW-Authenicate" header for a single scheme. -type AuthenticationHandler interface { - // Scheme returns the scheme as expected from the "WWW-Authenicate" header. - Scheme() string - - // AuthorizeRequest adds the authorization header to a request (if needed) - // using the parameters from "WWW-Authenticate" method. The parameters - // values depend on the scheme. - AuthorizeRequest(req *http.Request, params map[string]string) error -} - -// CredentialStore is an interface for getting credentials for -// a given URL -type CredentialStore interface { - // Basic returns basic auth for the given URL - Basic(*url.URL) (string, string) - - // RefreshToken returns a refresh token for the - // given URL and service - RefreshToken(*url.URL, string) string - - // SetRefreshToken sets the refresh token if none - // is provided for the given url and service - SetRefreshToken(realm *url.URL, service, token string) -} - -// NewAuthorizer creates an authorizer which can handle multiple authentication -// schemes. The handlers are tried in order, the higher priority authentication -// methods should be first. The challengeMap holds a list of challenges for -// a given root API endpoint (for example "https://registry-1.docker.io/v2/"). 
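A small sketch of how the version helpers above are typically used; the Docker-Distribution-API-Version header name is the conventional one for the distribution registry and is assumed here.

package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/client/auth"
)

func main() {
	// Simulate a registry response advertising the v2 API.
	resp := &http.Response{Header: http.Header{}}
	resp.Header.Add("Docker-Distribution-API-Version", "registry/2.0")

	for _, v := range auth.APIVersions(resp, "Docker-Distribution-API-Version") {
		fmt.Println(v) // prints "registry/2.0" via APIVersion.String
	}
}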
-func NewAuthorizer(manager challenge.Manager, handlers ...AuthenticationHandler) transport.RequestModifier { - return &endpointAuthorizer{ - challenges: manager, - handlers: handlers, - } -} - -type endpointAuthorizer struct { - challenges challenge.Manager - handlers []AuthenticationHandler -} - -func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { - pingPath := req.URL.Path - if v2Root := strings.Index(req.URL.Path, "/v2/"); v2Root != -1 { - pingPath = pingPath[:v2Root+4] - } else if v1Root := strings.Index(req.URL.Path, "/v1/"); v1Root != -1 { - pingPath = pingPath[:v1Root] + "/v2/" - } else { - return nil - } - - ping := url.URL{ - Host: req.URL.Host, - Scheme: req.URL.Scheme, - Path: pingPath, - } - - challenges, err := ea.challenges.GetChallenges(ping) - if err != nil { - return err - } - - if len(challenges) > 0 { - for _, handler := range ea.handlers { - for _, c := range challenges { - if c.Scheme != handler.Scheme() { - continue - } - if err := handler.AuthorizeRequest(req, c.Parameters); err != nil { - return err - } - } - } - } - - return nil -} - -// This is the minimum duration a token can last (in seconds). -// A token must not live less than 60 seconds because older versions -// of the Docker client didn't read their expiration from the token -// response and assumed 60 seconds. So to remain compatible with -// those implementations, a token must live at least this long. -const minimumTokenLifetimeSeconds = 60 - -// Private interface for time used by this package to enable tests to provide their own implementation. -type clock interface { - Now() time.Time -} - -type tokenHandler struct { - creds CredentialStore - transport http.RoundTripper - clock clock - - offlineAccess bool - forceOAuth bool - clientID string - scopes []Scope - - tokenLock sync.Mutex - tokenCache string - tokenExpiration time.Time - - logger Logger -} - -// Scope is a type which is serializable to a string -// using the allow scope grammar. -type Scope interface { - String() string -} - -// RepositoryScope represents a token scope for access -// to a repository. -type RepositoryScope struct { - Repository string - Class string - Actions []string -} - -// String returns the string representation of the repository -// using the scope grammar -func (rs RepositoryScope) String() string { - repoType := "repository" - // Keep existing format for image class to maintain backwards compatibility - // with authorization servers which do not support the expanded grammar. - if rs.Class != "" && rs.Class != "image" { - repoType = fmt.Sprintf("%s(%s)", repoType, rs.Class) - } - return fmt.Sprintf("%s:%s:%s", repoType, rs.Repository, strings.Join(rs.Actions, ",")) -} - -// RegistryScope represents a token scope for access -// to resources in the registry. -type RegistryScope struct { - Name string - Actions []string -} - -// String returns the string representation of the user -// using the scope grammar -func (rs RegistryScope) String() string { - return fmt.Sprintf("registry:%s:%s", rs.Name, strings.Join(rs.Actions, ",")) -} - -// Logger defines the injectable logging interface, used on TokenHandlers. -type Logger interface { - Debugf(format string, args ...interface{}) -} - -func logDebugf(logger Logger, format string, args ...interface{}) { - if logger == nil { - return - } - logger.Debugf(format, args...) 
-} - -// TokenHandlerOptions is used to configure a new token handler -type TokenHandlerOptions struct { - Transport http.RoundTripper - Credentials CredentialStore - - OfflineAccess bool - ForceOAuth bool - ClientID string - Scopes []Scope - Logger Logger -} - -// An implementation of clock for providing real time data. -type realClock struct{} - -// Now implements clock -func (realClock) Now() time.Time { return time.Now() } - -// NewTokenHandler creates a new AuthenicationHandler which supports -// fetching tokens from a remote token server. -func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler { - // Create options... - return NewTokenHandlerWithOptions(TokenHandlerOptions{ - Transport: transport, - Credentials: creds, - Scopes: []Scope{ - RepositoryScope{ - Repository: scope, - Actions: actions, - }, - }, - }) -} - -// NewTokenHandlerWithOptions creates a new token handler using the provided -// options structure. -func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler { - handler := &tokenHandler{ - transport: options.Transport, - creds: options.Credentials, - offlineAccess: options.OfflineAccess, - forceOAuth: options.ForceOAuth, - clientID: options.ClientID, - scopes: options.Scopes, - clock: realClock{}, - logger: options.Logger, - } - - return handler -} - -func (th *tokenHandler) client() *http.Client { - return &http.Client{ - Transport: th.transport, - Timeout: 15 * time.Second, - } -} - -func (th *tokenHandler) Scheme() string { - return "bearer" -} - -func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { - var additionalScopes []string - if fromParam := req.URL.Query().Get("from"); fromParam != "" { - additionalScopes = append(additionalScopes, RepositoryScope{ - Repository: fromParam, - Actions: []string{"pull"}, - }.String()) - } - - token, err := th.getToken(params, additionalScopes...) 
- if err != nil { - return err - } - - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) - - return nil -} - -func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) { - th.tokenLock.Lock() - defer th.tokenLock.Unlock() - scopes := make([]string, 0, len(th.scopes)+len(additionalScopes)) - for _, scope := range th.scopes { - scopes = append(scopes, scope.String()) - } - var addedScopes bool - for _, scope := range additionalScopes { - if hasScope(scopes, scope) { - continue - } - scopes = append(scopes, scope) - addedScopes = true - } - - now := th.clock.Now() - if now.After(th.tokenExpiration) || addedScopes { - token, expiration, err := th.fetchToken(params, scopes) - if err != nil { - return "", err - } - - // do not update cache for added scope tokens - if !addedScopes { - th.tokenCache = token - th.tokenExpiration = expiration - } - - return token, nil - } - - return th.tokenCache, nil -} - -func hasScope(scopes []string, scope string) bool { - for _, s := range scopes { - if s == scope { - return true - } - } - return false -} - -type postTokenResponse struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - Scope string `json:"scope"` -} - -func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) { - form := url.Values{} - form.Set("scope", strings.Join(scopes, " ")) - form.Set("service", service) - - clientID := th.clientID - if clientID == "" { - // Use default client, this is a required field - clientID = defaultClientID - } - form.Set("client_id", clientID) - - if refreshToken != "" { - form.Set("grant_type", "refresh_token") - form.Set("refresh_token", refreshToken) - } else if th.creds != nil { - form.Set("grant_type", "password") - username, password := th.creds.Basic(realm) - form.Set("username", username) - form.Set("password", password) - - // attempt to get a refresh token - form.Set("access_type", "offline") - } else { - // refuse to do oauth without a grant type - return "", time.Time{}, fmt.Errorf("no supported grant type") - } - - resp, err := th.client().PostForm(realm.String(), form) - if err != nil { - return "", time.Time{}, err - } - defer resp.Body.Close() - - if !client.SuccessStatus(resp.StatusCode) { - err := client.HandleErrorResponse(resp) - return "", time.Time{}, err - } - - decoder := json.NewDecoder(resp.Body) - - var tr postTokenResponse - if err = decoder.Decode(&tr); err != nil { - return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) - } - - if tr.RefreshToken != "" && tr.RefreshToken != refreshToken { - th.creds.SetRefreshToken(realm, service, tr.RefreshToken) - } - - if tr.ExpiresIn < minimumTokenLifetimeSeconds { - // The default/minimum lifetime. - tr.ExpiresIn = minimumTokenLifetimeSeconds - logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn) - } - - if tr.IssuedAt.IsZero() { - // issued_at is optional in the token response. 
- tr.IssuedAt = th.clock.Now().UTC() - } - - return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil -} - -type getTokenResponse struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - RefreshToken string `json:"refresh_token"` -} - -func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) { - - req, err := http.NewRequest("GET", realm.String(), nil) - if err != nil { - return "", time.Time{}, err - } - - reqParams := req.URL.Query() - - if service != "" { - reqParams.Add("service", service) - } - - for _, scope := range scopes { - reqParams.Add("scope", scope) - } - - if th.offlineAccess { - reqParams.Add("offline_token", "true") - clientID := th.clientID - if clientID == "" { - clientID = defaultClientID - } - reqParams.Add("client_id", clientID) - } - - if th.creds != nil { - username, password := th.creds.Basic(realm) - if username != "" && password != "" { - reqParams.Add("account", username) - req.SetBasicAuth(username, password) - } - } - - req.URL.RawQuery = reqParams.Encode() - - resp, err := th.client().Do(req) - if err != nil { - return "", time.Time{}, err - } - defer resp.Body.Close() - - if !client.SuccessStatus(resp.StatusCode) { - err := client.HandleErrorResponse(resp) - return "", time.Time{}, err - } - - decoder := json.NewDecoder(resp.Body) - - var tr getTokenResponse - if err = decoder.Decode(&tr); err != nil { - return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) - } - - if tr.RefreshToken != "" && th.creds != nil { - th.creds.SetRefreshToken(realm, service, tr.RefreshToken) - } - - // `access_token` is equivalent to `token` and if both are specified - // the choice is undefined. Canonicalize `access_token` by sticking - // things in `token`. - if tr.AccessToken != "" { - tr.Token = tr.AccessToken - } - - if tr.Token == "" { - return "", time.Time{}, ErrNoToken - } - - if tr.ExpiresIn < minimumTokenLifetimeSeconds { - // The default/minimum lifetime. - tr.ExpiresIn = minimumTokenLifetimeSeconds - logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn) - } - - if tr.IssuedAt.IsZero() { - // issued_at is optional in the token response. - tr.IssuedAt = th.clock.Now().UTC() - } - - return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil -} - -func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) { - realm, ok := params["realm"] - if !ok { - return "", time.Time{}, errors.New("no realm specified for token auth challenge") - } - - // TODO(dmcgowan): Handle empty scheme and relative realm - realmURL, err := url.Parse(realm) - if err != nil { - return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err) - } - - service := params["service"] - - var refreshToken string - - if th.creds != nil { - refreshToken = th.creds.RefreshToken(realmURL, service) - } - - if refreshToken != "" || th.forceOAuth { - return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes) - } - - return th.fetchTokenWithBasicAuth(realmURL, service, scopes) -} - -type basicHandler struct { - creds CredentialStore -} - -// NewBasicHandler creaters a new authentiation handler which adds -// basic authentication credentials to a request. 
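To show how the pieces of this auth package fit together, a hedged composition sketch follows. The CredentialStore implementation, repository scope, and credentials are invented for illustration, and challenge.NewSimpleManager / transport.NewTransport are assumed from the same vendored module (their definitions are not part of this diff).

package main

import (
	"net/http"
	"net/url"

	"github.com/docker/distribution/registry/client/auth"
	"github.com/docker/distribution/registry/client/auth/challenge"
	"github.com/docker/distribution/registry/client/transport"
)

// staticCreds is a minimal CredentialStore: a fixed basic-auth pair, with
// refresh tokens kept in memory per service.
type staticCreds struct {
	username, password string
	refresh            map[string]string
}

func (c *staticCreds) Basic(*url.URL) (string, string)                { return c.username, c.password }
func (c *staticCreds) RefreshToken(_ *url.URL, service string) string { return c.refresh[service] }
func (c *staticCreds) SetRefreshToken(_ *url.URL, service, token string) {
	c.refresh[service] = token
}

func main() {
	creds := &staticCreds{username: "user", password: "secret", refresh: map[string]string{}}

	// challenge.NewSimpleManager and transport.NewTransport are assumed from
	// the same vendored module (signatures not shown in this diff).
	cm := challenge.NewSimpleManager()
	authorizer := auth.NewAuthorizer(cm,
		auth.NewTokenHandler(http.DefaultTransport, creds, "library/alpine", "pull"),
		auth.NewBasicHandler(creds),
	)

	// Requests sent through this client get a bearer token or basic auth
	// attached, depending on the challenge returned by the registry.
	httpClient := &http.Client{Transport: transport.NewTransport(http.DefaultTransport, authorizer)}
	_ = httpClient
}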
-func NewBasicHandler(creds CredentialStore) AuthenticationHandler { - return &basicHandler{ - creds: creds, - } -} - -func (*basicHandler) Scheme() string { - return "basic" -} - -func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { - if bh.creds != nil { - username, password := bh.creds.Basic(req.URL) - if username != "" && password != "" { - req.SetBasicAuth(username, password) - return nil - } - } - return ErrNoBasicAuthCredentials -} diff --git a/src/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/src/vendor/github.com/docker/distribution/registry/client/blob_writer.go deleted file mode 100644 index 695bf852f..000000000 --- a/src/vendor/github.com/docker/distribution/registry/client/blob_writer.go +++ /dev/null @@ -1,162 +0,0 @@ -package client - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "net/http" - "time" - - "github.com/docker/distribution" -) - -type httpBlobUpload struct { - statter distribution.BlobStatter - client *http.Client - - uuid string - startedAt time.Time - - location string // always the last value of the location header. - offset int64 - closed bool -} - -func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) { - panic("Not implemented") -} - -func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { - if resp.StatusCode == http.StatusNotFound { - return distribution.ErrBlobUploadUnknown - } - return HandleErrorResponse(resp) -} - -func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { - req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r)) - if err != nil { - return 0, err - } - defer req.Body.Close() - - resp, err := hbu.client.Do(req) - if err != nil { - return 0, err - } - - if !SuccessStatus(resp.StatusCode) { - return 0, hbu.handleErrorResponse(resp) - } - - hbu.uuid = resp.Header.Get("Docker-Upload-UUID") - hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) - if err != nil { - return 0, err - } - rng := resp.Header.Get("Range") - var start, end int64 - if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { - return 0, err - } else if n != 2 || end < start { - return 0, fmt.Errorf("bad range format: %s", rng) - } - - return (end - start + 1), nil - -} - -func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { - req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p)) - if err != nil { - return 0, err - } - req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1))) - req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) - req.Header.Set("Content-Type", "application/octet-stream") - - resp, err := hbu.client.Do(req) - if err != nil { - return 0, err - } - - if !SuccessStatus(resp.StatusCode) { - return 0, hbu.handleErrorResponse(resp) - } - - hbu.uuid = resp.Header.Get("Docker-Upload-UUID") - hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) - if err != nil { - return 0, err - } - rng := resp.Header.Get("Range") - var start, end int - if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { - return 0, err - } else if n != 2 || end < start { - return 0, fmt.Errorf("bad range format: %s", rng) - } - - return (end - start + 1), nil - -} - -func (hbu *httpBlobUpload) Size() int64 { - return hbu.offset -} - -func (hbu *httpBlobUpload) ID() string { - return hbu.uuid -} - -func (hbu *httpBlobUpload) StartedAt() time.Time { - return hbu.startedAt -} - -func (hbu *httpBlobUpload) Commit(ctx 
context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { - // TODO(dmcgowan): Check if already finished, if so just fetch - req, err := http.NewRequest("PUT", hbu.location, nil) - if err != nil { - return distribution.Descriptor{}, err - } - - values := req.URL.Query() - values.Set("digest", desc.Digest.String()) - req.URL.RawQuery = values.Encode() - - resp, err := hbu.client.Do(req) - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if !SuccessStatus(resp.StatusCode) { - return distribution.Descriptor{}, hbu.handleErrorResponse(resp) - } - - return hbu.statter.Stat(ctx, desc.Digest) -} - -func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { - req, err := http.NewRequest("DELETE", hbu.location, nil) - if err != nil { - return err - } - resp, err := hbu.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) { - return nil - } - return hbu.handleErrorResponse(resp) -} - -func (hbu *httpBlobUpload) Close() error { - hbu.closed = true - return nil -} diff --git a/src/vendor/github.com/docker/distribution/registry/client/errors.go b/src/vendor/github.com/docker/distribution/registry/client/errors.go deleted file mode 100644 index 52d49d5d2..000000000 --- a/src/vendor/github.com/docker/distribution/registry/client/errors.go +++ /dev/null @@ -1,139 +0,0 @@ -package client - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/client/auth/challenge" -) - -// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty -// errcode.Errors slice. -var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body") - -// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is -// returned when making a registry api call. -type UnexpectedHTTPStatusError struct { - Status string -} - -func (e *UnexpectedHTTPStatusError) Error() string { - return fmt.Sprintf("received unexpected HTTP status: %s", e.Status) -} - -// UnexpectedHTTPResponseError is returned when an expected HTTP status code -// is returned, but the content was unexpected and failed to be parsed. -type UnexpectedHTTPResponseError struct { - ParseErr error - StatusCode int - Response []byte -} - -func (e *UnexpectedHTTPResponseError) Error() string { - return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response)) -} - -func parseHTTPErrorResponse(statusCode int, r io.Reader) error { - var errors errcode.Errors - body, err := ioutil.ReadAll(r) - if err != nil { - return err - } - - // For backward compatibility, handle irregularly formatted - // messages that contain a "details" field. 
- var detailsErr struct { - Details string `json:"details"` - } - err = json.Unmarshal(body, &detailsErr) - if err == nil && detailsErr.Details != "" { - switch statusCode { - case http.StatusUnauthorized: - return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details) - case http.StatusTooManyRequests: - return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details) - default: - return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details) - } - } - - if err := json.Unmarshal(body, &errors); err != nil { - return &UnexpectedHTTPResponseError{ - ParseErr: err, - StatusCode: statusCode, - Response: body, - } - } - - if len(errors) == 0 { - // If there was no error specified in the body, return - // UnexpectedHTTPResponseError. - return &UnexpectedHTTPResponseError{ - ParseErr: ErrNoErrorsInBody, - StatusCode: statusCode, - Response: body, - } - } - - return errors -} - -func makeErrorList(err error) []error { - if errL, ok := err.(errcode.Errors); ok { - return []error(errL) - } - return []error{err} -} - -func mergeErrors(err1, err2 error) error { - return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...)) -} - -// HandleErrorResponse returns error parsed from HTTP response for an -// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An -// UnexpectedHTTPStatusError returned for response code outside of expected -// range. -func HandleErrorResponse(resp *http.Response) error { - if resp.StatusCode >= 400 && resp.StatusCode < 500 { - // Check for OAuth errors within the `WWW-Authenticate` header first - // See https://tools.ietf.org/html/rfc6750#section-3 - for _, c := range challenge.ResponseChallenges(resp) { - if c.Scheme == "bearer" { - var err errcode.Error - // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1 - switch c.Parameters["error"] { - case "invalid_token": - err.Code = errcode.ErrorCodeUnauthorized - case "insufficient_scope": - err.Code = errcode.ErrorCodeDenied - default: - continue - } - if description := c.Parameters["error_description"]; description != "" { - err.Message = description - } else { - err.Message = err.Code.Message() - } - - return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body)) - } - } - err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) - if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 { - return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) - } - return err - } - return &UnexpectedHTTPStatusError{Status: resp.Status} -} - -// SuccessStatus returns true if the argument is a successful HTTP response -// code (in the range 200 - 399 inclusive). 
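A short usage sketch of the two exported helpers above; the endpoint is hypothetical.

package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/client"
)

func main() {
	resp, err := http.Get("https://registry.example.com/v2/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	if !client.SuccessStatus(resp.StatusCode) {
		// Decodes an errcode.Errors body when present; otherwise returns an
		// UnexpectedHTTPStatusError or UnexpectedHTTPResponseError.
		fmt.Println(client.HandleErrorResponse(resp))
		return
	}
	fmt.Println("registry speaks the v2 API")
}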
-func SuccessStatus(status int) bool { - return status >= 200 && status <= 399 -} diff --git a/src/vendor/github.com/docker/distribution/registry/client/repository.go b/src/vendor/github.com/docker/distribution/registry/client/repository.go deleted file mode 100644 index 3e2ae66d3..000000000 --- a/src/vendor/github.com/docker/distribution/registry/client/repository.go +++ /dev/null @@ -1,872 +0,0 @@ -package client - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/reference" - v2 "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/distribution/registry/storage/cache" - "github.com/docker/distribution/registry/storage/cache/memory" - "github.com/opencontainers/go-digest" -) - -// Registry provides an interface for calling Repositories, which returns a catalog of repositories. -type Registry interface { - Repositories(ctx context.Context, repos []string, last string) (n int, err error) -} - -// checkHTTPRedirect is a callback that can manipulate redirected HTTP -// requests. It is used to preserve Accept and Range headers. -func checkHTTPRedirect(req *http.Request, via []*http.Request) error { - if len(via) >= 10 { - return errors.New("stopped after 10 redirects") - } - - if len(via) > 0 { - for headerName, headerVals := range via[0].Header { - if headerName != "Accept" && headerName != "Range" { - continue - } - for _, val := range headerVals { - // Don't add to redirected request if redirected - // request already has a header with the same - // name and value. - hasValue := false - for _, existingVal := range req.Header[headerName] { - if existingVal == val { - hasValue = true - break - } - } - if !hasValue { - req.Header.Add(headerName, val) - } - } - } - } - - return nil -} - -// NewRegistry creates a registry namespace which can be used to get a listing of repositories -func NewRegistry(baseURL string, transport http.RoundTripper) (Registry, error) { - ub, err := v2.NewURLBuilderFromString(baseURL, false) - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: transport, - Timeout: 1 * time.Minute, - CheckRedirect: checkHTTPRedirect, - } - - return ®istry{ - client: client, - ub: ub, - }, nil -} - -type registry struct { - client *http.Client - ub *v2.URLBuilder -} - -// Repositories returns a lexigraphically sorted catalog given a base URL. The 'entries' slice will be filled up to the size -// of the slice, starting at the value provided in 'last'. 
The number of entries will be returned along with io.EOF if there -// are no more entries -func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) { - var numFilled int - var returnErr error - - values := buildCatalogValues(len(entries), last) - u, err := r.ub.BuildCatalogURL(values) - if err != nil { - return 0, err - } - - resp, err := r.client.Get(u) - if err != nil { - return 0, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - var ctlg struct { - Repositories []string `json:"repositories"` - } - decoder := json.NewDecoder(resp.Body) - - if err := decoder.Decode(&ctlg); err != nil { - return 0, err - } - - for cnt := range ctlg.Repositories { - entries[cnt] = ctlg.Repositories[cnt] - } - numFilled = len(ctlg.Repositories) - - link := resp.Header.Get("Link") - if link == "" { - returnErr = io.EOF - } - } else { - return 0, HandleErrorResponse(resp) - } - - return numFilled, returnErr -} - -// NewRepository creates a new Repository for the given repository name and base URL. -func NewRepository(name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { - ub, err := v2.NewURLBuilderFromString(baseURL, false) - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: transport, - CheckRedirect: checkHTTPRedirect, - // TODO(dmcgowan): create cookie jar - } - - return &repository{ - client: client, - ub: ub, - name: name, - }, nil -} - -type repository struct { - client *http.Client - ub *v2.URLBuilder - name reference.Named -} - -func (r *repository) Named() reference.Named { - return r.name -} - -func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { - statter := &blobStatter{ - name: r.name, - ub: r.ub, - client: r.client, - } - return &blobs{ - name: r.name, - ub: r.ub, - client: r.client, - statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), - } -} - -func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { - // todo(richardscothern): options should be sent over the wire - return &manifests{ - name: r.name, - ub: r.ub, - client: r.client, - etags: make(map[string]string), - }, nil -} - -func (r *repository) Tags(ctx context.Context) distribution.TagService { - return &tags{ - client: r.client, - ub: r.ub, - name: r.Named(), - } -} - -// tags implements remote tagging operations. -type tags struct { - client *http.Client - ub *v2.URLBuilder - name reference.Named -} - -// All returns all tags -func (t *tags) All(ctx context.Context) ([]string, error) { - var tags []string - - listURLStr, err := t.ub.BuildTagsURL(t.name) - if err != nil { - return tags, err - } - - listURL, err := url.Parse(listURLStr) - if err != nil { - return tags, err - } - - for { - resp, err := t.client.Get(listURL.String()) - if err != nil { - return tags, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return tags, err - } - - tagsResponse := struct { - Tags []string `json:"tags"` - }{} - if err := json.Unmarshal(b, &tagsResponse); err != nil { - return tags, err - } - tags = append(tags, tagsResponse.Tags...) 
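The catalog walker above is typically driven in a loop until it reports io.EOF; a sketch against a hypothetical registry:

package main

import (
	"context"
	"fmt"
	"io"
	"net/http"

	"github.com/docker/distribution/registry/client"
)

func main() {
	ctx := context.Background()

	reg, err := client.NewRegistry("https://registry.example.com", http.DefaultTransport)
	if err != nil {
		panic(err)
	}

	// Page through the catalog 50 entries at a time; io.EOF marks the last page.
	entries := make([]string, 50)
	last := ""
	for {
		n, err := reg.Repositories(ctx, entries, last)
		for _, repo := range entries[:n] {
			fmt.Println(repo)
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		if n == 0 {
			break
		}
		last = entries[n-1]
	}
}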
- if link := resp.Header.Get("Link"); link != "" { - linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>") - linkURL, err := url.Parse(linkURLStr) - if err != nil { - return tags, err - } - - listURL = listURL.ResolveReference(linkURL) - } else { - return tags, nil - } - } else { - return tags, HandleErrorResponse(resp) - } - } -} - -func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { - desc := distribution.Descriptor{} - headers := response.Header - - ctHeader := headers.Get("Content-Type") - if ctHeader == "" { - return distribution.Descriptor{}, errors.New("missing or empty Content-Type header") - } - desc.MediaType = ctHeader - - digestHeader := headers.Get("Docker-Content-Digest") - if digestHeader == "" { - bytes, err := ioutil.ReadAll(response.Body) - if err != nil { - return distribution.Descriptor{}, err - } - _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes) - if err != nil { - return distribution.Descriptor{}, err - } - return desc, nil - } - - dgst, err := digest.Parse(digestHeader) - if err != nil { - return distribution.Descriptor{}, err - } - desc.Digest = dgst - - lengthHeader := headers.Get("Content-Length") - if lengthHeader == "" { - return distribution.Descriptor{}, errors.New("missing or empty Content-Length header") - } - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return distribution.Descriptor{}, err - } - desc.Size = length - - return desc, nil - -} - -// Get issues a HEAD request for a Manifest against its named endpoint in order -// to construct a descriptor for the tag. If the registry doesn't support HEADing -// a manifest, fallback to GET. -func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { - ref, err := reference.WithTag(t.name, tag) - if err != nil { - return distribution.Descriptor{}, err - } - u, err := t.ub.BuildManifestURL(ref) - if err != nil { - return distribution.Descriptor{}, err - } - - newRequest := func(method string) (*http.Response, error) { - req, err := http.NewRequest(method, u, nil) - if err != nil { - return nil, err - } - - for _, t := range distribution.ManifestMediaTypes() { - req.Header.Add("Accept", t) - } - resp, err := t.client.Do(req) - return resp, err - } - - resp, err := newRequest("HEAD") - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - switch { - case resp.StatusCode >= 200 && resp.StatusCode < 400 && len(resp.Header.Get("Docker-Content-Digest")) > 0: - // if the response is a success AND a Docker-Content-Digest can be retrieved from the headers - return descriptorFromResponse(resp) - default: - // if the response is an error - there will be no body to decode. 
- // Issue a GET request: - // - for data from a server that does not handle HEAD - // - to get error details in case of a failure - resp, err = newRequest("GET") - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if resp.StatusCode >= 200 && resp.StatusCode < 400 { - return descriptorFromResponse(resp) - } - return distribution.Descriptor{}, HandleErrorResponse(resp) - } -} - -func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { - panic("not implemented") -} - -func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { - panic("not implemented") -} - -func (t *tags) Untag(ctx context.Context, tag string) error { - panic("not implemented") -} - -type manifests struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client - etags map[string]string -} - -func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { - ref, err := reference.WithDigest(ms.name, dgst) - if err != nil { - return false, err - } - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return false, err - } - - resp, err := ms.client.Head(u) - if err != nil { - return false, err - } - - if SuccessStatus(resp.StatusCode) { - return true, nil - } else if resp.StatusCode == http.StatusNotFound { - return false, nil - } - return false, HandleErrorResponse(resp) -} - -// AddEtagToTag allows a client to supply an eTag to Get which will be -// used for a conditional HTTP request. If the eTag matches, a nil manifest -// and ErrManifestNotModified error will be returned. etag is automatically -// quoted when added to this map. -func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption { - return etagOption{tag, etag} -} - -type etagOption struct{ tag, etag string } - -func (o etagOption) Apply(ms distribution.ManifestService) error { - if ms, ok := ms.(*manifests); ok { - ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag) - return nil - } - return fmt.Errorf("etag options is a client-only option") -} - -// ReturnContentDigest allows a client to set a the content digest on -// a successful request from the 'Docker-Content-Digest' header. This -// returned digest is represents the digest which the registry uses -// to refer to the content and can be used to delete the content. 
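For the blob side of this client (the blobs service defined further down in this file), a minimal push sketch; the repository name, endpoint, and payload are invented.

package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client"
)

func main() {
	ctx := context.Background()

	named, err := reference.WithName("library/demo")
	if err != nil {
		panic(err)
	}
	repo, err := client.NewRepository(named, "https://registry.example.com", http.DefaultTransport)
	if err != nil {
		panic(err)
	}

	// Put opens an upload with Create, streams the bytes while digesting them
	// client-side, then commits the resulting descriptor.
	desc, err := repo.Blobs(ctx).Put(ctx, "application/octet-stream", []byte("example layer content"))
	if err != nil {
		panic(err)
	}
	fmt.Println(desc.Digest, desc.Size)
}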
-func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption { - return contentDigestOption{dgst} -} - -type contentDigestOption struct{ digest *digest.Digest } - -func (o contentDigestOption) Apply(ms distribution.ManifestService) error { - return nil -} - -func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { - var ( - digestOrTag string - ref reference.Named - err error - contentDgst *digest.Digest - mediaTypes []string - ) - - for _, option := range options { - switch opt := option.(type) { - case distribution.WithTagOption: - digestOrTag = opt.Tag - ref, err = reference.WithTag(ms.name, opt.Tag) - if err != nil { - return nil, err - } - case contentDigestOption: - contentDgst = opt.digest - case distribution.WithManifestMediaTypesOption: - mediaTypes = opt.MediaTypes - default: - err := option.Apply(ms) - if err != nil { - return nil, err - } - } - } - - if digestOrTag == "" { - digestOrTag = dgst.String() - ref, err = reference.WithDigest(ms.name, dgst) - if err != nil { - return nil, err - } - } - - if len(mediaTypes) == 0 { - mediaTypes = distribution.ManifestMediaTypes() - } - - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return nil, err - } - - for _, t := range mediaTypes { - req.Header.Add("Accept", t) - } - - if _, ok := ms.etags[digestOrTag]; ok { - req.Header.Set("If-None-Match", ms.etags[digestOrTag]) - } - - resp, err := ms.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode == http.StatusNotModified { - return nil, distribution.ErrManifestNotModified - } else if SuccessStatus(resp.StatusCode) { - if contentDgst != nil { - dgst, err := digest.Parse(resp.Header.Get("Docker-Content-Digest")) - if err == nil { - *contentDgst = dgst - } - } - mt := resp.Header.Get("Content-Type") - body, err := ioutil.ReadAll(resp.Body) - - if err != nil { - return nil, err - } - m, _, err := distribution.UnmarshalManifest(mt, body) - if err != nil { - return nil, err - } - return m, nil - } - return nil, HandleErrorResponse(resp) -} - -// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the -// tag name in order to build the correct upload URL. 
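Pulling a manifest with this client is a two-step flow: resolve the tag to a descriptor through the tag service above, then fetch by digest through the manifest service. A sketch with invented registry and image names:

package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client"
)

func main() {
	ctx := context.Background()

	named, err := reference.WithName("library/alpine")
	if err != nil {
		panic(err)
	}
	repo, err := client.NewRepository(named, "https://registry.example.com", http.DefaultTransport)
	if err != nil {
		panic(err)
	}

	// Resolve the tag to a descriptor (HEAD with GET fallback, as above) ...
	desc, err := repo.Tags(ctx).Get(ctx, "3.17")
	if err != nil {
		panic(err)
	}

	// ... then fetch the manifest by digest.
	ms, err := repo.Manifests(ctx)
	if err != nil {
		panic(err)
	}
	m, err := ms.Get(ctx, desc.Digest)
	if err != nil {
		panic(err)
	}
	mediaType, payload, _ := m.Payload()
	fmt.Println(desc.Digest, mediaType, len(payload))
}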
-func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { - ref := ms.name - var tagged bool - - for _, option := range options { - if opt, ok := option.(distribution.WithTagOption); ok { - var err error - ref, err = reference.WithTag(ref, opt.Tag) - if err != nil { - return "", err - } - tagged = true - } else { - err := option.Apply(ms) - if err != nil { - return "", err - } - } - } - mediaType, p, err := m.Payload() - if err != nil { - return "", err - } - - if !tagged { - // generate a canonical digest and Put by digest - _, d, err := distribution.UnmarshalManifest(mediaType, p) - if err != nil { - return "", err - } - ref, err = reference.WithDigest(ref, d.Digest) - if err != nil { - return "", err - } - } - - manifestURL, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return "", err - } - - putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p)) - if err != nil { - return "", err - } - - putRequest.Header.Set("Content-Type", mediaType) - - resp, err := ms.client.Do(putRequest) - if err != nil { - return "", err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - dgstHeader := resp.Header.Get("Docker-Content-Digest") - dgst, err := digest.Parse(dgstHeader) - if err != nil { - return "", err - } - - return dgst, nil - } - - return "", HandleErrorResponse(resp) -} - -func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { - ref, err := reference.WithDigest(ms.name, dgst) - if err != nil { - return err - } - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return err - } - req, err := http.NewRequest("DELETE", u, nil) - if err != nil { - return err - } - - resp, err := ms.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) -} - -// todo(richardscothern): Restore interface and implementation with merge of #1050 -/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { - panic("not supported") -}*/ - -type blobs struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client - - statter distribution.BlobDescriptorService - distribution.BlobDeleter -} - -func sanitizeLocation(location, base string) (string, error) { - baseURL, err := url.Parse(base) - if err != nil { - return "", err - } - - locationURL, err := url.Parse(location) - if err != nil { - return "", err - } - - return baseURL.ResolveReference(locationURL).String(), nil -} - -func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return bs.statter.Stat(ctx, dgst) - -} - -func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - reader, err := bs.Open(ctx, dgst) - if err != nil { - return nil, err - } - defer reader.Close() - - return ioutil.ReadAll(reader) -} - -func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return nil, err - } - blobURL, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return nil, err - } - - return transport.NewHTTPReadSeeker(bs.client, blobURL, - func(resp *http.Response) error { - if resp.StatusCode == http.StatusNotFound { - return distribution.ErrBlobUnknown - } - return HandleErrorResponse(resp) - }), nil -} - -func (bs *blobs) ServeBlob(ctx 
context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - panic("not implemented") -} - -func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - writer, err := bs.Create(ctx) - if err != nil { - return distribution.Descriptor{}, err - } - dgstr := digest.Canonical.Digester() - n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) - if err != nil { - return distribution.Descriptor{}, err - } - if n < int64(len(p)) { - return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) - } - - desc := distribution.Descriptor{ - MediaType: mediaType, - Size: int64(len(p)), - Digest: dgstr.Digest(), - } - - return writer.Commit(ctx, desc) -} - -type optionFunc func(interface{}) error - -func (f optionFunc) Apply(v interface{}) error { - return f(v) -} - -// WithMountFrom returns a BlobCreateOption which designates that the blob should be -// mounted from the given canonical reference. -func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { - return optionFunc(func(v interface{}) error { - opts, ok := v.(*distribution.CreateOptions) - if !ok { - return fmt.Errorf("unexpected options type: %T", v) - } - - opts.Mount.ShouldMount = true - opts.Mount.From = ref - - return nil - }) -} - -func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { - var opts distribution.CreateOptions - - for _, option := range options { - err := option.Apply(&opts) - if err != nil { - return nil, err - } - } - - var values []url.Values - - if opts.Mount.ShouldMount { - values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}}) - } - - u, err := bs.ub.BuildBlobUploadURL(bs.name, values...) 
- if err != nil { - return nil, err - } - - req, err := http.NewRequest("POST", u, nil) - if err != nil { - return nil, err - } - - resp, err := bs.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusCreated: - desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest()) - if err != nil { - return nil, err - } - return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} - case http.StatusAccepted: - // TODO(dmcgowan): Check for invalid UUID - uuid := resp.Header.Get("Docker-Upload-UUID") - location, err := sanitizeLocation(resp.Header.Get("Location"), u) - if err != nil { - return nil, err - } - - return &httpBlobUpload{ - statter: bs.statter, - client: bs.client, - uuid: uuid, - startedAt: time.Now(), - location: location, - }, nil - default: - return nil, HandleErrorResponse(resp) - } -} - -func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { - panic("not implemented") -} - -func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { - return bs.statter.Clear(ctx, dgst) -} - -type blobStatter struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client -} - -func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return distribution.Descriptor{}, err - } - u, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return distribution.Descriptor{}, err - } - - resp, err := bs.client.Head(u) - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - lengthHeader := resp.Header.Get("Content-Length") - if lengthHeader == "" { - return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u) - } - - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) - } - - return distribution.Descriptor{ - MediaType: resp.Header.Get("Content-Type"), - Size: length, - Digest: dgst, - }, nil - } else if resp.StatusCode == http.StatusNotFound { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - return distribution.Descriptor{}, HandleErrorResponse(resp) -} - -func buildCatalogValues(maxEntries int, last string) url.Values { - values := url.Values{} - - if maxEntries > 0 { - values.Add("n", strconv.Itoa(maxEntries)) - } - - if last != "" { - values.Add("last", last) - } - - return values -} - -func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return err - } - blobURL, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return err - } - - req, err := http.NewRequest("DELETE", blobURL, nil) - if err != nil { - return err - } - - resp, err := bs.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) -} - -func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - return nil -} diff --git a/src/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/src/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go deleted file mode 100644 index 42d94d9bd..000000000 --- 
a/src/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go +++ /dev/null @@ -1,179 +0,0 @@ -package memory - -import ( - "context" - "sync" - - "github.com/docker/distribution" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/cache" - "github.com/opencontainers/go-digest" -) - -type inMemoryBlobDescriptorCacheProvider struct { - global *mapBlobDescriptorCache - repositories map[string]*mapBlobDescriptorCache - mu sync.RWMutex -} - -// NewInMemoryBlobDescriptorCacheProvider returns a new mapped-based cache for -// storing blob descriptor data. -func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider { - return &inMemoryBlobDescriptorCacheProvider{ - global: newMapBlobDescriptorCache(), - repositories: make(map[string]*mapBlobDescriptorCache), - } -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { - if _, err := reference.ParseNormalizedNamed(repo); err != nil { - return nil, err - } - - imbdcp.mu.RLock() - defer imbdcp.mu.RUnlock() - - return &repositoryScopedInMemoryBlobDescriptorCache{ - repo: repo, - parent: imbdcp, - repository: imbdcp.repositories[repo], - }, nil -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return imbdcp.global.Stat(ctx, dgst) -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error { - return imbdcp.global.Clear(ctx, dgst) -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - _, err := imbdcp.Stat(ctx, dgst) - if err == distribution.ErrBlobUnknown { - - if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest { - // if the digests differ, set the other canonical mapping - if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil { - return err - } - } - - // unknown, just set it - return imbdcp.global.SetDescriptor(ctx, dgst, desc) - } - - // we already know it, do nothing - return err -} - -// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped -// repository cache. Instances are not thread-safe but the delegated -// operations are. -type repositoryScopedInMemoryBlobDescriptorCache struct { - repo string - parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map - repository *mapBlobDescriptorCache -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - rsimbdcp.parent.mu.Lock() - repo := rsimbdcp.repository - rsimbdcp.parent.mu.Unlock() - - if repo == nil { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - return repo.Stat(ctx, dgst) -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { - rsimbdcp.parent.mu.Lock() - repo := rsimbdcp.repository - rsimbdcp.parent.mu.Unlock() - - if repo == nil { - return distribution.ErrBlobUnknown - } - - return repo.Clear(ctx, dgst) -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - rsimbdcp.parent.mu.Lock() - repo := rsimbdcp.repository - if repo == nil { - // allocate map since we are setting it now. 
- var ok bool - // have to read back value since we may have allocated elsewhere. - repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] - if !ok { - repo = newMapBlobDescriptorCache() - rsimbdcp.parent.repositories[rsimbdcp.repo] = repo - } - rsimbdcp.repository = repo - } - rsimbdcp.parent.mu.Unlock() - - if err := repo.SetDescriptor(ctx, dgst, desc); err != nil { - return err - } - - return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc) -} - -// mapBlobDescriptorCache provides a simple map-based implementation of the -// descriptor cache. -type mapBlobDescriptorCache struct { - descriptors map[digest.Digest]distribution.Descriptor - mu sync.RWMutex -} - -var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{} - -func newMapBlobDescriptorCache() *mapBlobDescriptorCache { - return &mapBlobDescriptorCache{ - descriptors: make(map[digest.Digest]distribution.Descriptor), - } -} - -func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - if err := dgst.Validate(); err != nil { - return distribution.Descriptor{}, err - } - - mbdc.mu.RLock() - defer mbdc.mu.RUnlock() - - desc, ok := mbdc.descriptors[dgst] - if !ok { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - return desc, nil -} - -func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { - mbdc.mu.Lock() - defer mbdc.mu.Unlock() - - delete(mbdc.descriptors, dgst) - return nil -} - -func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := dgst.Validate(); err != nil { - return err - } - - if err := cache.ValidateDescriptor(desc); err != nil { - return err - } - - mbdc.mu.Lock() - defer mbdc.mu.Unlock() - - mbdc.descriptors[dgst] = desc - return nil -} diff --git a/src/vendor/github.com/docker/docker-credential-helpers/LICENSE b/src/vendor/github.com/docker/docker-credential-helpers/LICENSE deleted file mode 100644 index 1ea555e2a..000000000 --- a/src/vendor/github.com/docker/docker-credential-helpers/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2016 David Calavera - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/src/vendor/github.com/docker/docker-credential-helpers/client/client.go b/src/vendor/github.com/docker/docker-credential-helpers/client/client.go deleted file mode 100644 index d1d0434cb..000000000 --- a/src/vendor/github.com/docker/docker-credential-helpers/client/client.go +++ /dev/null @@ -1,121 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" - - "github.com/docker/docker-credential-helpers/credentials" -) - -// isValidCredsMessage checks if 'msg' contains invalid credentials error message. -// It returns whether the logs are free of invalid credentials errors and the error if it isn't. -// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername. -func isValidCredsMessage(msg string) error { - if credentials.IsCredentialsMissingServerURLMessage(msg) { - return credentials.NewErrCredentialsMissingServerURL() - } - - if credentials.IsCredentialsMissingUsernameMessage(msg) { - return credentials.NewErrCredentialsMissingUsername() - } - - return nil -} - -// Store uses an external program to save credentials. -func Store(program ProgramFunc, creds *credentials.Credentials) error { - cmd := program("store") - - buffer := new(bytes.Buffer) - if err := json.NewEncoder(buffer).Encode(creds); err != nil { - return err - } - cmd.Input(buffer) - - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - - if isValidErr := isValidCredsMessage(t); isValidErr != nil { - err = isValidErr - } - - return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t) - } - - return nil -} - -// Get executes an external program to get the credentials from a native store. -func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) { - cmd := program("get") - cmd.Input(strings.NewReader(serverURL)) - - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - - if credentials.IsErrCredentialsNotFoundMessage(t) { - return nil, credentials.NewErrCredentialsNotFound() - } - - if isValidErr := isValidCredsMessage(t); isValidErr != nil { - err = isValidErr - } - - return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t) - } - - resp := &credentials.Credentials{ - ServerURL: serverURL, - } - - if err := json.NewDecoder(bytes.NewReader(out)).Decode(resp); err != nil { - return nil, err - } - - return resp, nil -} - -// Erase executes a program to remove the server credentials from the native store. -func Erase(program ProgramFunc, serverURL string) error { - cmd := program("erase") - cmd.Input(strings.NewReader(serverURL)) - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - - if isValidErr := isValidCredsMessage(t); isValidErr != nil { - err = isValidErr - } - - return fmt.Errorf("error erasing credentials - err: %v, out: `%s`", err, t) - } - - return nil -} - -// List executes a program to list server credentials in the native store. 
-func List(program ProgramFunc) (map[string]string, error) { - cmd := program("list") - cmd.Input(strings.NewReader("unused")) - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - - if isValidErr := isValidCredsMessage(t); isValidErr != nil { - err = isValidErr - } - - return nil, fmt.Errorf("error listing credentials - err: %v, out: `%s`", err, t) - } - - var resp map[string]string - if err = json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil { - return nil, err - } - - return resp, nil -} diff --git a/src/vendor/github.com/docker/docker-credential-helpers/client/command.go b/src/vendor/github.com/docker/docker-credential-helpers/client/command.go deleted file mode 100644 index 0183c0639..000000000 --- a/src/vendor/github.com/docker/docker-credential-helpers/client/command.go +++ /dev/null @@ -1,57 +0,0 @@ -package client - -import ( - "fmt" - "io" - "os" - - exec "golang.org/x/sys/execabs" -) - -// Program is an interface to execute external programs. -type Program interface { - Output() ([]byte, error) - Input(in io.Reader) -} - -// ProgramFunc is a type of function that initializes programs based on arguments. -type ProgramFunc func(args ...string) Program - -// NewShellProgramFunc creates programs that are executed in a Shell. -func NewShellProgramFunc(name string) ProgramFunc { - return NewShellProgramFuncWithEnv(name, nil) -} - -// NewShellProgramFuncWithEnv creates programs that are executed in a Shell with environment variables -func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc { - return func(args ...string) Program { - return &Shell{cmd: createProgramCmdRedirectErr(name, args, env)} - } -} - -func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd { - programCmd := exec.Command(commandName, args...) - programCmd.Env = os.Environ() - if env != nil { - for k, v := range *env { - programCmd.Env = append(programCmd.Env, fmt.Sprintf("%s=%s", k, v)) - } - } - programCmd.Stderr = os.Stderr - return programCmd -} - -// Shell invokes shell commands to talk with a remote credentials helper. -type Shell struct { - cmd *exec.Cmd -} - -// Output returns responses from the remote credentials helper. -func (s *Shell) Output() ([]byte, error) { - return s.cmd.Output() -} - -// Input sets the input to send to a remote credentials helper. -func (s *Shell) Input(in io.Reader) { - s.cmd.Stdin = in -} diff --git a/src/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go b/src/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go deleted file mode 100644 index da8b594e7..000000000 --- a/src/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go +++ /dev/null @@ -1,186 +0,0 @@ -package credentials - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "io" - "os" - "strings" -) - -// Credentials holds the information shared between docker and the credentials store. -type Credentials struct { - ServerURL string - Username string - Secret string -} - -// isValid checks the integrity of Credentials object such that no credentials lack -// a server URL or a username. -// It returns whether the credentials are valid and the error if it isn't. 
-// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername -func (c *Credentials) isValid() (bool, error) { - if len(c.ServerURL) == 0 { - return false, NewErrCredentialsMissingServerURL() - } - - if len(c.Username) == 0 { - return false, NewErrCredentialsMissingUsername() - } - - return true, nil -} - -// CredsLabel holds the way Docker credentials should be labeled as such in credentials stores that allow labelling. -// That label allows to filter out non-Docker credentials too at lookup/search in macOS keychain, -// Windows credentials manager and Linux libsecret. Default value is "Docker Credentials" -var CredsLabel = "Docker Credentials" - -// SetCredsLabel is a simple setter for CredsLabel -func SetCredsLabel(label string) { - CredsLabel = label -} - -// Serve initializes the credentials helper and parses the action argument. -// This function is designed to be called from a command line interface. -// It uses os.Args[1] as the key for the action. -// It uses os.Stdin as input and os.Stdout as output. -// This function terminates the program with os.Exit(1) if there is an error. -func Serve(helper Helper) { - var err error - if len(os.Args) != 2 { - err = fmt.Errorf("Usage: %s ", os.Args[0]) - } - - if err == nil { - err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout) - } - - if err != nil { - fmt.Fprintf(os.Stdout, "%v\n", err) - os.Exit(1) - } -} - -// HandleCommand uses a helper and a key to run a credential action. -func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error { - switch key { - case "store": - return Store(helper, in) - case "get": - return Get(helper, in, out) - case "erase": - return Erase(helper, in) - case "list": - return List(helper, out) - case "version": - return PrintVersion(out) - } - return fmt.Errorf("Unknown credential action `%s`", key) -} - -// Store uses a helper and an input reader to save credentials. -// The reader must contain the JSON serialization of a Credentials struct. -func Store(helper Helper, reader io.Reader) error { - scanner := bufio.NewScanner(reader) - - buffer := new(bytes.Buffer) - for scanner.Scan() { - buffer.Write(scanner.Bytes()) - } - - if err := scanner.Err(); err != nil && err != io.EOF { - return err - } - - var creds Credentials - if err := json.NewDecoder(buffer).Decode(&creds); err != nil { - return err - } - - if ok, err := creds.isValid(); !ok { - return err - } - - return helper.Add(&creds) -} - -// Get retrieves the credentials for a given server url. -// The reader must contain the server URL to search. -// The writer is used to write the JSON serialization of the credentials. -func Get(helper Helper, reader io.Reader, writer io.Writer) error { - scanner := bufio.NewScanner(reader) - - buffer := new(bytes.Buffer) - for scanner.Scan() { - buffer.Write(scanner.Bytes()) - } - - if err := scanner.Err(); err != nil && err != io.EOF { - return err - } - - serverURL := strings.TrimSpace(buffer.String()) - if len(serverURL) == 0 { - return NewErrCredentialsMissingServerURL() - } - - username, secret, err := helper.Get(serverURL) - if err != nil { - return err - } - - resp := Credentials{ - ServerURL: serverURL, - Username: username, - Secret: secret, - } - - buffer.Reset() - if err := json.NewEncoder(buffer).Encode(resp); err != nil { - return err - } - - fmt.Fprint(writer, buffer.String()) - return nil -} - -// Erase removes credentials from the store. -// The reader must contain the server URL to remove. 
-func Erase(helper Helper, reader io.Reader) error { - scanner := bufio.NewScanner(reader) - - buffer := new(bytes.Buffer) - for scanner.Scan() { - buffer.Write(scanner.Bytes()) - } - - if err := scanner.Err(); err != nil && err != io.EOF { - return err - } - - serverURL := strings.TrimSpace(buffer.String()) - if len(serverURL) == 0 { - return NewErrCredentialsMissingServerURL() - } - - return helper.Delete(serverURL) -} - -//List returns all the serverURLs of keys in -//the OS store as a list of strings -func List(helper Helper, writer io.Writer) error { - accts, err := helper.List() - if err != nil { - return err - } - return json.NewEncoder(writer).Encode(accts) -} - -//PrintVersion outputs the current version. -func PrintVersion(writer io.Writer) error { - fmt.Fprintln(writer, Version) - return nil -} diff --git a/src/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/src/vendor/github.com/docker/docker-credential-helpers/credentials/error.go deleted file mode 100644 index fe6a5aef4..000000000 --- a/src/vendor/github.com/docker/docker-credential-helpers/credentials/error.go +++ /dev/null @@ -1,102 +0,0 @@ -package credentials - -const ( - // ErrCredentialsNotFound standardizes the not found error, so every helper returns - // the same message and docker can handle it properly. - errCredentialsNotFoundMessage = "credentials not found in native keychain" - - // ErrCredentialsMissingServerURL and ErrCredentialsMissingUsername standardize - // invalid credentials or credentials management operations - errCredentialsMissingServerURLMessage = "no credentials server URL" - errCredentialsMissingUsernameMessage = "no credentials username" -) - -// errCredentialsNotFound represents an error -// raised when credentials are not in the store. -type errCredentialsNotFound struct{} - -// Error returns the standard error message -// for when the credentials are not in the store. -func (errCredentialsNotFound) Error() string { - return errCredentialsNotFoundMessage -} - -// NewErrCredentialsNotFound creates a new error -// for when the credentials are not in the store. -func NewErrCredentialsNotFound() error { - return errCredentialsNotFound{} -} - -// IsErrCredentialsNotFound returns true if the error -// was caused by not having a set of credentials in a store. -func IsErrCredentialsNotFound(err error) bool { - _, ok := err.(errCredentialsNotFound) - return ok -} - -// IsErrCredentialsNotFoundMessage returns true if the error -// was caused by not having a set of credentials in a store. -// -// This function helps to check messages returned by an -// external program via its standard output. -func IsErrCredentialsNotFoundMessage(err string) bool { - return err == errCredentialsNotFoundMessage -} - -// errCredentialsMissingServerURL represents an error raised -// when the credentials object has no server URL or when no -// server URL is provided to a credentials operation requiring -// one. -type errCredentialsMissingServerURL struct{} - -func (errCredentialsMissingServerURL) Error() string { - return errCredentialsMissingServerURLMessage -} - -// errCredentialsMissingUsername represents an error raised -// when the credentials object has no username or when no -// username is provided to a credentials operation requiring -// one. -type errCredentialsMissingUsername struct{} - -func (errCredentialsMissingUsername) Error() string { - return errCredentialsMissingUsernameMessage -} - -// NewErrCredentialsMissingServerURL creates a new error for -// errCredentialsMissingServerURL. 
-func NewErrCredentialsMissingServerURL() error { - return errCredentialsMissingServerURL{} -} - -// NewErrCredentialsMissingUsername creates a new error for -// errCredentialsMissingUsername. -func NewErrCredentialsMissingUsername() error { - return errCredentialsMissingUsername{} -} - -// IsCredentialsMissingServerURL returns true if the error -// was an errCredentialsMissingServerURL. -func IsCredentialsMissingServerURL(err error) bool { - _, ok := err.(errCredentialsMissingServerURL) - return ok -} - -// IsCredentialsMissingServerURLMessage checks for an -// errCredentialsMissingServerURL in the error message. -func IsCredentialsMissingServerURLMessage(err string) bool { - return err == errCredentialsMissingServerURLMessage -} - -// IsCredentialsMissingUsername returns true if the error -// was an errCredentialsMissingUsername. -func IsCredentialsMissingUsername(err error) bool { - _, ok := err.(errCredentialsMissingUsername) - return ok -} - -// IsCredentialsMissingUsernameMessage checks for an -// errCredentialsMissingUsername in the error message. -func IsCredentialsMissingUsernameMessage(err string) bool { - return err == errCredentialsMissingUsernameMessage -} diff --git a/src/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go b/src/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go deleted file mode 100644 index 135acd254..000000000 --- a/src/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go +++ /dev/null @@ -1,14 +0,0 @@ -package credentials - -// Helper is the interface a credentials store helper must implement. -type Helper interface { - // Add appends credentials to the store. - Add(*Credentials) error - // Delete removes credentials from the store. - Delete(serverURL string) error - // Get retrieves credentials from the store. - // It returns username and secret as strings. - Get(serverURL string) (string, string, error) - // List returns the stored serverURLs and their associated usernames. - List() (map[string]string, error) -} diff --git a/src/vendor/github.com/docker/docker-credential-helpers/credentials/version.go b/src/vendor/github.com/docker/docker-credential-helpers/credentials/version.go deleted file mode 100644 index 185e36796..000000000 --- a/src/vendor/github.com/docker/docker-credential-helpers/credentials/version.go +++ /dev/null @@ -1,4 +0,0 @@ -package credentials - -// Version holds a string describing the current version -const Version = "0.6.4" diff --git a/src/vendor/github.com/docker/docker/AUTHORS b/src/vendor/github.com/docker/docker/AUTHORS deleted file mode 100644 index dffacff11..000000000 --- a/src/vendor/github.com/docker/docker/AUTHORS +++ /dev/null @@ -1,2175 +0,0 @@ -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `hack/generate-authors.sh`. - -Aanand Prasad -Aaron Davidson -Aaron Feng -Aaron Hnatiw -Aaron Huslage -Aaron L. 
Xu -Aaron Lehmann -Aaron Welch -Aaron.L.Xu -Abel Muiño -Abhijeet Kasurde -Abhinandan Prativadi -Abhinav Ajgaonkar -Abhishek Chanda -Abhishek Sharma -Abin Shahab -Adam Avilla -Adam Dobrawy -Adam Eijdenberg -Adam Kunk -Adam Miller -Adam Mills -Adam Pointer -Adam Singer -Adam Walz -Addam Hardy -Aditi Rajagopal -Aditya -Adnan Khan -Adolfo Ochagavía -Adria Casas -Adrian Moisey -Adrian Mouat -Adrian Oprea -Adrien Folie -Adrien Gallouët -Ahmed Kamal -Ahmet Alp Balkan -Aidan Feldman -Aidan Hobson Sayers -AJ Bowen -Ajey Charantimath -ajneu -Akash Gupta -Akhil Mohan -Akihiro Matsushima -Akihiro Suda -Akim Demaille -Akira Koyasu -Akshay Karle -Al Tobey -alambike -Alan Hoyle -Alan Scherger -Alan Thompson -Albert Callarisa -Albert Zhang -Albin Kerouanton -Alejandro González Hevia -Aleksa Sarai -Aleksandrs Fadins -Alena Prokharchyk -Alessandro Boch -Alessio Biancalana -Alex Chan -Alex Chen -Alex Coventry -Alex Crawford -Alex Ellis -Alex Gaynor -Alex Goodman -Alex Olshansky -Alex Samorukov -Alex Warhawk -Alexander Artemenko -Alexander Boyd -Alexander Larsson -Alexander Midlash -Alexander Morozov -Alexander Shopov -Alexandre Beslic -Alexandre Garnier -Alexandre González -Alexandre Jomin -Alexandru Sfirlogea -Alexei Margasov -Alexey Guskov -Alexey Kotlyarov -Alexey Shamrin -Alexis THOMAS -Alfred Landrum -Ali Dehghani -Alicia Lauerman -Alihan Demir -Allen Madsen -Allen Sun -almoehi -Alvaro Saurin -Alvin Deng -Alvin Richards -amangoel -Amen Belayneh -Amir Goldstein -Amit Bakshi -Amit Krishnan -Amit Shukla -Amr Gawish -Amy Lindburg -Anand Patil -AnandkumarPatel -Anatoly Borodin -Anca Iordache -Anchal Agrawal -Anda Xu -Anders Janmyr -Andre Dublin <81dublin@gmail.com> -Andre Granovsky -Andrea Denisse Gómez -Andrea Luzzardi -Andrea Turli -Andreas Elvers -Andreas Köhler -Andreas Savvides -Andreas Tiefenthaler -Andrei Gherzan -Andrei Vagin -Andrew C. Bodine -Andrew Clay Shafer -Andrew Duckworth -Andrew France -Andrew Gerrand -Andrew Guenther -Andrew He -Andrew Hsu -Andrew Kuklewicz -Andrew Macgregor -Andrew Macpherson -Andrew Martin -Andrew McDonnell -Andrew Munsell -Andrew Pennebaker -Andrew Po -Andrew Weiss -Andrew Williams -Andrews Medina -Andrey Kolomentsev -Andrey Petrov -Andrey Stolbovsky -André Martins -andy -Andy Chambers -andy diller -Andy Goldstein -Andy Kipp -Andy Rothfusz -Andy Smith -Andy Wilson -Anes Hasicic -Anil Belur -Anil Madhavapeddy -Ankit Jain -Ankush Agarwal -Anonmily -Anran Qiao -Anshul Pundir -Anthon van der Neut -Anthony Baire -Anthony Bishopric -Anthony Dahanne -Anthony Sottile -Anton Löfgren -Anton Nikitin -Anton Polonskiy -Anton Tiurin -Antonio Murdaca -Antonis Kalipetis -Antony Messerli -Anuj Bahuguna -Anusha Ragunathan -apocas -Arash Deshmeh -ArikaChen -Arko Dasgupta -Arnaud Lefebvre -Arnaud Porterie -Arnaud Rebillout -Arthur Barr -Arthur Gautier -Artur Meyster -Arun Gupta -Asad Saeeduddin -Asbjørn Enge -averagehuman -Avi Das -Avi Kivity -Avi Miller -Avi Vaid -ayoshitake -Azat Khuyiyakhmetov -Bardia Keyoumarsi -Barnaby Gray -Barry Allard -BartÅ‚omiej Piotrowski -Bastiaan Bakker -bdevloed -Ben Bonnefoy -Ben Firshman -Ben Golub -Ben Gould -Ben Hall -Ben Sargent -Ben Severson -Ben Toews -Ben Wiklund -Benjamin Atkin -Benjamin Baker -Benjamin Boudreau -Benjamin Yolken -Benny Ng -Benoit Chesneau -Bernerd Schaefer -Bernhard M. 
Wiedemann -Bert Goethals -Bertrand Roussel -Bevisy Zhang -Bharath Thiruveedula -Bhiraj Butala -Bhumika Bayani -Bilal Amarni -Bill Wang -Bily Zhang -Bin Liu -Bingshen Wang -Blake Geno -Boaz Shuster -bobby abbott -Boqin Qin -Boris Pruessmann -Boshi Lian -Bouke Haarsma -Boyd Hemphill -boynux -Bradley Cicenas -Bradley Wright -Brandon Liu -Brandon Philips -Brandon Rhodes -Brendan Dixon -Brent Salisbury -Brett Higgins -Brett Kochendorfer -Brett Randall -Brian (bex) Exelbierd -Brian Bland -Brian DeHamer -Brian Dorsey -Brian Flad -Brian Goff -Brian McCallister -Brian Olsen -Brian Schwind -Brian Shumate -Brian Torres-Gil -Brian Trump -Brice Jaglin -Briehan Lombaard -Brielle Broder -Bruno Bigras -Bruno Binet -Bruno Gazzera -Bruno Renié -Bruno Tavares -Bryan Bess -Bryan Boreham -Bryan Matsuo -Bryan Murphy -Burke Libbey -Byung Kang -Caleb Spare -Calen Pennington -Cameron Boehmer -Cameron Spear -Campbell Allen -Candid Dauth -Cao Weiwei -Carl Henrik Lunde -Carl Loa Odin -Carl X. Su -Carlo Mion -Carlos Alexandro Becker -Carlos de Paula -Carlos Sanchez -Carol Fager-Higgins -Cary -Casey Bisson -Catalin Pirvu -Ce Gao -Cedric Davies -Cezar Sa Espinola -Chad Swenson -Chance Zibolski -Chander Govindarajan -Chanhun Jeong -Chao Wang -Charles Chan -Charles Hooper -Charles Law -Charles Lindsay -Charles Merriam -Charles Sarrazin -Charles Smith -Charlie Drage -Charlie Lewis -Chase Bolt -ChaYoung You -Chen Chao -Chen Chuanliang -Chen Hanxiao -Chen Min -Chen Mingjie -Chen Qiu -Cheng-mean Liu -Chengfei Shang -Chengguang Xu -chenyuzhu -Chetan Birajdar -Chewey -Chia-liang Kao -chli -Cholerae Hu -Chris Alfonso -Chris Armstrong -Chris Dias -Chris Dituri -Chris Fordham -Chris Gavin -Chris Gibson -Chris Khoo -Chris McKinnel -Chris McKinnel -Chris Price -Chris Seto -Chris Snow -Chris St. Pierre -Chris Stivers -Chris Swan -Chris Telfer -Chris Wahl -Chris Weyl -Chris White -Christian Berendt -Christian Brauner -Christian Böhme -Christian Muehlhaeuser -Christian Persson -Christian Rotzoll -Christian Simon -Christian Stefanescu -Christophe Mehay -Christophe Troestler -Christophe Vidal -Christopher Biscardi -Christopher Crone -Christopher Currie -Christopher Jones -Christopher Latham -Christopher Rigor -Christy Norman -Chun Chen -Ciro S. 
Costa -Clayton Coleman -Clinton Kitson -Cody Roseborough -Coenraad Loubser -Colin Dunklau -Colin Hebert -Colin Panisset -Colin Rice -Colin Walters -Collin Guarino -Colm Hally -companycy -Corbin Coleman -Corey Farrell -Cory Forsyth -cressie176 -CrimsonGlory -Cristian Ariza -Cristian Staretu -cristiano balducci -Cristina Yenyxe Gonzalez Garcia -Cruceru Calin-Cristian -CUI Wei -Cyprian Gracz -Cyril F -Daan van Berkel -Daehyeok Mun -Dafydd Crosby -dalanlan -Damian Smyth -Damien Nadé -Damien Nozay -Damjan Georgievski -Dan Anolik -Dan Buch -Dan Cotora -Dan Feldman -Dan Griffin -Dan Hirsch -Dan Keder -Dan Levy -Dan McPherson -Dan Stine -Dan Williams -Dani Hodovic -Dani Louca -Daniel Antlinger -Daniel Black -Daniel Dao -Daniel Exner -Daniel Farrell -Daniel Garcia -Daniel Gasienica -Daniel Grunwell -Daniel Helfand -Daniel Hiltgen -Daniel J Walsh -Daniel Menet -Daniel Mizyrycki -Daniel Nephin -Daniel Norberg -Daniel Nordberg -Daniel Robinson -Daniel S -Daniel Sweet -Daniel Von Fange -Daniel Watkins -Daniel X Moore -Daniel YC Lin -Daniel Zhang -Danny Berger -Danny Milosavljevic -Danny Yates -Danyal Khaliq -Darren Coxall -Darren Shepherd -Darren Stahl -Dattatraya Kumbhar -Davanum Srinivas -Dave Barboza -Dave Goodchild -Dave Henderson -Dave MacDonald -Dave Tucker -David Anderson -David Calavera -David Chung -David Corking -David Cramer -David Currie -David Davis -David Dooling -David Gageot -David Gebler -David Glasser -David Lawrence -David Lechner -David M. Karr -David Mackey -David Mat -David Mcanulty -David McKay -David P Hilton -David Pelaez -David R. Jenni -David Röthlisberger -David Sheets -David Sissitka -David Trott -David Wang <00107082@163.com> -David Williamson -David Xia -David Young -Davide Ceretti -Dawn Chen -dbdd -dcylabs -Debayan De -Deborah Gertrude Digges -deed02392 -Deep Debroy -Deng Guangxing -Deni Bertovic -Denis Defreyne -Denis Gladkikh -Denis Ollier -Dennis Chen -Dennis Chen -Dennis Docter -Derek -Derek -Derek Ch -Derek McGowan -Deric Crago -Deshi Xiao -devmeyster -Devon Estes -Devvyn Murphy -Dharmit Shah -Dhawal Yogesh Bhanushali -Diego Romero -Diego Siqueira -Dieter Reuter -Dillon Dixon -Dima Stopel -Dimitri John Ledkov -Dimitris Mandalidis -Dimitris Rozakis -Dimitry Andric -Dinesh Subhraveti -Ding Fei -Diogo Monica -DiuDiugirl -Djibril Koné -dkumor -Dmitri Logvinenko -Dmitri Shuralyov -Dmitry Demeshchuk -Dmitry Gusev -Dmitry Kononenko -Dmitry Sharshakov -Dmitry Shyshkin -Dmitry Smirnov -Dmitry V. Krivenok -Dmitry Vorobev -Dolph Mathews -Dominic Tubach -Dominic Yin -Dominik Dingel -Dominik Finkbeiner -Dominik Honnef -Don Kirkby -Don Kjer -Don Spaulding -Donald Huang -Dong Chen -Donghwa Kim -Donovan Jones -Doron Podoleanu -Doug Davis -Doug MacEachern -Doug Tangren -Douglas Curtis -Dr Nic Williams -dragon788 -Dražen LuÄanin -Drew Erny -Drew Hubl -Dustin Sallings -Ed Costello -Edmund Wagner -Eiichi Tsukata -Eike Herzbach -Eivin Giske Skaaren -Eivind Uggedal -Elan Ruusamäe -Elango Sivanandam -Elena Morozova -Eli Uriegas -Elias Faxö -Elias Probst -Elijah Zupancic -eluck -Elvir Kuric -Emil Davtyan -Emil Hernvall -Emily Maier -Emily Rose -Emir Ozer -Enguerran -Eohyung Lee -epeterso -Eric Barch -Eric Curtin -Eric G. Noriega -Eric Hanchrow -Eric Lee -Eric Myhre -Eric Paris -Eric Rafaloff -Eric Rosenberg -Eric Sage -Eric Soderstrom -Eric Yang -Eric-Olivier Lamey -Erica Windisch -Erik Bray -Erik Dubbelboer -Erik Hollensbe -Erik Inge Bolsø -Erik Kristensen -Erik St. 
Martin -Erik Weathers -Erno Hopearuoho -Erwin van der Koogh -Ethan Bell -Ethan Mosbaugh -Euan Kemp -Eugen Krizo -Eugene Yakubovich -Evan Allrich -Evan Carmi -Evan Hazlett -Evan Krall -Evan Phoenix -Evan Wies -Evelyn Xu -Everett Toews -Evgeniy Makhrov -Evgeny Shmarnev -Evgeny Vereshchagin -Ewa Czechowska -Eystein MÃ¥løy Stenberg -ezbercih -Ezra Silvera -Fabian Kramm -Fabian Lauer -Fabian Raetz -Fabiano Rosas -Fabio Falci -Fabio Kung -Fabio Rapposelli -Fabio Rehm -Fabrizio Regini -Fabrizio Soppelsa -Faiz Khan -falmp -Fangming Fang -Fangyuan Gao <21551127@zju.edu.cn> -fanjiyun -Fareed Dudhia -Fathi Boudra -Federico Gimenez -Felipe Oliveira -Felipe Ruhland -Felix Abecassis -Felix Geisendörfer -Felix Hupfeld -Felix Rabe -Felix Ruess -Felix Schindler -Feng Yan -Fengtu Wang -Ferenc Szabo -Fernando -Fero Volar -Ferran Rodenas -Filipe Brandenburger -Filipe Oliveira -Flavio Castelli -Flavio Crisciani -Florian -Florian Klein -Florian Maier -Florian Noeding -Florian Schmaus -Florian Weingarten -Florin Asavoaie -Florin Patan -fonglh -Foysal Iqbal -Francesc Campoy -Francesco Mari -Francis Chuang -Francisco Carriedo -Francisco Souza -Frank Groeneveld -Frank Herrmann -Frank Macreery -Frank Rosquin -frankyang -Fred Lifton -Frederick F. Kautz IV -Frederik Loeffert -Frederik Nordahl Jul Sabroe -Freek Kalter -Frieder Bluemle -Fu JinLin -Félix Baylac-Jacqué -Félix Cantournet -Gabe Rosenhouse -Gabor Nagy -Gabriel Linder -Gabriel Monroy -Gabriel Nicolas Avellaneda -Gaetan de Villele -Galen Sampson -Gang Qiao -Gareth Rushgrove -Garrett Barboza -Gary Schaetz -Gaurav -Gaurav Singh -Gaël PORTAY -Genki Takiuchi -GennadySpb -Geoffrey Bachelet -Geon Kim -George Kontridze -George MacRorie -George Xie -Georgi Hristozov -Gereon Frey -German DZ -Gert van Valkenhoef -Gerwim Feiken -Ghislain Bourgeois -Giampaolo Mancini -Gianluca Borello -Gildas Cuisinier -Giovan Isa Musthofa -gissehel -Giuseppe Mazzotta -Gleb Fotengauer-Malinovskiy -Gleb M Borisov -Glyn Normington -GoBella -Goffert van Gool -Goldwyn Rodrigues -Gopikannan Venugopalsamy -Gosuke Miyashita -Gou Rao -Govinda Fichtner -Grant Millar -Grant Reaber -Graydon Hoare -Greg Fausak -Greg Pflaum -Greg Stephens -Greg Thornton -Grzegorz JaÅ›kiewicz -Guilhem Lettron -Guilherme Salgado -Guillaume Dufour -Guillaume J. Charmes -guoxiuyan -Guri -Gurjeet Singh -Guruprasad -Gustav Sinder -gwx296173 -Günter Zöchbauer -Haichao Yang -haikuoliu -Hakan Özler -Hamish Hutchings -Hannes Ljungberg -Hans Kristian Flaatten -Hans Rødtang -Hao Shu Wei -Hao Zhang <21521210@zju.edu.cn> -Harald Albers -Harald Niesche -Harley Laue -Harold Cooper -Harrison Turton -Harry Zhang -Harshal Patil -Harshal Patil -He Simei -He Xiaoxi -He Xin -heartlock <21521209@zju.edu.cn> -Hector Castro -Helen Xie -Henning Sprang -Hiroshi Hatake -Hiroyuki Sasagawa -Hobofan -Hollie Teal -Hong Xu -Hongbin Lu -Hongxu Jia -Honza Pokorny -Hsing-Hui Hsu -hsinko <21551195@zju.edu.cn> -Hu Keping -Hu Tao -HuanHuan Ye -Huanzhong Zhang -Huayi Zhang -Hugo Duncan -Hugo Marisco <0x6875676f@gmail.com> -Hunter Blanks -huqun -Huu Nguyen -hyeongkyu.lee -Hyzhou Zhy -Iago López Galeiras -Ian Babrou -Ian Bishop -Ian Bull -Ian Calvert -Ian Campbell -Ian Chen -Ian Lee -Ian Main -Ian Philpot -Ian Truslove -Iavael -Icaro Seara -Ignacio Capurro -Igor Dolzhikov -Igor Karpovich -Iliana Weller -Ilkka Laukkanen -Ilya Dmitrichenko -Ilya Gusev -Ilya Khlopotov -imre Fitos -inglesp -Ingo Gottwald -Innovimax -Isaac Dupree -Isabel Jimenez -Isaiah Grace -Isao Jonas -Iskander Sharipov -Ivan Babrou -Ivan Fraixedes -Ivan Grcic -Ivan Markin -J Bruni -J. 
Nunn -Jack Danger Canty -Jack Laxson -Jacob Atzen -Jacob Edelman -Jacob Tomlinson -Jacob Vallejo -Jacob Wen -Jaime Cepeda -Jaivish Kothari -Jake Champlin -Jake Moshenko -Jake Sanders -jakedt -James Allen -James Carey -James Carr -James DeFelice -James Harrison Fisher -James Kyburz -James Kyle -James Lal -James Mills -James Nesbitt -James Nugent -James Turnbull -James Watkins-Harvey -Jamie Hannaford -Jamshid Afshar -Jan Chren -Jan Keromnes -Jan Koprowski -Jan Pazdziora -Jan Toebes -Jan-Gerd Tenberge -Jan-Jaap Driessen -Jana Radhakrishnan -Jannick Fahlbusch -Januar Wayong -Jared Biel -Jared Hocutt -Jaroslaw Zabiello -jaseg -Jasmine Hegman -Jason A. Donenfeld -Jason Divock -Jason Giedymin -Jason Green -Jason Hall -Jason Heiss -Jason Livesay -Jason McVetta -Jason Plum -Jason Shepherd -Jason Smith -Jason Sommer -Jason Stangroome -jaxgeller -Jay -Jay -Jay Kamat -Jean Rouge -Jean-Baptiste Barth -Jean-Baptiste Dalido -Jean-Christophe Berthon -Jean-Paul Calderone -Jean-Pierre Huynh -Jean-Tiare Le Bigot -Jeeva S. Chelladhurai -Jeff Anderson -Jeff Hajewski -Jeff Johnston -Jeff Lindsay -Jeff Mickey -Jeff Minard -Jeff Nickoloff -Jeff Silberman -Jeff Welch -Jeffrey Bolle -Jeffrey Morgan -Jeffrey van Gogh -Jenny Gebske -Jeremy Chambers -Jeremy Grosser -Jeremy Price -Jeremy Qian -Jeremy Unruh -Jeremy Yallop -Jeroen Franse -Jeroen Jacobs -Jesse Dearing -Jesse Dubay -Jessica Frazelle -Jezeniel Zapanta -Jhon Honce -Ji.Zhilong -Jian Liao -Jian Zhang -Jiang Jinyang -Jie Luo -Jie Ma -Jihyun Hwang -Jilles Oldenbeuving -Jim Alateras -Jim Ehrismann -Jim Galasyn -Jim Minter -Jim Perrin -Jimmy Cuadra -Jimmy Puckett -Jimmy Song -Jinsoo Park -Jintao Zhang -Jiri Appl -Jiri Popelka -Jiuyue Ma -Jiří Župka -Joao Fernandes -Joao Trindade -Joe Beda -Joe Doliner -Joe Ferguson -Joe Gordon -Joe Shaw -Joe Van Dyk -Joel Friedly -Joel Handwell -Joel Hansson -Joel Wurtz -Joey Geiger -Joey Geiger -Joey Gibson -Joffrey F -Johan Euphrosine -Johan Rydberg -Johanan Lieberman -Johannes 'fish' Ziemke -John Costa -John Feminella -John Gardiner Myers -John Gossman -John Harris -John Howard -John Laswell -John Maguire -John Mulhausen -John OBrien III -John Starks -John Stephens -John Tims -John V. Martinez -John Warwick -John Willis -Jon Johnson -Jon Surrell -Jon Wedaman -Jonas Dohse -Jonas Heinrich -Jonas Pfenniger -Jonathan A. Schweder -Jonathan A. Sternberg -Jonathan Boulle -Jonathan Camp -Jonathan Choy -Jonathan Dowland -Jonathan Lebon -Jonathan Lomas -Jonathan McCrohan -Jonathan Mueller -Jonathan Pares -Jonathan Rudenberg -Jonathan Stoppani -Jonh Wendell -Joni Sar -Joost Cassee -Jordan Arentsen -Jordan Jennings -Jordan Sissel -Jorge Marin -Jorit Kleine-Möllhoff -Jose Diaz-Gonzalez -Joseph Anthony Pasquale Holsten -Joseph Hager -Joseph Kern -Joseph Rothrock -Josh -Josh Bodah -Josh Bonczkowski -Josh Chorlton -Josh Eveleth -Josh Hawn -Josh Horwitz -Josh Poimboeuf -Josh Soref -Josh Wilson -Josiah Kiehl -José Tomás Albornoz -Joyce Jang -JP -Julian Taylor -Julien Barbier -Julien Bisconti -Julien Bordellier -Julien Dubois -Julien Kassar -Julien Maitrehenry -Julien Pervillé -Julien Pivotto -Julio Guerra -Julio Montes -Jun-Ru Chang -Jussi Nummelin -Justas Brazauskas -Justen Martin -Justin Cormack -Justin Force -Justin Menga -Justin Plock -Justin Simonelis -Justin Terry -Justyn Temme -Jyrki Puttonen -Jérémy Leherpeur -Jérôme Petazzoni -Jörg Thalheim -K. 
Heller -Kai Blin -Kai Qiang Wu (Kennan) -Kamil DomaÅ„ski -Kamjar Gerami -Kanstantsin Shautsou -Kara Alexandra -Karan Lyons -Kareem Khazem -kargakis -Karl Grzeszczak -Karol Duleba -Karthik Karanth -Karthik Nayak -Kasper Fabæch Brandt -Kate Heddleston -Katie McLaughlin -Kato Kazuyoshi -Katrina Owen -Kawsar Saiyeed -Kay Yan -kayrus -Kazuhiro Sera -Ke Li -Ke Xu -Kei Ohmura -Keith Hudgins -Keli Hu -Ken Cochrane -Ken Herner -Ken ICHIKAWA -Ken Reese -Kenfe-Mickaël Laventure -Kenjiro Nakayama -Kent Johnson -Kenta Tada -Kevin "qwazerty" Houdebert -Kevin Burke -Kevin Clark -Kevin Feyrer -Kevin J. Lynagh -Kevin Jing Qiu -Kevin Kern -Kevin Menard -Kevin Meredith -Kevin P. Kucharczyk -Kevin Parsons -Kevin Richardson -Kevin Shi -Kevin Wallace -Kevin Yap -Keyvan Fatehi -kies -Kim BKC Carlbacker -Kim Eik -Kimbro Staken -Kir Kolyshkin -Kiran Gangadharan -Kirill SIbirev -knappe -Kohei Tsuruta -Koichi Shiraishi -Konrad Kleine -Konstantin Gribov -Konstantin L -Konstantin Pelykh -Krasi Georgiev -Krasimir Georgiev -Kris-Mikael Krister -Kristian Haugene -Kristina Zabunova -Krystian Wojcicki -Kun Zhang -Kunal Kushwaha -Kunal Tyagi -Kyle Conroy -Kyle Linden -Kyle Wuolle -kyu -Lachlan Coote -Lai Jiangshan -Lajos Papp -Lakshan Perera -Lalatendu Mohanty -Lance Chen -Lance Kinley -Lars Butler -Lars Kellogg-Stedman -Lars R. Damerow -Lars-Magnus Skog -Laszlo Meszaros -Laura Frank -Laurent Erignoux -Laurie Voss -Leandro Siqueira -Lee Chao <932819864@qq.com> -Lee, Meng-Han -leeplay -Lei Gong -Lei Jitang -Len Weincier -Lennie -Leo Gallucci -Leszek Kowalski -Levi Blackstone -Levi Gross -Lewis Daly -Lewis Marshall -Lewis Peckover -Li Yi -Liam Macgillavry -Liana Lo -Liang Mingqiang -Liang-Chi Hsieh -Liao Qingwei -Lifubang -Lihua Tang -Lily Guo -limsy -Lin Lu -LingFaKe -Linus Heckemann -Liran Tal -Liron Levin -Liu Bo -Liu Hua -liwenqi -lixiaobing10051267 -Liz Zhang -LIZAO LI -Lizzie Dixon <_@lizzie.io> -Lloyd Dewolf -Lokesh Mandvekar -longliqiang88 <394564827@qq.com> -Lorenz Leutgeb -Lorenzo Fontana -Lotus Fenn -Louis Delossantos -Louis Opter -Luca Favatella -Luca Marturana -Luca Orlandi -Luca-Bogdan Grigorescu -Lucas Chan -Lucas Chi -Lucas Molas -Lucas Silvestre -Luciano Mores -Luis Martínez de Bartolomé Izquierdo -Luiz Svoboda -Lukas Heeren -Lukas Waslowski -lukaspustina -Lukasz Zajaczkowski -Luke Marsden -Lyn -Lynda O'Leary -Lénaïc Huard -Ma Müller -Ma Shimiao -Mabin -Madhan Raj Mookkandy -Madhav Puri -Madhu Venugopal -Mageee -Mahesh Tiyyagura -malnick -Malte Janduda -Manfred Touron -Manfred Zabarauskas -Manjunath A Kumatagi -Mansi Nahar -Manuel Meurer -Manuel Rüger -Manuel Woelker -mapk0y -Marc Abramowitz -Marc Kuo -Marc Tamsky -Marcel Edmund Franke -Marcelo Horacio Fortino -Marcelo Salazar -Marco Hennings -Marcus Cobden -Marcus Farkas -Marcus Linke -Marcus Martins -Marcus Ramberg -Marek Goldmann -Marian Marinov -Marianna Tessel -Mario Loriedo -Marius Gundersen -Marius Sturm -Marius Voila -Mark Allen -Mark Jeromin -Mark McGranaghan -Mark McKinstry -Mark Milstein -Mark Oates -Mark Parker -Mark West -Markan Patel -Marko Mikulicic -Marko Tibold -Markus Fix -Markus Kortlang -Martijn Dwars -Martijn van Oosterhout -Martin Honermeyer -Martin Kelly -Martin Mosegaard Amdisen -Martin Muzatko -Martin Redmond -Mary Anthony -Masahito Zembutsu -Masato Ohba -Masayuki Morita -Mason Malone -Mateusz Sulima -Mathias Monnerville -Mathieu Champlon -Mathieu Le Marec - Pasquet -Mathieu Parent -Matt Apperson -Matt Bachmann -Matt Bentley -Matt Haggard -Matt Hoyle -Matt McCormick -Matt Moore -Matt Richardson -Matt Rickard -Matt Robenolt -Matt 
Schurenko -Matt Williams -Matthew Heon -Matthew Lapworth -Matthew Mayer -Matthew Mosesohn -Matthew Mueller -Matthew Riley -Matthias Klumpp -Matthias Kühnle -Matthias Rampke -Matthieu Hauglustaine -Mattias Jernberg -Mauricio Garavaglia -mauriyouth -Max Harmathy -Max Shytikov -Maxim Fedchyshyn -Maxim Ivanov -Maxim Kulkin -Maxim Treskin -Maxime Petazzoni -Maximiliano Maccanti -Maxwell -Meaglith Ma -meejah -Megan Kostick -Mehul Kar -Mei ChunTao -Mengdi Gao -Mert YazıcıoÄŸlu -mgniu -Micah Zoltu -Michael A. Smith -Michael Bridgen -Michael Brown -Michael Chiang -Michael Crosby -Michael Currie -Michael Friis -Michael Gorsuch -Michael Grauer -Michael Holzheu -Michael Hudson-Doyle -Michael Huettermann -Michael Irwin -Michael Käufl -Michael Neale -Michael Nussbaum -Michael Prokop -Michael Scharf -Michael Spetsiotis -Michael Stapelberg -Michael Steinert -Michael Thies -Michael West -Michael Zhao -Michal Fojtik -Michal Gebauer -Michal Jemala -Michal Minář -Michal Wieczorek -Michaël Pailloncy -MichaÅ‚ Czeraszkiewicz -MichaÅ‚ Gryko -Michiel de Jong -Mickaël Fortunato -Mickaël Remars -Miguel Angel Fernández -Miguel Morales -Mihai Borobocea -Mihuleacc Sergiu -Mike Brown -Mike Bush -Mike Casas -Mike Chelen -Mike Danese -Mike Dillon -Mike Dougherty -Mike Estes -Mike Gaffney -Mike Goelzer -Mike Leone -Mike Lundy -Mike MacCana -Mike Naberezny -Mike Snitzer -mikelinjie <294893458@qq.com> -Mikhail Sobolev -Miklos Szegedi -Milind Chawre -Miloslav TrmaÄ -mingqing -Mingzhen Feng -Misty Stanley-Jones -Mitch Capper -Mizuki Urushida -mlarcher -Mohammad Banikazemi -Mohammad Nasirifar -Mohammed Aaqib Ansari -Mohit Soni -Moorthy RS -Morgan Bauer -Morgante Pell -Morgy93 -Morten Siebuhr -Morton Fox -Moysés Borges -mrfly -Mrunal Patel -Muayyad Alsadi -Mustafa Akın -Muthukumar R -Máximo Cuadros -Médi-Rémi Hashim -Nace Oroz -Nahum Shalman -Nakul Pathak -Nalin Dahyabhai -Nan Monnand Deng -Naoki Orii -Natalie Parker -Natanael Copa -Natasha Jarus -Nate Brennand -Nate Eagleson -Nate Jones -Nathan Hsieh -Nathan Kleyn -Nathan LeClaire -Nathan McCauley -Nathan Williams -Naveed Jamil -Neal McBurnett -Neil Horman -Neil Peterson -Nelson Chen -Neyazul Haque -Nghia Tran -Niall O'Higgins -Nicholas E. Rabenau -Nick Adcock -Nick DeCoursin -Nick Irvine -Nick Neisen -Nick Parker -Nick Payne -Nick Russo -Nick Stenning -Nick Stinemates -NickrenREN -Nicola Kabar -Nicolas Borboën -Nicolas De Loof -Nicolas Dudebout -Nicolas Goy -Nicolas Kaiser -Nicolas Sterchele -Nicolas V Castet -Nicolás Hock Isaza -Nigel Poulton -Nik Nyby -Nikhil Chawla -NikolaMandic -Nikolas Garofil -Nikolay Edigaryev -Nikolay Milovanov -Nirmal Mehta -Nishant Totla -NIWA Hideyuki -Noah Meyerhans -Noah Treuhaft -NobodyOnSE -noducks -Nolan Darilek -Noriki Nakamura -nponeccop -Nuutti Kotivuori -nzwsch -O.S. Tezer -objectified -Odin Ugedal -Oguz Bilgic -Oh Jinkyun -Ohad Schneider -ohmystack -Ole Reifschneider -Oliver Neal -Oliver Reason -Olivier Gambier -Olle Jonsson -Olli Janatuinen -Olly Pomeroy -Omri Shiv -Oriol Francès -Oskar Niburski -Otto Kekäläinen -Ouyang Liduo -Ovidio Mallo -Panagiotis Moustafellos -Paolo G. 
Giarrusso -Pascal -Pascal Bach -Pascal Borreli -Pascal Hartig -Patrick Böänziger -Patrick Devine -Patrick Hemmer -Patrick Stapleton -Patrik Cyvoct -pattichen -Paul -paul -Paul Annesley -Paul Bellamy -Paul Bowsher -Paul Furtado -Paul Hammond -Paul Jimenez -Paul Kehrer -Paul Lietar -Paul Liljenberg -Paul Morie -Paul Nasrat -Paul Weaver -Paulo Ribeiro -Pavel Lobashov -Pavel MatÄ›ja -Pavel Pletenev -Pavel Pospisil -Pavel Sutyrin -Pavel Tikhomirov -Pavlos Ratis -Pavol Vargovcik -Pawel Konczalski -Peeyush Gupta -Peggy Li -Pei Su -Peng Tao -Penghan Wang -Per Weijnitz -perhapszzy@sina.com -Peter Bourgon -Peter Braden -Peter Bücker -Peter Choi -Peter Dave Hello -Peter Edge -Peter Ericson -Peter Esbensen -Peter Jaffe -Peter Kang -Peter Malmgren -Peter Salvatore -Peter Volpe -Peter Waller -Petr Å vihlík -Phil -Phil Estes -Phil Spitler -Philip Alexander Etling -Philip Monroe -Philipp Gillé -Philipp Wahala -Philipp Weissensteiner -Phillip Alexander -phineas -pidster -Piergiuliano Bossi -Pierre -Pierre Carrier -Pierre Dal-Pra -Pierre Wacrenier -Pierre-Alain RIVIERE -Piotr Bogdan -pixelistik -Porjo -Poul Kjeldager Sørensen -Pradeep Chhetri -Pradip Dhara -Prasanna Gautam -Pratik Karki -Prayag Verma -Priya Wadhwa -Projjol Banerji -Przemek Hejman -Pure White -pysqz -Qiang Huang -Qinglan Peng -qudongfang -Quentin Brossard -Quentin Perez -Quentin Tayssier -r0n22 -Radostin Stoyanov -Rafal Jeczalik -Rafe Colton -Raghavendra K T -Raghuram Devarakonda -Raja Sami -Rajat Pandit -Rajdeep Dua -Ralf Sippl -Ralle -Ralph Bean -Ramkumar Ramachandra -Ramon Brooker -Ramon van Alteren -RaviTeja Pothana -Ray Tsang -ReadmeCritic -Recursive Madman -Reficul -Regan McCooey -Remi Rampin -Remy Suen -Renato Riccieri Santos Zannon -Renaud Gaubert -Rhys Hiltner -Ri Xu -Ricardo N Feliciano -Rich Moyse -Rich Seymour -Richard -Richard Burnison -Richard Harvey -Richard Mathie -Richard Metzler -Richard Scothern -Richo Healey -Rick Bradley -Rick van de Loo -Rick Wieman -Rik Nijessen -Riku Voipio -Riley Guerin -Ritesh H Shukla -Riyaz Faizullabhoy -Rob Gulewich -Rob Vesse -Robert Bachmann -Robert Bittle -Robert Obryk -Robert Schneider -Robert Stern -Robert Terhaar -Robert Wallis -Robert Wang -Roberto G. Hashioka -Roberto Muñoz Fernández -Robin Naundorf -Robin Schneider -Robin Speekenbrink -Robin Thoni -robpc -Rodolfo Carvalho -Rodrigo Vaz -Roel Van Nyen -Roger Peppe -Rohit Jnagal -Rohit Kadam -Rohit Kapur -Rojin George -Roland Huß -Roland Kammerer -Roland Moriz -Roma Sokolov -Roman Dudin -Roman Mazur -Roman Strashkin -Ron Smits -Ron Williams -Rong Gao -Rong Zhang -Rongxiang Song -root -root -root -root -Rory Hunter -Rory McCune -Ross Boucher -Rovanion Luckey -Royce Remer -Rozhnov Alexandr -Rudolph Gottesheim -Rui Cao -Rui Lopes -Ruilin Li -Runshen Zhu -Russ Magee -Ryan Abrams -Ryan Anderson -Ryan Aslett -Ryan Belgrave -Ryan Detzel -Ryan Fowler -Ryan Liu -Ryan McLaughlin -Ryan O'Donnell -Ryan Seto -Ryan Simmen -Ryan Stelly -Ryan Thomas -Ryan Trauntvein -Ryan Wallner -Ryan Zhang -ryancooper7 -RyanDeng -Ryo Nakao -Rémy Greinhofer -s. 
rannou -s00318865 -Sabin Basyal -Sachin Joshi -Sagar Hani -Sainath Grandhi -Sakeven Jiang -Salahuddin Khan -Sally O'Malley -Sam Abed -Sam Alba -Sam Bailey -Sam J Sharpe -Sam Neirinck -Sam Reis -Sam Rijs -Sam Whited -Sambuddha Basu -Sami Wagiaalla -Samuel Andaya -Samuel Dion-Girardeau -Samuel Karp -Samuel PHAN -Sandeep Bansal -Sankar சஙà¯à®•à®°à¯ -Sanket Saurav -Santhosh Manohar -sapphiredev -Sargun Dhillon -Sascha Andres -Sascha Grunert -SataQiu -Satnam Singh -Satoshi Amemiya -Satoshi Tagomori -Scott Bessler -Scott Collier -Scott Johnston -Scott Stamp -Scott Walls -sdreyesg -Sean Christopherson -Sean Cronin -Sean Lee -Sean McIntyre -Sean OMeara -Sean P. Kane -Sean Rodman -Sebastiaan van Steenis -Sebastiaan van Stijn -Senthil Kumar Selvaraj -Senthil Kumaran -SeongJae Park -Seongyeol Lim -Serge Hallyn -Sergey Alekseev -Sergey Evstifeev -Sergii Kabashniuk -Sergio Lopez -Serhat Gülçiçek -SeungUkLee -Sevki Hasirci -Shane Canon -Shane da Silva -Shaun Kaasten -shaunol -Shawn Landden -Shawn Siefkas -shawnhe -Shayne Wang -Shekhar Gulati -Sheng Yang -Shengbo Song -Shev Yan -Shih-Yuan Lee -Shijiang Wei -Shijun Qin -Shishir Mahajan -Shoubhik Bose -Shourya Sarcar -Shu-Wai Chow -shuai-z -Shukui Yang -Shuwei Hao -Sian Lerk Lau -Sidhartha Mani -sidharthamani -Silas Sewell -Silvan Jegen -Simão Reis -Simei He -Simon Barendse -Simon Eskildsen -Simon Ferquel -Simon Leinen -Simon Menke -Simon Taranto -Simon Vikstrom -Sindhu S -Sjoerd Langkemper -skanehira -Solganik Alexander -Solomon Hykes -Song Gao -Soshi Katsuta -Soulou -Spencer Brown -Spencer Smith -Sridatta Thatipamala -Sridhar Ratnakumar -Srini Brahmaroutu -Srinivasan Srivatsan -Staf Wagemakers -Stanislav Bondarenko -Stanislav Levin -Steeve Morin -Stefan Berger -Stefan J. Wernli -Stefan Praszalowicz -Stefan S. -Stefan Scherer -Stefan Staudenmeyer -Stefan Weil -Stephan Spindler -Stephen Benjamin -Stephen Crosby -Stephen Day -Stephen Drake -Stephen Rust -Steve Desmond -Steve Dougherty -Steve Durrheimer -Steve Francia -Steve Koch -Steven Burgess -Steven Erenst -Steven Hartland -Steven Iveson -Steven Merrill -Steven Richards -Steven Taylor -Stig Larsson -Subhajit Ghosh -Sujith Haridasan -Sun Gengze <690388648@qq.com> -Sun Jianbo -Sune Keller -Sunny Gogoi -Suryakumar Sudar -Sven Dowideit -Swapnil Daingade -Sylvain Baubeau -Sylvain Bellemare -Sébastien -Sébastien HOUZÉ -Sébastien Luttringer -Sébastien Stormacq -Tabakhase -Tadej Janež -TAGOMORI Satoshi -tang0th -Tangi Colin -Tatsuki Sugiura -Tatsushi Inagaki -Taylan Isikdemir -Taylor Jones -Ted M. Young -Tehmasp Chaudhri -Tejaswini Duggaraju -Tejesh Mehta -terryding77 <550147740@qq.com> -tgic -Thatcher Peskens -theadactyl -Thell 'Bo' Fowler -Thermionix -Thijs Terlouw -Thomas Bikeev -Thomas Frössman -Thomas Gazagnaire -Thomas Grainger -Thomas Hansen -Thomas Leonard -Thomas Léveil -Thomas Orozco -Thomas Riccardi -Thomas Schroeter -Thomas Sjögren -Thomas Swift -Thomas Tanaka -Thomas Texier -Ti Zhou -Tianon Gravi -Tianyi Wang -Tibor Vass -Tiffany Jernigan -Tiffany Low -Till Wegmüller -Tim -Tim Bart -Tim Bosse -Tim Dettrick -Tim Düsterhus -Tim Hockin -Tim Potter -Tim Ruffles -Tim Smith -Tim Terhorst -Tim Wang -Tim Waugh -Tim Wraight -Tim Zju <21651152@zju.edu.cn> -timfeirg -Timothy Hobbs -tjwebb123 -tobe -Tobias Bieniek -Tobias Bradtke -Tobias Gesellchen -Tobias Klauser -Tobias Munk -Tobias Schmidt -Tobias Schwab -Todd Crane -Todd Lunter -Todd Whiteman -Toli Kuznets -Tom Barlow -Tom Booth -Tom Denham -Tom Fotherby -Tom Howe -Tom Hulihan -Tom Maaswinkel -Tom Sweeney -Tom Wilkie -Tom X. 
Tobin -Tomas Tomecek -Tomasz Kopczynski -Tomasz Lipinski -Tomasz Nurkiewicz -Tommaso Visconti -Tomáš HrÄka -Tonny Xu -Tony Abboud -Tony Daws -Tony Miller -toogley -Torstein Husebø -Tõnis Tiigi -Trace Andreason -tracylihui <793912329@qq.com> -Trapier Marshall -Travis Cline -Travis Thieman -Trent Ogren -Trevor -Trevor Pounds -Trevor Sullivan -Trishna Guha -Tristan Carel -Troy Denton -Tycho Andersen -Tyler Brock -Tyler Brown -Tzu-Jung Lee -uhayate -Ulysse Carion -Umesh Yadav -Utz Bacher -vagrant -Vaidas Jablonskis -vanderliang -Velko Ivanov -Veres Lajos -Victor Algaze -Victor Coisne -Victor Costan -Victor I. Wood -Victor Lyuboslavsky -Victor Marmol -Victor Palma -Victor Vieux -Victoria Bialas -Vijaya Kumar K -Vikram bir Singh -Viktor Stanchev -Viktor Vojnovski -VinayRaghavanKS -Vincent Batts -Vincent Bernat -Vincent Boulineau -Vincent Demeester -Vincent Giersch -Vincent Mayers -Vincent Woo -Vinod Kulkarni -Vishal Doshi -Vishnu Kannan -Vitaly Ostrosablin -Vitor Monteiro -Vivek Agarwal -Vivek Dasgupta -Vivek Goyal -Vladimir Bulyga -Vladimir Kirillov -Vladimir Pouzanov -Vladimir Rutsky -Vladimir Varankin -VladimirAus -Vlastimil Zeman -Vojtech Vitek (V-Teq) -waitingkuo -Walter Leibbrandt -Walter Stanish -Wang Chao -Wang Guoliang -Wang Jie -Wang Long -Wang Ping -Wang Xing -Wang Yuexiao -Wang Yumu <37442693@qq.com> -wanghuaiqing -Ward Vandewege -WarheadsSE -Wassim Dhif -Wayne Chang -Wayne Song -Weerasak Chongnguluam -Wei Fu -Wei Wu -Wei-Ting Kuo -weipeng -weiyan -Weiyang Zhu -Wen Cheng Ma -Wendel Fleming -Wenjun Tang -Wenkai Yin -wenlxie -Wentao Zhang -Wenxuan Zhao -Wenyu You <21551128@zju.edu.cn> -Wenzhi Liang -Wes Morgan -Wewang Xiaorenfine -Wiktor Kwapisiewicz -Will Dietz -Will Rouesnel -Will Weaver -willhf -William Delanoue -William Henry -William Hubbs -William Martin -William Riancho -William Thurston -Wilson Júnior -Wing-Kam Wong -WiseTrem -Wolfgang Powisch -Wonjun Kim -xamyzhao -Xian Chaobo -Xianglin Gao -Xianlu Bird -Xiao YongBiao -XiaoBing Jiang -Xiaodong Liu -Xiaodong Zhang -Xiaoxi He -Xiaoxu Chen -Xiaoyu Zhang -xichengliudui <1693291525@qq.com> -xiekeyang -Ximo Guanter Gonzálbez -Xinbo Weng -Xinfeng Liu -Xinzi Zhou -Xiuming Chen -Xuecong Liao -xuzhaokui -Yadnyawalkya Tale -Yahya -YAMADA Tsuyoshi -Yamasaki Masahide -Yan Feng -Yang Bai -Yang Pengfei -yangchenliang -Yanqiang Miao -Yao Zaiyong -Yash Murty -Yassine Tijani -Yasunori Mahata -Yazhong Liu -Yestin Sun -Yi EungJun -Yibai Zhang -Yihang Ho -Ying Li -Yohei Ueda -Yong Tang -Yongxin Li -Yongzhi Pan -Yosef Fertel -You-Sheng Yang (楊有å‹) -youcai -Youcef YEKHLEF -Yu Changchun -Yu Chengxia -Yu Peng -Yu-Ju Hong -Yuan Sun -Yuanhong Peng -Yue Zhang -Yuhao Fang -Yuichiro Kaneko -Yunxiang Huang -Yurii Rashkovskii -Yusuf Tarık Günaydın -Yves Junqueira -Zac Dover -Zach Borboa -Zachary Jaffee -Zain Memon -Zaiste! 
-Zane DeGraffenried -Zefan Li -Zen Lin(Zhinan Lin) -Zhang Kun -Zhang Wei -Zhang Wentao -ZhangHang -zhangxianwei -Zhenan Ye <21551168@zju.edu.cn> -zhenghenghuo -Zhenhai Gao -Zhenkun Bi -zhipengzuo -Zhou Hao -Zhoulin Xie -Zhu Guihua -Zhu Kunjia -Zhuoyun Wei -Ziheng Liu -Zilin Du -zimbatm -Ziming Dong -ZJUshuaizhou <21551191@zju.edu.cn> -zmarouf -Zoltan Tombol -Zou Yu -zqh -Zuhayr Elahi -Zunayed Ali -Álex González -Álvaro Lázaro -Átila Camurça Alves -尹吉峰 -屈骏 -徐俊杰 -慕陶 -搏通 -黄艳红00139573 diff --git a/src/vendor/github.com/docker/docker/LICENSE b/src/vendor/github.com/docker/docker/LICENSE deleted file mode 100644 index 6d8d58fb6..000000000 --- a/src/vendor/github.com/docker/docker/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2018 Docker, Inc. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/src/vendor/github.com/docker/docker/NOTICE b/src/vendor/github.com/docker/docker/NOTICE deleted file mode 100644 index 58b19b6d1..000000000 --- a/src/vendor/github.com/docker/docker/NOTICE +++ /dev/null @@ -1,19 +0,0 @@ -Docker -Copyright 2012-2017 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -This product contains software (https://github.com/creack/pty) developed -by Keith Rarick, licensed under the MIT License. - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/src/vendor/github.com/docker/docker/api/types/auth.go b/src/vendor/github.com/docker/docker/api/types/auth.go deleted file mode 100644 index ddf15bb18..000000000 --- a/src/vendor/github.com/docker/docker/api/types/auth.go +++ /dev/null @@ -1,22 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -// AuthConfig contains authorization information for connecting to a Registry -type AuthConfig struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth,omitempty"` - - // Email is an optional value associated with the username. - // This field is deprecated and will be removed in a later - // version of docker. - Email string `json:"email,omitempty"` - - ServerAddress string `json:"serveraddress,omitempty"` - - // IdentityToken is used to authenticate the user and get - // an access token for the registry. 
- IdentityToken string `json:"identitytoken,omitempty"` - - // RegistryToken is a bearer token to be sent to a registry - RegistryToken string `json:"registrytoken,omitempty"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/src/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go deleted file mode 100644 index bf3463b90..000000000 --- a/src/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go +++ /dev/null @@ -1,23 +0,0 @@ -package blkiodev // import "github.com/docker/docker/api/types/blkiodev" - -import "fmt" - -// WeightDevice is a structure that holds device:weight pair -type WeightDevice struct { - Path string - Weight uint16 -} - -func (w *WeightDevice) String() string { - return fmt.Sprintf("%s:%d", w.Path, w.Weight) -} - -// ThrottleDevice is a structure that holds device:rate_per_second pair -type ThrottleDevice struct { - Path string - Rate uint64 -} - -func (t *ThrottleDevice) String() string { - return fmt.Sprintf("%s:%d", t.Path, t.Rate) -} diff --git a/src/vendor/github.com/docker/docker/api/types/client.go b/src/vendor/github.com/docker/docker/api/types/client.go deleted file mode 100644 index 9c464b73e..000000000 --- a/src/vendor/github.com/docker/docker/api/types/client.go +++ /dev/null @@ -1,419 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "bufio" - "io" - "net" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - units "github.com/docker/go-units" -) - -// CheckpointCreateOptions holds parameters to create a checkpoint from a container -type CheckpointCreateOptions struct { - CheckpointID string - CheckpointDir string - Exit bool -} - -// CheckpointListOptions holds parameters to list checkpoints for a container -type CheckpointListOptions struct { - CheckpointDir string -} - -// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container -type CheckpointDeleteOptions struct { - CheckpointID string - CheckpointDir string -} - -// ContainerAttachOptions holds parameters to attach to a container. -type ContainerAttachOptions struct { - Stream bool - Stdin bool - Stdout bool - Stderr bool - DetachKeys string - Logs bool -} - -// ContainerCommitOptions holds parameters to commit changes into a container. -type ContainerCommitOptions struct { - Reference string - Comment string - Author string - Changes []string - Pause bool - Config *container.Config -} - -// ContainerExecInspect holds information returned by exec inspect. -type ContainerExecInspect struct { - ExecID string `json:"ID"` - ContainerID string - Running bool - ExitCode int - Pid int -} - -// ContainerListOptions holds parameters to list containers with. -type ContainerListOptions struct { - Quiet bool - Size bool - All bool - Latest bool - Since string - Before string - Limit int - Filters filters.Args -} - -// ContainerLogsOptions holds parameters to filter logs with. -type ContainerLogsOptions struct { - ShowStdout bool - ShowStderr bool - Since string - Until string - Timestamps bool - Follow bool - Tail string - Details bool -} - -// ContainerRemoveOptions holds parameters to remove containers. -type ContainerRemoveOptions struct { - RemoveVolumes bool - RemoveLinks bool - Force bool -} - -// ContainerStartOptions holds parameters to start containers. 
-type ContainerStartOptions struct { - CheckpointID string - CheckpointDir string -} - -// CopyToContainerOptions holds information -// about files to copy into a container -type CopyToContainerOptions struct { - AllowOverwriteDirWithFile bool - CopyUIDGID bool -} - -// EventsOptions holds parameters to filter events with. -type EventsOptions struct { - Since string - Until string - Filters filters.Args -} - -// NetworkListOptions holds parameters to filter the list of networks with. -type NetworkListOptions struct { - Filters filters.Args -} - -// HijackedResponse holds connection information for a hijacked request. -type HijackedResponse struct { - Conn net.Conn - Reader *bufio.Reader -} - -// Close closes the hijacked connection and reader. -func (h *HijackedResponse) Close() { - h.Conn.Close() -} - -// CloseWriter is an interface that implements structs -// that close input streams to prevent from writing. -type CloseWriter interface { - CloseWrite() error -} - -// CloseWrite closes a readWriter for writing. -func (h *HijackedResponse) CloseWrite() error { - if conn, ok := h.Conn.(CloseWriter); ok { - return conn.CloseWrite() - } - return nil -} - -// ImageBuildOptions holds the information -// necessary to build images. -type ImageBuildOptions struct { - Tags []string - SuppressOutput bool - RemoteContext string - NoCache bool - Remove bool - ForceRemove bool - PullParent bool - Isolation container.Isolation - CPUSetCPUs string - CPUSetMems string - CPUShares int64 - CPUQuota int64 - CPUPeriod int64 - Memory int64 - MemorySwap int64 - CgroupParent string - NetworkMode string - ShmSize int64 - Dockerfile string - Ulimits []*units.Ulimit - // BuildArgs needs to be a *string instead of just a string so that - // we can tell the difference between "" (empty string) and no value - // at all (nil). See the parsing of buildArgs in - // api/server/router/build/build_routes.go for even more info. - BuildArgs map[string]*string - AuthConfigs map[string]AuthConfig - Context io.Reader - Labels map[string]string - // squash the resulting image's layers to the parent - // preserves the original image and creates a new one from the parent with all - // the changes applied to a single layer - Squash bool - // CacheFrom specifies images that are used for matching cache. Images - // specified here do not need to have a valid parent chain to match cache. - CacheFrom []string - SecurityOpt []string - ExtraHosts []string // List of extra hosts - Target string - SessionID string - Platform string - // Version specifies the version of the unerlying builder to use - Version BuilderVersion - // BuildID is an optional identifier that can be passed together with the - // build request. The same identifier can be used to gracefully cancel the - // build with the cancel request. - BuildID string - // Outputs defines configurations for exporting build results. Only supported - // in BuildKit mode - Outputs []ImageBuildOutput -} - -// ImageBuildOutput defines configuration for exporting a build result -type ImageBuildOutput struct { - Type string - Attrs map[string]string -} - -// BuilderVersion sets the version of underlying builder to use -type BuilderVersion string - -const ( - // BuilderV1 is the first generation builder in docker daemon - BuilderV1 BuilderVersion = "1" - // BuilderBuildKit is builder based on moby/buildkit project - BuilderBuildKit BuilderVersion = "2" -) - -// ImageBuildResponse holds information -// returned by a server after building -// an image. 
-type ImageBuildResponse struct { - Body io.ReadCloser - OSType string -} - -// ImageCreateOptions holds information to create images. -type ImageCreateOptions struct { - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry. - Platform string // Platform is the target platform of the image if it needs to be pulled from the registry. -} - -// ImageImportSource holds source information for ImageImport -type ImageImportSource struct { - Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. - SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. -} - -// ImageImportOptions holds information to import images from the client host. -type ImageImportOptions struct { - Tag string // Tag is the name to tag this image with. This attribute is deprecated. - Message string // Message is the message to tag the image with - Changes []string // Changes are the raw changes to apply to this image - Platform string // Platform is the target platform of the image -} - -// ImageListOptions holds parameters to filter the list of images with. -type ImageListOptions struct { - All bool - Filters filters.Args -} - -// ImageLoadResponse returns information to the client about a load process. -type ImageLoadResponse struct { - // Body must be closed to avoid a resource leak - Body io.ReadCloser - JSON bool -} - -// ImagePullOptions holds information to pull images. -type ImagePullOptions struct { - All bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - PrivilegeFunc RequestPrivilegeFunc - Platform string -} - -// RequestPrivilegeFunc is a function interface that -// clients can supply to retry operations after -// getting an authorization error. -// This function returns the registry authentication -// header value in base 64 format, or an error -// if the privilege request fails. -type RequestPrivilegeFunc func() (string, error) - -// ImagePushOptions holds information to push images. -type ImagePushOptions ImagePullOptions - -// ImageRemoveOptions holds parameters to remove images. -type ImageRemoveOptions struct { - Force bool - PruneChildren bool -} - -// ImageSearchOptions holds parameters to search images with. -type ImageSearchOptions struct { - RegistryAuth string - PrivilegeFunc RequestPrivilegeFunc - Filters filters.Args - Limit int -} - -// ResizeOptions holds parameters to resize a tty. -// It can be used to resize container ttys and -// exec process ttys too. -type ResizeOptions struct { - Height uint - Width uint -} - -// NodeListOptions holds parameters to list nodes with. -type NodeListOptions struct { - Filters filters.Args -} - -// NodeRemoveOptions holds parameters to remove nodes with. -type NodeRemoveOptions struct { - Force bool -} - -// ServiceCreateOptions contains the options to use when creating a service. -type ServiceCreateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string - - // QueryRegistry indicates whether the service update requires - // contacting a registry. A registry may be contacted to retrieve - // the image digest and manifest, which in turn can be used to update - // platform or other information about the service. 
- QueryRegistry bool -} - -// ServiceCreateResponse contains the information returned to a client -// on the creation of a new service. -type ServiceCreateResponse struct { - // ID is the ID of the created service. - ID string - // Warnings is a set of non-fatal warning messages to pass on to the user. - Warnings []string `json:",omitempty"` -} - -// Values for RegistryAuthFrom in ServiceUpdateOptions -const ( - RegistryAuthFromSpec = "spec" - RegistryAuthFromPreviousSpec = "previous-spec" -) - -// ServiceUpdateOptions contains the options to be used for updating services. -type ServiceUpdateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string - - // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate - // into this field. While it does open API users up to racy writes, most - // users may not need that level of consistency in practice. - - // RegistryAuthFrom specifies where to find the registry authorization - // credentials if they are not given in EncodedRegistryAuth. Valid - // values are "spec" and "previous-spec". - RegistryAuthFrom string - - // Rollback indicates whether a server-side rollback should be - // performed. When this is set, the provided spec will be ignored. - // The valid values are "previous" and "none". An empty value is the - // same as "none". - Rollback string - - // QueryRegistry indicates whether the service update requires - // contacting a registry. A registry may be contacted to retrieve - // the image digest and manifest, which in turn can be used to update - // platform or other information about the service. - QueryRegistry bool -} - -// ServiceListOptions holds parameters to list services with. -type ServiceListOptions struct { - Filters filters.Args - - // Status indicates whether the server should include the service task - // count of running and desired tasks. - Status bool -} - -// ServiceInspectOptions holds parameters related to the "service inspect" -// operation. -type ServiceInspectOptions struct { - InsertDefaults bool -} - -// TaskListOptions holds parameters to list tasks with. -type TaskListOptions struct { - Filters filters.Args -} - -// PluginRemoveOptions holds parameters to remove plugins. -type PluginRemoveOptions struct { - Force bool -} - -// PluginEnableOptions holds parameters to enable plugins. -type PluginEnableOptions struct { - Timeout int -} - -// PluginDisableOptions holds parameters to disable plugins. -type PluginDisableOptions struct { - Force bool -} - -// PluginInstallOptions holds parameters to install a plugin. -type PluginInstallOptions struct { - Disabled bool - AcceptAllPermissions bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - RemoteRef string // RemoteRef is the plugin name on the registry - PrivilegeFunc RequestPrivilegeFunc - AcceptPermissionsFunc func(PluginPrivileges) (bool, error) - Args []string -} - -// SwarmUnlockKeyResponse contains the response for Engine API: -// GET /swarm/unlockkey -type SwarmUnlockKeyResponse struct { - // UnlockKey is the unlock key in ASCII-armored format. - UnlockKey string -} - -// PluginCreateOptions hold all options to plugin create. 
-type PluginCreateOptions struct { - RepoName string -} diff --git a/src/vendor/github.com/docker/docker/api/types/configs.go b/src/vendor/github.com/docker/docker/api/types/configs.go deleted file mode 100644 index 3dd133a3a..000000000 --- a/src/vendor/github.com/docker/docker/api/types/configs.go +++ /dev/null @@ -1,66 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// configs holds structs used for internal communication between the -// frontend (such as an http server) and the backend (such as the -// docker daemon). - -// ContainerCreateConfig is the parameter set to ContainerCreate() -type ContainerCreateConfig struct { - Name string - Config *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig - Platform *specs.Platform - AdjustCPUShares bool -} - -// ContainerRmConfig holds arguments for the container remove -// operation. This struct is used to tell the backend what operations -// to perform. -type ContainerRmConfig struct { - ForceRemove, RemoveVolume, RemoveLink bool -} - -// ExecConfig is a small subset of the Config struct that holds the configuration -// for the exec feature of docker. -type ExecConfig struct { - User string // User that will run the command - Privileged bool // Is the container in privileged mode - Tty bool // Attach standard streams to a tty. - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStderr bool // Attach the standard error - AttachStdout bool // Attach the standard output - Detach bool // Execute in detach mode - DetachKeys string // Escape keys for detach - Env []string // Environment variables - WorkingDir string // Working directory - Cmd []string // Execution commands and args -} - -// PluginRmConfig holds arguments for plugin remove. -type PluginRmConfig struct { - ForceRemove bool -} - -// PluginEnableConfig holds arguments for plugin enable -type PluginEnableConfig struct { - Timeout int -} - -// PluginDisableConfig holds arguments for plugin disable. -type PluginDisableConfig struct { - ForceDisable bool -} - -// NetworkListConfig stores the options available for listing networks -type NetworkListConfig struct { - // TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here - Detailed bool - Verbose bool -} diff --git a/src/vendor/github.com/docker/docker/api/types/container/config.go b/src/vendor/github.com/docker/docker/api/types/container/config.go deleted file mode 100644 index f767195b9..000000000 --- a/src/vendor/github.com/docker/docker/api/types/container/config.go +++ /dev/null @@ -1,69 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -import ( - "time" - - "github.com/docker/docker/api/types/strslice" - "github.com/docker/go-connections/nat" -) - -// MinimumDuration puts a minimum on user configured duration. -// This is to prevent API error on time unit. For example, API may -// set 3 as healthcheck interval with intention of 3 seconds, but -// Docker interprets it as 3 nanoseconds. -const MinimumDuration = 1 * time.Millisecond - -// HealthConfig holds configuration settings for the HEALTHCHECK feature. -type HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. 
- // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} - -// Config contains the configuration data about a container. -// It should hold only portable information about the container. -// Here, "portable" means "independent from the host we are running on". -// Non-portable information *should* appear in HostConfig. -// All fields added to this struct must be marked `omitempty` to keep getting -// predictable hashes from the old `v1Compatibility` configuration. -type Config struct { - Hostname string // Hostname - Domainname string // Domainname - User string // User that will run the command(s) inside the container, also support user:group - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStdout bool // Attach the standard output - AttachStderr bool // Attach the standard error - ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the 1 attached client disconnects. - Env []string // List of environment variable to set in the container - Cmd strslice.StrSlice // Command to run when starting the container - Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific). - Image string // Name of the image as it was passed by the operator (e.g. 
could be symbolic) - Volumes map[string]struct{} // List of volumes (mounts) used for the container - WorkingDir string // Current directory (PWD) in the command will be launched - Entrypoint strslice.StrSlice // Entrypoint to run when starting the container - NetworkDisabled bool `json:",omitempty"` // Is network disabled - MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container - StopSignal string `json:",omitempty"` // Signal to stop a container - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} diff --git a/src/vendor/github.com/docker/docker/api/types/container/container_changes.go b/src/vendor/github.com/docker/docker/api/types/container/container_changes.go deleted file mode 100644 index 16dd5019e..000000000 --- a/src/vendor/github.com/docker/docker/api/types/container/container_changes.go +++ /dev/null @@ -1,20 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerChangeResponseItem change item in response to ContainerChanges operation -// swagger:model ContainerChangeResponseItem -type ContainerChangeResponseItem struct { - - // Kind of change - // Required: true - Kind uint8 `json:"Kind"` - - // Path to file that has changed - // Required: true - Path string `json:"Path"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/container/container_create.go b/src/vendor/github.com/docker/docker/api/types/container/container_create.go deleted file mode 100644 index d0c852f84..000000000 --- a/src/vendor/github.com/docker/docker/api/types/container/container_create.go +++ /dev/null @@ -1,20 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerCreateCreatedBody OK response to ContainerCreate operation -// swagger:model ContainerCreateCreatedBody -type ContainerCreateCreatedBody struct { - - // The ID of the created container - // Required: true - ID string `json:"Id"` - - // Warnings encountered when creating the container - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/container/container_top.go b/src/vendor/github.com/docker/docker/api/types/container/container_top.go deleted file mode 100644 index 63381da36..000000000 --- a/src/vendor/github.com/docker/docker/api/types/container/container_top.go +++ /dev/null @@ -1,22 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. 
-// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerTopOKBody OK response to ContainerTop operation -// swagger:model ContainerTopOKBody -type ContainerTopOKBody struct { - - // Each process running in the container, where each is process - // is an array of values corresponding to the titles. - // - // Required: true - Processes [][]string `json:"Processes"` - - // The ps column titles - // Required: true - Titles []string `json:"Titles"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/container/container_update.go b/src/vendor/github.com/docker/docker/api/types/container/container_update.go deleted file mode 100644 index c10f175ea..000000000 --- a/src/vendor/github.com/docker/docker/api/types/container/container_update.go +++ /dev/null @@ -1,16 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerUpdateOKBody OK response to ContainerUpdate operation -// swagger:model ContainerUpdateOKBody -type ContainerUpdateOKBody struct { - - // warnings - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/container/container_wait.go b/src/vendor/github.com/docker/docker/api/types/container/container_wait.go deleted file mode 100644 index 49e05ae66..000000000 --- a/src/vendor/github.com/docker/docker/api/types/container/container_wait.go +++ /dev/null @@ -1,28 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. 
-// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerWaitOKBodyError container waiting error, if any -// swagger:model ContainerWaitOKBodyError -type ContainerWaitOKBodyError struct { - - // Details of an error - Message string `json:"Message,omitempty"` -} - -// ContainerWaitOKBody OK response to ContainerWait operation -// swagger:model ContainerWaitOKBody -type ContainerWaitOKBody struct { - - // error - // Required: true - Error *ContainerWaitOKBodyError `json:"Error"` - - // Exit code of the container - // Required: true - StatusCode int64 `json:"StatusCode"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/container/host_config.go b/src/vendor/github.com/docker/docker/api/types/container/host_config.go deleted file mode 100644 index 2d1cbaa9a..000000000 --- a/src/vendor/github.com/docker/docker/api/types/container/host_config.go +++ /dev/null @@ -1,447 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -import ( - "strings" - - "github.com/docker/docker/api/types/blkiodev" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/strslice" - "github.com/docker/go-connections/nat" - units "github.com/docker/go-units" -) - -// CgroupnsMode represents the cgroup namespace mode of the container -type CgroupnsMode string - -// IsPrivate indicates whether the container uses its own private cgroup namespace -func (c CgroupnsMode) IsPrivate() bool { - return c == "private" -} - -// IsHost indicates whether the container shares the host's cgroup namespace -func (c CgroupnsMode) IsHost() bool { - return c == "host" -} - -// IsEmpty indicates whether the container cgroup namespace mode is unset -func (c CgroupnsMode) IsEmpty() bool { - return c == "" -} - -// Valid indicates whether the cgroup namespace mode is valid -func (c CgroupnsMode) Valid() bool { - return c.IsEmpty() || c.IsPrivate() || c.IsHost() -} - -// Isolation represents the isolation technology of a container. The supported -// values are platform specific -type Isolation string - -// IsDefault indicates the default isolation technology of a container. On Linux this -// is the native driver. On Windows, this is a Windows Server Container. -func (i Isolation) IsDefault() bool { - return strings.ToLower(string(i)) == "default" || string(i) == "" -} - -// IsHyperV indicates the use of a Hyper-V partition for isolation -func (i Isolation) IsHyperV() bool { - return strings.ToLower(string(i)) == "hyperv" -} - -// IsProcess indicates the use of process isolation -func (i Isolation) IsProcess() bool { - return strings.ToLower(string(i)) == "process" -} - -const ( - // IsolationEmpty is unspecified (same behavior as default) - IsolationEmpty = Isolation("") - // IsolationDefault is the default isolation mode on current daemon - IsolationDefault = Isolation("default") - // IsolationProcess is process isolation mode - IsolationProcess = Isolation("process") - // IsolationHyperV is HyperV isolation mode - IsolationHyperV = Isolation("hyperv") -) - -// IpcMode represents the container ipc stack. -type IpcMode string - -// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. -func (n IpcMode) IsPrivate() bool { - return n == "private" -} - -// IsHost indicates whether the container shares the host's ipc namespace. 
-func (n IpcMode) IsHost() bool { - return n == "host" -} - -// IsShareable indicates whether the container's ipc namespace can be shared with another container. -func (n IpcMode) IsShareable() bool { - return n == "shareable" -} - -// IsContainer indicates whether the container uses another container's ipc namespace. -func (n IpcMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// IsNone indicates whether container IpcMode is set to "none". -func (n IpcMode) IsNone() bool { - return n == "none" -} - -// IsEmpty indicates whether container IpcMode is empty -func (n IpcMode) IsEmpty() bool { - return n == "" -} - -// Valid indicates whether the ipc mode is valid. -func (n IpcMode) Valid() bool { - return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer() -} - -// Container returns the name of the container ipc stack is going to be used. -func (n IpcMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 && parts[0] == "container" { - return parts[1] - } - return "" -} - -// NetworkMode represents the container network stack. -type NetworkMode string - -// IsNone indicates whether container isn't using a network stack. -func (n NetworkMode) IsNone() bool { - return n == "none" -} - -// IsDefault indicates whether container uses the default network stack. -func (n NetworkMode) IsDefault() bool { - return n == "default" -} - -// IsPrivate indicates whether container uses its private network stack. -func (n NetworkMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsContainer indicates whether container uses a container network stack. -func (n NetworkMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// ConnectedContainer is the id of the container which network this container is connected to. -func (n NetworkMode) ConnectedContainer() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// UserDefined indicates user-created network -func (n NetworkMode) UserDefined() string { - if n.IsUserDefined() { - return string(n) - } - return "" -} - -// UsernsMode represents userns mode in the container. -type UsernsMode string - -// IsHost indicates whether the container uses the host's userns. -func (n UsernsMode) IsHost() bool { - return n == "host" -} - -// IsPrivate indicates whether the container uses the a private userns. -func (n UsernsMode) IsPrivate() bool { - return !(n.IsHost()) -} - -// Valid indicates whether the userns is valid. -func (n UsernsMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - default: - return false - } - return true -} - -// CgroupSpec represents the cgroup to use for the container. -type CgroupSpec string - -// IsContainer indicates whether the container is using another container cgroup -func (c CgroupSpec) IsContainer() bool { - parts := strings.SplitN(string(c), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the cgroup spec is valid. -func (c CgroupSpec) Valid() bool { - return c.IsContainer() || c == "" -} - -// Container returns the name of the container whose cgroup will be used. 
-func (c CgroupSpec) Container() string { - parts := strings.SplitN(string(c), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// UTSMode represents the UTS namespace of the container. -type UTSMode string - -// IsPrivate indicates whether the container uses its private UTS namespace. -func (n UTSMode) IsPrivate() bool { - return !(n.IsHost()) -} - -// IsHost indicates whether the container uses the host's UTS namespace. -func (n UTSMode) IsHost() bool { - return n == "host" -} - -// Valid indicates whether the UTS namespace is valid. -func (n UTSMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - default: - return false - } - return true -} - -// PidMode represents the pid namespace of the container. -type PidMode string - -// IsPrivate indicates whether the container uses its own new pid namespace. -func (n PidMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsHost indicates whether the container uses the host's pid namespace. -func (n PidMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether the container uses a container's pid namespace. -func (n PidMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the pid namespace is valid. -func (n PidMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - case "container": - if len(parts) != 2 || parts[1] == "" { - return false - } - default: - return false - } - return true -} - -// Container returns the name of the container whose pid namespace is going to be used. -func (n PidMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// DeviceRequest represents a request for devices from a device driver. -// Used by GPU device drivers. -type DeviceRequest struct { - Driver string // Name of device driver - Count int // Number of devices to request (-1 = All) - DeviceIDs []string // List of device IDs as recognizable by the device driver - Capabilities [][]string // An OR list of AND lists of device capabilities (e.g. "gpu") - Options map[string]string // Options to pass onto the device driver -} - -// DeviceMapping represents the device mapping between the host and the container. -type DeviceMapping struct { - PathOnHost string - PathInContainer string - CgroupPermissions string -} - -// RestartPolicy represents the restart policies of the container. -type RestartPolicy struct { - Name string - MaximumRetryCount int -} - -// IsNone indicates whether the container has the "no" restart policy. -// This means the container will not automatically restart when exiting. -func (rp *RestartPolicy) IsNone() bool { - return rp.Name == "no" || rp.Name == "" -} - -// IsAlways indicates whether the container has the "always" restart policy. -// This means the container will automatically restart regardless of the exit status. -func (rp *RestartPolicy) IsAlways() bool { - return rp.Name == "always" -} - -// IsOnFailure indicates whether the container has the "on-failure" restart policy. -// This means the container will automatically restart of exiting with a non-zero exit status. -func (rp *RestartPolicy) IsOnFailure() bool { - return rp.Name == "on-failure" -} - -// IsUnlessStopped indicates whether the container has the -// "unless-stopped" restart policy. 
This means the container will -// automatically restart unless user has put it to stopped state. -func (rp *RestartPolicy) IsUnlessStopped() bool { - return rp.Name == "unless-stopped" -} - -// IsSame compares two RestartPolicy to see if they are the same -func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { - return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount -} - -// LogMode is a type to define the available modes for logging -// These modes affect how logs are handled when log messages start piling up. -type LogMode string - -// Available logging modes -const ( - LogModeUnset = "" - LogModeBlocking LogMode = "blocking" - LogModeNonBlock LogMode = "non-blocking" -) - -// LogConfig represents the logging configuration of the container. -type LogConfig struct { - Type string - Config map[string]string -} - -// Resources contains container's resources (cgroups config, ulimits...) -type Resources struct { - // Applicable to all platforms - CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) - Memory int64 // Memory limit (in bytes) - NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10-9 CPUs. - - // Applicable to UNIX platforms - CgroupParent string // Parent cgroup. - BlkioWeight uint16 // Block IO weight (relative weight vs. other containers) - BlkioWeightDevice []*blkiodev.WeightDevice - BlkioDeviceReadBps []*blkiodev.ThrottleDevice - BlkioDeviceWriteBps []*blkiodev.ThrottleDevice - BlkioDeviceReadIOps []*blkiodev.ThrottleDevice - BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice - CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period - CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota - CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period - CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime - CpusetCpus string // CpusetCpus 0-2, 0,1 - CpusetMems string // CpusetMems 0-2, 0,1 - Devices []DeviceMapping // List of devices to map inside the container - DeviceCgroupRules []string // List of rule to be added to the device cgroup - DeviceRequests []DeviceRequest // List of device requests for device drivers - KernelMemory int64 // Kernel memory limit (in bytes), Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes - KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes) - MemoryReservation int64 // Memory soft limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap - MemorySwappiness *int64 // Tuning container memory swappiness behaviour - OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. - Ulimits []*units.Ulimit // List of ulimits to be set in the container - - // Applicable to Windows - CPUCount int64 `json:"CpuCount"` // CPU count - CPUPercent int64 `json:"CpuPercent"` // CPU percent - IOMaximumIOps uint64 // Maximum IOps for the container system drive - IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive -} - -// UpdateConfig holds the mutable attributes of a Container. -// Those attributes can be updated at runtime. -type UpdateConfig struct { - // Contains container's resources (cgroups, ulimits) - Resources - RestartPolicy RestartPolicy -} - -// HostConfig the non-portable Config structure of a container. -// Here, "non-portable" means "dependent of the host we are running on". 
-// Portable information *should* appear in Config. -type HostConfig struct { - // Applicable to all platforms - Binds []string // List of volume bindings for this container - ContainerIDFile string // File (path) where the containerId is written - LogConfig LogConfig // Configuration of the logs for this container - NetworkMode NetworkMode // Network mode to use for the container - PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host - RestartPolicy RestartPolicy // Restart policy to be used for the container - AutoRemove bool // Automatically remove container when it exits - VolumeDriver string // Name of the volume driver used to mount volumes - VolumesFrom []string // List of volumes to take from other container - - // Applicable to UNIX platforms - CapAdd strslice.StrSlice // List of kernel capabilities to add to the container - CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container - CgroupnsMode CgroupnsMode // Cgroup namespace mode to use for the container - DNS []string `json:"Dns"` // List of DNS server to lookup - DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for - DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for - ExtraHosts []string // List of extra hosts - GroupAdd []string // List of additional groups that the container process will run as - IpcMode IpcMode // IPC namespace to use for the container - Cgroup CgroupSpec // Cgroup to use for the container - Links []string // List of links (in the name:alias form) - OomScoreAdj int // Container preference for OOM-killing - PidMode PidMode // PID namespace to use for the container - Privileged bool // Is the container in privileged mode - PublishAllPorts bool // Should docker publish all exposed port for the container - ReadonlyRootfs bool // Is the container root filesystem in read-only - SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. - StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container. - Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container - UTSMode UTSMode // UTS namespace to use for the container - UsernsMode UsernsMode // The user namespace to use for the container - ShmSize int64 // Total shm memory usage - Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container - Runtime string `json:",omitempty"` // Runtime to use with this container - - // Applicable to Windows - ConsoleSize [2]uint // Initial console size (height,width) - Isolation Isolation // Isolation technology of the container (e.g. 
default, hyperv) - - // Contains container's resources (cgroups, ulimits) - Resources - - // Mounts specs used by the container - Mounts []mount.Mount `json:",omitempty"` - - // MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths) - MaskedPaths []string - - // ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths) - ReadonlyPaths []string - - // Run a custom init inside the container, if null, use the daemon's configured settings - Init *bool `json:",omitempty"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/src/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go deleted file mode 100644 index 24c4fa8d9..000000000 --- a/src/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go +++ /dev/null @@ -1,42 +0,0 @@ -//go:build !windows -// +build !windows - -package container // import "github.com/docker/docker/api/types/container" - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() -} - -// NetworkName returns the name of the network stack. -func (n NetworkMode) NetworkName() string { - if n.IsBridge() { - return "bridge" - } else if n.IsHost() { - return "host" - } else if n.IsContainer() { - return "container" - } else if n.IsNone() { - return "none" - } else if n.IsDefault() { - return "default" - } else if n.IsUserDefined() { - return n.UserDefined() - } - return "" -} - -// IsBridge indicates whether container uses the bridge network stack -func (n NetworkMode) IsBridge() bool { - return n == "bridge" -} - -// IsHost indicates whether container uses the host network stack. -func (n NetworkMode) IsHost() bool { - return n == "host" -} - -// IsUserDefined indicates user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() -} diff --git a/src/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/src/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go deleted file mode 100644 index 99f803a5b..000000000 --- a/src/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go +++ /dev/null @@ -1,40 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// IsBridge indicates whether container uses the bridge network stack -// in windows it is given the name NAT -func (n NetworkMode) IsBridge() bool { - return n == "nat" -} - -// IsHost indicates whether container uses the host network stack. -// returns false as this is not supported by windows -func (n NetworkMode) IsHost() bool { - return false -} - -// IsUserDefined indicates user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer() -} - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() || i.IsHyperV() || i.IsProcess() -} - -// NetworkName returns the name of the network stack. 
-func (n NetworkMode) NetworkName() string { - if n.IsDefault() { - return "default" - } else if n.IsBridge() { - return "nat" - } else if n.IsNone() { - return "none" - } else if n.IsContainer() { - return "container" - } else if n.IsUserDefined() { - return n.UserDefined() - } - - return "" -} diff --git a/src/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/src/vendor/github.com/docker/docker/api/types/container/waitcondition.go deleted file mode 100644 index cd8311f99..000000000 --- a/src/vendor/github.com/docker/docker/api/types/container/waitcondition.go +++ /dev/null @@ -1,22 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// WaitCondition is a type used to specify a container state for which -// to wait. -type WaitCondition string - -// Possible WaitCondition Values. -// -// WaitConditionNotRunning (default) is used to wait for any of the non-running -// states: "created", "exited", "dead", "removing", or "removed". -// -// WaitConditionNextExit is used to wait for the next time the state changes -// to a non-running state. If the state is currently "created" or "exited", -// this would cause Wait() to block until either the container runs and exits -// or is removed. -// -// WaitConditionRemoved is used to wait for the container to be removed. -const ( - WaitConditionNotRunning WaitCondition = "not-running" - WaitConditionNextExit WaitCondition = "next-exit" - WaitConditionRemoved WaitCondition = "removed" -) diff --git a/src/vendor/github.com/docker/docker/api/types/error_response.go b/src/vendor/github.com/docker/docker/api/types/error_response.go deleted file mode 100644 index dc942d9d9..000000000 --- a/src/vendor/github.com/docker/docker/api/types/error_response.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ErrorResponse Represents an error. -// swagger:model ErrorResponse -type ErrorResponse struct { - - // The error message. - // Required: true - Message string `json:"message"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/error_response_ext.go b/src/vendor/github.com/docker/docker/api/types/error_response_ext.go deleted file mode 100644 index f84f034cd..000000000 --- a/src/vendor/github.com/docker/docker/api/types/error_response_ext.go +++ /dev/null @@ -1,6 +0,0 @@ -package types - -// Error returns the error message -func (e ErrorResponse) Error() string { - return e.Message -} diff --git a/src/vendor/github.com/docker/docker/api/types/filters/parse.go b/src/vendor/github.com/docker/docker/api/types/filters/parse.go deleted file mode 100644 index 4bc91cffd..000000000 --- a/src/vendor/github.com/docker/docker/api/types/filters/parse.go +++ /dev/null @@ -1,324 +0,0 @@ -/*Package filters provides tools for encoding a mapping of keys to a set of -multiple values. -*/ -package filters // import "github.com/docker/docker/api/types/filters" - -import ( - "encoding/json" - "regexp" - "strings" - - "github.com/docker/docker/api/types/versions" -) - -// Args stores a mapping of keys to a set of multiple values. 
-type Args struct { - fields map[string]map[string]bool -} - -// KeyValuePair are used to initialize a new Args -type KeyValuePair struct { - Key string - Value string -} - -// Arg creates a new KeyValuePair for initializing Args -func Arg(key, value string) KeyValuePair { - return KeyValuePair{Key: key, Value: value} -} - -// NewArgs returns a new Args populated with the initial args -func NewArgs(initialArgs ...KeyValuePair) Args { - args := Args{fields: map[string]map[string]bool{}} - for _, arg := range initialArgs { - args.Add(arg.Key, arg.Value) - } - return args -} - -// Keys returns all the keys in list of Args -func (args Args) Keys() []string { - keys := make([]string, 0, len(args.fields)) - for k := range args.fields { - keys = append(keys, k) - } - return keys -} - -// MarshalJSON returns a JSON byte representation of the Args -func (args Args) MarshalJSON() ([]byte, error) { - if len(args.fields) == 0 { - return []byte{}, nil - } - return json.Marshal(args.fields) -} - -// ToJSON returns the Args as a JSON encoded string -func ToJSON(a Args) (string, error) { - if a.Len() == 0 { - return "", nil - } - buf, err := json.Marshal(a) - return string(buf), err -} - -// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22 -// then the encoded format will use an older legacy format where the values are a -// list of strings, instead of a set. -// -// Deprecated: do not use in any new code; use ToJSON instead -func ToParamWithVersion(version string, a Args) (string, error) { - if a.Len() == 0 { - return "", nil - } - - if version != "" && versions.LessThan(version, "1.22") { - buf, err := json.Marshal(convertArgsToSlice(a.fields)) - return string(buf), err - } - - return ToJSON(a) -} - -// FromJSON decodes a JSON encoded string into Args -func FromJSON(p string) (Args, error) { - args := NewArgs() - - if p == "" { - return args, nil - } - - raw := []byte(p) - err := json.Unmarshal(raw, &args) - if err == nil { - return args, nil - } - - // Fallback to parsing arguments in the legacy slice format - deprecated := map[string][]string{} - if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { - return args, err - } - - args.fields = deprecatedArgs(deprecated) - return args, nil -} - -// UnmarshalJSON populates the Args from JSON encode bytes -func (args Args) UnmarshalJSON(raw []byte) error { - if len(raw) == 0 { - return nil - } - return json.Unmarshal(raw, &args.fields) -} - -// Get returns the list of values associated with the key -func (args Args) Get(key string) []string { - values := args.fields[key] - if values == nil { - return make([]string, 0) - } - slice := make([]string, 0, len(values)) - for key := range values { - slice = append(slice, key) - } - return slice -} - -// Add a new value to the set of values -func (args Args) Add(key, value string) { - if _, ok := args.fields[key]; ok { - args.fields[key][value] = true - } else { - args.fields[key] = map[string]bool{value: true} - } -} - -// Del removes a value from the set -func (args Args) Del(key, value string) { - if _, ok := args.fields[key]; ok { - delete(args.fields[key], value) - if len(args.fields[key]) == 0 { - delete(args.fields, key) - } - } -} - -// Len returns the number of keys in the mapping -func (args Args) Len() int { - return len(args.fields) -} - -// MatchKVList returns true if all the pairs in sources exist as key=value -// pairs in the mapping at key, or if there are no values at key. 
-func (args Args) MatchKVList(key string, sources map[string]string) bool { - fieldValues := args.fields[key] - - // do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - - if len(sources) == 0 { - return false - } - - for value := range fieldValues { - testKV := strings.SplitN(value, "=", 2) - - v, ok := sources[testKV[0]] - if !ok { - return false - } - if len(testKV) == 2 && testKV[1] != v { - return false - } - } - - return true -} - -// Match returns true if any of the values at key match the source string -func (args Args) Match(field, source string) bool { - if args.ExactMatch(field, source) { - return true - } - - fieldValues := args.fields[field] - for name2match := range fieldValues { - match, err := regexp.MatchString(name2match, source) - if err != nil { - continue - } - if match { - return true - } - } - return false -} - -// ExactMatch returns true if the source matches exactly one of the values. -func (args Args) ExactMatch(key, source string) bool { - fieldValues, ok := args.fields[key] - // do not filter if there is no filter set or cannot determine filter - if !ok || len(fieldValues) == 0 { - return true - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// UniqueExactMatch returns true if there is only one value and the source -// matches exactly the value. -func (args Args) UniqueExactMatch(key, source string) bool { - fieldValues := args.fields[key] - // do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - if len(args.fields[key]) != 1 { - return false - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// FuzzyMatch returns true if the source matches exactly one value, or the -// source has one of the values as a prefix. -func (args Args) FuzzyMatch(key, source string) bool { - if args.ExactMatch(key, source) { - return true - } - - fieldValues := args.fields[key] - for prefix := range fieldValues { - if strings.HasPrefix(source, prefix) { - return true - } - } - return false -} - -// Contains returns true if the key exists in the mapping -func (args Args) Contains(field string) bool { - _, ok := args.fields[field] - return ok -} - -type invalidFilter string - -func (e invalidFilter) Error() string { - return "Invalid filter '" + string(e) + "'" -} - -func (invalidFilter) InvalidParameter() {} - -// Validate compared the set of accepted keys against the keys in the mapping. -// An error is returned if any mapping keys are not in the accepted set. -func (args Args) Validate(accepted map[string]bool) error { - for name := range args.fields { - if !accepted[name] { - return invalidFilter(name) - } - } - return nil -} - -// WalkValues iterates over the list of values for a key in the mapping and calls -// op() for each value. If op returns an error the iteration stops and the -// error is returned. -func (args Args) WalkValues(field string, op func(value string) error) error { - if _, ok := args.fields[field]; !ok { - return nil - } - for v := range args.fields[field] { - if err := op(v); err != nil { - return err - } - } - return nil -} - -// Clone returns a copy of args. 
-func (args Args) Clone() (newArgs Args) { - newArgs.fields = make(map[string]map[string]bool, len(args.fields)) - for k, m := range args.fields { - var mm map[string]bool - if m != nil { - mm = make(map[string]bool, len(m)) - for kk, v := range m { - mm[kk] = v - } - } - newArgs.fields[k] = mm - } - return newArgs -} - -func deprecatedArgs(d map[string][]string) map[string]map[string]bool { - m := map[string]map[string]bool{} - for k, v := range d { - values := map[string]bool{} - for _, vv := range v { - values[vv] = true - } - m[k] = values - } - return m -} - -func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { - m := map[string][]string{} - for k, v := range f { - values := []string{} - for kk := range v { - if v[kk] { - values = append(values, kk) - } - } - m[k] = values - } - return m -} diff --git a/src/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/src/vendor/github.com/docker/docker/api/types/graph_driver_data.go deleted file mode 100644 index 4d9bf1c62..000000000 --- a/src/vendor/github.com/docker/docker/api/types/graph_driver_data.go +++ /dev/null @@ -1,17 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// GraphDriverData Information about a container's graph driver. -// swagger:model GraphDriverData -type GraphDriverData struct { - - // data - // Required: true - Data map[string]string `json:"Data"` - - // name - // Required: true - Name string `json:"Name"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/id_response.go b/src/vendor/github.com/docker/docker/api/types/id_response.go deleted file mode 100644 index 7592d2f8b..000000000 --- a/src/vendor/github.com/docker/docker/api/types/id_response.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// IDResponse Response to an API call that returns just an Id -// swagger:model IdResponse -type IDResponse struct { - - // The id of the newly created object. - // Required: true - ID string `json:"Id"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/src/vendor/github.com/docker/docker/api/types/image_delete_response_item.go deleted file mode 100644 index b9a65a0d8..000000000 --- a/src/vendor/github.com/docker/docker/api/types/image_delete_response_item.go +++ /dev/null @@ -1,15 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ImageDeleteResponseItem image delete response item -// swagger:model ImageDeleteResponseItem -type ImageDeleteResponseItem struct { - - // The image ID of an image that was deleted - Deleted string `json:"Deleted,omitempty"` - - // The image ID of an image that was untagged - Untagged string `json:"Untagged,omitempty"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/image_summary.go b/src/vendor/github.com/docker/docker/api/types/image_summary.go deleted file mode 100644 index e145b3dcf..000000000 --- a/src/vendor/github.com/docker/docker/api/types/image_summary.go +++ /dev/null @@ -1,49 +0,0 @@ -package types - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -// ImageSummary image summary -// swagger:model ImageSummary -type ImageSummary struct { - - // containers - // Required: true - Containers int64 `json:"Containers"` - - // created - // Required: true - Created int64 `json:"Created"` - - // Id - // Required: true - ID string `json:"Id"` - - // labels - // Required: true - Labels map[string]string `json:"Labels"` - - // parent Id - // Required: true - ParentID string `json:"ParentId"` - - // repo digests - // Required: true - RepoDigests []string `json:"RepoDigests"` - - // repo tags - // Required: true - RepoTags []string `json:"RepoTags"` - - // shared size - // Required: true - SharedSize int64 `json:"SharedSize"` - - // size - // Required: true - Size int64 `json:"Size"` - - // virtual size - // Required: true - VirtualSize int64 `json:"VirtualSize"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/mount/mount.go b/src/vendor/github.com/docker/docker/api/types/mount/mount.go deleted file mode 100644 index 443b8d07a..000000000 --- a/src/vendor/github.com/docker/docker/api/types/mount/mount.go +++ /dev/null @@ -1,131 +0,0 @@ -package mount // import "github.com/docker/docker/api/types/mount" - -import ( - "os" -) - -// Type represents the type of a mount. -type Type string - -// Type constants -const ( - // TypeBind is the type for mounting host dir - TypeBind Type = "bind" - // TypeVolume is the type for remote storage volumes - TypeVolume Type = "volume" - // TypeTmpfs is the type for mounting tmpfs - TypeTmpfs Type = "tmpfs" - // TypeNamedPipe is the type for mounting Windows named pipes - TypeNamedPipe Type = "npipe" -) - -// Mount represents a mount (volume). -type Mount struct { - Type Type `json:",omitempty"` - // Source specifies the name of the mount. Depending on mount type, this - // may be a volume name or a host path, or even ignored. - // Source is not supported for tmpfs (must be an empty value) - Source string `json:",omitempty"` - Target string `json:",omitempty"` - ReadOnly bool `json:",omitempty"` - Consistency Consistency `json:",omitempty"` - - BindOptions *BindOptions `json:",omitempty"` - VolumeOptions *VolumeOptions `json:",omitempty"` - TmpfsOptions *TmpfsOptions `json:",omitempty"` -} - -// Propagation represents the propagation of a mount. -type Propagation string - -const ( - // PropagationRPrivate RPRIVATE - PropagationRPrivate Propagation = "rprivate" - // PropagationPrivate PRIVATE - PropagationPrivate Propagation = "private" - // PropagationRShared RSHARED - PropagationRShared Propagation = "rshared" - // PropagationShared SHARED - PropagationShared Propagation = "shared" - // PropagationRSlave RSLAVE - PropagationRSlave Propagation = "rslave" - // PropagationSlave SLAVE - PropagationSlave Propagation = "slave" -) - -// Propagations is the list of all valid mount propagations -var Propagations = []Propagation{ - PropagationRPrivate, - PropagationPrivate, - PropagationRShared, - PropagationShared, - PropagationRSlave, - PropagationSlave, -} - -// Consistency represents the consistency requirements of a mount. 
-type Consistency string - -const ( - // ConsistencyFull guarantees bind mount-like consistency - ConsistencyFull Consistency = "consistent" - // ConsistencyCached mounts can cache read data and FS structure - ConsistencyCached Consistency = "cached" - // ConsistencyDelegated mounts can cache read and written data and structure - ConsistencyDelegated Consistency = "delegated" - // ConsistencyDefault provides "consistent" behavior unless overridden - ConsistencyDefault Consistency = "default" -) - -// BindOptions defines options specific to mounts of type "bind". -type BindOptions struct { - Propagation Propagation `json:",omitempty"` - NonRecursive bool `json:",omitempty"` -} - -// VolumeOptions represents the options for a mount of type volume. -type VolumeOptions struct { - NoCopy bool `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - DriverConfig *Driver `json:",omitempty"` -} - -// Driver represents a volume driver. -type Driver struct { - Name string `json:",omitempty"` - Options map[string]string `json:",omitempty"` -} - -// TmpfsOptions defines options specific to mounts of type "tmpfs". -type TmpfsOptions struct { - // Size sets the size of the tmpfs, in bytes. - // - // This will be converted to an operating system specific value - // depending on the host. For example, on linux, it will be converted to - // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with - // docker, uses a straight byte value. - // - // Percentages are not supported. - SizeBytes int64 `json:",omitempty"` - // Mode of the tmpfs upon creation - Mode os.FileMode `json:",omitempty"` - - // TODO(stevvooe): There are several more tmpfs flags, specified in the - // daemon, that are accepted. Only the most basic are added for now. - // - // From https://github.com/moby/sys/blob/mount/v0.1.1/mount/flags.go#L47-L56 - // - // var validFlags = map[string]bool{ - // "": true, - // "size": true, X - // "mode": true, X - // "uid": true, - // "gid": true, - // "nr_inodes": true, - // "nr_blocks": true, - // "mpol": true, - // } - // - // Some of these may be straightforward to add, but others, such as - // uid/gid have implications in a clustered system. 
-} diff --git a/src/vendor/github.com/docker/docker/api/types/network/network.go b/src/vendor/github.com/docker/docker/api/types/network/network.go deleted file mode 100644 index 437b184c6..000000000 --- a/src/vendor/github.com/docker/docker/api/types/network/network.go +++ /dev/null @@ -1,126 +0,0 @@ -package network // import "github.com/docker/docker/api/types/network" -import ( - "github.com/docker/docker/api/types/filters" -) - -// Address represents an IP address -type Address struct { - Addr string - PrefixLen int -} - -// IPAM represents IP Address Management -type IPAM struct { - Driver string - Options map[string]string // Per network IPAM driver options - Config []IPAMConfig -} - -// IPAMConfig represents IPAM configurations -type IPAMConfig struct { - Subnet string `json:",omitempty"` - IPRange string `json:",omitempty"` - Gateway string `json:",omitempty"` - AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` -} - -// EndpointIPAMConfig represents IPAM configurations for the endpoint -type EndpointIPAMConfig struct { - IPv4Address string `json:",omitempty"` - IPv6Address string `json:",omitempty"` - LinkLocalIPs []string `json:",omitempty"` -} - -// Copy makes a copy of the endpoint ipam config -func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { - cfgCopy := *cfg - cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) - cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) - return &cfgCopy -} - -// PeerInfo represents one peer of an overlay network -type PeerInfo struct { - Name string - IP string -} - -// EndpointSettings stores the network endpoint details -type EndpointSettings struct { - // Configurations - IPAMConfig *EndpointIPAMConfig - Links []string - Aliases []string - // Operational data - NetworkID string - EndpointID string - Gateway string - IPAddress string - IPPrefixLen int - IPv6Gateway string - GlobalIPv6Address string - GlobalIPv6PrefixLen int - MacAddress string - DriverOpts map[string]string -} - -// Task carries the information about one backend task -type Task struct { - Name string - EndpointID string - EndpointIP string - Info map[string]string -} - -// ServiceInfo represents service parameters with the list of service's tasks -type ServiceInfo struct { - VIP string - Ports []string - LocalLBIndex int - Tasks []Task -} - -// Copy makes a deep copy of `EndpointSettings` -func (es *EndpointSettings) Copy() *EndpointSettings { - epCopy := *es - if es.IPAMConfig != nil { - epCopy.IPAMConfig = es.IPAMConfig.Copy() - } - - if es.Links != nil { - links := make([]string, 0, len(es.Links)) - epCopy.Links = append(links, es.Links...) - } - - if es.Aliases != nil { - aliases := make([]string, 0, len(es.Aliases)) - epCopy.Aliases = append(aliases, es.Aliases...) - } - return &epCopy -} - -// NetworkingConfig represents the container's networking configuration for each of its interfaces -// Carries the networking configs specified in the `docker run` and `docker network connect` commands -type NetworkingConfig struct { - EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network -} - -// ConfigReference specifies the source which provides a network's configuration -type ConfigReference struct { - Network string -} - -var acceptedFilters = map[string]bool{ - "dangling": true, - "driver": true, - "id": true, - "label": true, - "name": true, - "scope": true, - "type": true, -} - -// ValidateFilters validates the list of filter args with the available filters. 
-func ValidateFilters(filter filters.Args) error { - return filter.Validate(acceptedFilters) -} diff --git a/src/vendor/github.com/docker/docker/api/types/plugin.go b/src/vendor/github.com/docker/docker/api/types/plugin.go deleted file mode 100644 index abae48b9a..000000000 --- a/src/vendor/github.com/docker/docker/api/types/plugin.go +++ /dev/null @@ -1,203 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Plugin A plugin for the Engine API -// swagger:model Plugin -type Plugin struct { - - // config - // Required: true - Config PluginConfig `json:"Config"` - - // True if the plugin is running. False if the plugin is not running, only installed. - // Required: true - Enabled bool `json:"Enabled"` - - // Id - ID string `json:"Id,omitempty"` - - // name - // Required: true - Name string `json:"Name"` - - // plugin remote reference used to push/pull the plugin - PluginReference string `json:"PluginReference,omitempty"` - - // settings - // Required: true - Settings PluginSettings `json:"Settings"` -} - -// PluginConfig The config of a plugin. -// swagger:model PluginConfig -type PluginConfig struct { - - // args - // Required: true - Args PluginConfigArgs `json:"Args"` - - // description - // Required: true - Description string `json:"Description"` - - // Docker Version used to create the plugin - DockerVersion string `json:"DockerVersion,omitempty"` - - // documentation - // Required: true - Documentation string `json:"Documentation"` - - // entrypoint - // Required: true - Entrypoint []string `json:"Entrypoint"` - - // env - // Required: true - Env []PluginEnv `json:"Env"` - - // interface - // Required: true - Interface PluginConfigInterface `json:"Interface"` - - // ipc host - // Required: true - IpcHost bool `json:"IpcHost"` - - // linux - // Required: true - Linux PluginConfigLinux `json:"Linux"` - - // mounts - // Required: true - Mounts []PluginMount `json:"Mounts"` - - // network - // Required: true - Network PluginConfigNetwork `json:"Network"` - - // pid host - // Required: true - PidHost bool `json:"PidHost"` - - // propagated mount - // Required: true - PropagatedMount string `json:"PropagatedMount"` - - // user - User PluginConfigUser `json:"User,omitempty"` - - // work dir - // Required: true - WorkDir string `json:"WorkDir"` - - // rootfs - Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` -} - -// PluginConfigArgs plugin config args -// swagger:model PluginConfigArgs -type PluginConfigArgs struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // value - // Required: true - Value []string `json:"Value"` -} - -// PluginConfigInterface The interface between Docker and the plugin -// swagger:model PluginConfigInterface -type PluginConfigInterface struct { - - // Protocol to use for clients connecting to the plugin. 
- ProtocolScheme string `json:"ProtocolScheme,omitempty"` - - // socket - // Required: true - Socket string `json:"Socket"` - - // types - // Required: true - Types []PluginInterfaceType `json:"Types"` -} - -// PluginConfigLinux plugin config linux -// swagger:model PluginConfigLinux -type PluginConfigLinux struct { - - // allow all devices - // Required: true - AllowAllDevices bool `json:"AllowAllDevices"` - - // capabilities - // Required: true - Capabilities []string `json:"Capabilities"` - - // devices - // Required: true - Devices []PluginDevice `json:"Devices"` -} - -// PluginConfigNetwork plugin config network -// swagger:model PluginConfigNetwork -type PluginConfigNetwork struct { - - // type - // Required: true - Type string `json:"Type"` -} - -// PluginConfigRootfs plugin config rootfs -// swagger:model PluginConfigRootfs -type PluginConfigRootfs struct { - - // diff ids - DiffIds []string `json:"diff_ids"` - - // type - Type string `json:"type,omitempty"` -} - -// PluginConfigUser plugin config user -// swagger:model PluginConfigUser -type PluginConfigUser struct { - - // g ID - GID uint32 `json:"GID,omitempty"` - - // UID - UID uint32 `json:"UID,omitempty"` -} - -// PluginSettings Settings that can be modified by users. -// swagger:model PluginSettings -type PluginSettings struct { - - // args - // Required: true - Args []string `json:"Args"` - - // devices - // Required: true - Devices []PluginDevice `json:"Devices"` - - // env - // Required: true - Env []string `json:"Env"` - - // mounts - // Required: true - Mounts []PluginMount `json:"Mounts"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/plugin_device.go b/src/vendor/github.com/docker/docker/api/types/plugin_device.go deleted file mode 100644 index 569901067..000000000 --- a/src/vendor/github.com/docker/docker/api/types/plugin_device.go +++ /dev/null @@ -1,25 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginDevice plugin device -// swagger:model PluginDevice -type PluginDevice struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // path - // Required: true - Path *string `json:"Path"` - - // settable - // Required: true - Settable []string `json:"Settable"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/plugin_env.go b/src/vendor/github.com/docker/docker/api/types/plugin_env.go deleted file mode 100644 index 32962dc2e..000000000 --- a/src/vendor/github.com/docker/docker/api/types/plugin_env.go +++ /dev/null @@ -1,25 +0,0 @@ -package types - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -// PluginEnv plugin env -// swagger:model PluginEnv -type PluginEnv struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // value - // Required: true - Value *string `json:"Value"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/src/vendor/github.com/docker/docker/api/types/plugin_interface_type.go deleted file mode 100644 index c82f204e8..000000000 --- a/src/vendor/github.com/docker/docker/api/types/plugin_interface_type.go +++ /dev/null @@ -1,21 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginInterfaceType plugin interface type -// swagger:model PluginInterfaceType -type PluginInterfaceType struct { - - // capability - // Required: true - Capability string `json:"Capability"` - - // prefix - // Required: true - Prefix string `json:"Prefix"` - - // version - // Required: true - Version string `json:"Version"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/plugin_mount.go b/src/vendor/github.com/docker/docker/api/types/plugin_mount.go deleted file mode 100644 index 5c031cf8b..000000000 --- a/src/vendor/github.com/docker/docker/api/types/plugin_mount.go +++ /dev/null @@ -1,37 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginMount plugin mount -// swagger:model PluginMount -type PluginMount struct { - - // description - // Required: true - Description string `json:"Description"` - - // destination - // Required: true - Destination string `json:"Destination"` - - // name - // Required: true - Name string `json:"Name"` - - // options - // Required: true - Options []string `json:"Options"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // source - // Required: true - Source *string `json:"Source"` - - // type - // Required: true - Type string `json:"Type"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/plugin_responses.go b/src/vendor/github.com/docker/docker/api/types/plugin_responses.go deleted file mode 100644 index 60d1fb5ad..000000000 --- a/src/vendor/github.com/docker/docker/api/types/plugin_responses.go +++ /dev/null @@ -1,71 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "encoding/json" - "fmt" - "sort" -) - -// PluginsListResponse contains the response for the Engine API -type PluginsListResponse []*Plugin - -// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType -func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { - versionIndex := len(p) - prefixIndex := 0 - if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { - return fmt.Errorf("%q is not a plugin interface type", p) - } - p = p[1 : len(p)-1] -loop: - for i, b := range p { - switch b { - case '.': - prefixIndex = i - case '/': - versionIndex = i - break loop - } - } - t.Prefix = string(p[:prefixIndex]) - t.Capability = string(p[prefixIndex+1 : versionIndex]) - if versionIndex < len(p) { - t.Version = string(p[versionIndex+1:]) - } - return nil -} - -// MarshalJSON implements json.Marshaler for PluginInterfaceType -func (t *PluginInterfaceType) MarshalJSON() ([]byte, 
error) { - return json.Marshal(t.String()) -} - -// String implements fmt.Stringer for PluginInterfaceType -func (t PluginInterfaceType) String() string { - return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -type PluginPrivilege struct { - Name string - Description string - Value []string -} - -// PluginPrivileges is a list of PluginPrivilege -type PluginPrivileges []PluginPrivilege - -func (s PluginPrivileges) Len() int { - return len(s) -} - -func (s PluginPrivileges) Less(i, j int) bool { - return s[i].Name < s[j].Name -} - -func (s PluginPrivileges) Swap(i, j int) { - sort.Strings(s[i].Value) - sort.Strings(s[j].Value) - s[i], s[j] = s[j], s[i] -} diff --git a/src/vendor/github.com/docker/docker/api/types/port.go b/src/vendor/github.com/docker/docker/api/types/port.go deleted file mode 100644 index d91234744..000000000 --- a/src/vendor/github.com/docker/docker/api/types/port.go +++ /dev/null @@ -1,23 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Port An open port on a container -// swagger:model Port -type Port struct { - - // Host IP address that the container's port is mapped to - IP string `json:"IP,omitempty"` - - // Port on the container - // Required: true - PrivatePort uint16 `json:"PrivatePort"` - - // Port exposed on the host - PublicPort uint16 `json:"PublicPort,omitempty"` - - // type - // Required: true - Type string `json:"Type"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/src/vendor/github.com/docker/docker/api/types/registry/authenticate.go deleted file mode 100644 index f0a2113e4..000000000 --- a/src/vendor/github.com/docker/docker/api/types/registry/authenticate.go +++ /dev/null @@ -1,21 +0,0 @@ -package registry // import "github.com/docker/docker/api/types/registry" - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// AuthenticateOKBody authenticate o k body -// swagger:model AuthenticateOKBody -type AuthenticateOKBody struct { - - // An opaque token used to authenticate a user after a successful login - // Required: true - IdentityToken string `json:"IdentityToken"` - - // The status of the authentication - // Required: true - Status string `json:"Status"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/registry/registry.go b/src/vendor/github.com/docker/docker/api/types/registry/registry.go deleted file mode 100644 index 53e47084c..000000000 --- a/src/vendor/github.com/docker/docker/api/types/registry/registry.go +++ /dev/null @@ -1,119 +0,0 @@ -package registry // import "github.com/docker/docker/api/types/registry" - -import ( - "encoding/json" - "net" - - v1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -// ServiceConfig stores daemon registry services configuration. 
-type ServiceConfig struct { - AllowNondistributableArtifactsCIDRs []*NetIPNet - AllowNondistributableArtifactsHostnames []string - InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` - IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` - Mirrors []string -} - -// NetIPNet is the net.IPNet type, which can be marshalled and -// unmarshalled to JSON -type NetIPNet net.IPNet - -// String returns the CIDR notation of ipnet -func (ipnet *NetIPNet) String() string { - return (*net.IPNet)(ipnet).String() -} - -// MarshalJSON returns the JSON representation of the IPNet -func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { - return json.Marshal((*net.IPNet)(ipnet).String()) -} - -// UnmarshalJSON sets the IPNet from a byte array of JSON -func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { - var ipnetStr string - if err = json.Unmarshal(b, &ipnetStr); err == nil { - var cidr *net.IPNet - if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { - *ipnet = NetIPNet(*cidr) - } - } - return -} - -// IndexInfo contains information about a registry -// -// RepositoryInfo Examples: -// { -// "Index" : { -// "Name" : "docker.io", -// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], -// "Secure" : true, -// "Official" : true, -// }, -// "RemoteName" : "library/debian", -// "LocalName" : "debian", -// "CanonicalName" : "docker.io/debian" -// "Official" : true, -// } -// -// { -// "Index" : { -// "Name" : "127.0.0.1:5000", -// "Mirrors" : [], -// "Secure" : false, -// "Official" : false, -// }, -// "RemoteName" : "user/repo", -// "LocalName" : "127.0.0.1:5000/user/repo", -// "CanonicalName" : "127.0.0.1:5000/user/repo", -// "Official" : false, -// } -type IndexInfo struct { - // Name is the name of the registry, such as "docker.io" - Name string - // Mirrors is a list of mirrors, expressed as URIs - Mirrors []string - // Secure is set to false if the registry is part of the list of - // insecure registries. Insecure registries accept HTTP and/or accept - // HTTPS with certificates from unknown CAs. - Secure bool - // Official indicates whether this is an official registry - Official bool -} - -// SearchResult describes a search result returned from a registry -type SearchResult struct { - // StarCount indicates the number of stars this repository has - StarCount int `json:"star_count"` - // IsOfficial is true if the result is from an official repository. 
- IsOfficial bool `json:"is_official"` - // Name is the name of the repository - Name string `json:"name"` - // IsAutomated indicates whether the result is automated - IsAutomated bool `json:"is_automated"` - // Description is a textual description of the repository - Description string `json:"description"` -} - -// SearchResults lists a collection search results returned from a registry -type SearchResults struct { - // Query contains the query string that generated the search results - Query string `json:"query"` - // NumResults indicates the number of results the query returned - NumResults int `json:"num_results"` - // Results is a slice containing the actual results for the search - Results []SearchResult `json:"results"` -} - -// DistributionInspect describes the result obtained from contacting the -// registry to retrieve image metadata -type DistributionInspect struct { - // Descriptor contains information about the manifest, including - // the content addressable digest - Descriptor v1.Descriptor - // Platforms contains the list of platforms supported by the image, - // obtained by parsing the manifest - Platforms []v1.Platform -} diff --git a/src/vendor/github.com/docker/docker/api/types/service_update_response.go b/src/vendor/github.com/docker/docker/api/types/service_update_response.go deleted file mode 100644 index 74ea64b1b..000000000 --- a/src/vendor/github.com/docker/docker/api/types/service_update_response.go +++ /dev/null @@ -1,12 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ServiceUpdateResponse service update response -// swagger:model ServiceUpdateResponse -type ServiceUpdateResponse struct { - - // Optional warning messages - Warnings []string `json:"Warnings"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/stats.go b/src/vendor/github.com/docker/docker/api/types/stats.go deleted file mode 100644 index 20daebed1..000000000 --- a/src/vendor/github.com/docker/docker/api/types/stats.go +++ /dev/null @@ -1,181 +0,0 @@ -// Package types is used for API stability in the types and response to the -// consumers of the API stats endpoint. -package types // import "github.com/docker/docker/api/types" - -import "time" - -// ThrottlingData stores CPU throttling stats of one running container. -// Not used on Windows. -type ThrottlingData struct { - // Number of periods with throttling active - Periods uint64 `json:"periods"` - // Number of periods when the container hits its throttling limit. - ThrottledPeriods uint64 `json:"throttled_periods"` - // Aggregate time the container was throttled for in nanoseconds. - ThrottledTime uint64 `json:"throttled_time"` -} - -// CPUUsage stores All CPU stats aggregated since container inception. -type CPUUsage struct { - // Total CPU time consumed. - // Units: nanoseconds (Linux) - // Units: 100's of nanoseconds (Windows) - TotalUsage uint64 `json:"total_usage"` - - // Total CPU time consumed per core (Linux). Not used on Windows. - // Units: nanoseconds. - PercpuUsage []uint64 `json:"percpu_usage,omitempty"` - - // Time spent by tasks of the cgroup in kernel mode (Linux). - // Time spent by all container processes in kernel mode (Windows). - // Units: nanoseconds (Linux). - // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers. - UsageInKernelmode uint64 `json:"usage_in_kernelmode"` - - // Time spent by tasks of the cgroup in user mode (Linux). 
- // Time spent by all container processes in user mode (Windows). - // Units: nanoseconds (Linux). - // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers - UsageInUsermode uint64 `json:"usage_in_usermode"` -} - -// CPUStats aggregates and wraps all CPU related info of container -type CPUStats struct { - // CPU Usage. Linux and Windows. - CPUUsage CPUUsage `json:"cpu_usage"` - - // System Usage. Linux only. - SystemUsage uint64 `json:"system_cpu_usage,omitempty"` - - // Online CPUs. Linux only. - OnlineCPUs uint32 `json:"online_cpus,omitempty"` - - // Throttling Data. Linux only. - ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` -} - -// MemoryStats aggregates all memory stats since container inception on Linux. -// Windows returns stats for commit and private working set only. -type MemoryStats struct { - // Linux Memory Stats - - // current res_counter usage for memory - Usage uint64 `json:"usage,omitempty"` - // maximum usage ever recorded. - MaxUsage uint64 `json:"max_usage,omitempty"` - // TODO(vishh): Export these as stronger types. - // all the stats exported via memory.stat. - Stats map[string]uint64 `json:"stats,omitempty"` - // number of times memory usage hits limits. - Failcnt uint64 `json:"failcnt,omitempty"` - Limit uint64 `json:"limit,omitempty"` - - // Windows Memory Stats - // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx - - // committed bytes - Commit uint64 `json:"commitbytes,omitempty"` - // peak committed bytes - CommitPeak uint64 `json:"commitpeakbytes,omitempty"` - // private working set - PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"` -} - -// BlkioStatEntry is one small entity to store a piece of Blkio stats -// Not used on Windows. -type BlkioStatEntry struct { - Major uint64 `json:"major"` - Minor uint64 `json:"minor"` - Op string `json:"op"` - Value uint64 `json:"value"` -} - -// BlkioStats stores All IO service stats for data read and write. -// This is a Linux specific structure as the differences between expressing -// block I/O on Windows and Linux are sufficiently significant to make -// little sense attempting to morph into a combined structure. -type BlkioStats struct { - // number of bytes transferred to and from the block device - IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` - IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` - IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` - IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` - IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` - IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` - IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` - SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` -} - -// StorageStats is the disk I/O stats for read/write on Windows. -type StorageStats struct { - ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"` - ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"` - WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"` - WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"` -} - -// NetworkStats aggregates the network stats of one container -type NetworkStats struct { - // Bytes received. Windows and Linux. - RxBytes uint64 `json:"rx_bytes"` - // Packets received. Windows and Linux. - RxPackets uint64 `json:"rx_packets"` - // Received errors. Not used on Windows. 
Note that we don't `omitempty` this - // field as it is expected in the >=v1.21 API stats structure. - RxErrors uint64 `json:"rx_errors"` - // Incoming packets dropped. Windows and Linux. - RxDropped uint64 `json:"rx_dropped"` - // Bytes sent. Windows and Linux. - TxBytes uint64 `json:"tx_bytes"` - // Packets sent. Windows and Linux. - TxPackets uint64 `json:"tx_packets"` - // Sent errors. Not used on Windows. Note that we don't `omitempty` this - // field as it is expected in the >=v1.21 API stats structure. - TxErrors uint64 `json:"tx_errors"` - // Outgoing packets dropped. Windows and Linux. - TxDropped uint64 `json:"tx_dropped"` - // Endpoint ID. Not used on Linux. - EndpointID string `json:"endpoint_id,omitempty"` - // Instance ID. Not used on Linux. - InstanceID string `json:"instance_id,omitempty"` -} - -// PidsStats contains the stats of a container's pids -type PidsStats struct { - // Current is the number of pids in the cgroup - Current uint64 `json:"current,omitempty"` - // Limit is the hard limit on the number of pids in the cgroup. - // A "Limit" of 0 means that there is no limit. - Limit uint64 `json:"limit,omitempty"` -} - -// Stats is Ultimate struct aggregating all types of stats of one container -type Stats struct { - // Common stats - Read time.Time `json:"read"` - PreRead time.Time `json:"preread"` - - // Linux specific stats, not populated on Windows. - PidsStats PidsStats `json:"pids_stats,omitempty"` - BlkioStats BlkioStats `json:"blkio_stats,omitempty"` - - // Windows specific stats, not populated on Linux. - NumProcs uint32 `json:"num_procs"` - StorageStats StorageStats `json:"storage_stats,omitempty"` - - // Shared stats - CPUStats CPUStats `json:"cpu_stats,omitempty"` - PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous" - MemoryStats MemoryStats `json:"memory_stats,omitempty"` -} - -// StatsJSON is newly used Networks -type StatsJSON struct { - Stats - - Name string `json:"name,omitempty"` - ID string `json:"id,omitempty"` - - // Networks request version >=1.21 - Networks map[string]NetworkStats `json:"networks,omitempty"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/src/vendor/github.com/docker/docker/api/types/strslice/strslice.go deleted file mode 100644 index 82921cebc..000000000 --- a/src/vendor/github.com/docker/docker/api/types/strslice/strslice.go +++ /dev/null @@ -1,30 +0,0 @@ -package strslice // import "github.com/docker/docker/api/types/strslice" - -import "encoding/json" - -// StrSlice represents a string or an array of strings. -// We need to override the json decoder to accept both options. -type StrSlice []string - -// UnmarshalJSON decodes the byte slice whether it's a string or an array of -// strings. This method is needed to implement json.Unmarshaler. -func (e *StrSlice) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - // With no input, we preserve the existing value by returning nil and - // leaving the target alone. This allows defining default values for - // the type. 
- return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - - *e = p - return nil -} diff --git a/src/vendor/github.com/docker/docker/api/types/swarm/common.go b/src/vendor/github.com/docker/docker/api/types/swarm/common.go deleted file mode 100644 index ef020f458..000000000 --- a/src/vendor/github.com/docker/docker/api/types/swarm/common.go +++ /dev/null @@ -1,40 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "time" - -// Version represents the internal object version. -type Version struct { - Index uint64 `json:",omitempty"` -} - -// Meta is a base object inherited by most of the other once. -type Meta struct { - Version Version `json:",omitempty"` - CreatedAt time.Time `json:",omitempty"` - UpdatedAt time.Time `json:",omitempty"` -} - -// Annotations represents how to describe an object. -type Annotations struct { - Name string `json:",omitempty"` - Labels map[string]string `json:"Labels"` -} - -// Driver represents a driver (network, logging, secrets backend). -type Driver struct { - Name string `json:",omitempty"` - Options map[string]string `json:",omitempty"` -} - -// TLSInfo represents the TLS information about what CA certificate is trusted, -// and who the issuer for a TLS certificate is -type TLSInfo struct { - // TrustRoot is the trusted CA root certificate in PEM format - TrustRoot string `json:",omitempty"` - - // CertIssuer is the raw subject bytes of the issuer - CertIssuerSubject []byte `json:",omitempty"` - - // CertIssuerPublicKey is the raw public key bytes of the issuer - CertIssuerPublicKey []byte `json:",omitempty"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/swarm/config.go b/src/vendor/github.com/docker/docker/api/types/swarm/config.go deleted file mode 100644 index 16202ccce..000000000 --- a/src/vendor/github.com/docker/docker/api/types/swarm/config.go +++ /dev/null @@ -1,40 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "os" - -// Config represents a config. -type Config struct { - ID string - Meta - Spec ConfigSpec -} - -// ConfigSpec represents a config specification from a config in swarm -type ConfigSpec struct { - Annotations - Data []byte `json:",omitempty"` - - // Templating controls whether and how to evaluate the config payload as - // a template. If it is not set, no templating is used. - Templating *Driver `json:",omitempty"` -} - -// ConfigReferenceFileTarget is a file target in a config reference -type ConfigReferenceFileTarget struct { - Name string - UID string - GID string - Mode os.FileMode -} - -// ConfigReferenceRuntimeTarget is a target for a config specifying that it -// isn't mounted into the container but instead has some other purpose. 
-type ConfigReferenceRuntimeTarget struct{} - -// ConfigReference is a reference to a config in swarm -type ConfigReference struct { - File *ConfigReferenceFileTarget `json:",omitempty"` - Runtime *ConfigReferenceRuntimeTarget `json:",omitempty"` - ConfigID string - ConfigName string -} diff --git a/src/vendor/github.com/docker/docker/api/types/swarm/container.go b/src/vendor/github.com/docker/docker/api/types/swarm/container.go deleted file mode 100644 index af5e1c0bc..000000000 --- a/src/vendor/github.com/docker/docker/api/types/swarm/container.go +++ /dev/null @@ -1,80 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/mount" - "github.com/docker/go-units" -) - -// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) -// Detailed documentation is available in: -// http://man7.org/linux/man-pages/man5/resolv.conf.5.html -// `nameserver`, `search`, `options` have been supported. -// TODO: `domain` is not supported yet. -type DNSConfig struct { - // Nameservers specifies the IP addresses of the name servers - Nameservers []string `json:",omitempty"` - // Search specifies the search list for host-name lookup - Search []string `json:",omitempty"` - // Options allows certain internal resolver variables to be modified - Options []string `json:",omitempty"` -} - -// SELinuxContext contains the SELinux labels of the container. -type SELinuxContext struct { - Disable bool - - User string - Role string - Type string - Level string -} - -// CredentialSpec for managed service account (Windows only) -type CredentialSpec struct { - Config string - File string - Registry string -} - -// Privileges defines the security options for the container. -type Privileges struct { - CredentialSpec *CredentialSpec - SELinuxContext *SELinuxContext -} - -// ContainerSpec represents the spec of a container. -type ContainerSpec struct { - Image string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Command []string `json:",omitempty"` - Args []string `json:",omitempty"` - Hostname string `json:",omitempty"` - Env []string `json:",omitempty"` - Dir string `json:",omitempty"` - User string `json:",omitempty"` - Groups []string `json:",omitempty"` - Privileges *Privileges `json:",omitempty"` - Init *bool `json:",omitempty"` - StopSignal string `json:",omitempty"` - TTY bool `json:",omitempty"` - OpenStdin bool `json:",omitempty"` - ReadOnly bool `json:",omitempty"` - Mounts []mount.Mount `json:",omitempty"` - StopGracePeriod *time.Duration `json:",omitempty"` - Healthcheck *container.HealthConfig `json:",omitempty"` - // The format of extra hosts on swarmkit is specified in: - // http://man7.org/linux/man-pages/man5/hosts.5.html - // IP_address canonical_hostname [aliases...] 
- Hosts []string `json:",omitempty"` - DNSConfig *DNSConfig `json:",omitempty"` - Secrets []*SecretReference `json:",omitempty"` - Configs []*ConfigReference `json:",omitempty"` - Isolation container.Isolation `json:",omitempty"` - Sysctls map[string]string `json:",omitempty"` - CapabilityAdd []string `json:",omitempty"` - CapabilityDrop []string `json:",omitempty"` - Ulimits []*units.Ulimit `json:",omitempty"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/swarm/network.go b/src/vendor/github.com/docker/docker/api/types/swarm/network.go deleted file mode 100644 index 98ef3284d..000000000 --- a/src/vendor/github.com/docker/docker/api/types/swarm/network.go +++ /dev/null @@ -1,121 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "github.com/docker/docker/api/types/network" -) - -// Endpoint represents an endpoint. -type Endpoint struct { - Spec EndpointSpec `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` - VirtualIPs []EndpointVirtualIP `json:",omitempty"` -} - -// EndpointSpec represents the spec of an endpoint. -type EndpointSpec struct { - Mode ResolutionMode `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` -} - -// ResolutionMode represents a resolution mode. -type ResolutionMode string - -const ( - // ResolutionModeVIP VIP - ResolutionModeVIP ResolutionMode = "vip" - // ResolutionModeDNSRR DNSRR - ResolutionModeDNSRR ResolutionMode = "dnsrr" -) - -// PortConfig represents the config of a port. -type PortConfig struct { - Name string `json:",omitempty"` - Protocol PortConfigProtocol `json:",omitempty"` - // TargetPort is the port inside the container - TargetPort uint32 `json:",omitempty"` - // PublishedPort is the port on the swarm hosts - PublishedPort uint32 `json:",omitempty"` - // PublishMode is the mode in which port is published - PublishMode PortConfigPublishMode `json:",omitempty"` -} - -// PortConfigPublishMode represents the mode in which the port is to -// be published. -type PortConfigPublishMode string - -const ( - // PortConfigPublishModeIngress is used for ports published - // for ingress load balancing using routing mesh. - PortConfigPublishModeIngress PortConfigPublishMode = "ingress" - // PortConfigPublishModeHost is used for ports published - // for direct host level access on the host where the task is running. - PortConfigPublishModeHost PortConfigPublishMode = "host" -) - -// PortConfigProtocol represents the protocol of a port. -type PortConfigProtocol string - -const ( - // TODO(stevvooe): These should be used generally, not just for PortConfig. - - // PortConfigProtocolTCP TCP - PortConfigProtocolTCP PortConfigProtocol = "tcp" - // PortConfigProtocolUDP UDP - PortConfigProtocolUDP PortConfigProtocol = "udp" - // PortConfigProtocolSCTP SCTP - PortConfigProtocolSCTP PortConfigProtocol = "sctp" -) - -// EndpointVirtualIP represents the virtual ip of a port. -type EndpointVirtualIP struct { - NetworkID string `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// Network represents a network. -type Network struct { - ID string - Meta - Spec NetworkSpec `json:",omitempty"` - DriverState Driver `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` -} - -// NetworkSpec represents the spec of a network. 
-type NetworkSpec struct { - Annotations - DriverConfiguration *Driver `json:",omitempty"` - IPv6Enabled bool `json:",omitempty"` - Internal bool `json:",omitempty"` - Attachable bool `json:",omitempty"` - Ingress bool `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` - ConfigFrom *network.ConfigReference `json:",omitempty"` - Scope string `json:",omitempty"` -} - -// NetworkAttachmentConfig represents the configuration of a network attachment. -type NetworkAttachmentConfig struct { - Target string `json:",omitempty"` - Aliases []string `json:",omitempty"` - DriverOpts map[string]string `json:",omitempty"` -} - -// NetworkAttachment represents a network attachment. -type NetworkAttachment struct { - Network Network `json:",omitempty"` - Addresses []string `json:",omitempty"` -} - -// IPAMOptions represents ipam options. -type IPAMOptions struct { - Driver Driver `json:",omitempty"` - Configs []IPAMConfig `json:",omitempty"` -} - -// IPAMConfig represents ipam configuration. -type IPAMConfig struct { - Subnet string `json:",omitempty"` - Range string `json:",omitempty"` - Gateway string `json:",omitempty"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/swarm/node.go b/src/vendor/github.com/docker/docker/api/types/swarm/node.go deleted file mode 100644 index 1e30f5fa1..000000000 --- a/src/vendor/github.com/docker/docker/api/types/swarm/node.go +++ /dev/null @@ -1,115 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -// Node represents a node. -type Node struct { - ID string - Meta - // Spec defines the desired state of the node as specified by the user. - // The system will honor this and will *never* modify it. - Spec NodeSpec `json:",omitempty"` - // Description encapsulates the properties of the Node as reported by the - // agent. - Description NodeDescription `json:",omitempty"` - // Status provides the current status of the node, as seen by the manager. - Status NodeStatus `json:",omitempty"` - // ManagerStatus provides the current status of the node's manager - // component, if the node is a manager. - ManagerStatus *ManagerStatus `json:",omitempty"` -} - -// NodeSpec represents the spec of a node. -type NodeSpec struct { - Annotations - Role NodeRole `json:",omitempty"` - Availability NodeAvailability `json:",omitempty"` -} - -// NodeRole represents the role of a node. -type NodeRole string - -const ( - // NodeRoleWorker WORKER - NodeRoleWorker NodeRole = "worker" - // NodeRoleManager MANAGER - NodeRoleManager NodeRole = "manager" -) - -// NodeAvailability represents the availability of a node. -type NodeAvailability string - -const ( - // NodeAvailabilityActive ACTIVE - NodeAvailabilityActive NodeAvailability = "active" - // NodeAvailabilityPause PAUSE - NodeAvailabilityPause NodeAvailability = "pause" - // NodeAvailabilityDrain DRAIN - NodeAvailabilityDrain NodeAvailability = "drain" -) - -// NodeDescription represents the description of a node. -type NodeDescription struct { - Hostname string `json:",omitempty"` - Platform Platform `json:",omitempty"` - Resources Resources `json:",omitempty"` - Engine EngineDescription `json:",omitempty"` - TLSInfo TLSInfo `json:",omitempty"` -} - -// Platform represents the platform (Arch/OS). -type Platform struct { - Architecture string `json:",omitempty"` - OS string `json:",omitempty"` -} - -// EngineDescription represents the description of an engine. 
-type EngineDescription struct { - EngineVersion string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Plugins []PluginDescription `json:",omitempty"` -} - -// PluginDescription represents the description of an engine plugin. -type PluginDescription struct { - Type string `json:",omitempty"` - Name string `json:",omitempty"` -} - -// NodeStatus represents the status of a node. -type NodeStatus struct { - State NodeState `json:",omitempty"` - Message string `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// Reachability represents the reachability of a node. -type Reachability string - -const ( - // ReachabilityUnknown UNKNOWN - ReachabilityUnknown Reachability = "unknown" - // ReachabilityUnreachable UNREACHABLE - ReachabilityUnreachable Reachability = "unreachable" - // ReachabilityReachable REACHABLE - ReachabilityReachable Reachability = "reachable" -) - -// ManagerStatus represents the status of a manager. -type ManagerStatus struct { - Leader bool `json:",omitempty"` - Reachability Reachability `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// NodeState represents the state of a node. -type NodeState string - -const ( - // NodeStateUnknown UNKNOWN - NodeStateUnknown NodeState = "unknown" - // NodeStateDown DOWN - NodeStateDown NodeState = "down" - // NodeStateReady READY - NodeStateReady NodeState = "ready" - // NodeStateDisconnected DISCONNECTED - NodeStateDisconnected NodeState = "disconnected" -) diff --git a/src/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/src/vendor/github.com/docker/docker/api/types/swarm/runtime.go deleted file mode 100644 index 0c77403cc..000000000 --- a/src/vendor/github.com/docker/docker/api/types/swarm/runtime.go +++ /dev/null @@ -1,27 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -// RuntimeType is the type of runtime used for the TaskSpec -type RuntimeType string - -// RuntimeURL is the proto type url -type RuntimeURL string - -const ( - // RuntimeContainer is the container based runtime - RuntimeContainer RuntimeType = "container" - // RuntimePlugin is the plugin based runtime - RuntimePlugin RuntimeType = "plugin" - // RuntimeNetworkAttachment is the network attachment runtime - RuntimeNetworkAttachment RuntimeType = "attachment" - - // RuntimeURLContainer is the proto url for the container type - RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" - // RuntimeURLPlugin is the proto url for the plugin type - RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" -) - -// NetworkAttachmentSpec represents the runtime spec type for network -// attachment tasks -type NetworkAttachmentSpec struct { - ContainerID string -} diff --git a/src/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/src/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go deleted file mode 100644 index 98c2806c3..000000000 --- a/src/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go +++ /dev/null @@ -1,3 +0,0 @@ -//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. 
plugin.proto - -package runtime // import "github.com/docker/docker/api/types/swarm/runtime" diff --git a/src/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/src/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go deleted file mode 100644 index e45045866..000000000 --- a/src/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go +++ /dev/null @@ -1,754 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: plugin.proto - -/* - Package runtime is a generated protocol buffer package. - - It is generated from these files: - plugin.proto - - It has these top-level messages: - PluginSpec - PluginPrivilege -*/ -package runtime - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. -type PluginSpec struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` - Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` - Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` - Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` -} - -func (m *PluginSpec) Reset() { *m = PluginSpec{} } -func (m *PluginSpec) String() string { return proto.CompactTextString(m) } -func (*PluginSpec) ProtoMessage() {} -func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } - -func (m *PluginSpec) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginSpec) GetRemote() string { - if m != nil { - return m.Remote - } - return "" -} - -func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { - if m != nil { - return m.Privileges - } - return nil -} - -func (m *PluginSpec) GetDisabled() bool { - if m != nil { - return m.Disabled - } - return false -} - -func (m *PluginSpec) GetEnv() []string { - if m != nil { - return m.Env - } - return nil -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. 
-type PluginPrivilege struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` -} - -func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } -func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } -func (*PluginPrivilege) ProtoMessage() {} -func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } - -func (m *PluginPrivilege) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginPrivilege) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *PluginPrivilege) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*PluginSpec)(nil), "PluginSpec") - proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") -} -func (m *PluginSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Remote) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) - i += copy(dAtA[i:], m.Remote) - } - if len(m.Privileges) > 0 { - for _, msg := range m.Privileges { - dAtA[i] = 0x1a - i++ - i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Disabled { - dAtA[i] = 0x20 - i++ - if m.Disabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.Env) > 0 { - for _, s := range m.Env { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Description) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - } - if len(m.Value) > 0 { - for _, s := range m.Value { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *PluginSpec) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Remote) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Privileges) > 0 { - for _, e := range m.Privileges { - l = e.Size() - n += 1 + l + 
sovPlugin(uint64(l)) - } - } - if m.Disabled { - n += 2 - } - if len(m.Env) > 0 { - for _, s := range m.Env { - l = len(s) - n += 1 + l + sovPlugin(uint64(l)) - } - } - return n -} - -func (m *PluginPrivilege) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Value) > 0 { - for _, s := range m.Value { - l = len(s) - n += 1 + l + sovPlugin(uint64(l)) - } - } - return n -} - -func sovPlugin(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozPlugin(x uint64) (n int) { - return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *PluginSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Remote = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Privileges = append(m.Privileges, &PluginPrivilege{}) - if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Disabled = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipPlugin(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthPlugin - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipPlugin(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } - -var fileDescriptorPlugin = []byte{ - // 256 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4d, 0x4b, 0xc3, 0x30, - 0x18, 0xc7, 0x89, 0xdd, 0xc6, 0xfa, 0x4c, 0x70, 0x04, 0x91, 0xe2, 0xa1, 0x94, 0x9d, 0x7a, 0x6a, - 0x45, 0x2f, 0x82, 0x37, 0x0f, 0x9e, 0x47, 0xbc, 0x09, 0x1e, 0xd2, 0xf6, 0xa1, 0x06, 0x9b, 0x17, - 0x92, 0xb4, 0xe2, 0x37, 0xf1, 0x23, 0x79, 0xf4, 0x23, 0x48, 0x3f, 0x89, 0x98, 0x75, 0x32, 0x64, - 0xa7, 0xff, 0x4b, 0xc2, 0x9f, 0x1f, 0x0f, 0x9c, 0x9a, 0xae, 0x6f, 0x85, 0x2a, 0x8c, 0xd5, 0x5e, - 0x6f, 0x3e, 0x08, 0xc0, 0x36, 0x14, 0x8f, 0x06, 0x6b, 0x4a, 0x61, 0xa6, 0xb8, 0xc4, 0x84, 0x64, - 0x24, 0x8f, 0x59, 0xf0, 0xf4, 0x02, 0x16, 0x16, 0xa5, 0xf6, 0x98, 0x9c, 0x84, 0x76, 0x4a, 0xf4, - 0x0a, 0xc0, 0x58, 0x31, 0x88, 0x0e, 0x5b, 0x74, 0x49, 0x94, 0x45, 0xf9, 0xea, 0x7a, 0x5d, 0xec, - 0xc6, 0xb6, 0xfb, 0x07, 0x76, 0xf0, 0x87, 0x5e, 0xc2, 0xb2, 0x11, 0x8e, 0x57, 0x1d, 0x36, 0xc9, - 0x2c, 0x23, 0xf9, 0x92, 0xfd, 0x65, 0xba, 0x86, 0x08, 0xd5, 0x90, 
0xcc, 0xb3, 0x28, 0x8f, 0xd9, - 0xaf, 0xdd, 0x3c, 0xc3, 0xd9, 0xbf, 0xb1, 0xa3, 0x78, 0x19, 0xac, 0x1a, 0x74, 0xb5, 0x15, 0xc6, - 0x0b, 0xad, 0x26, 0xc6, 0xc3, 0x8a, 0x9e, 0xc3, 0x7c, 0xe0, 0x5d, 0x8f, 0x81, 0x31, 0x66, 0xbb, - 0x70, 0xff, 0xf0, 0x39, 0xa6, 0xe4, 0x6b, 0x4c, 0xc9, 0xf7, 0x98, 0x92, 0xa7, 0xdb, 0x56, 0xf8, - 0x97, 0xbe, 0x2a, 0x6a, 0x2d, 0xcb, 0x46, 0xd7, 0xaf, 0x68, 0xf7, 0xc2, 0x8d, 0x28, 0xfd, 0xbb, - 0x41, 0x57, 0xba, 0x37, 0x6e, 0x65, 0x69, 0x7b, 0xe5, 0x85, 0xc4, 0xbb, 0x49, 0xab, 0x45, 0x38, - 0xe4, 0xcd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0xa8, 0xd9, 0x9b, 0x58, 0x01, 0x00, 0x00, -} diff --git a/src/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/src/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto deleted file mode 100644 index 9ef169046..000000000 --- a/src/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime"; - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. -message PluginSpec { - string name = 1; - string remote = 2; - repeated PluginPrivilege privileges = 3; - bool disabled = 4; - repeated string env = 5; -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -message PluginPrivilege { - string name = 1; - string description = 2; - repeated string value = 3; -} diff --git a/src/vendor/github.com/docker/docker/api/types/swarm/secret.go b/src/vendor/github.com/docker/docker/api/types/swarm/secret.go deleted file mode 100644 index d5213ec98..000000000 --- a/src/vendor/github.com/docker/docker/api/types/swarm/secret.go +++ /dev/null @@ -1,36 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "os" - -// Secret represents a secret. -type Secret struct { - ID string - Meta - Spec SecretSpec -} - -// SecretSpec represents a secret specification from a secret in swarm -type SecretSpec struct { - Annotations - Data []byte `json:",omitempty"` - Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store - - // Templating controls whether and how to evaluate the secret payload as - // a template. If it is not set, no templating is used. - Templating *Driver `json:",omitempty"` -} - -// SecretReferenceFileTarget is a file target in a secret reference -type SecretReferenceFileTarget struct { - Name string - UID string - GID string - Mode os.FileMode -} - -// SecretReference is a reference to a secret in swarm -type SecretReference struct { - File *SecretReferenceFileTarget - SecretID string - SecretName string -} diff --git a/src/vendor/github.com/docker/docker/api/types/swarm/service.go b/src/vendor/github.com/docker/docker/api/types/swarm/service.go deleted file mode 100644 index 6eb452d24..000000000 --- a/src/vendor/github.com/docker/docker/api/types/swarm/service.go +++ /dev/null @@ -1,202 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "time" - -// Service represents a service. -type Service struct { - ID string - Meta - Spec ServiceSpec `json:",omitempty"` - PreviousSpec *ServiceSpec `json:",omitempty"` - Endpoint Endpoint `json:",omitempty"` - UpdateStatus *UpdateStatus `json:",omitempty"` - - // ServiceStatus is an optional, extra field indicating the number of - // desired and running tasks. 
It is provided primarily as a shortcut to - // calculating these values client-side, which otherwise would require - // listing all tasks for a service, an operation that could be - // computation and network expensive. - ServiceStatus *ServiceStatus `json:",omitempty"` - - // JobStatus is the status of a Service which is in one of ReplicatedJob or - // GlobalJob modes. It is absent on Replicated and Global services. - JobStatus *JobStatus `json:",omitempty"` -} - -// ServiceSpec represents the spec of a service. -type ServiceSpec struct { - Annotations - - // TaskTemplate defines how the service should construct new tasks when - // orchestrating this service. - TaskTemplate TaskSpec `json:",omitempty"` - Mode ServiceMode `json:",omitempty"` - UpdateConfig *UpdateConfig `json:",omitempty"` - RollbackConfig *UpdateConfig `json:",omitempty"` - - // Networks field in ServiceSpec is deprecated. The - // same field in TaskSpec should be used instead. - // This field will be removed in a future release. - Networks []NetworkAttachmentConfig `json:",omitempty"` - EndpointSpec *EndpointSpec `json:",omitempty"` -} - -// ServiceMode represents the mode of a service. -type ServiceMode struct { - Replicated *ReplicatedService `json:",omitempty"` - Global *GlobalService `json:",omitempty"` - ReplicatedJob *ReplicatedJob `json:",omitempty"` - GlobalJob *GlobalJob `json:",omitempty"` -} - -// UpdateState is the state of a service update. -type UpdateState string - -const ( - // UpdateStateUpdating is the updating state. - UpdateStateUpdating UpdateState = "updating" - // UpdateStatePaused is the paused state. - UpdateStatePaused UpdateState = "paused" - // UpdateStateCompleted is the completed state. - UpdateStateCompleted UpdateState = "completed" - // UpdateStateRollbackStarted is the state with a rollback in progress. - UpdateStateRollbackStarted UpdateState = "rollback_started" - // UpdateStateRollbackPaused is the state with a rollback in progress. - UpdateStateRollbackPaused UpdateState = "rollback_paused" - // UpdateStateRollbackCompleted is the state with a rollback in progress. - UpdateStateRollbackCompleted UpdateState = "rollback_completed" -) - -// UpdateStatus reports the status of a service update. -type UpdateStatus struct { - State UpdateState `json:",omitempty"` - StartedAt *time.Time `json:",omitempty"` - CompletedAt *time.Time `json:",omitempty"` - Message string `json:",omitempty"` -} - -// ReplicatedService is a kind of ServiceMode. -type ReplicatedService struct { - Replicas *uint64 `json:",omitempty"` -} - -// GlobalService is a kind of ServiceMode. -type GlobalService struct{} - -// ReplicatedJob is the a type of Service which executes a defined Tasks -// in parallel until the specified number of Tasks have succeeded. -type ReplicatedJob struct { - // MaxConcurrent indicates the maximum number of Tasks that should be - // executing simultaneously for this job at any given time. There may be - // fewer Tasks that MaxConcurrent executing simultaneously; for example, if - // there are fewer than MaxConcurrent tasks needed to reach - // TotalCompletions. - // - // If this field is empty, it will default to a max concurrency of 1. - MaxConcurrent *uint64 `json:",omitempty"` - - // TotalCompletions is the total number of Tasks desired to run to - // completion. - // - // If this field is empty, the value of MaxConcurrent will be used. 
- TotalCompletions *uint64 `json:",omitempty"` -} - -// GlobalJob is the type of a Service which executes a Task on every Node -// matching the Service's placement constraints. These tasks run to completion -// and then exit. -// -// This type is deliberately empty. -type GlobalJob struct{} - -const ( - // UpdateFailureActionPause PAUSE - UpdateFailureActionPause = "pause" - // UpdateFailureActionContinue CONTINUE - UpdateFailureActionContinue = "continue" - // UpdateFailureActionRollback ROLLBACK - UpdateFailureActionRollback = "rollback" - - // UpdateOrderStopFirst STOP_FIRST - UpdateOrderStopFirst = "stop-first" - // UpdateOrderStartFirst START_FIRST - UpdateOrderStartFirst = "start-first" -) - -// UpdateConfig represents the update configuration. -type UpdateConfig struct { - // Maximum number of tasks to be updated in one iteration. - // 0 means unlimited parallelism. - Parallelism uint64 - - // Amount of time between updates. - Delay time.Duration `json:",omitempty"` - - // FailureAction is the action to take when an update failures. - FailureAction string `json:",omitempty"` - - // Monitor indicates how long to monitor a task for failure after it is - // created. If the task fails by ending up in one of the states - // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, - // this counts as a failure. If it fails after Monitor, it does not - // count as a failure. If Monitor is unspecified, a default value will - // be used. - Monitor time.Duration `json:",omitempty"` - - // MaxFailureRatio is the fraction of tasks that may fail during - // an update before the failure action is invoked. Any task created by - // the current update which ends up in one of the states REJECTED, - // COMPLETED or FAILED within Monitor from its creation counts as a - // failure. The number of failures is divided by the number of tasks - // being updated, and if this fraction is greater than - // MaxFailureRatio, the failure action is invoked. - // - // If the failure action is CONTINUE, there is no effect. - // If the failure action is PAUSE, no more tasks will be updated until - // another update is started. - MaxFailureRatio float32 - - // Order indicates the order of operations when rolling out an updated - // task. Either the old task is shut down before the new task is - // started, or the new task is started before the old task is shut down. - Order string -} - -// ServiceStatus represents the number of running tasks in a service and the -// number of tasks desired to be running. -type ServiceStatus struct { - // RunningTasks is the number of tasks for the service actually in the - // Running state - RunningTasks uint64 - - // DesiredTasks is the number of tasks desired to be running by the - // service. For replicated services, this is the replica count. For global - // services, this is computed by taking the number of tasks with desired - // state of not-Shutdown. - DesiredTasks uint64 - - // CompletedTasks is the number of tasks in the state Completed, if this - // service is in ReplicatedJob or GlobalJob mode. This field must be - // cross-referenced with the service type, because the default value of 0 - // may mean that a service is not in a job mode, or it may mean that the - // job has yet to complete any tasks. - CompletedTasks uint64 -} - -// JobStatus is the status of a job-type service. -type JobStatus struct { - // JobIteration is a value increased each time a Job is executed, - // successfully or otherwise. 
"Executed", in this case, means the job as a - // whole has been started, not that an individual Task has been launched. A - // job is "Executed" when its ServiceSpec is updated. JobIteration can be - // used to disambiguate Tasks belonging to different executions of a job. - // - // Though JobIteration will increase with each subsequent execution, it may - // not necessarily increase by 1, and so JobIteration should not be used to - // keep track of the number of times a job has been executed. - JobIteration Version - - // LastExecution is the time that the job was last executed, as observed by - // Swarm manager. - LastExecution time.Time `json:",omitempty"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/src/vendor/github.com/docker/docker/api/types/swarm/swarm.go deleted file mode 100644 index b25f99964..000000000 --- a/src/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ /dev/null @@ -1,227 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" -) - -// ClusterInfo represents info about the cluster for outputting in "info" -// it contains the same information as "Swarm", but without the JoinTokens -type ClusterInfo struct { - ID string - Meta - Spec Spec - TLSInfo TLSInfo - RootRotationInProgress bool - DefaultAddrPool []string - SubnetSize uint32 - DataPathPort uint32 -} - -// Swarm represents a swarm. -type Swarm struct { - ClusterInfo - JoinTokens JoinTokens -} - -// JoinTokens contains the tokens workers and managers need to join the swarm. -type JoinTokens struct { - // Worker is the join token workers may use to join the swarm. - Worker string - // Manager is the join token managers may use to join the swarm. - Manager string -} - -// Spec represents the spec of a swarm. -type Spec struct { - Annotations - - Orchestration OrchestrationConfig `json:",omitempty"` - Raft RaftConfig `json:",omitempty"` - Dispatcher DispatcherConfig `json:",omitempty"` - CAConfig CAConfig `json:",omitempty"` - TaskDefaults TaskDefaults `json:",omitempty"` - EncryptionConfig EncryptionConfig `json:",omitempty"` -} - -// OrchestrationConfig represents orchestration configuration. -type OrchestrationConfig struct { - // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or - // node. If negative, never remove completed or failed tasks. - TaskHistoryRetentionLimit *int64 `json:",omitempty"` -} - -// TaskDefaults parameterizes cluster-level task creation with default values. -type TaskDefaults struct { - // LogDriver selects the log driver to use for tasks created in the - // orchestrator if unspecified by a service. - // - // Updating this value will only have an affect on new tasks. Old tasks - // will continue use their previously configured log driver until - // recreated. - LogDriver *Driver `json:",omitempty"` -} - -// EncryptionConfig controls at-rest encryption of data and keys. -type EncryptionConfig struct { - // AutoLockManagers specifies whether or not managers TLS keys and raft data - // should be encrypted at rest in such a way that they must be unlocked - // before the manager node starts up again. - AutoLockManagers bool -} - -// RaftConfig represents raft configuration. -type RaftConfig struct { - // SnapshotInterval is the number of log entries between snapshots. - SnapshotInterval uint64 `json:",omitempty"` - - // KeepOldSnapshots is the number of snapshots to keep beyond the - // current snapshot. 
- KeepOldSnapshots *uint64 `json:",omitempty"` - - // LogEntriesForSlowFollowers is the number of log entries to keep - // around to sync up slow followers after a snapshot is created. - LogEntriesForSlowFollowers uint64 `json:",omitempty"` - - // ElectionTick is the number of ticks that a follower will wait for a message - // from the leader before becoming a candidate and starting an election. - // ElectionTick must be greater than HeartbeatTick. - // - // A tick currently defaults to one second, so these translate directly to - // seconds currently, but this is NOT guaranteed. - ElectionTick int - - // HeartbeatTick is the number of ticks between heartbeats. Every - // HeartbeatTick ticks, the leader will send a heartbeat to the - // followers. - // - // A tick currently defaults to one second, so these translate directly to - // seconds currently, but this is NOT guaranteed. - HeartbeatTick int -} - -// DispatcherConfig represents dispatcher configuration. -type DispatcherConfig struct { - // HeartbeatPeriod defines how often agent should send heartbeats to - // dispatcher. - HeartbeatPeriod time.Duration `json:",omitempty"` -} - -// CAConfig represents CA configuration. -type CAConfig struct { - // NodeCertExpiry is the duration certificates should be issued for - NodeCertExpiry time.Duration `json:",omitempty"` - - // ExternalCAs is a list of CAs to which a manager node will make - // certificate signing requests for node certificates. - ExternalCAs []*ExternalCA `json:",omitempty"` - - // SigningCACert and SigningCAKey specify the desired signing root CA and - // root CA key for the swarm. When inspecting the cluster, the key will - // be redacted. - SigningCACert string `json:",omitempty"` - SigningCAKey string `json:",omitempty"` - - // If this value changes, and there is no specified signing cert and key, - // then the swarm is forced to generate a new root certificate ane key. - ForceRotate uint64 `json:",omitempty"` -} - -// ExternalCAProtocol represents type of external CA. -type ExternalCAProtocol string - -// ExternalCAProtocolCFSSL CFSSL -const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" - -// ExternalCA defines external CA to be used by the cluster. -type ExternalCA struct { - // Protocol is the protocol used by this external CA. - Protocol ExternalCAProtocol - - // URL is the URL where the external CA can be reached. - URL string - - // Options is a set of additional key/value pairs whose interpretation - // depends on the specified CA type. - Options map[string]string `json:",omitempty"` - - // CACert specifies which root CA is used by this external CA. This certificate must - // be in PEM format. - CACert string -} - -// InitRequest is the request used to init a swarm. -type InitRequest struct { - ListenAddr string - AdvertiseAddr string - DataPathAddr string - DataPathPort uint32 - ForceNewCluster bool - Spec Spec - AutoLockManagers bool - Availability NodeAvailability - DefaultAddrPool []string - SubnetSize uint32 -} - -// JoinRequest is the request used to join a swarm. -type JoinRequest struct { - ListenAddr string - AdvertiseAddr string - DataPathAddr string - RemoteAddrs []string - JoinToken string // accept by secret - Availability NodeAvailability -} - -// UnlockRequest is the request used to unlock a swarm. -type UnlockRequest struct { - // UnlockKey is the unlock key in ASCII-armored format. - UnlockKey string -} - -// LocalNodeState represents the state of the local node. 
-type LocalNodeState string - -const ( - // LocalNodeStateInactive INACTIVE - LocalNodeStateInactive LocalNodeState = "inactive" - // LocalNodeStatePending PENDING - LocalNodeStatePending LocalNodeState = "pending" - // LocalNodeStateActive ACTIVE - LocalNodeStateActive LocalNodeState = "active" - // LocalNodeStateError ERROR - LocalNodeStateError LocalNodeState = "error" - // LocalNodeStateLocked LOCKED - LocalNodeStateLocked LocalNodeState = "locked" -) - -// Info represents generic information about swarm. -type Info struct { - NodeID string - NodeAddr string - - LocalNodeState LocalNodeState - ControlAvailable bool - Error string - - RemoteManagers []Peer - Nodes int `json:",omitempty"` - Managers int `json:",omitempty"` - - Cluster *ClusterInfo `json:",omitempty"` - - Warnings []string `json:",omitempty"` -} - -// Peer represents a peer. -type Peer struct { - NodeID string - Addr string -} - -// UpdateFlags contains flags for SwarmUpdate. -type UpdateFlags struct { - RotateWorkerToken bool - RotateManagerToken bool - RotateManagerUnlockKey bool -} diff --git a/src/vendor/github.com/docker/docker/api/types/swarm/task.go b/src/vendor/github.com/docker/docker/api/types/swarm/task.go deleted file mode 100644 index a6f7ab7b5..000000000 --- a/src/vendor/github.com/docker/docker/api/types/swarm/task.go +++ /dev/null @@ -1,206 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" - - "github.com/docker/docker/api/types/swarm/runtime" -) - -// TaskState represents the state of a task. -type TaskState string - -const ( - // TaskStateNew NEW - TaskStateNew TaskState = "new" - // TaskStateAllocated ALLOCATED - TaskStateAllocated TaskState = "allocated" - // TaskStatePending PENDING - TaskStatePending TaskState = "pending" - // TaskStateAssigned ASSIGNED - TaskStateAssigned TaskState = "assigned" - // TaskStateAccepted ACCEPTED - TaskStateAccepted TaskState = "accepted" - // TaskStatePreparing PREPARING - TaskStatePreparing TaskState = "preparing" - // TaskStateReady READY - TaskStateReady TaskState = "ready" - // TaskStateStarting STARTING - TaskStateStarting TaskState = "starting" - // TaskStateRunning RUNNING - TaskStateRunning TaskState = "running" - // TaskStateComplete COMPLETE - TaskStateComplete TaskState = "complete" - // TaskStateShutdown SHUTDOWN - TaskStateShutdown TaskState = "shutdown" - // TaskStateFailed FAILED - TaskStateFailed TaskState = "failed" - // TaskStateRejected REJECTED - TaskStateRejected TaskState = "rejected" - // TaskStateRemove REMOVE - TaskStateRemove TaskState = "remove" - // TaskStateOrphaned ORPHANED - TaskStateOrphaned TaskState = "orphaned" -) - -// Task represents a task. -type Task struct { - ID string - Meta - Annotations - - Spec TaskSpec `json:",omitempty"` - ServiceID string `json:",omitempty"` - Slot int `json:",omitempty"` - NodeID string `json:",omitempty"` - Status TaskStatus `json:",omitempty"` - DesiredState TaskState `json:",omitempty"` - NetworksAttachments []NetworkAttachment `json:",omitempty"` - GenericResources []GenericResource `json:",omitempty"` - - // JobIteration is the JobIteration of the Service that this Task was - // spawned from, if the Service is a ReplicatedJob or GlobalJob. This is - // used to determine which Tasks belong to which run of the job. This field - // is absent if the Service mode is Replicated or Global. - JobIteration *Version `json:",omitempty"` -} - -// TaskSpec represents the spec of a task. 
-type TaskSpec struct { - // ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive. - // PluginSpec is only used when the `Runtime` field is set to `plugin` - // NetworkAttachmentSpec is used if the `Runtime` field is set to - // `attachment`. - ContainerSpec *ContainerSpec `json:",omitempty"` - PluginSpec *runtime.PluginSpec `json:",omitempty"` - NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"` - - Resources *ResourceRequirements `json:",omitempty"` - RestartPolicy *RestartPolicy `json:",omitempty"` - Placement *Placement `json:",omitempty"` - Networks []NetworkAttachmentConfig `json:",omitempty"` - - // LogDriver specifies the LogDriver to use for tasks created from this - // spec. If not present, the one on cluster default on swarm.Spec will be - // used, finally falling back to the engine default if not specified. - LogDriver *Driver `json:",omitempty"` - - // ForceUpdate is a counter that triggers an update even if no relevant - // parameters have been changed. - ForceUpdate uint64 - - Runtime RuntimeType `json:",omitempty"` -} - -// Resources represents resources (CPU/Memory) which can be advertised by a -// node and requested to be reserved for a task. -type Resources struct { - NanoCPUs int64 `json:",omitempty"` - MemoryBytes int64 `json:",omitempty"` - GenericResources []GenericResource `json:",omitempty"` -} - -// Limit describes limits on resources which can be requested by a task. -type Limit struct { - NanoCPUs int64 `json:",omitempty"` - MemoryBytes int64 `json:",omitempty"` - Pids int64 `json:",omitempty"` -} - -// GenericResource represents a "user defined" resource which can -// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) -type GenericResource struct { - NamedResourceSpec *NamedGenericResource `json:",omitempty"` - DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"` -} - -// NamedGenericResource represents a "user defined" resource which is defined -// as a string. -// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) -// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) -type NamedGenericResource struct { - Kind string `json:",omitempty"` - Value string `json:",omitempty"` -} - -// DiscreteGenericResource represents a "user defined" resource which is defined -// as an integer -// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) -// Value is used to count the resource (SSD=5, HDD=3, ...) -type DiscreteGenericResource struct { - Kind string `json:",omitempty"` - Value int64 `json:",omitempty"` -} - -// ResourceRequirements represents resources requirements. -type ResourceRequirements struct { - Limits *Limit `json:",omitempty"` - Reservations *Resources `json:",omitempty"` -} - -// Placement represents orchestration parameters. -type Placement struct { - Constraints []string `json:",omitempty"` - Preferences []PlacementPreference `json:",omitempty"` - MaxReplicas uint64 `json:",omitempty"` - - // Platforms stores all the platforms that the image can run on. - // This field is used in the platform filter for scheduling. If empty, - // then the platform filter is off, meaning there are no scheduling restrictions. - Platforms []Platform `json:",omitempty"` -} - -// PlacementPreference provides a way to make the scheduler aware of factors -// such as topology. 
-type PlacementPreference struct { - Spread *SpreadOver -} - -// SpreadOver is a scheduling preference that instructs the scheduler to spread -// tasks evenly over groups of nodes identified by labels. -type SpreadOver struct { - // label descriptor, such as engine.labels.az - SpreadDescriptor string -} - -// RestartPolicy represents the restart policy. -type RestartPolicy struct { - Condition RestartPolicyCondition `json:",omitempty"` - Delay *time.Duration `json:",omitempty"` - MaxAttempts *uint64 `json:",omitempty"` - Window *time.Duration `json:",omitempty"` -} - -// RestartPolicyCondition represents when to restart. -type RestartPolicyCondition string - -const ( - // RestartPolicyConditionNone NONE - RestartPolicyConditionNone RestartPolicyCondition = "none" - // RestartPolicyConditionOnFailure ON_FAILURE - RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" - // RestartPolicyConditionAny ANY - RestartPolicyConditionAny RestartPolicyCondition = "any" -) - -// TaskStatus represents the status of a task. -type TaskStatus struct { - Timestamp time.Time `json:",omitempty"` - State TaskState `json:",omitempty"` - Message string `json:",omitempty"` - Err string `json:",omitempty"` - ContainerStatus *ContainerStatus `json:",omitempty"` - PortStatus PortStatus `json:",omitempty"` -} - -// ContainerStatus represents the status of a container. -type ContainerStatus struct { - ContainerID string - PID int - ExitCode int -} - -// PortStatus represents the port status of a task's host ports whose -// service has published host ports -type PortStatus struct { - Ports []PortConfig `json:",omitempty"` -} diff --git a/src/vendor/github.com/docker/docker/api/types/types.go b/src/vendor/github.com/docker/docker/api/types/types.go deleted file mode 100644 index e3a159912..000000000 --- a/src/vendor/github.com/docker/docker/api/types/types.go +++ /dev/null @@ -1,635 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "errors" - "fmt" - "io" - "os" - "strings" - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/go-connections/nat" -) - -// RootFS returns Image's RootFS description including the layer IDs. 
-type RootFS struct { - Type string - Layers []string `json:",omitempty"` - BaseLayer string `json:",omitempty"` -} - -// ImageInspect contains response of Engine API: -// GET "/images/{name:.*}/json" -type ImageInspect struct { - ID string `json:"Id"` - RepoTags []string - RepoDigests []string - Parent string - Comment string - Created string - Container string - ContainerConfig *container.Config - DockerVersion string - Author string - Config *container.Config - Architecture string - Variant string `json:",omitempty"` - Os string - OsVersion string `json:",omitempty"` - Size int64 - VirtualSize int64 - GraphDriver GraphDriverData - RootFS RootFS - Metadata ImageMetadata -} - -// ImageMetadata contains engine-local data about the image -type ImageMetadata struct { - LastTagTime time.Time `json:",omitempty"` -} - -// Container contains response of Engine API: -// GET "/containers/json" -type Container struct { - ID string `json:"Id"` - Names []string - Image string - ImageID string - Command string - Created int64 - Ports []Port - SizeRw int64 `json:",omitempty"` - SizeRootFs int64 `json:",omitempty"` - Labels map[string]string - State string - Status string - HostConfig struct { - NetworkMode string `json:",omitempty"` - } - NetworkSettings *SummaryNetworkSettings - Mounts []MountPoint -} - -// CopyConfig contains request body of Engine API: -// POST "/containers/"+containerID+"/copy" -type CopyConfig struct { - Resource string -} - -// ContainerPathStat is used to encode the header from -// GET "/containers/{name:.*}/archive" -// "Name" is the file or directory name. -type ContainerPathStat struct { - Name string `json:"name"` - Size int64 `json:"size"` - Mode os.FileMode `json:"mode"` - Mtime time.Time `json:"mtime"` - LinkTarget string `json:"linkTarget"` -} - -// ContainerStats contains response of Engine API: -// GET "/stats" -type ContainerStats struct { - Body io.ReadCloser `json:"body"` - OSType string `json:"ostype"` -} - -// Ping contains response of Engine API: -// GET "/_ping" -type Ping struct { - APIVersion string - OSType string - Experimental bool - BuilderVersion BuilderVersion -} - -// ComponentVersion describes the version information for a specific component. -type ComponentVersion struct { - Name string - Version string - Details map[string]string `json:",omitempty"` -} - -// Version contains response of Engine API: -// GET "/version" -type Version struct { - Platform struct{ Name string } `json:",omitempty"` - Components []ComponentVersion `json:",omitempty"` - - // The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility - - Version string - APIVersion string `json:"ApiVersion"` - MinAPIVersion string `json:"MinAPIVersion,omitempty"` - GitCommit string - GoVersion string - Os string - Arch string - KernelVersion string `json:",omitempty"` - Experimental bool `json:",omitempty"` - BuildTime string `json:",omitempty"` -} - -// Commit holds the Git-commit (SHA1) that a binary was built from, as reported -// in the version-string of external tools, such as containerd, or runC. -type Commit struct { - ID string // ID is the actual commit ID of external tool. - Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. 
-} - -// Info contains response of Engine API: -// GET "/info" -type Info struct { - ID string - Containers int - ContainersRunning int - ContainersPaused int - ContainersStopped int - Images int - Driver string - DriverStatus [][2]string - SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API - Plugins PluginsInfo - MemoryLimit bool - SwapLimit bool - KernelMemory bool // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes - KernelMemoryTCP bool - CPUCfsPeriod bool `json:"CpuCfsPeriod"` - CPUCfsQuota bool `json:"CpuCfsQuota"` - CPUShares bool - CPUSet bool - PidsLimit bool - IPv4Forwarding bool - BridgeNfIptables bool - BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` - Debug bool - NFd int - OomKillDisable bool - NGoroutines int - SystemTime string - LoggingDriver string - CgroupDriver string - CgroupVersion string `json:",omitempty"` - NEventsListener int - KernelVersion string - OperatingSystem string - OSVersion string - OSType string - Architecture string - IndexServerAddress string - RegistryConfig *registry.ServiceConfig - NCPU int - MemTotal int64 - GenericResources []swarm.GenericResource - DockerRootDir string - HTTPProxy string `json:"HttpProxy"` - HTTPSProxy string `json:"HttpsProxy"` - NoProxy string - Name string - Labels []string - ExperimentalBuild bool - ServerVersion string - ClusterStore string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated - ClusterAdvertise string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated - Runtimes map[string]Runtime - DefaultRuntime string - Swarm swarm.Info - // LiveRestoreEnabled determines whether containers should be kept - // running when the daemon is shutdown or upon daemon start if - // running containers are detected - LiveRestoreEnabled bool - Isolation container.Isolation - InitBinary string - ContainerdCommit Commit - RuncCommit Commit - InitCommit Commit - SecurityOptions []string - ProductLicense string `json:",omitempty"` - DefaultAddressPools []NetworkAddressPool `json:",omitempty"` - Warnings []string -} - -// KeyValue holds a key/value pair -type KeyValue struct { - Key, Value string -} - -// NetworkAddressPool is a temp struct used by Info struct -type NetworkAddressPool struct { - Base string - Size int -} - -// SecurityOpt contains the name and options of a security option -type SecurityOpt struct { - Name string - Options []KeyValue -} - -// DecodeSecurityOptions decodes a security options string slice to a type safe -// SecurityOpt -func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { - so := []SecurityOpt{} - for _, opt := range opts { - // support output from a < 1.13 docker daemon - if !strings.Contains(opt, "=") { - so = append(so, SecurityOpt{Name: opt}) - continue - } - secopt := SecurityOpt{} - split := strings.Split(opt, ",") - for _, s := range split { - kv := strings.SplitN(s, "=", 2) - if len(kv) != 2 { - return nil, fmt.Errorf("invalid security option %q", s) - } - if kv[0] == "" || kv[1] == "" { - return nil, errors.New("invalid empty security option") - } - if kv[0] == "name" { - secopt.Name = kv[1] - continue - } - secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]}) - } - so = append(so, secopt) - } - return so, nil -} - -// PluginsInfo is a temp struct holding Plugins name -// registered with docker daemon. 
It is used by Info struct -type PluginsInfo struct { - // List of Volume plugins registered - Volume []string - // List of Network plugins registered - Network []string - // List of Authorization plugins registered - Authorization []string - // List of Log plugins registered - Log []string -} - -// ExecStartCheck is a temp struct used by execStart -// Config fields is part of ExecConfig in runconfig package -type ExecStartCheck struct { - // ExecStart will first check if it's detached - Detach bool - // Check if there's a tty - Tty bool -} - -// HealthcheckResult stores information about a single run of a healthcheck probe -type HealthcheckResult struct { - Start time.Time // Start is the time this check started - End time.Time // End is the time this check ended - ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe - Output string // Output from last check -} - -// Health states -const ( - NoHealthcheck = "none" // Indicates there is no healthcheck - Starting = "starting" // Starting indicates that the container is not yet ready - Healthy = "healthy" // Healthy indicates that the container is running correctly - Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem -) - -// Health stores information about the container's healthcheck results -type Health struct { - Status string // Status is one of Starting, Healthy or Unhealthy - FailingStreak int // FailingStreak is the number of consecutive failures - Log []*HealthcheckResult // Log contains the last few results (oldest first) -} - -// ContainerState stores container's running state -// it's part of ContainerJSONBase and will return by "inspect" command -type ContainerState struct { - Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" - Running bool - Paused bool - Restarting bool - OOMKilled bool - Dead bool - Pid int - ExitCode int - Error string - StartedAt string - FinishedAt string - Health *Health `json:",omitempty"` -} - -// ContainerNode stores information about the node that a container -// is running on. 
It's only used by the Docker Swarm standalone API -type ContainerNode struct { - ID string - IPAddress string `json:"IP"` - Addr string - Name string - Cpus int - Memory int64 - Labels map[string]string -} - -// ContainerJSONBase contains response of Engine API: -// GET "/containers/{name:.*}/json" -type ContainerJSONBase struct { - ID string `json:"Id"` - Created string - Path string - Args []string - State *ContainerState - Image string - ResolvConfPath string - HostnamePath string - HostsPath string - LogPath string - Node *ContainerNode `json:",omitempty"` // Node is only propagated by Docker Swarm standalone API - Name string - RestartCount int - Driver string - Platform string - MountLabel string - ProcessLabel string - AppArmorProfile string - ExecIDs []string - HostConfig *container.HostConfig - GraphDriver GraphDriverData - SizeRw *int64 `json:",omitempty"` - SizeRootFs *int64 `json:",omitempty"` -} - -// ContainerJSON is newly used struct along with MountPoint -type ContainerJSON struct { - *ContainerJSONBase - Mounts []MountPoint - Config *container.Config - NetworkSettings *NetworkSettings -} - -// NetworkSettings exposes the network settings in the api -type NetworkSettings struct { - NetworkSettingsBase - DefaultNetworkSettings - Networks map[string]*network.EndpointSettings -} - -// SummaryNetworkSettings provides a summary of container's networks -// in /containers/json -type SummaryNetworkSettings struct { - Networks map[string]*network.EndpointSettings -} - -// NetworkSettingsBase holds basic information about networks -type NetworkSettingsBase struct { - Bridge string // Bridge is the Bridge name the network uses(e.g. `docker0`) - SandboxID string // SandboxID uniquely represents a container's network stack - HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface - LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix - LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address - Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port - SandboxKey string // SandboxKey identifies the sandbox - SecondaryIPAddresses []network.Address - SecondaryIPv6Addresses []network.Address -} - -// DefaultNetworkSettings holds network information -// during the 2 release deprecation period. -// It will be removed in Docker 1.11. -type DefaultNetworkSettings struct { - EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox - Gateway string // Gateway holds the gateway address for the network - GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address - GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address - IPAddress string // IPAddress holds the IPv4 address for the network - IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address - IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 - MacAddress string // MacAddress holds the MAC address for the network -} - -// MountPoint represents a mount point configuration inside the container. -// This is used for reporting the mountpoints in use by a container. 
-type MountPoint struct { - Type mount.Type `json:",omitempty"` - Name string `json:",omitempty"` - Source string - Destination string - Driver string `json:",omitempty"` - Mode string - RW bool - Propagation mount.Propagation -} - -// NetworkResource is the body of the "get network" http response message -type NetworkResource struct { - Name string // Name is the requested name of the network - ID string `json:"Id"` // ID uniquely identifies a network on a single machine - Created time.Time // Created is the time the network created - Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) - Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) - EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 - IPAM network.IPAM // IPAM is the network's IP Address Management - Internal bool // Internal represents if the network is used internal only - Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. - Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. - ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. - ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. - Containers map[string]EndpointResource // Containers contains endpoints belonging to the network - Options map[string]string // Options holds the network specific options to use for when creating the network - Labels map[string]string // Labels holds metadata specific to the network being created - Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network - Services map[string]network.ServiceInfo `json:",omitempty"` -} - -// EndpointResource contains network resources allocated and used for a container in a network -type EndpointResource struct { - Name string - EndpointID string - MacAddress string - IPv4Address string - IPv6Address string -} - -// NetworkCreate is the expected body of the "create network" http request message -type NetworkCreate struct { - // Check for networks with duplicate names. - // Network is primarily keyed based on a random ID and not on the name. - // Network name is strictly a user-friendly alias to the network - // which is uniquely identified using ID. - // And there is no guaranteed way to check for duplicates. - // Option CheckDuplicate is there to provide a best effort checking of any networks - // which has the same name but it is not guaranteed to catch all name collisions. - CheckDuplicate bool - Driver string - Scope string - EnableIPv6 bool - IPAM *network.IPAM - Internal bool - Attachable bool - Ingress bool - ConfigOnly bool - ConfigFrom *network.ConfigReference - Options map[string]string - Labels map[string]string -} - -// NetworkCreateRequest is the request message sent to the server for network create call. 
-type NetworkCreateRequest struct { - NetworkCreate - Name string -} - -// NetworkCreateResponse is the response message sent by the server for network create call -type NetworkCreateResponse struct { - ID string `json:"Id"` - Warning string -} - -// NetworkConnect represents the data to be used to connect a container to the network -type NetworkConnect struct { - Container string - EndpointConfig *network.EndpointSettings `json:",omitempty"` -} - -// NetworkDisconnect represents the data to be used to disconnect a container from the network -type NetworkDisconnect struct { - Container string - Force bool -} - -// NetworkInspectOptions holds parameters to inspect network -type NetworkInspectOptions struct { - Scope string - Verbose bool -} - -// Checkpoint represents the details of a checkpoint -type Checkpoint struct { - Name string // Name is the name of the checkpoint -} - -// Runtime describes an OCI runtime -type Runtime struct { - Path string `json:"path"` - Args []string `json:"runtimeArgs,omitempty"` - - // This is exposed here only for internal use - // It is not currently supported to specify custom shim configs - Shim *ShimConfig `json:"-"` -} - -// ShimConfig is used by runtime to configure containerd shims -type ShimConfig struct { - Binary string - Opts interface{} -} - -// DiskUsage contains response of Engine API: -// GET "/system/df" -type DiskUsage struct { - LayersSize int64 - Images []*ImageSummary - Containers []*Container - Volumes []*Volume - BuildCache []*BuildCache - BuilderSize int64 // deprecated -} - -// ContainersPruneReport contains the response for Engine API: -// POST "/containers/prune" -type ContainersPruneReport struct { - ContainersDeleted []string - SpaceReclaimed uint64 -} - -// VolumesPruneReport contains the response for Engine API: -// POST "/volumes/prune" -type VolumesPruneReport struct { - VolumesDeleted []string - SpaceReclaimed uint64 -} - -// ImagesPruneReport contains the response for Engine API: -// POST "/images/prune" -type ImagesPruneReport struct { - ImagesDeleted []ImageDeleteResponseItem - SpaceReclaimed uint64 -} - -// BuildCachePruneReport contains the response for Engine API: -// POST "/build/prune" -type BuildCachePruneReport struct { - CachesDeleted []string - SpaceReclaimed uint64 -} - -// NetworksPruneReport contains the response for Engine API: -// POST "/networks/prune" -type NetworksPruneReport struct { - NetworksDeleted []string -} - -// SecretCreateResponse contains the information returned to a client -// on the creation of a new secret. -type SecretCreateResponse struct { - // ID is the id of the created secret. - ID string -} - -// SecretListOptions holds parameters to list secrets -type SecretListOptions struct { - Filters filters.Args -} - -// ConfigCreateResponse contains the information returned to a client -// on the creation of a new config. -type ConfigCreateResponse struct { - // ID is the id of the created config. - ID string -} - -// ConfigListOptions holds parameters to list configs -type ConfigListOptions struct { - Filters filters.Args -} - -// PushResult contains the tag, manifest digest, and manifest size from the -// push. It's used to signal this information to the trust code in the client -// so it can sign the manifest if necessary. 
-type PushResult struct {
-	Tag    string
-	Digest string
-	Size   int
-}
-
-// BuildResult contains the image id of a successful build
-type BuildResult struct {
-	ID string
-}
-
-// BuildCache contains information about a build cache record
-type BuildCache struct {
-	ID          string
-	Parent      string
-	Type        string
-	Description string
-	InUse       bool
-	Shared      bool
-	Size        int64
-	CreatedAt   time.Time
-	LastUsedAt  *time.Time
-	UsageCount  int
-}
-
-// BuildCachePruneOptions hold parameters to prune the build cache
-type BuildCachePruneOptions struct {
-	All         bool
-	KeepStorage int64
-	Filters     filters.Args
-}
diff --git a/src/vendor/github.com/docker/docker/api/types/versions/README.md b/src/vendor/github.com/docker/docker/api/types/versions/README.md
deleted file mode 100644
index 1ef911edb..000000000
--- a/src/vendor/github.com/docker/docker/api/types/versions/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# Legacy API type versions
-
-This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`.
-
-Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`.
-
-## Package name conventions
-
-The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention:
-
-1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`.
-2. We cannot use `_` because golint complains about it. The code is actually valid, but it looks probably more weird: `v1_20.CallFunction`.
-
-For instance, if you want to modify a type that was available in the version `1.21` of the API but it will have different fields in the version `1.22`, you want to create a new package under `api/types/versions/v1p21`.
diff --git a/src/vendor/github.com/docker/docker/api/types/versions/compare.go b/src/vendor/github.com/docker/docker/api/types/versions/compare.go
deleted file mode 100644
index 8ccb0aa92..000000000
--- a/src/vendor/github.com/docker/docker/api/types/versions/compare.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package versions // import "github.com/docker/docker/api/types/versions"
-
-import (
-	"strconv"
-	"strings"
-)
-
-// compare compares two version strings
-// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
-func compare(v1, v2 string) int { - var ( - currTab = strings.Split(v1, ".") - otherTab = strings.Split(v2, ".") - ) - - max := len(currTab) - if len(otherTab) > max { - max = len(otherTab) - } - for i := 0; i < max; i++ { - var currInt, otherInt int - - if len(currTab) > i { - currInt, _ = strconv.Atoi(currTab[i]) - } - if len(otherTab) > i { - otherInt, _ = strconv.Atoi(otherTab[i]) - } - if currInt > otherInt { - return 1 - } - if otherInt > currInt { - return -1 - } - } - return 0 -} - -// LessThan checks if a version is less than another -func LessThan(v, other string) bool { - return compare(v, other) == -1 -} - -// LessThanOrEqualTo checks if a version is less than or equal to another -func LessThanOrEqualTo(v, other string) bool { - return compare(v, other) <= 0 -} - -// GreaterThan checks if a version is greater than another -func GreaterThan(v, other string) bool { - return compare(v, other) == 1 -} - -// GreaterThanOrEqualTo checks if a version is greater than or equal to another -func GreaterThanOrEqualTo(v, other string) bool { - return compare(v, other) >= 0 -} - -// Equal checks if a version is equal to another -func Equal(v, other string) bool { - return compare(v, other) == 0 -} diff --git a/src/vendor/github.com/docker/docker/api/types/volume.go b/src/vendor/github.com/docker/docker/api/types/volume.go deleted file mode 100644 index c69b08448..000000000 --- a/src/vendor/github.com/docker/docker/api/types/volume.go +++ /dev/null @@ -1,72 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Volume volume -// swagger:model Volume -type Volume struct { - - // Date/Time the volume was created. - CreatedAt string `json:"CreatedAt,omitempty"` - - // Name of the volume driver used by the volume. - // Required: true - Driver string `json:"Driver"` - - // User-defined key/value metadata. - // Required: true - Labels map[string]string `json:"Labels"` - - // Mount path of the volume on the host. - // Required: true - Mountpoint string `json:"Mountpoint"` - - // Name of the volume. - // Required: true - Name string `json:"Name"` - - // The driver specific options used when creating the volume. - // - // Required: true - Options map[string]string `json:"Options"` - - // The level at which the volume exists. Either `global` for cluster-wide, - // or `local` for machine level. - // - // Required: true - Scope string `json:"Scope"` - - // Low-level details about the volume, provided by the volume driver. - // Details are returned as a map with key/value pairs: - // `{"key":"value","key2":"value2"}`. - // - // The `Status` field is optional, and is omitted if the volume driver - // does not support this feature. - // - Status map[string]interface{} `json:"Status,omitempty"` - - // usage data - UsageData *VolumeUsageData `json:"UsageData,omitempty"` -} - -// VolumeUsageData Usage details about the volume. This information is used by the -// `GET /system/df` endpoint, and omitted in other endpoints. -// -// swagger:model VolumeUsageData -type VolumeUsageData struct { - - // The number of containers referencing this volume. This field - // is set to `-1` if the reference-count is not available. - // - // Required: true - RefCount int64 `json:"RefCount"` - - // Amount of disk space used by the volume (in bytes). This information - // is only available for volumes created with the `"local"` volume - // driver. 
For volumes created with other volume drivers, this field - // is set to `-1` ("not available") - // - // Required: true - Size int64 `json:"Size"` -} diff --git a/src/vendor/github.com/docker/docker/errdefs/defs.go b/src/vendor/github.com/docker/docker/errdefs/defs.go deleted file mode 100644 index 61e7456b4..000000000 --- a/src/vendor/github.com/docker/docker/errdefs/defs.go +++ /dev/null @@ -1,69 +0,0 @@ -package errdefs // import "github.com/docker/docker/errdefs" - -// ErrNotFound signals that the requested object doesn't exist -type ErrNotFound interface { - NotFound() -} - -// ErrInvalidParameter signals that the user input is invalid -type ErrInvalidParameter interface { - InvalidParameter() -} - -// ErrConflict signals that some internal state conflicts with the requested action and can't be performed. -// A change in state should be able to clear this error. -type ErrConflict interface { - Conflict() -} - -// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action -type ErrUnauthorized interface { - Unauthorized() -} - -// ErrUnavailable signals that the requested action/subsystem is not available. -type ErrUnavailable interface { - Unavailable() -} - -// ErrForbidden signals that the requested action cannot be performed under any circumstances. -// When a ErrForbidden is returned, the caller should never retry the action. -type ErrForbidden interface { - Forbidden() -} - -// ErrSystem signals that some internal error occurred. -// An example of this would be a failed mount request. -type ErrSystem interface { - System() -} - -// ErrNotModified signals that an action can't be performed because it's already in the desired state -type ErrNotModified interface { - NotModified() -} - -// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured. -type ErrNotImplemented interface { - NotImplemented() -} - -// ErrUnknown signals that the kind of error that occurred is not known. -type ErrUnknown interface { - Unknown() -} - -// ErrCancelled signals that the action was cancelled. -type ErrCancelled interface { - Cancelled() -} - -// ErrDeadline signals that the deadline was reached before the action completed. -type ErrDeadline interface { - DeadlineExceeded() -} - -// ErrDataLoss indicates that data was lost or there is data corruption. -type ErrDataLoss interface { - DataLoss() -} diff --git a/src/vendor/github.com/docker/docker/errdefs/doc.go b/src/vendor/github.com/docker/docker/errdefs/doc.go deleted file mode 100644 index c211f174f..000000000 --- a/src/vendor/github.com/docker/docker/errdefs/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors. -// Errors that cross the package boundary should implement one (and only one) of these interfaces. -// -// Packages should not reference these interfaces directly, only implement them. -// To check if a particular error implements one of these interfaces, there are helper -// functions provided (e.g. `Is`) which can be used rather than asserting the interfaces directly. -// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`). 
-package errdefs // import "github.com/docker/docker/errdefs" diff --git a/src/vendor/github.com/docker/docker/errdefs/helpers.go b/src/vendor/github.com/docker/docker/errdefs/helpers.go deleted file mode 100644 index fe06fb6f7..000000000 --- a/src/vendor/github.com/docker/docker/errdefs/helpers.go +++ /dev/null @@ -1,279 +0,0 @@ -package errdefs // import "github.com/docker/docker/errdefs" - -import "context" - -type errNotFound struct{ error } - -func (errNotFound) NotFound() {} - -func (e errNotFound) Cause() error { - return e.error -} - -func (e errNotFound) Unwrap() error { - return e.error -} - -// NotFound is a helper to create an error of the class with the same name from any error type -func NotFound(err error) error { - if err == nil || IsNotFound(err) { - return err - } - return errNotFound{err} -} - -type errInvalidParameter struct{ error } - -func (errInvalidParameter) InvalidParameter() {} - -func (e errInvalidParameter) Cause() error { - return e.error -} - -func (e errInvalidParameter) Unwrap() error { - return e.error -} - -// InvalidParameter is a helper to create an error of the class with the same name from any error type -func InvalidParameter(err error) error { - if err == nil || IsInvalidParameter(err) { - return err - } - return errInvalidParameter{err} -} - -type errConflict struct{ error } - -func (errConflict) Conflict() {} - -func (e errConflict) Cause() error { - return e.error -} - -func (e errConflict) Unwrap() error { - return e.error -} - -// Conflict is a helper to create an error of the class with the same name from any error type -func Conflict(err error) error { - if err == nil || IsConflict(err) { - return err - } - return errConflict{err} -} - -type errUnauthorized struct{ error } - -func (errUnauthorized) Unauthorized() {} - -func (e errUnauthorized) Cause() error { - return e.error -} - -func (e errUnauthorized) Unwrap() error { - return e.error -} - -// Unauthorized is a helper to create an error of the class with the same name from any error type -func Unauthorized(err error) error { - if err == nil || IsUnauthorized(err) { - return err - } - return errUnauthorized{err} -} - -type errUnavailable struct{ error } - -func (errUnavailable) Unavailable() {} - -func (e errUnavailable) Cause() error { - return e.error -} - -func (e errUnavailable) Unwrap() error { - return e.error -} - -// Unavailable is a helper to create an error of the class with the same name from any error type -func Unavailable(err error) error { - if err == nil || IsUnavailable(err) { - return err - } - return errUnavailable{err} -} - -type errForbidden struct{ error } - -func (errForbidden) Forbidden() {} - -func (e errForbidden) Cause() error { - return e.error -} - -func (e errForbidden) Unwrap() error { - return e.error -} - -// Forbidden is a helper to create an error of the class with the same name from any error type -func Forbidden(err error) error { - if err == nil || IsForbidden(err) { - return err - } - return errForbidden{err} -} - -type errSystem struct{ error } - -func (errSystem) System() {} - -func (e errSystem) Cause() error { - return e.error -} - -func (e errSystem) Unwrap() error { - return e.error -} - -// System is a helper to create an error of the class with the same name from any error type -func System(err error) error { - if err == nil || IsSystem(err) { - return err - } - return errSystem{err} -} - -type errNotModified struct{ error } - -func (errNotModified) NotModified() {} - -func (e errNotModified) Cause() error { - return e.error -} - -func (e 
errNotModified) Unwrap() error { - return e.error -} - -// NotModified is a helper to create an error of the class with the same name from any error type -func NotModified(err error) error { - if err == nil || IsNotModified(err) { - return err - } - return errNotModified{err} -} - -type errNotImplemented struct{ error } - -func (errNotImplemented) NotImplemented() {} - -func (e errNotImplemented) Cause() error { - return e.error -} - -func (e errNotImplemented) Unwrap() error { - return e.error -} - -// NotImplemented is a helper to create an error of the class with the same name from any error type -func NotImplemented(err error) error { - if err == nil || IsNotImplemented(err) { - return err - } - return errNotImplemented{err} -} - -type errUnknown struct{ error } - -func (errUnknown) Unknown() {} - -func (e errUnknown) Cause() error { - return e.error -} - -func (e errUnknown) Unwrap() error { - return e.error -} - -// Unknown is a helper to create an error of the class with the same name from any error type -func Unknown(err error) error { - if err == nil || IsUnknown(err) { - return err - } - return errUnknown{err} -} - -type errCancelled struct{ error } - -func (errCancelled) Cancelled() {} - -func (e errCancelled) Cause() error { - return e.error -} - -func (e errCancelled) Unwrap() error { - return e.error -} - -// Cancelled is a helper to create an error of the class with the same name from any error type -func Cancelled(err error) error { - if err == nil || IsCancelled(err) { - return err - } - return errCancelled{err} -} - -type errDeadline struct{ error } - -func (errDeadline) DeadlineExceeded() {} - -func (e errDeadline) Cause() error { - return e.error -} - -func (e errDeadline) Unwrap() error { - return e.error -} - -// Deadline is a helper to create an error of the class with the same name from any error type -func Deadline(err error) error { - if err == nil || IsDeadline(err) { - return err - } - return errDeadline{err} -} - -type errDataLoss struct{ error } - -func (errDataLoss) DataLoss() {} - -func (e errDataLoss) Cause() error { - return e.error -} - -func (e errDataLoss) Unwrap() error { - return e.error -} - -// DataLoss is a helper to create an error of the class with the same name from any error type -func DataLoss(err error) error { - if err == nil || IsDataLoss(err) { - return err - } - return errDataLoss{err} -} - -// FromContext returns the error class from the passed in context -func FromContext(ctx context.Context) error { - e := ctx.Err() - if e == nil { - return nil - } - - if e == context.Canceled { - return Cancelled(e) - } - if e == context.DeadlineExceeded { - return Deadline(e) - } - return Unknown(e) -} diff --git a/src/vendor/github.com/docker/docker/errdefs/http_helpers.go b/src/vendor/github.com/docker/docker/errdefs/http_helpers.go deleted file mode 100644 index 5afe48677..000000000 --- a/src/vendor/github.com/docker/docker/errdefs/http_helpers.go +++ /dev/null @@ -1,53 +0,0 @@ -package errdefs // import "github.com/docker/docker/errdefs" - -import ( - "net/http" - - "github.com/sirupsen/logrus" -) - -// FromStatusCode creates an errdef error, based on the provided HTTP status-code -func FromStatusCode(err error, statusCode int) error { - if err == nil { - return err - } - switch statusCode { - case http.StatusNotFound: - err = NotFound(err) - case http.StatusBadRequest: - err = InvalidParameter(err) - case http.StatusConflict: - err = Conflict(err) - case http.StatusUnauthorized: - err = Unauthorized(err) - case http.StatusServiceUnavailable: - 
err = Unavailable(err) - case http.StatusForbidden: - err = Forbidden(err) - case http.StatusNotModified: - err = NotModified(err) - case http.StatusNotImplemented: - err = NotImplemented(err) - case http.StatusInternalServerError: - if !IsSystem(err) && !IsUnknown(err) && !IsDataLoss(err) && !IsDeadline(err) && !IsCancelled(err) { - err = System(err) - } - default: - logrus.WithError(err).WithFields(logrus.Fields{ - "module": "api", - "status_code": statusCode, - }).Debug("FIXME: Got an status-code for which error does not match any expected type!!!") - - switch { - case statusCode >= 200 && statusCode < 400: - // it's a client error - case statusCode >= 400 && statusCode < 500: - err = InvalidParameter(err) - case statusCode >= 500 && statusCode < 600: - err = System(err) - default: - err = Unknown(err) - } - } - return err -} diff --git a/src/vendor/github.com/docker/docker/errdefs/is.go b/src/vendor/github.com/docker/docker/errdefs/is.go deleted file mode 100644 index 3abf07d0c..000000000 --- a/src/vendor/github.com/docker/docker/errdefs/is.go +++ /dev/null @@ -1,107 +0,0 @@ -package errdefs // import "github.com/docker/docker/errdefs" - -type causer interface { - Cause() error -} - -func getImplementer(err error) error { - switch e := err.(type) { - case - ErrNotFound, - ErrInvalidParameter, - ErrConflict, - ErrUnauthorized, - ErrUnavailable, - ErrForbidden, - ErrSystem, - ErrNotModified, - ErrNotImplemented, - ErrCancelled, - ErrDeadline, - ErrDataLoss, - ErrUnknown: - return err - case causer: - return getImplementer(e.Cause()) - default: - return err - } -} - -// IsNotFound returns if the passed in error is an ErrNotFound -func IsNotFound(err error) bool { - _, ok := getImplementer(err).(ErrNotFound) - return ok -} - -// IsInvalidParameter returns if the passed in error is an ErrInvalidParameter -func IsInvalidParameter(err error) bool { - _, ok := getImplementer(err).(ErrInvalidParameter) - return ok -} - -// IsConflict returns if the passed in error is an ErrConflict -func IsConflict(err error) bool { - _, ok := getImplementer(err).(ErrConflict) - return ok -} - -// IsUnauthorized returns if the passed in error is an ErrUnauthorized -func IsUnauthorized(err error) bool { - _, ok := getImplementer(err).(ErrUnauthorized) - return ok -} - -// IsUnavailable returns if the passed in error is an ErrUnavailable -func IsUnavailable(err error) bool { - _, ok := getImplementer(err).(ErrUnavailable) - return ok -} - -// IsForbidden returns if the passed in error is an ErrForbidden -func IsForbidden(err error) bool { - _, ok := getImplementer(err).(ErrForbidden) - return ok -} - -// IsSystem returns if the passed in error is an ErrSystem -func IsSystem(err error) bool { - _, ok := getImplementer(err).(ErrSystem) - return ok -} - -// IsNotModified returns if the passed in error is a NotModified error -func IsNotModified(err error) bool { - _, ok := getImplementer(err).(ErrNotModified) - return ok -} - -// IsNotImplemented returns if the passed in error is an ErrNotImplemented -func IsNotImplemented(err error) bool { - _, ok := getImplementer(err).(ErrNotImplemented) - return ok -} - -// IsUnknown returns if the passed in error is an ErrUnknown -func IsUnknown(err error) bool { - _, ok := getImplementer(err).(ErrUnknown) - return ok -} - -// IsCancelled returns if the passed in error is an ErrCancelled -func IsCancelled(err error) bool { - _, ok := getImplementer(err).(ErrCancelled) - return ok -} - -// IsDeadline returns if the passed in error is an ErrDeadline -func IsDeadline(err error) 
bool { - _, ok := getImplementer(err).(ErrDeadline) - return ok -} - -// IsDataLoss returns if the passed in error is an ErrDataLoss -func IsDataLoss(err error) bool { - _, ok := getImplementer(err).(ErrDataLoss) - return ok -} diff --git a/src/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/src/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go deleted file mode 100644 index 5e6310fdc..000000000 --- a/src/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go +++ /dev/null @@ -1,93 +0,0 @@ -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "errors" - "os" - "path/filepath" - "strings" -) - -// GetRuntimeDir returns XDG_RUNTIME_DIR. -// XDG_RUNTIME_DIR is typically configured via pam_systemd. -// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetRuntimeDir() (string, error) { - if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" { - return xdgRuntimeDir, nil - } - return "", errors.New("could not get XDG_RUNTIME_DIR") -} - -// StickRuntimeDirContents sets the sticky bit on files that are under -// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system. -// -// StickyRuntimeDir returns slice of sticked files. -// StickyRuntimeDir returns nil error if XDG_RUNTIME_DIR is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func StickRuntimeDirContents(files []string) ([]string, error) { - runtimeDir, err := GetRuntimeDir() - if err != nil { - // ignore error if runtimeDir is empty - return nil, nil - } - runtimeDir, err = filepath.Abs(runtimeDir) - if err != nil { - return nil, err - } - var sticked []string - for _, f := range files { - f, err = filepath.Abs(f) - if err != nil { - return sticked, err - } - if strings.HasPrefix(f, runtimeDir+"/") { - if err = stick(f); err != nil { - return sticked, err - } - sticked = append(sticked, f) - } - } - return sticked, nil -} - -func stick(f string) error { - st, err := os.Stat(f) - if err != nil { - return err - } - m := st.Mode() - m |= os.ModeSticky - return os.Chmod(f, m) -} - -// GetDataHome returns XDG_DATA_HOME. -// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetDataHome() (string, error) { - if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { - return xdgDataHome, nil - } - home := os.Getenv("HOME") - if home == "" { - return "", errors.New("could not get either XDG_DATA_HOME or HOME") - } - return filepath.Join(home, ".local", "share"), nil -} - -// GetConfigHome returns XDG_CONFIG_HOME. -// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. 
-//
-// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
-func GetConfigHome() (string, error) {
-	if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" {
-		return xdgConfigHome, nil
-	}
-	home := os.Getenv("HOME")
-	if home == "" {
-		return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
-	}
-	return filepath.Join(home, ".config"), nil
-}
diff --git a/src/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/src/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go
deleted file mode 100644
index fc48e674c..000000000
--- a/src/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go
+++ /dev/null
@@ -1,28 +0,0 @@
-//go:build !linux
-// +build !linux
-
-package homedir // import "github.com/docker/docker/pkg/homedir"
-
-import (
-	"errors"
-)
-
-// GetRuntimeDir is unsupported on non-linux system.
-func GetRuntimeDir() (string, error) {
-	return "", errors.New("homedir.GetRuntimeDir() is not supported on this system")
-}
-
-// StickRuntimeDirContents is unsupported on non-linux system.
-func StickRuntimeDirContents(files []string) ([]string, error) {
-	return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system")
-}
-
-// GetDataHome is unsupported on non-linux system.
-func GetDataHome() (string, error) {
-	return "", errors.New("homedir.GetDataHome() is not supported on this system")
-}
-
-// GetConfigHome is unsupported on non-linux system.
-func GetConfigHome() (string, error) {
-	return "", errors.New("homedir.GetConfigHome() is not supported on this system")
-}
diff --git a/src/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/src/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go
deleted file mode 100644
index d1732dee5..000000000
--- a/src/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go
+++ /dev/null
@@ -1,39 +0,0 @@
-//go:build !windows
-// +build !windows
-
-package homedir // import "github.com/docker/docker/pkg/homedir"
-
-import (
-	"os"
-	"os/user"
-)
-
-// Key returns the env var name for the user's home dir based on
-// the platform being run on
-func Key() string {
-	return "HOME"
-}
-
-// Get returns the home directory of the current user with the help of
-// environment variables depending on the target operating system.
-// Returned path should be used with "path/filepath" to form new paths.
-//
-// If linking statically with cgo enabled against glibc, ensure the
-// osusergo build tag is used.
-//
-// If needing to do nss lookups, do not disable cgo or set osusergo.
-func Get() string {
-	home := os.Getenv(Key())
-	if home == "" {
-		if u, err := user.Current(); err == nil {
-			return u.HomeDir
-		}
-	}
-	return home
-}
-
-// GetShortcutString returns the string that is shortcut to user's home directory
-// in the native shell of the platform running on.
-func GetShortcutString() string { - return "~" -} diff --git a/src/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go b/src/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go deleted file mode 100644 index 2f81813b2..000000000 --- a/src/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go +++ /dev/null @@ -1,24 +0,0 @@ -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "os" -) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "USERPROFILE" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -func Get() string { - return os.Getenv(Key()) -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. -func GetShortcutString() string { - return "%USERPROFILE%" // be careful while using in format functions -} diff --git a/src/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/src/vendor/github.com/docker/docker/pkg/ioutils/buffer.go deleted file mode 100644 index 466f79294..000000000 --- a/src/vendor/github.com/docker/docker/pkg/ioutils/buffer.go +++ /dev/null @@ -1,51 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "errors" - "io" -) - -var errBufferFull = errors.New("buffer is full") - -type fixedBuffer struct { - buf []byte - pos int - lastRead int -} - -func (b *fixedBuffer) Write(p []byte) (int, error) { - n := copy(b.buf[b.pos:cap(b.buf)], p) - b.pos += n - - if n < len(p) { - if b.pos == cap(b.buf) { - return n, errBufferFull - } - return n, io.ErrShortWrite - } - return n, nil -} - -func (b *fixedBuffer) Read(p []byte) (int, error) { - n := copy(p, b.buf[b.lastRead:b.pos]) - b.lastRead += n - return n, nil -} - -func (b *fixedBuffer) Len() int { - return b.pos - b.lastRead -} - -func (b *fixedBuffer) Cap() int { - return cap(b.buf) -} - -func (b *fixedBuffer) Reset() { - b.pos = 0 - b.lastRead = 0 - b.buf = b.buf[:0] -} - -func (b *fixedBuffer) String() string { - return string(b.buf[b.lastRead:b.pos]) -} diff --git a/src/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/src/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go deleted file mode 100644 index 87514b643..000000000 --- a/src/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go +++ /dev/null @@ -1,187 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "errors" - "io" - "sync" -) - -// maxCap is the highest capacity to use in byte slices that buffer data. -const maxCap = 1e6 - -// minCap is the lowest capacity to use in byte slices that buffer data -const minCap = 64 - -// blockThreshold is the minimum number of bytes in the buffer which will cause -// a write to BytesPipe to block when allocating a new slice. -const blockThreshold = 1e6 - -var ( - // ErrClosed is returned when Write is called on a closed BytesPipe. - ErrClosed = errors.New("write to closed BytesPipe") - - bufPools = make(map[int]*sync.Pool) - bufPoolsLock sync.Mutex -) - -// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). -// All written data may be read at most once. Also, BytesPipe allocates -// and releases new byte slices to adjust to current needs, so the buffer -// won't be overgrown after peak loads. 
-type BytesPipe struct { - mu sync.Mutex - wait *sync.Cond - buf []*fixedBuffer - bufLen int - closeErr error // error to return from next Read. set to nil if not closed. -} - -// NewBytesPipe creates new BytesPipe, initialized by specified slice. -// If buf is nil, then it will be initialized with slice which cap is 64. -// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). -func NewBytesPipe() *BytesPipe { - bp := &BytesPipe{} - bp.buf = append(bp.buf, getBuffer(minCap)) - bp.wait = sync.NewCond(&bp.mu) - return bp -} - -// Write writes p to BytesPipe. -// It can allocate new []byte slices in a process of writing. -func (bp *BytesPipe) Write(p []byte) (int, error) { - bp.mu.Lock() - - written := 0 -loop0: - for { - if bp.closeErr != nil { - bp.mu.Unlock() - return written, ErrClosed - } - - if len(bp.buf) == 0 { - bp.buf = append(bp.buf, getBuffer(64)) - } - // get the last buffer - b := bp.buf[len(bp.buf)-1] - - n, err := b.Write(p) - written += n - bp.bufLen += n - - // errBufferFull is an error we expect to get if the buffer is full - if err != nil && err != errBufferFull { - bp.wait.Broadcast() - bp.mu.Unlock() - return written, err - } - - // if there was enough room to write all then break - if len(p) == n { - break - } - - // more data: write to the next slice - p = p[n:] - - // make sure the buffer doesn't grow too big from this write - for bp.bufLen >= blockThreshold { - bp.wait.Wait() - if bp.closeErr != nil { - continue loop0 - } - } - - // add new byte slice to the buffers slice and continue writing - nextCap := b.Cap() * 2 - if nextCap > maxCap { - nextCap = maxCap - } - bp.buf = append(bp.buf, getBuffer(nextCap)) - } - bp.wait.Broadcast() - bp.mu.Unlock() - return written, nil -} - -// CloseWithError causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) CloseWithError(err error) error { - bp.mu.Lock() - if err != nil { - bp.closeErr = err - } else { - bp.closeErr = io.EOF - } - bp.wait.Broadcast() - bp.mu.Unlock() - return nil -} - -// Close causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) Close() error { - return bp.CloseWithError(nil) -} - -// Read reads bytes from BytesPipe. -// Data could be read only once. 
-func (bp *BytesPipe) Read(p []byte) (n int, err error) { - bp.mu.Lock() - if bp.bufLen == 0 { - if bp.closeErr != nil { - err := bp.closeErr - bp.mu.Unlock() - return 0, err - } - bp.wait.Wait() - if bp.bufLen == 0 && bp.closeErr != nil { - err := bp.closeErr - bp.mu.Unlock() - return 0, err - } - } - - for bp.bufLen > 0 { - b := bp.buf[0] - read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error - n += read - bp.bufLen -= read - - if b.Len() == 0 { - // it's empty so return it to the pool and move to the next one - returnBuffer(b) - bp.buf[0] = nil - bp.buf = bp.buf[1:] - } - - if len(p) == read { - break - } - - p = p[read:] - } - - bp.wait.Broadcast() - bp.mu.Unlock() - return -} - -func returnBuffer(b *fixedBuffer) { - b.Reset() - bufPoolsLock.Lock() - pool := bufPools[b.Cap()] - bufPoolsLock.Unlock() - if pool != nil { - pool.Put(b) - } -} - -func getBuffer(size int) *fixedBuffer { - bufPoolsLock.Lock() - pool, ok := bufPools[size] - if !ok { - pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} - bufPools[size] = pool - } - bufPoolsLock.Unlock() - return pool.Get().(*fixedBuffer) -} diff --git a/src/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/src/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go deleted file mode 100644 index 534d66ac2..000000000 --- a/src/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go +++ /dev/null @@ -1,162 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "io" - "io/ioutil" - "os" - "path/filepath" -) - -// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a -// temporary file and closing it atomically changes the temporary file to -// destination path. Writing and closing concurrently is not allowed. -func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { - f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) - if err != nil { - return nil, err - } - - abspath, err := filepath.Abs(filename) - if err != nil { - return nil, err - } - return &atomicFileWriter{ - f: f, - fn: abspath, - perm: perm, - }, nil -} - -// AtomicWriteFile atomically writes data to a file named by filename. -func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { - f, err := NewAtomicFileWriter(filename, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - f.(*atomicFileWriter).writeErr = err - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -type atomicFileWriter struct { - f *os.File - fn string - writeErr error - perm os.FileMode -} - -func (w *atomicFileWriter) Write(dt []byte) (int, error) { - n, err := w.f.Write(dt) - if err != nil { - w.writeErr = err - } - return n, err -} - -func (w *atomicFileWriter) Close() (retErr error) { - defer func() { - if retErr != nil || w.writeErr != nil { - os.Remove(w.f.Name()) - } - }() - if err := w.f.Sync(); err != nil { - w.f.Close() - return err - } - if err := w.f.Close(); err != nil { - return err - } - if err := os.Chmod(w.f.Name(), w.perm); err != nil { - return err - } - if w.writeErr == nil { - return os.Rename(w.f.Name(), w.fn) - } - return nil -} - -// AtomicWriteSet is used to atomically write a set -// of files and ensure they are visible at the same time. -// Must be committed to a new directory. 
-type AtomicWriteSet struct { - root string -} - -// NewAtomicWriteSet creates a new atomic write set to -// atomically create a set of files. The given directory -// is used as the base directory for storing files before -// commit. If no temporary directory is given the system -// default is used. -func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { - td, err := ioutil.TempDir(tmpDir, "write-set-") - if err != nil { - return nil, err - } - - return &AtomicWriteSet{ - root: td, - }, nil -} - -// WriteFile writes a file to the set, guaranteeing the file -// has been synced. -func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { - f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -type syncFileCloser struct { - *os.File -} - -func (w syncFileCloser) Close() error { - err := w.File.Sync() - if err1 := w.File.Close(); err == nil { - err = err1 - } - return err -} - -// FileWriter opens a file writer inside the set. The file -// should be synced and closed before calling commit. -func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { - f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) - if err != nil { - return nil, err - } - return syncFileCloser{f}, nil -} - -// Cancel cancels the set and removes all temporary data -// created in the set. -func (ws *AtomicWriteSet) Cancel() error { - return os.RemoveAll(ws.root) -} - -// Commit moves all created files to the target directory. The -// target directory must not exist and the parent of the target -// directory must exist. -func (ws *AtomicWriteSet) Commit(target string) error { - return os.Rename(ws.root, target) -} - -// String returns the location the set is writing to. -func (ws *AtomicWriteSet) String() string { - return ws.root -} diff --git a/src/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/src/vendor/github.com/docker/docker/pkg/ioutils/readers.go deleted file mode 100644 index 1f657bd3d..000000000 --- a/src/vendor/github.com/docker/docker/pkg/ioutils/readers.go +++ /dev/null @@ -1,157 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "context" - "crypto/sha256" - "encoding/hex" - "io" -) - -// ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser -// It calls the given callback function when closed. It should be constructed -// with NewReadCloserWrapper -type ReadCloserWrapper struct { - io.Reader - closer func() error -} - -// Close calls back the passed closer function -func (r *ReadCloserWrapper) Close() error { - return r.closer() -} - -// NewReadCloserWrapper returns a new io.ReadCloser. -func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { - return &ReadCloserWrapper{ - Reader: r, - closer: closer, - } -} - -type readerErrWrapper struct { - reader io.Reader - closer func() -} - -func (r *readerErrWrapper) Read(p []byte) (int, error) { - n, err := r.reader.Read(p) - if err != nil { - r.closer() - } - return n, err -} - -// NewReaderErrWrapper returns a new io.Reader. -func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { - return &readerErrWrapper{ - reader: r, - closer: closer, - } -} - -// HashData returns the sha256 sum of src. 
-func HashData(src io.Reader) (string, error) { - h := sha256.New() - if _, err := io.Copy(h, src); err != nil { - return "", err - } - return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil -} - -// OnEOFReader wraps an io.ReadCloser and a function -// the function will run at the end of file or close the file. -type OnEOFReader struct { - Rc io.ReadCloser - Fn func() -} - -func (r *OnEOFReader) Read(p []byte) (n int, err error) { - n, err = r.Rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -// Close closes the file and run the function. -func (r *OnEOFReader) Close() error { - err := r.Rc.Close() - r.runFunc() - return err -} - -func (r *OnEOFReader) runFunc() { - if fn := r.Fn; fn != nil { - fn() - r.Fn = nil - } -} - -// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read -// operations. -type cancelReadCloser struct { - cancel func() - pR *io.PipeReader // Stream to read from - pW *io.PipeWriter -} - -// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the -// context is cancelled. The returned io.ReadCloser must be closed when it is -// no longer needed. -func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { - pR, pW := io.Pipe() - - // Create a context used to signal when the pipe is closed - doneCtx, cancel := context.WithCancel(context.Background()) - - p := &cancelReadCloser{ - cancel: cancel, - pR: pR, - pW: pW, - } - - go func() { - _, err := io.Copy(pW, in) - select { - case <-ctx.Done(): - // If the context was closed, p.closeWithError - // was already called. Calling it again would - // change the error that Read returns. - default: - p.closeWithError(err) - } - in.Close() - }() - go func() { - for { - select { - case <-ctx.Done(): - p.closeWithError(ctx.Err()) - case <-doneCtx.Done(): - return - } - } - }() - - return p -} - -// Read wraps the Read method of the pipe that provides data from the wrapped -// ReadCloser. -func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { - return p.pR.Read(buf) -} - -// closeWithError closes the wrapper and its underlying reader. It will -// cause future calls to Read to return err. -func (p *cancelReadCloser) closeWithError(err error) { - p.pW.CloseWithError(err) - p.cancel() -} - -// Close closes the wrapper its underlying reader. It will cause -// future calls to Read to return io.EOF. -func (p *cancelReadCloser) Close() error { - p.closeWithError(io.EOF) - return nil -} diff --git a/src/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/src/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go deleted file mode 100644 index 4e67ec2f5..000000000 --- a/src/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build !windows -// +build !windows - -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import "io/ioutil" - -// TempDir on Unix systems is equivalent to ioutil.TempDir. 
-func TempDir(dir, prefix string) (string, error) { - return ioutil.TempDir(dir, prefix) -} diff --git a/src/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/src/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go deleted file mode 100644 index ecaba2e36..000000000 --- a/src/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go +++ /dev/null @@ -1,16 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "io/ioutil" - - "github.com/docker/docker/pkg/longpath" -) - -// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. -func TempDir(dir, prefix string) (string, error) { - tempDir, err := ioutil.TempDir(dir, prefix) - if err != nil { - return "", err - } - return longpath.AddPrefix(tempDir), nil -} diff --git a/src/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/src/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go deleted file mode 100644 index 91b8d1826..000000000 --- a/src/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go +++ /dev/null @@ -1,92 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "io" - "sync" -) - -// WriteFlusher wraps the Write and Flush operation ensuring that every write -// is a flush. In addition, the Close method can be called to intercept -// Read/Write calls if the targets lifecycle has already ended. -type WriteFlusher struct { - w io.Writer - flusher flusher - flushed chan struct{} - flushedOnce sync.Once - closed chan struct{} - closeLock sync.Mutex -} - -type flusher interface { - Flush() -} - -var errWriteFlusherClosed = io.EOF - -func (wf *WriteFlusher) Write(b []byte) (n int, err error) { - select { - case <-wf.closed: - return 0, errWriteFlusherClosed - default: - } - - n, err = wf.w.Write(b) - wf.Flush() // every write is a flush. - return n, err -} - -// Flush the stream immediately. -func (wf *WriteFlusher) Flush() { - select { - case <-wf.closed: - return - default: - } - - wf.flushedOnce.Do(func() { - close(wf.flushed) - }) - wf.flusher.Flush() -} - -// Flushed returns the state of flushed. -// If it's flushed, return true, or else it return false. -func (wf *WriteFlusher) Flushed() bool { - // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to - // be used to detect whether or a response code has been issued or not. - // Another hook should be used instead. - var flushed bool - select { - case <-wf.flushed: - flushed = true - default: - } - return flushed -} - -// Close closes the write flusher, disallowing any further writes to the -// target. After the flusher is closed, all calls to write or flush will -// result in an error. -func (wf *WriteFlusher) Close() error { - wf.closeLock.Lock() - defer wf.closeLock.Unlock() - - select { - case <-wf.closed: - return errWriteFlusherClosed - default: - close(wf.closed) - } - return nil -} - -// NewWriteFlusher returns a new WriteFlusher. 
-func NewWriteFlusher(w io.Writer) *WriteFlusher { - var fl flusher - if f, ok := w.(flusher); ok { - fl = f - } else { - fl = &NopFlusher{} - } - return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} -} diff --git a/src/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/src/vendor/github.com/docker/docker/pkg/ioutils/writers.go deleted file mode 100644 index 61c679497..000000000 --- a/src/vendor/github.com/docker/docker/pkg/ioutils/writers.go +++ /dev/null @@ -1,66 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import "io" - -// NopWriter represents a type which write operation is nop. -type NopWriter struct{} - -func (*NopWriter) Write(buf []byte) (int, error) { - return len(buf), nil -} - -type nopWriteCloser struct { - io.Writer -} - -func (w *nopWriteCloser) Close() error { return nil } - -// NopWriteCloser returns a nopWriteCloser. -func NopWriteCloser(w io.Writer) io.WriteCloser { - return &nopWriteCloser{w} -} - -// NopFlusher represents a type which flush operation is nop. -type NopFlusher struct{} - -// Flush is a nop operation. -func (f *NopFlusher) Flush() {} - -type writeCloserWrapper struct { - io.Writer - closer func() error -} - -func (r *writeCloserWrapper) Close() error { - return r.closer() -} - -// NewWriteCloserWrapper returns a new io.WriteCloser. -func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { - return &writeCloserWrapper{ - Writer: r, - closer: closer, - } -} - -// WriteCounter wraps a concrete io.Writer and hold a count of the number -// of bytes written to the writer during a "session". -// This can be convenient when write return is masked -// (e.g., json.Encoder.Encode()) -type WriteCounter struct { - Count int64 - Writer io.Writer -} - -// NewWriteCounter returns a new WriteCounter. -func NewWriteCounter(w io.Writer) *WriteCounter { - return &WriteCounter{ - Writer: w, - } -} - -func (wc *WriteCounter) Write(p []byte) (count int, err error) { - count, err = wc.Writer.Write(p) - wc.Count += int64(count) - return -} diff --git a/src/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/src/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go deleted file mode 100644 index cf8d04b1b..000000000 --- a/src/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go +++ /dev/null @@ -1,283 +0,0 @@ -package jsonmessage // import "github.com/docker/docker/pkg/jsonmessage" - -import ( - "encoding/json" - "fmt" - "io" - "strings" - "time" - - units "github.com/docker/go-units" - "github.com/moby/term" - "github.com/morikuni/aec" -) - -// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to -// ensure the formatted time isalways the same number of characters. -const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - -// JSONError wraps a concrete Code and Message, `Code` is -// is an integer error code, `Message` is the error message. -type JSONError struct { - Code int `json:"code,omitempty"` - Message string `json:"message,omitempty"` -} - -func (e *JSONError) Error() string { - return e.Message -} - -// JSONProgress describes a Progress. terminalFd is the fd of the current terminal, -// Start is the initial value for the operation. Current is the current status and -// value of the progress made towards Total. Total is the end value describing when -// we made 100% progress for an operation. 
-type JSONProgress struct { - terminalFd uintptr - Current int64 `json:"current,omitempty"` - Total int64 `json:"total,omitempty"` - Start int64 `json:"start,omitempty"` - // If true, don't show xB/yB - HideCounts bool `json:"hidecounts,omitempty"` - Units string `json:"units,omitempty"` - nowFunc func() time.Time - winSize int -} - -func (p *JSONProgress) String() string { - var ( - width = p.width() - pbBox string - numbersBox string - timeLeftBox string - ) - if p.Current <= 0 && p.Total <= 0 { - return "" - } - if p.Total <= 0 { - switch p.Units { - case "": - current := units.HumanSize(float64(p.Current)) - return fmt.Sprintf("%8v", current) - default: - return fmt.Sprintf("%d %s", p.Current, p.Units) - } - } - - percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 - if percentage > 50 { - percentage = 50 - } - if width > 110 { - // this number can't be negative gh#7136 - numSpaces := 0 - if 50-percentage > 0 { - numSpaces = 50 - percentage - } - pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) - } - - switch { - case p.HideCounts: - case p.Units == "": // no units, use bytes - current := units.HumanSize(float64(p.Current)) - total := units.HumanSize(float64(p.Total)) - - numbersBox = fmt.Sprintf("%8v/%v", current, total) - - if p.Current > p.Total { - // remove total display if the reported current is wonky. - numbersBox = fmt.Sprintf("%8v", current) - } - default: - numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units) - - if p.Current > p.Total { - // remove total display if the reported current is wonky. - numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units) - } - } - - if p.Current > 0 && p.Start > 0 && percentage < 50 { - fromStart := p.now().Sub(time.Unix(p.Start, 0)) - perEntry := fromStart / time.Duration(p.Current) - left := time.Duration(p.Total-p.Current) * perEntry - left = (left / time.Second) * time.Second - - if width > 50 { - timeLeftBox = " " + left.String() - } - } - return pbBox + numbersBox + timeLeftBox -} - -// shim for testing -func (p *JSONProgress) now() time.Time { - if p.nowFunc == nil { - p.nowFunc = func() time.Time { - return time.Now().UTC() - } - } - return p.nowFunc() -} - -// shim for testing -func (p *JSONProgress) width() int { - if p.winSize != 0 { - return p.winSize - } - ws, err := term.GetWinsize(p.terminalFd) - if err == nil { - return int(ws.Width) - } - return 200 -} - -// JSONMessage defines a message struct. It describes -// the created time, where it from, status, ID of the -// message. It's used for docker events. -type JSONMessage struct { - Stream string `json:"stream,omitempty"` - Status string `json:"status,omitempty"` - Progress *JSONProgress `json:"progressDetail,omitempty"` - ProgressMessage string `json:"progress,omitempty"` // deprecated - ID string `json:"id,omitempty"` - From string `json:"from,omitempty"` - Time int64 `json:"time,omitempty"` - TimeNano int64 `json:"timeNano,omitempty"` - Error *JSONError `json:"errorDetail,omitempty"` - ErrorMessage string `json:"error,omitempty"` // deprecated - // Aux contains out-of-band data, such as digests for push signing and image id after building. 
- Aux *json.RawMessage `json:"aux,omitempty"` -} - -func clearLine(out io.Writer) { - eraseMode := aec.EraseModes.All - cl := aec.EraseLine(eraseMode) - fmt.Fprint(out, cl) -} - -func cursorUp(out io.Writer, l uint) { - fmt.Fprint(out, aec.Up(l)) -} - -func cursorDown(out io.Writer, l uint) { - fmt.Fprint(out, aec.Down(l)) -} - -// Display displays the JSONMessage to `out`. If `isTerminal` is true, it will erase the -// entire current line when displaying the progressbar. -func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { - if jm.Error != nil { - if jm.Error.Code == 401 { - return fmt.Errorf("authentication is required") - } - return jm.Error - } - var endl string - if isTerminal && jm.Stream == "" && jm.Progress != nil { - clearLine(out) - endl = "\r" - fmt.Fprint(out, endl) - } else if jm.Progress != nil && jm.Progress.String() != "" { // disable progressbar in non-terminal - return nil - } - if jm.TimeNano != 0 { - fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed)) - } else if jm.Time != 0 { - fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed)) - } - if jm.ID != "" { - fmt.Fprintf(out, "%s: ", jm.ID) - } - if jm.From != "" { - fmt.Fprintf(out, "(from %s) ", jm.From) - } - if jm.Progress != nil && isTerminal { - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) - } else if jm.ProgressMessage != "" { // deprecated - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) - } else if jm.Stream != "" { - fmt.Fprintf(out, "%s%s", jm.Stream, endl) - } else { - fmt.Fprintf(out, "%s%s\n", jm.Status, endl) - } - return nil -} - -// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` -// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of -// each line and move the cursor while displaying. -func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error { - var ( - dec = json.NewDecoder(in) - ids = make(map[string]uint) - ) - - for { - var diff uint - var jm JSONMessage - if err := dec.Decode(&jm); err != nil { - if err == io.EOF { - break - } - return err - } - - if jm.Aux != nil { - if auxCallback != nil { - auxCallback(jm) - } - continue - } - - if jm.Progress != nil { - jm.Progress.terminalFd = terminalFd - } - if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { - line, ok := ids[jm.ID] - if !ok { - // NOTE: This approach of using len(id) to - // figure out the number of lines of history - // only works as long as we clear the history - // when we output something that's not - // accounted for in the map, such as a line - // with no ID. - line = uint(len(ids)) - ids[jm.ID] = line - if isTerminal { - fmt.Fprintf(out, "\n") - } - } - diff = uint(len(ids)) - line - if isTerminal { - cursorUp(out, diff) - } - } else { - // When outputting something that isn't progress - // output, clear the history of previous lines. We - // don't want progress entries from some previous - // operation to be updated (for example, pull -a - // with multiple tags). 
- ids = make(map[string]uint) - } - err := jm.Display(out, isTerminal) - if jm.ID != "" && isTerminal { - cursorDown(out, diff) - } - if err != nil { - return err - } - } - return nil -} - -type stream interface { - io.Writer - FD() uintptr - IsTerminal() bool -} - -// DisplayJSONMessagesToStream prints json messages to the output stream -func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(JSONMessage)) error { - return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback) -} diff --git a/src/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/src/vendor/github.com/docker/docker/pkg/longpath/longpath.go deleted file mode 100644 index 4177affba..000000000 --- a/src/vendor/github.com/docker/docker/pkg/longpath/longpath.go +++ /dev/null @@ -1,26 +0,0 @@ -// longpath introduces some constants and helper functions for handling long paths -// in Windows, which are expected to be prepended with `\\?\` and followed by either -// a drive letter, a UNC server\share, or a volume identifier. - -package longpath // import "github.com/docker/docker/pkg/longpath" - -import ( - "strings" -) - -// Prefix is the longpath prefix for Windows file paths. -const Prefix = `\\?\` - -// AddPrefix will add the Windows long path prefix to the path provided if -// it does not already have it. -func AddPrefix(path string) string { - if !strings.HasPrefix(path, Prefix) { - if strings.HasPrefix(path, `\\`) { - // This is a UNC path, so we need to add 'UNC' to the path as well. - path = Prefix + `UNC` + path[1:] - } else { - path = Prefix + path - } - } - return path -} diff --git a/src/vendor/github.com/docker/docker/pkg/stringid/README.md b/src/vendor/github.com/docker/docker/pkg/stringid/README.md deleted file mode 100644 index 37a5098fd..000000000 --- a/src/vendor/github.com/docker/docker/pkg/stringid/README.md +++ /dev/null @@ -1 +0,0 @@ -This package provides helper functions for dealing with string identifiers diff --git a/src/vendor/github.com/docker/docker/pkg/stringid/stringid.go b/src/vendor/github.com/docker/docker/pkg/stringid/stringid.go deleted file mode 100644 index 5fe071d62..000000000 --- a/src/vendor/github.com/docker/docker/pkg/stringid/stringid.go +++ /dev/null @@ -1,63 +0,0 @@ -// Package stringid provides helper functions for dealing with string identifiers -package stringid // import "github.com/docker/docker/pkg/stringid" - -import ( - "crypto/rand" - "encoding/hex" - "fmt" - "regexp" - "strconv" - "strings" -) - -const shortLen = 12 - -var ( - validShortID = regexp.MustCompile("^[a-f0-9]{12}$") - validHex = regexp.MustCompile(`^[a-f0-9]{64}$`) -) - -// IsShortID determines if an arbitrary string *looks like* a short ID. -func IsShortID(id string) bool { - return validShortID.MatchString(id) -} - -// TruncateID returns a shorthand version of a string identifier for convenience. -// A collision with other shorthands is very unlikely, but possible. -// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller -// will need to use a longer prefix, or the full-length Id. -func TruncateID(id string) string { - if i := strings.IndexRune(id, ':'); i >= 0 { - id = id[i+1:] - } - if len(id) > shortLen { - id = id[:shortLen] - } - return id -} - -// GenerateRandomID returns a unique id. 
-func GenerateRandomID() string { - b := make([]byte, 32) - for { - if _, err := rand.Read(b); err != nil { - panic(err) // This shouldn't happen - } - id := hex.EncodeToString(b) - // if we try to parse the truncated for as an int and we don't have - // an error then the value is all numeric and causes issues when - // used as a hostname. ref #3869 - if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil { - continue - } - return id - } -} - -// ValidateID checks whether an ID string is a valid image ID. -func ValidateID(id string) error { - if ok := validHex.MatchString(id); !ok { - return fmt.Errorf("image ID %q is invalid", id) - } - return nil -} diff --git a/src/vendor/github.com/docker/docker/registry/auth.go b/src/vendor/github.com/docker/docker/registry/auth.go deleted file mode 100644 index 2d0ecde2d..000000000 --- a/src/vendor/github.com/docker/docker/registry/auth.go +++ /dev/null @@ -1,247 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "net/http" - "net/url" - "strings" - "time" - - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/auth/challenge" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const ( - // AuthClientID is used the ClientID used for the token server - AuthClientID = "docker" -) - -type loginCredentialStore struct { - authConfig *types.AuthConfig -} - -func (lcs loginCredentialStore) Basic(*url.URL) (string, string) { - return lcs.authConfig.Username, lcs.authConfig.Password -} - -func (lcs loginCredentialStore) RefreshToken(*url.URL, string) string { - return lcs.authConfig.IdentityToken -} - -func (lcs loginCredentialStore) SetRefreshToken(u *url.URL, service, token string) { - lcs.authConfig.IdentityToken = token -} - -type staticCredentialStore struct { - auth *types.AuthConfig -} - -// NewStaticCredentialStore returns a credential store -// which always returns the same credential values. -func NewStaticCredentialStore(auth *types.AuthConfig) auth.CredentialStore { - return staticCredentialStore{ - auth: auth, - } -} - -func (scs staticCredentialStore) Basic(*url.URL) (string, string) { - if scs.auth == nil { - return "", "" - } - return scs.auth.Username, scs.auth.Password -} - -func (scs staticCredentialStore) RefreshToken(*url.URL, string) string { - if scs.auth == nil { - return "" - } - return scs.auth.IdentityToken -} - -func (scs staticCredentialStore) SetRefreshToken(*url.URL, string, string) { -} - -type fallbackError struct { - err error -} - -func (err fallbackError) Error() string { - return err.err.Error() -} - -// loginV2 tries to login to the v2 registry server. The given registry -// endpoint will be pinged to get authorization challenges. These challenges -// will be used to authenticate against the registry to validate credentials. -func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent string) (string, string, error) { - var ( - endpointStr = strings.TrimRight(endpoint.URL.String(), "/") + "/v2/" - modifiers = Headers(userAgent, nil) - authTransport = transport.NewTransport(NewTransport(endpoint.TLSConfig), modifiers...) 
- credentialAuthConfig = *authConfig - creds = loginCredentialStore{authConfig: &credentialAuthConfig} - ) - - logrus.Debugf("attempting v2 login to registry endpoint %s", endpointStr) - - loginClient, foundV2, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil) - if err != nil { - return "", "", err - } - - req, err := http.NewRequest(http.MethodGet, endpointStr, nil) - if err != nil { - if !foundV2 { - err = fallbackError{err: err} - } - return "", "", err - } - - resp, err := loginClient.Do(req) - if err != nil { - err = translateV2AuthError(err) - if !foundV2 { - err = fallbackError{err: err} - } - - return "", "", err - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusOK { - return "Login Succeeded", credentialAuthConfig.IdentityToken, nil - } - - // TODO(dmcgowan): Attempt to further interpret result, status code and error code string - err = errors.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode)) - if !foundV2 { - err = fallbackError{err: err} - } - return "", "", err -} - -func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, bool, error) { - challengeManager, foundV2, err := PingV2Registry(endpoint, authTransport) - if err != nil { - if !foundV2 { - err = fallbackError{err: err} - } - return nil, foundV2, err - } - - tokenHandlerOptions := auth.TokenHandlerOptions{ - Transport: authTransport, - Credentials: creds, - OfflineAccess: true, - ClientID: AuthClientID, - Scopes: scopes, - } - tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) - basicHandler := auth.NewBasicHandler(creds) - modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) - tr := transport.NewTransport(authTransport, modifiers...) - - return &http.Client{ - Transport: tr, - Timeout: 15 * time.Second, - }, foundV2, nil - -} - -// ConvertToHostname converts a registry url which has http|https prepended -// to just an hostname. -func ConvertToHostname(url string) string { - stripped := url - if strings.HasPrefix(url, "http://") { - stripped = strings.TrimPrefix(url, "http://") - } else if strings.HasPrefix(url, "https://") { - stripped = strings.TrimPrefix(url, "https://") - } - - nameParts := strings.SplitN(stripped, "/", 2) - - return nameParts[0] -} - -// ResolveAuthConfig matches an auth configuration to a server address or a URL -func ResolveAuthConfig(authConfigs map[string]types.AuthConfig, index *registrytypes.IndexInfo) types.AuthConfig { - configKey := GetAuthConfigKey(index) - // First try the happy case - if c, found := authConfigs[configKey]; found || index.Official { - return c - } - - // Maybe they have a legacy config file, we will iterate the keys converting - // them to the new format and testing - for registry, ac := range authConfigs { - if configKey == ConvertToHostname(registry) { - return ac - } - } - - // When all else fails, return an empty auth config - return types.AuthConfig{} -} - -// PingResponseError is used when the response from a ping -// was received but invalid. -type PingResponseError struct { - Err error -} - -func (err PingResponseError) Error() string { - return err.Err.Error() -} - -// PingV2Registry attempts to ping a v2 registry and on success return a -// challenge manager for the supported authentication types and -// whether v2 was confirmed by the response. 
If a response is received but -// cannot be interpreted a PingResponseError will be returned. -func PingV2Registry(endpoint *url.URL, transport http.RoundTripper) (challenge.Manager, bool, error) { - var ( - foundV2 = false - v2Version = auth.APIVersion{ - Type: "registry", - Version: "2.0", - } - ) - - pingClient := &http.Client{ - Transport: transport, - Timeout: 15 * time.Second, - } - endpointStr := strings.TrimRight(endpoint.String(), "/") + "/v2/" - req, err := http.NewRequest(http.MethodGet, endpointStr, nil) - if err != nil { - return nil, false, err - } - resp, err := pingClient.Do(req) - if err != nil { - return nil, false, err - } - defer resp.Body.Close() - - versions := auth.APIVersions(resp, DefaultRegistryVersionHeader) - for _, pingVersion := range versions { - if pingVersion == v2Version { - // The version header indicates we're definitely - // talking to a v2 registry. So don't allow future - // fallbacks to the v1 protocol. - - foundV2 = true - break - } - } - - challengeManager := challenge.NewSimpleManager() - if err := challengeManager.AddResponse(resp); err != nil { - return nil, foundV2, PingResponseError{ - Err: err, - } - } - - return challengeManager, foundV2, nil -} diff --git a/src/vendor/github.com/docker/docker/registry/config.go b/src/vendor/github.com/docker/docker/registry/config.go deleted file mode 100644 index 54b83fa40..000000000 --- a/src/vendor/github.com/docker/docker/registry/config.go +++ /dev/null @@ -1,433 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "fmt" - "net" - "net/url" - "regexp" - "strconv" - "strings" - - "github.com/docker/distribution/reference" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// ServiceOptions holds command line options. -type ServiceOptions struct { - AllowNondistributableArtifacts []string `json:"allow-nondistributable-artifacts,omitempty"` - Mirrors []string `json:"registry-mirrors,omitempty"` - InsecureRegistries []string `json:"insecure-registries,omitempty"` -} - -// serviceConfig holds daemon configuration for the registry service. 
-type serviceConfig struct { - registrytypes.ServiceConfig -} - -const ( - // DefaultNamespace is the default namespace - DefaultNamespace = "docker.io" - // DefaultRegistryVersionHeader is the name of the default HTTP header - // that carries Registry version info - DefaultRegistryVersionHeader = "Docker-Distribution-Api-Version" - - // IndexHostname is the index hostname - IndexHostname = "index.docker.io" - // IndexServer is used for user auth and image search - IndexServer = "https://" + IndexHostname + "/v1/" - // IndexName is the name of the index - IndexName = "docker.io" -) - -var ( - // DefaultV2Registry is the URI of the default v2 registry - DefaultV2Registry = &url.URL{ - Scheme: "https", - Host: "registry-1.docker.io", - } - - // ErrInvalidRepositoryName is an error returned if the repository name did - // not have the correct form - ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") - - emptyServiceConfig, _ = newServiceConfig(ServiceOptions{}) - validHostPortRegex = regexp.MustCompile(`^` + reference.DomainRegexp.String() + `$`) - - // for mocking in unit tests - lookupIP = net.LookupIP -) - -// newServiceConfig returns a new instance of ServiceConfig -func newServiceConfig(options ServiceOptions) (*serviceConfig, error) { - config := &serviceConfig{ - ServiceConfig: registrytypes.ServiceConfig{ - InsecureRegistryCIDRs: make([]*registrytypes.NetIPNet, 0), - IndexConfigs: make(map[string]*registrytypes.IndexInfo), - // Hack: Bypass setting the mirrors to IndexConfigs since they are going away - // and Mirrors are only for the official registry anyways. - }, - } - if err := config.LoadAllowNondistributableArtifacts(options.AllowNondistributableArtifacts); err != nil { - return nil, err - } - if err := config.LoadMirrors(options.Mirrors); err != nil { - return nil, err - } - if err := config.LoadInsecureRegistries(options.InsecureRegistries); err != nil { - return nil, err - } - - return config, nil -} - -// LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries into config. -func (config *serviceConfig) LoadAllowNondistributableArtifacts(registries []string) error { - cidrs := map[string]*registrytypes.NetIPNet{} - hostnames := map[string]bool{} - - for _, r := range registries { - if _, err := ValidateIndexName(r); err != nil { - return err - } - if validateNoScheme(r) != nil { - return fmt.Errorf("allow-nondistributable-artifacts registry %s should not contain '://'", r) - } - - if _, ipnet, err := net.ParseCIDR(r); err == nil { - // Valid CIDR. - cidrs[ipnet.String()] = (*registrytypes.NetIPNet)(ipnet) - } else if err := validateHostPort(r); err == nil { - // Must be `host:port` if not CIDR. - hostnames[r] = true - } else { - return fmt.Errorf("allow-nondistributable-artifacts registry %s is not valid: %v", r, err) - } - } - - config.AllowNondistributableArtifactsCIDRs = make([]*(registrytypes.NetIPNet), 0) - for _, c := range cidrs { - config.AllowNondistributableArtifactsCIDRs = append(config.AllowNondistributableArtifactsCIDRs, c) - } - - config.AllowNondistributableArtifactsHostnames = make([]string, 0) - for h := range hostnames { - config.AllowNondistributableArtifactsHostnames = append(config.AllowNondistributableArtifactsHostnames, h) - } - - return nil -} - -// LoadMirrors loads mirrors to config, after removing duplicates. -// Returns an error if mirrors contains an invalid mirror. 
-func (config *serviceConfig) LoadMirrors(mirrors []string) error { - mMap := map[string]struct{}{} - unique := []string{} - - for _, mirror := range mirrors { - m, err := ValidateMirror(mirror) - if err != nil { - return err - } - if _, exist := mMap[m]; !exist { - mMap[m] = struct{}{} - unique = append(unique, m) - } - } - - config.Mirrors = unique - - // Configure public registry since mirrors may have changed. - config.IndexConfigs[IndexName] = ®istrytypes.IndexInfo{ - Name: IndexName, - Mirrors: config.Mirrors, - Secure: true, - Official: true, - } - - return nil -} - -// LoadInsecureRegistries loads insecure registries to config -func (config *serviceConfig) LoadInsecureRegistries(registries []string) error { - // Localhost is by default considered as an insecure registry - // This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker). - // - // TODO: should we deprecate this once it is easier for people to set up a TLS registry or change - // daemon flags on boot2docker? - registries = append(registries, "127.0.0.0/8") - - // Store original InsecureRegistryCIDRs and IndexConfigs - // Clean InsecureRegistryCIDRs and IndexConfigs in config, as passed registries has all insecure registry info. - originalCIDRs := config.ServiceConfig.InsecureRegistryCIDRs - originalIndexInfos := config.ServiceConfig.IndexConfigs - - config.ServiceConfig.InsecureRegistryCIDRs = make([]*registrytypes.NetIPNet, 0) - config.ServiceConfig.IndexConfigs = make(map[string]*registrytypes.IndexInfo) - -skip: - for _, r := range registries { - // validate insecure registry - if _, err := ValidateIndexName(r); err != nil { - // before returning err, roll back to original data - config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs - config.ServiceConfig.IndexConfigs = originalIndexInfos - return err - } - if strings.HasPrefix(strings.ToLower(r), "http://") { - logrus.Warnf("insecure registry %s should not contain 'http://' and 'http://' has been removed from the insecure registry config", r) - r = r[7:] - } else if strings.HasPrefix(strings.ToLower(r), "https://") { - logrus.Warnf("insecure registry %s should not contain 'https://' and 'https://' has been removed from the insecure registry config", r) - r = r[8:] - } else if validateNoScheme(r) != nil { - // Insecure registry should not contain '://' - // before returning err, roll back to original data - config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs - config.ServiceConfig.IndexConfigs = originalIndexInfos - return fmt.Errorf("insecure registry %s should not contain '://'", r) - } - // Check if CIDR was passed to --insecure-registry - _, ipnet, err := net.ParseCIDR(r) - if err == nil { - // Valid CIDR. If ipnet is already in config.InsecureRegistryCIDRs, skip. - data := (*registrytypes.NetIPNet)(ipnet) - for _, value := range config.InsecureRegistryCIDRs { - if value.IP.String() == data.IP.String() && value.Mask.String() == data.Mask.String() { - continue skip - } - } - // ipnet is not found, add it in config.InsecureRegistryCIDRs - config.InsecureRegistryCIDRs = append(config.InsecureRegistryCIDRs, data) - - } else { - if err := validateHostPort(r); err != nil { - config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs - config.ServiceConfig.IndexConfigs = originalIndexInfos - return fmt.Errorf("insecure registry %s is not valid: %v", r, err) - - } - // Assume `host:port` if not CIDR. 
- config.IndexConfigs[r] = ®istrytypes.IndexInfo{ - Name: r, - Mirrors: make([]string, 0), - Secure: false, - Official: false, - } - } - } - - // Configure public registry. - config.IndexConfigs[IndexName] = ®istrytypes.IndexInfo{ - Name: IndexName, - Mirrors: config.Mirrors, - Secure: true, - Official: true, - } - - return nil -} - -// allowNondistributableArtifacts returns true if the provided hostname is part of the list of registries -// that allow push of nondistributable artifacts. -// -// The list can contain elements with CIDR notation to specify a whole subnet. If the subnet contains an IP -// of the registry specified by hostname, true is returned. -// -// hostname should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name -// or an IP address. If it is a domain name, then it will be resolved to IP addresses for matching. If -// resolution fails, CIDR matching is not performed. -func allowNondistributableArtifacts(config *serviceConfig, hostname string) bool { - for _, h := range config.AllowNondistributableArtifactsHostnames { - if h == hostname { - return true - } - } - - return isCIDRMatch(config.AllowNondistributableArtifactsCIDRs, hostname) -} - -// isSecureIndex returns false if the provided indexName is part of the list of insecure registries -// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. -// -// The list of insecure registries can contain an element with CIDR notation to specify a whole subnet. -// If the subnet contains one of the IPs of the registry specified by indexName, the latter is considered -// insecure. -// -// indexName should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name -// or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained -// in a subnet. If the resolving is not successful, isSecureIndex will only try to match hostname to any element -// of insecureRegistries. -func isSecureIndex(config *serviceConfig, indexName string) bool { - // Check for configured index, first. This is needed in case isSecureIndex - // is called from anything besides newIndexInfo, in order to honor per-index configurations. - if index, ok := config.IndexConfigs[indexName]; ok { - return index.Secure - } - - return !isCIDRMatch(config.InsecureRegistryCIDRs, indexName) -} - -// isCIDRMatch returns true if URLHost matches an element of cidrs. URLHost is a URL.Host (`host:port` or `host`) -// where the `host` part can be either a domain name or an IP address. If it is a domain name, then it will be -// resolved to IP addresses for matching. If resolution fails, false is returned. -func isCIDRMatch(cidrs []*registrytypes.NetIPNet, URLHost string) bool { - host, _, err := net.SplitHostPort(URLHost) - if err != nil { - // Assume URLHost is of the form `host` without the port and go on. - host = URLHost - } - - addrs, err := lookupIP(host) - if err != nil { - ip := net.ParseIP(host) - if ip != nil { - addrs = []net.IP{ip} - } - - // if ip == nil, then `host` is neither an IP nor it could be looked up, - // either because the index is unreachable, or because the index is behind an HTTP proxy. - // So, len(addrs) == 0 and we're not aborting. - } - - // Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined. 
- for _, addr := range addrs { - for _, ipnet := range cidrs { - // check if the addr falls in the subnet - if (*net.IPNet)(ipnet).Contains(addr) { - return true - } - } - } - - return false -} - -// ValidateMirror validates an HTTP(S) registry mirror -func ValidateMirror(val string) (string, error) { - uri, err := url.Parse(val) - if err != nil { - return "", fmt.Errorf("invalid mirror: %q is not a valid URI", val) - } - if uri.Scheme != "http" && uri.Scheme != "https" { - return "", fmt.Errorf("invalid mirror: unsupported scheme %q in %q", uri.Scheme, uri) - } - if (uri.Path != "" && uri.Path != "/") || uri.RawQuery != "" || uri.Fragment != "" { - return "", fmt.Errorf("invalid mirror: path, query, or fragment at end of the URI %q", uri) - } - if uri.User != nil { - // strip password from output - uri.User = url.UserPassword(uri.User.Username(), "xxxxx") - return "", fmt.Errorf("invalid mirror: username/password not allowed in URI %q", uri) - } - return strings.TrimSuffix(val, "/") + "/", nil -} - -// ValidateIndexName validates an index name. -func ValidateIndexName(val string) (string, error) { - // TODO: upstream this to check to reference package - if val == "index.docker.io" { - val = "docker.io" - } - if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") { - return "", fmt.Errorf("invalid index name (%s). Cannot begin or end with a hyphen", val) - } - return val, nil -} - -func validateNoScheme(reposName string) error { - if strings.Contains(reposName, "://") { - // It cannot contain a scheme! - return ErrInvalidRepositoryName - } - return nil -} - -func validateHostPort(s string) error { - // Split host and port, and in case s can not be splitted, assume host only - host, port, err := net.SplitHostPort(s) - if err != nil { - host = s - port = "" - } - // If match against the `host:port` pattern fails, - // it might be `IPv6:port`, which will be captured by net.ParseIP(host) - if !validHostPortRegex.MatchString(s) && net.ParseIP(host) == nil { - return fmt.Errorf("invalid host %q", host) - } - if port != "" { - v, err := strconv.Atoi(port) - if err != nil { - return err - } - if v < 0 || v > 65535 { - return fmt.Errorf("invalid port %q", port) - } - } - return nil -} - -// newIndexInfo returns IndexInfo configuration from indexName -func newIndexInfo(config *serviceConfig, indexName string) (*registrytypes.IndexInfo, error) { - var err error - indexName, err = ValidateIndexName(indexName) - if err != nil { - return nil, err - } - - // Return any configured index info, first. - if index, ok := config.IndexConfigs[indexName]; ok { - return index, nil - } - - // Construct a non-configured index info. - index := ®istrytypes.IndexInfo{ - Name: indexName, - Mirrors: make([]string, 0), - Official: false, - } - index.Secure = isSecureIndex(config, indexName) - return index, nil -} - -// GetAuthConfigKey special-cases using the full index address of the official -// index as the AuthConfig key, and uses the (host)name[:port] for private indexes. 
-func GetAuthConfigKey(index *registrytypes.IndexInfo) string { - if index.Official { - return IndexServer - } - return index.Name -} - -// newRepositoryInfo validates and breaks down a repository name into a RepositoryInfo -func newRepositoryInfo(config *serviceConfig, name reference.Named) (*RepositoryInfo, error) { - index, err := newIndexInfo(config, reference.Domain(name)) - if err != nil { - return nil, err - } - official := !strings.ContainsRune(reference.FamiliarName(name), '/') - - return &RepositoryInfo{ - Name: reference.TrimNamed(name), - Index: index, - Official: official, - }, nil -} - -// ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but -// lacks registry configuration. -func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) { - return newRepositoryInfo(emptyServiceConfig, reposName) -} - -// ParseSearchIndexInfo will use repository name to get back an indexInfo. -func ParseSearchIndexInfo(reposName string) (*registrytypes.IndexInfo, error) { - indexName, _ := splitReposSearchTerm(reposName) - - indexInfo, err := newIndexInfo(emptyServiceConfig, indexName) - if err != nil { - return nil, err - } - return indexInfo, nil -} diff --git a/src/vendor/github.com/docker/docker/registry/config_unix.go b/src/vendor/github.com/docker/docker/registry/config_unix.go deleted file mode 100644 index b5bb31cfa..000000000 --- a/src/vendor/github.com/docker/docker/registry/config_unix.go +++ /dev/null @@ -1,32 +0,0 @@ -//go:build !windows -// +build !windows - -package registry // import "github.com/docker/docker/registry" - -import ( - "path/filepath" - - "github.com/docker/docker/pkg/homedir" - "github.com/docker/docker/rootless" -) - -// CertsDir is the directory where certificates are stored -func CertsDir() string { - d := "/etc/docker/certs.d" - - if rootless.RunningWithRootlessKit() { - configHome, err := homedir.GetConfigHome() - if err == nil { - d = filepath.Join(configHome, "docker/certs.d") - } - } - return d -} - -// cleanPath is used to ensure that a directory name is valid on the target -// platform. It will be passed in something *similar* to a URL such as -// https:/index.docker.io/v1. Not all platforms support directory names -// which contain those characters (such as : on Windows) -func cleanPath(s string) string { - return s -} diff --git a/src/vendor/github.com/docker/docker/registry/config_windows.go b/src/vendor/github.com/docker/docker/registry/config_windows.go deleted file mode 100644 index 4ae1e07ab..000000000 --- a/src/vendor/github.com/docker/docker/registry/config_windows.go +++ /dev/null @@ -1,20 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "os" - "path/filepath" - "strings" -) - -// CertsDir is the directory where certificates are stored -func CertsDir() string { - return os.Getenv("programdata") + `\docker\certs.d` -} - -// cleanPath is used to ensure that a directory name is valid on the target -// platform. It will be passed in something *similar* to a URL such as -// https:\index.docker.io\v1. 
Not all platforms support directory names -// which contain those characters (such as : on Windows) -func cleanPath(s string) string { - return filepath.FromSlash(strings.Replace(s, ":", "", -1)) -} diff --git a/src/vendor/github.com/docker/docker/registry/endpoint_v1.go b/src/vendor/github.com/docker/docker/registry/endpoint_v1.go deleted file mode 100644 index db342d141..000000000 --- a/src/vendor/github.com/docker/docker/registry/endpoint_v1.go +++ /dev/null @@ -1,195 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "crypto/tls" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/docker/distribution/registry/client/transport" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/sirupsen/logrus" -) - -// V1Endpoint stores basic information about a V1 registry endpoint. -type V1Endpoint struct { - client *http.Client - URL *url.URL - IsSecure bool -} - -// NewV1Endpoint parses the given address to return a registry endpoint. -func NewV1Endpoint(index *registrytypes.IndexInfo, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { - tlsConfig, err := newTLSConfig(index.Name, index.Secure) - if err != nil { - return nil, err - } - - endpoint, err := newV1EndpointFromStr(GetAuthConfigKey(index), tlsConfig, userAgent, metaHeaders) - if err != nil { - return nil, err - } - - if err := validateEndpoint(endpoint); err != nil { - return nil, err - } - - return endpoint, nil -} - -func validateEndpoint(endpoint *V1Endpoint) error { - logrus.Debugf("pinging registry endpoint %s", endpoint) - - // Try HTTPS ping to registry - endpoint.URL.Scheme = "https" - if _, err := endpoint.Ping(); err != nil { - if endpoint.IsSecure { - // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` - // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. - return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) - } - - // If registry is insecure and HTTPS failed, fallback to HTTP. - logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err) - endpoint.URL.Scheme = "http" - - var err2 error - if _, err2 = endpoint.Ping(); err2 == nil { - return nil - } - - return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2) - } - - return nil -} - -func newV1Endpoint(address url.URL, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) *V1Endpoint { - endpoint := &V1Endpoint{ - IsSecure: tlsConfig == nil || !tlsConfig.InsecureSkipVerify, - URL: new(url.URL), - } - - *endpoint.URL = address - - // TODO(tiborvass): make sure a ConnectTimeout transport is used - tr := NewTransport(tlsConfig) - endpoint.client = HTTPClient(transport.NewTransport(tr, Headers(userAgent, metaHeaders)...)) - return endpoint -} - -// trimV1Address trims the version off the address and returns the -// trimmed address or an error if there is a non-V1 version. 
-func trimV1Address(address string) (string, error) { - var ( - chunks []string - apiVersionStr string - ) - - if strings.HasSuffix(address, "/") { - address = address[:len(address)-1] - } - - chunks = strings.Split(address, "/") - apiVersionStr = chunks[len(chunks)-1] - if apiVersionStr == "v1" { - return strings.Join(chunks[:len(chunks)-1], "/"), nil - } - - for k, v := range apiVersions { - if k != APIVersion1 && apiVersionStr == v { - return "", fmt.Errorf("unsupported V1 version path %s", apiVersionStr) - } - } - - return address, nil -} - -func newV1EndpointFromStr(address string, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { - if !strings.HasPrefix(address, "http://") && !strings.HasPrefix(address, "https://") { - address = "https://" + address - } - - address, err := trimV1Address(address) - if err != nil { - return nil, err - } - - uri, err := url.Parse(address) - if err != nil { - return nil, err - } - - endpoint := newV1Endpoint(*uri, tlsConfig, userAgent, metaHeaders) - - return endpoint, nil -} - -// Get the formatted URL for the root of this registry Endpoint -func (e *V1Endpoint) String() string { - return e.URL.String() + "/v1/" -} - -// Path returns a formatted string for the URL -// of this endpoint with the given path appended. -func (e *V1Endpoint) Path(path string) string { - return e.URL.String() + "/v1/" + path -} - -// Ping returns a PingResult which indicates whether the registry is standalone or not. -func (e *V1Endpoint) Ping() (PingResult, error) { - logrus.Debugf("attempting v1 ping for registry endpoint %s", e) - - if e.String() == IndexServer { - // Skip the check, we know this one is valid - // (and we never want to fallback to http in case of error) - return PingResult{Standalone: false}, nil - } - - req, err := http.NewRequest(http.MethodGet, e.Path("_ping"), nil) - if err != nil { - return PingResult{Standalone: false}, err - } - - resp, err := e.client.Do(req) - if err != nil { - return PingResult{Standalone: false}, err - } - - defer resp.Body.Close() - - jsonString, err := ioutil.ReadAll(resp.Body) - if err != nil { - return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err) - } - - // If the header is absent, we assume true for compatibility with earlier - // versions of the registry. default to true - info := PingResult{ - Standalone: true, - } - if err := json.Unmarshal(jsonString, &info); err != nil { - logrus.Debugf("Error unmarshaling the _ping PingResult: %s", err) - // don't stop here. Just assume sane defaults - } - if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { - logrus.Debugf("Registry version header: '%s'", hdr) - info.Version = hdr - } - logrus.Debugf("PingResult.Version: %q", info.Version) - - standalone := resp.Header.Get("X-Docker-Registry-Standalone") - logrus.Debugf("Registry standalone header: '%s'", standalone) - // Accepted values are "true" (case-insensitive) and "1". 
- if strings.EqualFold(standalone, "true") || standalone == "1" { - info.Standalone = true - } else if len(standalone) > 0 { - // there is a header set, and it is not "true" or "1", so assume fails - info.Standalone = false - } - logrus.Debugf("PingResult.Standalone: %t", info.Standalone) - return info, nil -} diff --git a/src/vendor/github.com/docker/docker/registry/errors.go b/src/vendor/github.com/docker/docker/registry/errors.go deleted file mode 100644 index 4906303ef..000000000 --- a/src/vendor/github.com/docker/docker/registry/errors.go +++ /dev/null @@ -1,23 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "net/url" - - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/docker/errdefs" -) - -func translateV2AuthError(err error) error { - switch e := err.(type) { - case *url.Error: - switch e2 := e.Err.(type) { - case errcode.Error: - switch e2.Code { - case errcode.ErrorCodeUnauthorized: - return errdefs.Unauthorized(err) - } - } - } - - return err -} diff --git a/src/vendor/github.com/docker/docker/registry/registry.go b/src/vendor/github.com/docker/docker/registry/registry.go deleted file mode 100644 index 7a70bf28b..000000000 --- a/src/vendor/github.com/docker/docker/registry/registry.go +++ /dev/null @@ -1,199 +0,0 @@ -// Package registry contains client primitives to interact with a remote Docker registry. -package registry // import "github.com/docker/docker/registry" - -import ( - "crypto/tls" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "os" - "path/filepath" - "strings" - "time" - - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/go-connections/tlsconfig" - "github.com/sirupsen/logrus" -) - -var ( - // ErrAlreadyExists is an error returned if an image being pushed - // already exists on the remote side - ErrAlreadyExists = errors.New("Image already exists") -) - -// HostCertsDir returns the config directory for a specific host -func HostCertsDir(hostname string) (string, error) { - certsDir := CertsDir() - - hostDir := filepath.Join(certsDir, cleanPath(hostname)) - - return hostDir, nil -} - -func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { - // PreferredServerCipherSuites should have no effect - tlsConfig := tlsconfig.ServerDefault() - - tlsConfig.InsecureSkipVerify = !isSecure - - if isSecure && CertsDir() != "" { - hostDir, err := HostCertsDir(hostname) - if err != nil { - return nil, err - } - - logrus.Debugf("hostDir: %s", hostDir) - if err := ReadCertsDirectory(tlsConfig, hostDir); err != nil { - return nil, err - } - } - - return tlsConfig, nil -} - -func hasFile(files []os.FileInfo, name string) bool { - for _, f := range files { - if f.Name() == name { - return true - } - } - return false -} - -// ReadCertsDirectory reads the directory for TLS certificates -// including roots and certificate pairs and updates the -// provided TLS configuration. 
-func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { - fs, err := ioutil.ReadDir(directory) - if err != nil && !os.IsNotExist(err) { - return err - } - - for _, f := range fs { - if strings.HasSuffix(f.Name(), ".crt") { - if tlsConfig.RootCAs == nil { - systemPool, err := tlsconfig.SystemCertPool() - if err != nil { - return fmt.Errorf("unable to get system cert pool: %v", err) - } - tlsConfig.RootCAs = systemPool - } - logrus.Debugf("crt: %s", filepath.Join(directory, f.Name())) - data, err := ioutil.ReadFile(filepath.Join(directory, f.Name())) - if err != nil { - return err - } - tlsConfig.RootCAs.AppendCertsFromPEM(data) - } - if strings.HasSuffix(f.Name(), ".cert") { - certName := f.Name() - keyName := certName[:len(certName)-5] + ".key" - logrus.Debugf("cert: %s", filepath.Join(directory, f.Name())) - if !hasFile(fs, keyName) { - return fmt.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName) - } - cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName)) - if err != nil { - return err - } - tlsConfig.Certificates = append(tlsConfig.Certificates, cert) - } - if strings.HasSuffix(f.Name(), ".key") { - keyName := f.Name() - certName := keyName[:len(keyName)-4] + ".cert" - logrus.Debugf("key: %s", filepath.Join(directory, f.Name())) - if !hasFile(fs, certName) { - return fmt.Errorf("Missing client certificate %s for key %s", certName, keyName) - } - } - } - - return nil -} - -// Headers returns request modifiers with a User-Agent and metaHeaders -func Headers(userAgent string, metaHeaders http.Header) []transport.RequestModifier { - modifiers := []transport.RequestModifier{} - if userAgent != "" { - modifiers = append(modifiers, transport.NewHeaderRequestModifier(http.Header{ - "User-Agent": []string{userAgent}, - })) - } - if metaHeaders != nil { - modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders)) - } - return modifiers -} - -// HTTPClient returns an HTTP client structure which uses the given transport -// and contains the necessary headers for redirected requests -func HTTPClient(transport http.RoundTripper) *http.Client { - return &http.Client{ - Transport: transport, - CheckRedirect: addRequiredHeadersToRedirectedRequests, - } -} - -func trustedLocation(req *http.Request) bool { - var ( - trusteds = []string{"docker.com", "docker.io"} - hostname = strings.SplitN(req.Host, ":", 2)[0] - ) - if req.URL.Scheme != "https" { - return false - } - - for _, trusted := range trusteds { - if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { - return true - } - } - return false -} - -// addRequiredHeadersToRedirectedRequests adds the necessary redirection headers -// for redirected requests -func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { - if len(via) != 0 && via[0] != nil { - if trustedLocation(req) && trustedLocation(via[0]) { - req.Header = via[0].Header - return nil - } - for k, v := range via[0].Header { - if k != "Authorization" { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - } - } - return nil -} - -// NewTransport returns a new HTTP transport. If tlsConfig is nil, it uses the -// default TLS configuration. 
-func NewTransport(tlsConfig *tls.Config) *http.Transport { - if tlsConfig == nil { - tlsConfig = tlsconfig.ServerDefault() - } - - direct := &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - } - - base := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: direct.DialContext, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: tlsConfig, - // TODO(dmcgowan): Call close idle connections when complete and use keep alive - DisableKeepAlives: true, - } - - return base -} diff --git a/src/vendor/github.com/docker/docker/registry/service.go b/src/vendor/github.com/docker/docker/registry/service.go deleted file mode 100644 index 3b08e39da..000000000 --- a/src/vendor/github.com/docker/docker/registry/service.go +++ /dev/null @@ -1,297 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "context" - "crypto/tls" - "net/http" - "net/url" - "strings" - "sync" - - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const ( - // DefaultSearchLimit is the default value for maximum number of returned search results. - DefaultSearchLimit = 25 -) - -// Service is the interface defining what a registry service should implement. -type Service interface { - Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) - LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) - LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) - ResolveRepository(name reference.Named) (*RepositoryInfo, error) - Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) - ServiceConfig() *registrytypes.ServiceConfig - TLSConfig(hostname string) (*tls.Config, error) - LoadAllowNondistributableArtifacts([]string) error - LoadMirrors([]string) error - LoadInsecureRegistries([]string) error -} - -// DefaultService is a registry service. It tracks configuration data such as a list -// of mirrors. -type DefaultService struct { - config *serviceConfig - mu sync.Mutex -} - -// NewService returns a new instance of DefaultService ready to be -// installed into an engine. -func NewService(options ServiceOptions) (*DefaultService, error) { - config, err := newServiceConfig(options) - - return &DefaultService{config: config}, err -} - -// ServiceConfig returns the public registry service configuration. -func (s *DefaultService) ServiceConfig() *registrytypes.ServiceConfig { - s.mu.Lock() - defer s.mu.Unlock() - - servConfig := registrytypes.ServiceConfig{ - AllowNondistributableArtifactsCIDRs: make([]*(registrytypes.NetIPNet), 0), - AllowNondistributableArtifactsHostnames: make([]string, 0), - InsecureRegistryCIDRs: make([]*(registrytypes.NetIPNet), 0), - IndexConfigs: make(map[string]*(registrytypes.IndexInfo)), - Mirrors: make([]string, 0), - } - - // construct a new ServiceConfig which will not retrieve s.Config directly, - // and look up items in s.config with mu locked - servConfig.AllowNondistributableArtifactsCIDRs = append(servConfig.AllowNondistributableArtifactsCIDRs, s.config.ServiceConfig.AllowNondistributableArtifactsCIDRs...) 
- servConfig.AllowNondistributableArtifactsHostnames = append(servConfig.AllowNondistributableArtifactsHostnames, s.config.ServiceConfig.AllowNondistributableArtifactsHostnames...) - servConfig.InsecureRegistryCIDRs = append(servConfig.InsecureRegistryCIDRs, s.config.ServiceConfig.InsecureRegistryCIDRs...) - - for key, value := range s.config.ServiceConfig.IndexConfigs { - servConfig.IndexConfigs[key] = value - } - - servConfig.Mirrors = append(servConfig.Mirrors, s.config.ServiceConfig.Mirrors...) - - return &servConfig -} - -// LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries for Service. -func (s *DefaultService) LoadAllowNondistributableArtifacts(registries []string) error { - s.mu.Lock() - defer s.mu.Unlock() - - return s.config.LoadAllowNondistributableArtifacts(registries) -} - -// LoadMirrors loads registry mirrors for Service -func (s *DefaultService) LoadMirrors(mirrors []string) error { - s.mu.Lock() - defer s.mu.Unlock() - - return s.config.LoadMirrors(mirrors) -} - -// LoadInsecureRegistries loads insecure registries for Service -func (s *DefaultService) LoadInsecureRegistries(registries []string) error { - s.mu.Lock() - defer s.mu.Unlock() - - return s.config.LoadInsecureRegistries(registries) -} - -// Auth contacts the public registry with the provided credentials, -// and returns OK if authentication was successful. -// It can be used to verify the validity of a client's credentials. -func (s *DefaultService) Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) { - // TODO Use ctx when searching for repositories - var registryHostName = IndexHostname - - if authConfig.ServerAddress != "" { - serverAddress := authConfig.ServerAddress - if !strings.HasPrefix(serverAddress, "https://") && !strings.HasPrefix(serverAddress, "http://") { - serverAddress = "https://" + serverAddress - } - u, err := url.Parse(serverAddress) - if err != nil { - return "", "", errdefs.InvalidParameter(errors.Errorf("unable to parse server address: %v", err)) - } - registryHostName = u.Host - } - - // Lookup endpoints for authentication using "LookupPushEndpoints", which - // excludes mirrors to prevent sending credentials of the upstream registry - // to a mirror. - endpoints, err := s.LookupPushEndpoints(registryHostName) - if err != nil { - return "", "", errdefs.InvalidParameter(err) - } - - for _, endpoint := range endpoints { - status, token, err = loginV2(authConfig, endpoint, userAgent) - if err == nil { - return - } - if fErr, ok := err.(fallbackError); ok { - logrus.WithError(fErr.err).Infof("Error logging in to endpoint, trying next endpoint") - continue - } - - return "", "", err - } - - return "", "", err -} - -// splitReposSearchTerm breaks a search term into an index name and remote name -func splitReposSearchTerm(reposName string) (string, string) { - nameParts := strings.SplitN(reposName, "/", 2) - if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && - !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { - // This is a Docker Hub repository (ex: samalba/hipache or ubuntu), - // use the default Docker Hub registry (docker.io) - return IndexName, reposName - } - return nameParts[0], nameParts[1] -} - -// Search queries the public registry for images matching the specified -// search terms, and returns the results. 
-func (s *DefaultService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) { - // TODO Use ctx when searching for repositories - if err := validateNoScheme(term); err != nil { - return nil, err - } - - indexName, remoteName := splitReposSearchTerm(term) - - // Search is a long-running operation, just lock s.config to avoid block others. - s.mu.Lock() - index, err := newIndexInfo(s.config, indexName) - s.mu.Unlock() - - if err != nil { - return nil, err - } - - // *TODO: Search multiple indexes. - endpoint, err := NewV1Endpoint(index, userAgent, headers) - if err != nil { - return nil, err - } - - var client *http.Client - if authConfig != nil && authConfig.IdentityToken != "" && authConfig.Username != "" { - creds := NewStaticCredentialStore(authConfig) - scopes := []auth.Scope{ - auth.RegistryScope{ - Name: "catalog", - Actions: []string{"search"}, - }, - } - - modifiers := Headers(userAgent, nil) - v2Client, foundV2, err := v2AuthHTTPClient(endpoint.URL, endpoint.client.Transport, modifiers, creds, scopes) - if err != nil { - if fErr, ok := err.(fallbackError); ok { - logrus.Errorf("Cannot use identity token for search, v2 auth not supported: %v", fErr.err) - } else { - return nil, err - } - } else if foundV2 { - // Copy non transport http client features - v2Client.Timeout = endpoint.client.Timeout - v2Client.CheckRedirect = endpoint.client.CheckRedirect - v2Client.Jar = endpoint.client.Jar - - logrus.Debugf("using v2 client for search to %s", endpoint.URL) - client = v2Client - } - } - - if client == nil { - client = endpoint.client - if err := authorizeClient(client, authConfig, endpoint); err != nil { - return nil, err - } - } - - r := newSession(client, authConfig, endpoint) - - if index.Official { - // If pull "library/foo", it's stored locally under "foo" - remoteName = strings.TrimPrefix(remoteName, "library/") - } - return r.SearchRepositories(remoteName, limit) -} - -// ResolveRepository splits a repository name into its components -// and configuration of the associated registry. 
-func (s *DefaultService) ResolveRepository(name reference.Named) (*RepositoryInfo, error) { - s.mu.Lock() - defer s.mu.Unlock() - return newRepositoryInfo(s.config, name) -} - -// APIEndpoint represents a remote API endpoint -type APIEndpoint struct { - Mirror bool - URL *url.URL - Version APIVersion - AllowNondistributableArtifacts bool - Official bool - TrimHostname bool - TLSConfig *tls.Config -} - -// ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint -// Deprecated: this function is deprecated and will be removed in a future update -func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) *V1Endpoint { - return newV1Endpoint(*e.URL, e.TLSConfig, userAgent, metaHeaders) -} - -// TLSConfig constructs a client TLS configuration based on server defaults -func (s *DefaultService) TLSConfig(hostname string) (*tls.Config, error) { - s.mu.Lock() - defer s.mu.Unlock() - - return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) -} - -// tlsConfig constructs a client TLS configuration based on server defaults -func (s *DefaultService) tlsConfig(hostname string) (*tls.Config, error) { - return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) -} - -func (s *DefaultService) tlsConfigForMirror(mirrorURL *url.URL) (*tls.Config, error) { - return s.tlsConfig(mirrorURL.Host) -} - -// LookupPullEndpoints creates a list of v2 endpoints to try to pull from, in order of preference. -// It gives preference to mirrors over the actual registry, and HTTPS over plain HTTP. -func (s *DefaultService) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) { - s.mu.Lock() - defer s.mu.Unlock() - - return s.lookupV2Endpoints(hostname) -} - -// LookupPushEndpoints creates a list of v2 endpoints to try to push to, in order of preference. -// It gives preference to HTTPS over plain HTTP. Mirrors are not included. 
-func (s *DefaultService) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) { - s.mu.Lock() - defer s.mu.Unlock() - - allEndpoints, err := s.lookupV2Endpoints(hostname) - if err == nil { - for _, endpoint := range allEndpoints { - if !endpoint.Mirror { - endpoints = append(endpoints, endpoint) - } - } - } - return endpoints, err -} diff --git a/src/vendor/github.com/docker/docker/registry/service_v2.go b/src/vendor/github.com/docker/docker/registry/service_v2.go deleted file mode 100644 index 3e3a5b41f..000000000 --- a/src/vendor/github.com/docker/docker/registry/service_v2.go +++ /dev/null @@ -1,79 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "net/url" - "strings" - - "github.com/docker/go-connections/tlsconfig" -) - -func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, err error) { - tlsConfig := tlsconfig.ServerDefault() - if hostname == DefaultNamespace || hostname == IndexHostname { - for _, mirror := range s.config.Mirrors { - if !strings.HasPrefix(mirror, "http://") && !strings.HasPrefix(mirror, "https://") { - mirror = "https://" + mirror - } - mirrorURL, err := url.Parse(mirror) - if err != nil { - return nil, err - } - mirrorTLSConfig, err := s.tlsConfigForMirror(mirrorURL) - if err != nil { - return nil, err - } - endpoints = append(endpoints, APIEndpoint{ - URL: mirrorURL, - Version: APIVersion2, - Mirror: true, - TrimHostname: true, - TLSConfig: mirrorTLSConfig, - }) - } - endpoints = append(endpoints, APIEndpoint{ - URL: DefaultV2Registry, - Version: APIVersion2, - Official: true, - TrimHostname: true, - TLSConfig: tlsConfig, - }) - - return endpoints, nil - } - - ana := allowNondistributableArtifacts(s.config, hostname) - - tlsConfig, err = s.tlsConfig(hostname) - if err != nil { - return nil, err - } - - endpoints = []APIEndpoint{ - { - URL: &url.URL{ - Scheme: "https", - Host: hostname, - }, - Version: APIVersion2, - AllowNondistributableArtifacts: ana, - TrimHostname: true, - TLSConfig: tlsConfig, - }, - } - - if tlsConfig.InsecureSkipVerify { - endpoints = append(endpoints, APIEndpoint{ - URL: &url.URL{ - Scheme: "http", - Host: hostname, - }, - Version: APIVersion2, - AllowNondistributableArtifacts: ana, - TrimHostname: true, - // used to check if supposed to be secure via InsecureSkipVerify - TLSConfig: tlsConfig, - }) - } - - return endpoints, nil -} diff --git a/src/vendor/github.com/docker/docker/registry/session.go b/src/vendor/github.com/docker/docker/registry/session.go deleted file mode 100644 index d34dc1e58..000000000 --- a/src/vendor/github.com/docker/docker/registry/session.go +++ /dev/null @@ -1,227 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - // this is required for some certificates - _ "crypto/sha512" - "encoding/json" - "fmt" - "net/http" - "net/http/cookiejar" - "net/url" - "strings" - "sync" - - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/stringid" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// A Session is used to communicate with a V1 registry -type Session struct { - indexEndpoint *V1Endpoint - client *http.Client - // TODO(tiborvass): remove authConfig - authConfig *types.AuthConfig - id string -} - -type authTransport struct { - http.RoundTripper - *types.AuthConfig - - 
alwaysSetBasicAuth bool - token []string - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// AuthTransport handles the auth layer when communicating with a v1 registry (private or official) -// -// For private v1 registries, set alwaysSetBasicAuth to true. -// -// For the official v1 registry, if there isn't already an Authorization header in the request, -// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header. -// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing -// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent -// requests. -// -// If the server sends a token without the client having requested it, it is ignored. -// -// This RoundTripper also has a CancelRequest method important for correct timeout handling. -func AuthTransport(base http.RoundTripper, authConfig *types.AuthConfig, alwaysSetBasicAuth bool) http.RoundTripper { - if base == nil { - base = http.DefaultTransport - } - return &authTransport{ - RoundTripper: base, - AuthConfig: authConfig, - alwaysSetBasicAuth: alwaysSetBasicAuth, - modReq: make(map[*http.Request]*http.Request), - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) - } - - return r2 -} - -// RoundTrip changes an HTTP request's headers to add the necessary -// authentication-related headers -func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) { - // Authorization should not be set on 302 redirect for untrusted locations. - // This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests. - // As the authorization logic is currently implemented in RoundTrip, - // a 302 redirect is detected by looking at the Referrer header as go http package adds said header. - // This is safe as Docker doesn't set Referrer in other scenarios. - if orig.Header.Get("Referer") != "" && !trustedLocation(orig) { - return tr.RoundTripper.RoundTrip(orig) - } - - req := cloneRequest(orig) - tr.mu.Lock() - tr.modReq[orig] = req - tr.mu.Unlock() - - if tr.alwaysSetBasicAuth { - if tr.AuthConfig == nil { - return nil, errors.New("unexpected error: empty auth config") - } - req.SetBasicAuth(tr.Username, tr.Password) - return tr.RoundTripper.RoundTrip(req) - } - - // Don't override - if req.Header.Get("Authorization") == "" { - if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 { - req.SetBasicAuth(tr.Username, tr.Password) - } else if len(tr.token) > 0 { - req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ",")) - } - } - resp, err := tr.RoundTripper.RoundTrip(req) - if err != nil { - tr.mu.Lock() - delete(tr.modReq, orig) - tr.mu.Unlock() - return nil, err - } - if len(resp.Header["X-Docker-Token"]) > 0 { - tr.token = resp.Header["X-Docker-Token"] - } - resp.Body = &ioutils.OnEOFReader{ - Rc: resp.Body, - Fn: func() { - tr.mu.Lock() - delete(tr.modReq, orig) - tr.mu.Unlock() - }, - } - return resp, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. 
-func (tr *authTransport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := tr.RoundTripper.(canceler); ok { - tr.mu.Lock() - modReq := tr.modReq[req] - delete(tr.modReq, req) - tr.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -func authorizeClient(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) error { - var alwaysSetBasicAuth bool - - // If we're working with a standalone private registry over HTTPS, send Basic Auth headers - // alongside all our requests. - if endpoint.String() != IndexServer && endpoint.URL.Scheme == "https" { - info, err := endpoint.Ping() - if err != nil { - return err - } - if info.Standalone && authConfig != nil { - logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String()) - alwaysSetBasicAuth = true - } - } - - // Annotate the transport unconditionally so that v2 can - // properly fallback on v1 when an image is not found. - client.Transport = AuthTransport(client.Transport, authConfig, alwaysSetBasicAuth) - - jar, err := cookiejar.New(nil) - if err != nil { - return errors.New("cookiejar.New is not supposed to return an error") - } - client.Jar = jar - - return nil -} - -func newSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) *Session { - return &Session{ - authConfig: authConfig, - client: client, - indexEndpoint: endpoint, - id: stringid.GenerateRandomID(), - } -} - -// NewSession creates a new session -// TODO(tiborvass): remove authConfig param once registry client v2 is vendored -func NewSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) (*Session, error) { - if err := authorizeClient(client, authConfig, endpoint); err != nil { - return nil, err - } - - return newSession(client, authConfig, endpoint), nil -} - -// SearchRepositories performs a search against the remote repository -func (r *Session) SearchRepositories(term string, limit int) (*registrytypes.SearchResults, error) { - if limit < 1 || limit > 100 { - return nil, errdefs.InvalidParameter(errors.Errorf("Limit %d is outside the range of [1, 100]", limit)) - } - logrus.Debugf("Index server: %s", r.indexEndpoint) - u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term) + "&n=" + url.QueryEscape(fmt.Sprintf("%d", limit)) - - req, err := http.NewRequest(http.MethodGet, u, nil) - if err != nil { - return nil, errors.Wrap(errdefs.InvalidParameter(err), "Error building request") - } - // Have the AuthTransport send authentication, when logged in. 
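As the comments above describe, authTransport never mutates the caller's request: it shallow-copies the request, deep-copies the headers, and only then adds credentials or the cached token. A small stdlib-only sketch of that clone-then-decorate RoundTripper pattern (`basicAuthTransport` is a hypothetical name, not the removed type):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

// cloneRequest copies the request struct and deep-copies its Header map,
// matching the pattern used by the removed authTransport.
func cloneRequest(r *http.Request) *http.Request {
	r2 := new(http.Request)
	*r2 = *r
	r2.Header = make(http.Header, len(r.Header))
	for k, v := range r.Header {
		r2.Header[k] = append([]string(nil), v...)
	}
	return r2
}

// basicAuthTransport decorates a base RoundTripper with Basic Auth
// without touching the original request.
type basicAuthTransport struct {
	base               http.RoundTripper
	username, password string
}

func (t *basicAuthTransport) RoundTrip(orig *http.Request) (*http.Response, error) {
	req := cloneRequest(orig)
	if req.Header.Get("Authorization") == "" { // don't override an existing header
		req.SetBasicAuth(t.username, t.password)
	}
	return t.base.RoundTrip(req)
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		user, _, ok := r.BasicAuth()
		fmt.Fprintf(w, "authenticated=%v user=%s", ok, user)
	}))
	defer srv.Close()

	client := &http.Client{Transport: &basicAuthTransport{
		base: http.DefaultTransport, username: "demo", password: "secret",
	}}
	resp, err := client.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // authenticated=true user=demo
}
```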
- req.Header.Set("X-Docker-Token", "true") - res, err := r.client.Do(req) - if err != nil { - return nil, errdefs.System(err) - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return nil, &jsonmessage.JSONError{ - Message: fmt.Sprintf("Unexpected status code %d", res.StatusCode), - Code: res.StatusCode, - } - } - result := new(registrytypes.SearchResults) - return result, errors.Wrap(json.NewDecoder(res.Body).Decode(result), "error decoding registry search results") -} diff --git a/src/vendor/github.com/docker/docker/registry/types.go b/src/vendor/github.com/docker/docker/registry/types.go deleted file mode 100644 index 28ed2bfa5..000000000 --- a/src/vendor/github.com/docker/docker/registry/types.go +++ /dev/null @@ -1,70 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "github.com/docker/distribution/reference" - registrytypes "github.com/docker/docker/api/types/registry" -) - -// RepositoryData tracks the image list, list of endpoints for a repository -type RepositoryData struct { - // ImgList is a list of images in the repository - ImgList map[string]*ImgData - // Endpoints is a list of endpoints returned in X-Docker-Endpoints - Endpoints []string -} - -// ImgData is used to transfer image checksums to and from the registry -type ImgData struct { - // ID is an opaque string that identifies the image - ID string `json:"id"` - Checksum string `json:"checksum,omitempty"` - ChecksumPayload string `json:"-"` - Tag string `json:",omitempty"` -} - -// PingResult contains the information returned when pinging a registry. It -// indicates the registry's version and whether the registry claims to be a -// standalone registry. -type PingResult struct { - // Version is the registry version supplied by the registry in an HTTP - // header - Version string `json:"version"` - // Standalone is set to true if the registry indicates it is a - // standalone registry in the X-Docker-Registry-Standalone - // header - Standalone bool `json:"standalone"` -} - -// APIVersion is an integral representation of an API version (presently -// either 1 or 2) -type APIVersion int - -func (av APIVersion) String() string { - return apiVersions[av] -} - -// API Version identifiers. -const ( - _ = iota - APIVersion1 APIVersion = iota - APIVersion2 -) - -var apiVersions = map[APIVersion]string{ - APIVersion1: "v1", - APIVersion2: "v2", -} - -// RepositoryInfo describes a repository -type RepositoryInfo struct { - Name reference.Named - // Index points to registry information - Index *registrytypes.IndexInfo - // Official indicates whether the repository is considered official. - // If the registry is official, and the normalized name does not - // contain a '/' (e.g. "foo"), then it is considered an official repo. - Official bool - // Class represents the class of the repository, such as "plugin" - // or "image". 
- Class string -} diff --git a/src/vendor/github.com/docker/docker/rootless/rootless.go b/src/vendor/github.com/docker/docker/rootless/rootless.go deleted file mode 100644 index 376d5263d..000000000 --- a/src/vendor/github.com/docker/docker/rootless/rootless.go +++ /dev/null @@ -1,25 +0,0 @@ -package rootless // import "github.com/docker/docker/rootless" - -import ( - "os" - "sync" -) - -const ( - // RootlessKitDockerProxyBinary is the binary name of rootlesskit-docker-proxy - RootlessKitDockerProxyBinary = "rootlesskit-docker-proxy" -) - -var ( - runningWithRootlessKit bool - runningWithRootlessKitOnce sync.Once -) - -// RunningWithRootlessKit returns true if running under RootlessKit namespaces. -func RunningWithRootlessKit() bool { - runningWithRootlessKitOnce.Do(func() { - u := os.Getenv("ROOTLESSKIT_STATE_DIR") - runningWithRootlessKit = u != "" - }) - return runningWithRootlessKit -} diff --git a/src/vendor/github.com/docker/go-connections/LICENSE b/src/vendor/github.com/docker/go-connections/LICENSE deleted file mode 100644 index b55b37bc3..000000000 --- a/src/vendor/github.com/docker/go-connections/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/src/vendor/github.com/docker/go-connections/nat/nat.go b/src/vendor/github.com/docker/go-connections/nat/nat.go deleted file mode 100644 index bb7e4e336..000000000 --- a/src/vendor/github.com/docker/go-connections/nat/nat.go +++ /dev/null @@ -1,242 +0,0 @@ -// Package nat is a convenience package for manipulation of strings describing network ports. -package nat - -import ( - "fmt" - "net" - "strconv" - "strings" -) - -const ( - // portSpecTemplate is the expected format for port specifications - portSpecTemplate = "ip:hostPort:containerPort" -) - -// PortBinding represents a binding between a Host IP address and a Host Port -type PortBinding struct { - // HostIP is the host IP Address - HostIP string `json:"HostIp"` - // HostPort is the host port number - HostPort string -} - -// PortMap is a collection of PortBinding indexed by Port -type PortMap map[Port][]PortBinding - -// PortSet is a collection of structs indexed by Port -type PortSet map[Port]struct{} - -// Port is a string containing port number and protocol in the format "80/tcp" -type Port string - -// NewPort creates a new instance of a Port given a protocol and port number or port range -func NewPort(proto, port string) (Port, error) { - // Check for parsing issues on "port" now so we can avoid having - // to check it later on. 
- - portStartInt, portEndInt, err := ParsePortRangeToInt(port) - if err != nil { - return "", err - } - - if portStartInt == portEndInt { - return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil - } - return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil -} - -// ParsePort parses the port number string and returns an int -func ParsePort(rawPort string) (int, error) { - if len(rawPort) == 0 { - return 0, nil - } - port, err := strconv.ParseUint(rawPort, 10, 16) - if err != nil { - return 0, err - } - return int(port), nil -} - -// ParsePortRangeToInt parses the port range string and returns start/end ints -func ParsePortRangeToInt(rawPort string) (int, int, error) { - if len(rawPort) == 0 { - return 0, 0, nil - } - start, end, err := ParsePortRange(rawPort) - if err != nil { - return 0, 0, err - } - return int(start), int(end), nil -} - -// Proto returns the protocol of a Port -func (p Port) Proto() string { - proto, _ := SplitProtoPort(string(p)) - return proto -} - -// Port returns the port number of a Port -func (p Port) Port() string { - _, port := SplitProtoPort(string(p)) - return port -} - -// Int returns the port number of a Port as an int -func (p Port) Int() int { - portStr := p.Port() - // We don't need to check for an error because we're going to - // assume that any error would have been found, and reported, in NewPort() - port, _ := ParsePort(portStr) - return port -} - -// Range returns the start/end port numbers of a Port range as ints -func (p Port) Range() (int, int, error) { - return ParsePortRangeToInt(p.Port()) -} - -// SplitProtoPort splits a port in the format of proto/port -func SplitProtoPort(rawPort string) (string, string) { - parts := strings.Split(rawPort, "/") - l := len(parts) - if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { - return "", "" - } - if l == 1 { - return "tcp", rawPort - } - if len(parts[1]) == 0 { - return "tcp", parts[0] - } - return parts[1], parts[0] -} - -func validateProto(proto string) bool { - for _, availableProto := range []string{"tcp", "udp", "sctp"} { - if availableProto == proto { - return true - } - } - return false -} - -// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses -// these in to the internal types -func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { - var ( - exposedPorts = make(map[Port]struct{}, len(ports)) - bindings = make(map[Port][]PortBinding) - ) - for _, rawPort := range ports { - portMappings, err := ParsePortSpec(rawPort) - if err != nil { - return nil, nil, err - } - - for _, portMapping := range portMappings { - port := portMapping.Port - if _, exists := exposedPorts[port]; !exists { - exposedPorts[port] = struct{}{} - } - bslice, exists := bindings[port] - if !exists { - bslice = []PortBinding{} - } - bindings[port] = append(bslice, portMapping.Binding) - } - } - return exposedPorts, bindings, nil -} - -// PortMapping is a data object mapping a Port to a PortBinding -type PortMapping struct { - Port Port - Binding PortBinding -} - -func splitParts(rawport string) (string, string, string) { - parts := strings.Split(rawport, ":") - n := len(parts) - containerport := parts[n-1] - - switch n { - case 1: - return "", "", containerport - case 2: - return "", parts[0], containerport - case 3: - return parts[0], parts[1], containerport - default: - return strings.Join(parts[:n-2], ":"), parts[n-2], containerport - } -} - -// ParsePortSpec parses a port specification string into a slice of PortMappings -func 
ParsePortSpec(rawPort string) ([]PortMapping, error) { - var proto string - rawIP, hostPort, containerPort := splitParts(rawPort) - proto, containerPort = SplitProtoPort(containerPort) - - // Strip [] from IPV6 addresses - ip, _, err := net.SplitHostPort(rawIP + ":") - if err != nil { - return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err) - } - if ip != "" && net.ParseIP(ip) == nil { - return nil, fmt.Errorf("Invalid ip address: %s", ip) - } - if containerPort == "" { - return nil, fmt.Errorf("No port specified: %s", rawPort) - } - - startPort, endPort, err := ParsePortRange(containerPort) - if err != nil { - return nil, fmt.Errorf("Invalid containerPort: %s", containerPort) - } - - var startHostPort, endHostPort uint64 = 0, 0 - if len(hostPort) > 0 { - startHostPort, endHostPort, err = ParsePortRange(hostPort) - if err != nil { - return nil, fmt.Errorf("Invalid hostPort: %s", hostPort) - } - } - - if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { - // Allow host port range iff containerPort is not a range. - // In this case, use the host port range as the dynamic - // host port range to allocate into. - if endPort != startPort { - return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) - } - } - - if !validateProto(strings.ToLower(proto)) { - return nil, fmt.Errorf("Invalid proto: %s", proto) - } - - ports := []PortMapping{} - for i := uint64(0); i <= (endPort - startPort); i++ { - containerPort = strconv.FormatUint(startPort+i, 10) - if len(hostPort) > 0 { - hostPort = strconv.FormatUint(startHostPort+i, 10) - } - // Set hostPort to a range only if there is a single container port - // and a dynamic host port. - if startPort == endPort && startHostPort != endHostPort { - hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) - } - port, err := NewPort(strings.ToLower(proto), containerPort) - if err != nil { - return nil, err - } - - binding := PortBinding{ - HostIP: ip, - HostPort: hostPort, - } - ports = append(ports, PortMapping{Port: port, Binding: binding}) - } - return ports, nil -} diff --git a/src/vendor/github.com/docker/go-connections/nat/parse.go b/src/vendor/github.com/docker/go-connections/nat/parse.go deleted file mode 100644 index 892adf8c6..000000000 --- a/src/vendor/github.com/docker/go-connections/nat/parse.go +++ /dev/null @@ -1,57 +0,0 @@ -package nat - -import ( - "fmt" - "strconv" - "strings" -) - -// PartParser parses and validates the specified string (data) using the specified template -// e.g. ip:public:private -> 192.168.0.1:80:8000 -// DEPRECATED: do not use, this function may be removed in a future version -func PartParser(template, data string) (map[string]string, error) { - // ip:public:private - var ( - templateParts = strings.Split(template, ":") - parts = strings.Split(data, ":") - out = make(map[string]string, len(templateParts)) - ) - if len(parts) != len(templateParts) { - return nil, fmt.Errorf("Invalid format to parse. 
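ParsePortSpec above accepts specs shaped like ip:hostPort:containerPort with an optional /proto suffix and defaults the protocol to tcp. A simplified, self-contained sketch of that splitting (no port ranges or bracketed IPv6 handling; `splitSpec` is an illustrative name, not the removed helper):

```go
package main

import (
	"fmt"
	"strings"
)

// splitSpec breaks "ip:hostPort:containerPort[/proto]" into its parts,
// defaulting the protocol to "tcp" as the removed nat package does.
func splitSpec(raw string) (ip, hostPort, containerPort, proto string) {
	proto = "tcp"
	if i := strings.LastIndex(raw, "/"); i >= 0 {
		proto = strings.ToLower(raw[i+1:])
		raw = raw[:i]
	}
	parts := strings.Split(raw, ":")
	switch len(parts) {
	case 1:
		containerPort = parts[0]
	case 2:
		hostPort, containerPort = parts[0], parts[1]
	default:
		ip, hostPort, containerPort = parts[0], parts[1], parts[2]
	}
	return ip, hostPort, containerPort, proto
}

func main() {
	for _, spec := range []string{"80", "8080:80", "127.0.0.1:8080:80/udp"} {
		ip, host, ctr, proto := splitSpec(spec)
		fmt.Printf("%-22s -> ip=%q host=%q container=%q proto=%q\n", spec, ip, host, ctr, proto)
	}
}
```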
%s should match template %s", data, template) - } - - for i, t := range templateParts { - value := "" - if len(parts) > i { - value = parts[i] - } - out[t] = value - } - return out, nil -} - -// ParsePortRange parses and validates the specified string as a port-range (8000-9000) -func ParsePortRange(ports string) (uint64, uint64, error) { - if ports == "" { - return 0, 0, fmt.Errorf("Empty string specified for ports.") - } - if !strings.Contains(ports, "-") { - start, err := strconv.ParseUint(ports, 10, 16) - end := start - return start, end, err - } - - parts := strings.Split(ports, "-") - start, err := strconv.ParseUint(parts[0], 10, 16) - if err != nil { - return 0, 0, err - } - end, err := strconv.ParseUint(parts[1], 10, 16) - if err != nil { - return 0, 0, err - } - if end < start { - return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) - } - return start, end, nil -} diff --git a/src/vendor/github.com/docker/go-connections/nat/sort.go b/src/vendor/github.com/docker/go-connections/nat/sort.go deleted file mode 100644 index ce950171e..000000000 --- a/src/vendor/github.com/docker/go-connections/nat/sort.go +++ /dev/null @@ -1,96 +0,0 @@ -package nat - -import ( - "sort" - "strings" -) - -type portSorter struct { - ports []Port - by func(i, j Port) bool -} - -func (s *portSorter) Len() int { - return len(s.ports) -} - -func (s *portSorter) Swap(i, j int) { - s.ports[i], s.ports[j] = s.ports[j], s.ports[i] -} - -func (s *portSorter) Less(i, j int) bool { - ip := s.ports[i] - jp := s.ports[j] - - return s.by(ip, jp) -} - -// Sort sorts a list of ports using the provided predicate -// This function should compare `i` and `j`, returning true if `i` is -// considered to be less than `j` -func Sort(ports []Port, predicate func(i, j Port) bool) { - s := &portSorter{ports, predicate} - sort.Sort(s) -} - -type portMapEntry struct { - port Port - binding PortBinding -} - -type portMapSorter []portMapEntry - -func (s portMapSorter) Len() int { return len(s) } -func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// sort the port so that the order is: -// 1. port with larger specified bindings -// 2. larger port -// 3. port with tcp protocol -func (s portMapSorter) Less(i, j int) bool { - pi, pj := s[i].port, s[j].port - hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort) - return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp") -} - -// SortPortMap sorts the list of ports and their respected mapping. The ports -// will explicit HostPort will be placed first. 
-func SortPortMap(ports []Port, bindings PortMap) { - s := portMapSorter{} - for _, p := range ports { - if binding, ok := bindings[p]; ok { - for _, b := range binding { - s = append(s, portMapEntry{port: p, binding: b}) - } - bindings[p] = []PortBinding{} - } else { - s = append(s, portMapEntry{port: p}) - } - } - - sort.Sort(s) - var ( - i int - pm = make(map[Port]struct{}) - ) - // reorder ports - for _, entry := range s { - if _, ok := pm[entry.port]; !ok { - ports[i] = entry.port - pm[entry.port] = struct{}{} - i++ - } - // reorder bindings for this port - if _, ok := bindings[entry.port]; ok { - bindings[entry.port] = append(bindings[entry.port], entry.binding) - } - } -} - -func toInt(s string) uint64 { - i, _, err := ParsePortRange(s) - if err != nil { - i = 0 - } - return i -} diff --git a/src/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/src/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go deleted file mode 100644 index 1ca0965e0..000000000 --- a/src/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build go1.7 - -package tlsconfig - -import ( - "crypto/x509" - "runtime" -) - -// SystemCertPool returns a copy of the system cert pool, -// returns an error if failed to load or empty pool on windows. -func SystemCertPool() (*x509.CertPool, error) { - certpool, err := x509.SystemCertPool() - if err != nil && runtime.GOOS == "windows" { - return x509.NewCertPool(), nil - } - return certpool, err -} diff --git a/src/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/src/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go deleted file mode 100644 index 1ff81c333..000000000 --- a/src/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !go1.7 - -package tlsconfig - -import ( - "crypto/x509" -) - -// SystemCertPool returns an new empty cert pool, -// accessing system cert pool is supported in go 1.7 -func SystemCertPool() (*x509.CertPool, error) { - return x509.NewCertPool(), nil -} diff --git a/src/vendor/github.com/docker/go-connections/tlsconfig/config.go b/src/vendor/github.com/docker/go-connections/tlsconfig/config.go deleted file mode 100644 index 0ef3fdcb4..000000000 --- a/src/vendor/github.com/docker/go-connections/tlsconfig/config.go +++ /dev/null @@ -1,254 +0,0 @@ -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// -// As a reminder from https://golang.org/pkg/crypto/tls/#Config: -// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified. -// A Config may be reused; the tls package will also not modify it. -package tlsconfig - -import ( - "crypto/tls" - "crypto/x509" - "encoding/pem" - "fmt" - "io/ioutil" - "os" - - "github.com/pkg/errors" -) - -// Options represents the information needed to create client and server TLS configurations. -type Options struct { - CAFile string - - // If either CertFile or KeyFile is empty, Client() will not load them - // preventing the client from authenticating to the server. - // However, Server() requires them and will error out if they are empty. 
- CertFile string - KeyFile string - - // client-only option - InsecureSkipVerify bool - // server-only option - ClientAuth tls.ClientAuthType - // If ExclusiveRootPools is set, then if a CA file is provided, the root pool used for TLS - // creds will include exclusively the roots in that CA file. If no CA file is provided, - // the system pool will be used. - ExclusiveRootPools bool - MinVersion uint16 - // If Passphrase is set, it will be used to decrypt a TLS private key - // if the key is encrypted - Passphrase string -} - -// Extra (server-side) accepted CBC cipher suites - will phase out in the future -var acceptedCBCCiphers = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, -} - -// DefaultServerAcceptedCiphers should be uses by code which already has a crypto/tls -// options struct but wants to use a commonly accepted set of TLS cipher suites, with -// known weak algorithms removed. -var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) - -// allTLSVersions lists all the TLS versions and is used by the code that validates -// a uint16 value as a TLS version. -var allTLSVersions = map[uint16]struct{}{ - tls.VersionSSL30: {}, - tls.VersionTLS10: {}, - tls.VersionTLS11: {}, - tls.VersionTLS12: {}, -} - -// ServerDefault returns a secure-enough TLS configuration for the server TLS configuration. -func ServerDefault(ops ...func(*tls.Config)) *tls.Config { - tlsconfig := &tls.Config{ - // Avoid fallback by default to SSL protocols < TLS1.2 - MinVersion: tls.VersionTLS12, - PreferServerCipherSuites: true, - CipherSuites: DefaultServerAcceptedCiphers, - } - - for _, op := range ops { - op(tlsconfig) - } - - return tlsconfig -} - -// ClientDefault returns a secure-enough TLS configuration for the client TLS configuration. -func ClientDefault(ops ...func(*tls.Config)) *tls.Config { - tlsconfig := &tls.Config{ - // Prefer TLS1.2 as the client minimum - MinVersion: tls.VersionTLS12, - CipherSuites: clientCipherSuites, - } - - for _, op := range ops { - op(tlsconfig) - } - - return tlsconfig -} - -// certPool returns an X.509 certificate pool from `caFile`, the certificate file. -func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { - // If we should verify the server, we need to load a trusted ca - var ( - certPool *x509.CertPool - err error - ) - if exclusivePool { - certPool = x509.NewCertPool() - } else { - certPool, err = SystemCertPool() - if err != nil { - return nil, fmt.Errorf("failed to read system certificates: %v", err) - } - } - pem, err := ioutil.ReadFile(caFile) - if err != nil { - return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err) - } - if !certPool.AppendCertsFromPEM(pem) { - return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) - } - return certPool, nil -} - -// isValidMinVersion checks that the input value is a valid tls minimum version -func isValidMinVersion(version uint16) bool { - _, ok := allTLSVersions[version] - return ok -} - -// adjustMinVersion sets the MinVersion on `config`, the input configuration. -// It assumes the current MinVersion on the `config` is the lowest allowed. 
-func adjustMinVersion(options Options, config *tls.Config) error { - if options.MinVersion > 0 { - if !isValidMinVersion(options.MinVersion) { - return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion) - } - if options.MinVersion < config.MinVersion { - return fmt.Errorf("Requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion) - } - config.MinVersion = options.MinVersion - } - - return nil -} - -// IsErrEncryptedKey returns true if the 'err' is an error of incorrect -// password when tryin to decrypt a TLS private key -func IsErrEncryptedKey(err error) bool { - return errors.Cause(err) == x509.IncorrectPasswordError -} - -// getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format. -// If the private key is encrypted, 'passphrase' is used to decrypted the -// private key. -func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) { - // this section makes some small changes to code from notary/tuf/utils/x509.go - pemBlock, _ := pem.Decode(keyBytes) - if pemBlock == nil { - return nil, fmt.Errorf("no valid private key found") - } - - var err error - if x509.IsEncryptedPEMBlock(pemBlock) { - keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) - if err != nil { - return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it") - } - keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes}) - } - - return keyBytes, nil -} - -// getCert returns a Certificate from the CertFile and KeyFile in 'options', -// if the key is encrypted, the Passphrase in 'options' will be used to -// decrypt it. -func getCert(options Options) ([]tls.Certificate, error) { - if options.CertFile == "" && options.KeyFile == "" { - return nil, nil - } - - errMessage := "Could not load X509 key pair" - - cert, err := ioutil.ReadFile(options.CertFile) - if err != nil { - return nil, errors.Wrap(err, errMessage) - } - - prKeyBytes, err := ioutil.ReadFile(options.KeyFile) - if err != nil { - return nil, errors.Wrap(err, errMessage) - } - - prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase) - if err != nil { - return nil, errors.Wrap(err, errMessage) - } - - tlsCert, err := tls.X509KeyPair(cert, prKeyBytes) - if err != nil { - return nil, errors.Wrap(err, errMessage) - } - - return []tls.Certificate{tlsCert}, nil -} - -// Client returns a TLS configuration meant to be used by a client. -func Client(options Options) (*tls.Config, error) { - tlsConfig := ClientDefault() - tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify - if !options.InsecureSkipVerify && options.CAFile != "" { - CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) - if err != nil { - return nil, err - } - tlsConfig.RootCAs = CAs - } - - tlsCerts, err := getCert(options) - if err != nil { - return nil, err - } - tlsConfig.Certificates = tlsCerts - - if err := adjustMinVersion(options, tlsConfig); err != nil { - return nil, err - } - - return tlsConfig, nil -} - -// Server returns a TLS configuration meant to be used by a server. -func Server(options Options) (*tls.Config, error) { - tlsConfig := ServerDefault() - tlsConfig.ClientAuth = options.ClientAuth - tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) - if err != nil { - if os.IsNotExist(err) { - return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) - } - return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. 
Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) - } - tlsConfig.Certificates = []tls.Certificate{tlsCert} - if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" { - CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) - if err != nil { - return nil, err - } - tlsConfig.ClientCAs = CAs - } - - if err := adjustMinVersion(options, tlsConfig); err != nil { - return nil, err - } - - return tlsConfig, nil -} diff --git a/src/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/src/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go deleted file mode 100644 index 6b4c6a7c0..000000000 --- a/src/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build go1.5 - -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// -package tlsconfig - -import ( - "crypto/tls" -) - -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} diff --git a/src/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/src/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go deleted file mode 100644 index ee22df47c..000000000 --- a/src/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !go1.5 - -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// -package tlsconfig - -import ( - "crypto/tls" -) - -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} diff --git a/src/vendor/github.com/docker/go-units/CONTRIBUTING.md b/src/vendor/github.com/docker/go-units/CONTRIBUTING.md deleted file mode 100644 index 9ea86d784..000000000 --- a/src/vendor/github.com/docker/go-units/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# Contributing to go-units - -Want to hack on go-units? Awesome! Here are instructions to get you started. - -go-units is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read Docker's -[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), -[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), -[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and -[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 
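The go-connections/tlsconfig Options removed above drive both Client and Server: CAFile builds the root (or client) CA pool, CertFile/KeyFile supply the key pair, and both defaults enforce a TLS 1.2 minimum. The package itself remains available upstream even though the vendored copy is dropped; a usage sketch with placeholder certificate paths:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	// Placeholder paths; supply real PEM files in practice.
	opts := tlsconfig.Options{
		CAFile:   "/etc/docker/certs.d/registry.example.com/ca.crt",
		CertFile: "/etc/docker/certs.d/registry.example.com/client.cert",
		KeyFile:  "/etc/docker/certs.d/registry.example.com/client.key",
	}

	// Client() starts from ClientDefault (TLS >= 1.2) and layers the options on top.
	cfg, err := tlsconfig.Client(opts)
	if err != nil {
		panic(err)
	}

	// Plug the resulting config into a standard HTTP client.
	httpClient := &http.Client{Transport: &http.Transport{TLSClientConfig: cfg}}
	fmt.Printf("TLS client ready, MinVersion=%#x, %d client cert(s)\n", cfg.MinVersion, len(cfg.Certificates))
	_ = httpClient
}
```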
-660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/src/vendor/github.com/docker/go-units/LICENSE b/src/vendor/github.com/docker/go-units/LICENSE deleted file mode 100644 index b55b37bc3..000000000 --- a/src/vendor/github.com/docker/go-units/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/src/vendor/github.com/docker/go-units/MAINTAINERS b/src/vendor/github.com/docker/go-units/MAINTAINERS deleted file mode 100644 index 4aac7c741..000000000 --- a/src/vendor/github.com/docker/go-units/MAINTAINERS +++ /dev/null @@ -1,46 +0,0 @@ -# go-units maintainers file -# -# This file describes who runs the docker/go-units project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "akihirosuda", - "dnephin", - "thajeztah", - "vdemeester", - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. 
- - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.akihirosuda] - Name = "Akihiro Suda" - Email = "akihiro.suda.cz@hco.ntt.co.jp" - GitHub = "AkihiroSuda" - - [people.dnephin] - Name = "Daniel Nephin" - Email = "dnephin@gmail.com" - GitHub = "dnephin" - - [people.thajeztah] - Name = "Sebastiaan van Stijn" - Email = "github@gone.nl" - GitHub = "thaJeztah" - - [people.vdemeester] - Name = "Vincent Demeester" - Email = "vincent@sbr.pm" - GitHub = "vdemeester" \ No newline at end of file diff --git a/src/vendor/github.com/docker/go-units/README.md b/src/vendor/github.com/docker/go-units/README.md deleted file mode 100644 index 4f70a4e13..000000000 --- a/src/vendor/github.com/docker/go-units/README.md +++ /dev/null @@ -1,16 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) - -# Introduction - -go-units is a library to transform human friendly measurements into machine friendly values. - -## Usage - -See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. - -## Copyright and license - -Copyright © 2015 Docker, Inc. - -go-units is licensed under the Apache License, Version 2.0. -See [LICENSE](LICENSE) for the full text of the license. diff --git a/src/vendor/github.com/docker/go-units/circle.yml b/src/vendor/github.com/docker/go-units/circle.yml deleted file mode 100644 index af9d60552..000000000 --- a/src/vendor/github.com/docker/go-units/circle.yml +++ /dev/null @@ -1,11 +0,0 @@ -dependencies: - post: - # install golint - - go get golang.org/x/lint/golint - -test: - pre: - # run analysis before tests - - go vet ./... - - test -z "$(golint ./... | tee /dev/stderr)" - - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/src/vendor/github.com/docker/go-units/duration.go b/src/vendor/github.com/docker/go-units/duration.go deleted file mode 100644 index 48dd8744d..000000000 --- a/src/vendor/github.com/docker/go-units/duration.go +++ /dev/null @@ -1,35 +0,0 @@ -// Package units provides helper function to parse and print size and time units -// in human-readable format. -package units - -import ( - "fmt" - "time" -) - -// HumanDuration returns a human-readable approximation of a duration -// (eg. "About a minute", "4 hours ago", etc.). 
-func HumanDuration(d time.Duration) string { - if seconds := int(d.Seconds()); seconds < 1 { - return "Less than a second" - } else if seconds == 1 { - return "1 second" - } else if seconds < 60 { - return fmt.Sprintf("%d seconds", seconds) - } else if minutes := int(d.Minutes()); minutes == 1 { - return "About a minute" - } else if minutes < 60 { - return fmt.Sprintf("%d minutes", minutes) - } else if hours := int(d.Hours() + 0.5); hours == 1 { - return "About an hour" - } else if hours < 48 { - return fmt.Sprintf("%d hours", hours) - } else if hours < 24*7*2 { - return fmt.Sprintf("%d days", hours/24) - } else if hours < 24*30*2 { - return fmt.Sprintf("%d weeks", hours/24/7) - } else if hours < 24*365*2 { - return fmt.Sprintf("%d months", hours/24/30) - } - return fmt.Sprintf("%d years", int(d.Hours())/24/365) -} diff --git a/src/vendor/github.com/docker/go-units/size.go b/src/vendor/github.com/docker/go-units/size.go deleted file mode 100644 index 85f6ab071..000000000 --- a/src/vendor/github.com/docker/go-units/size.go +++ /dev/null @@ -1,108 +0,0 @@ -package units - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -// See: http://en.wikipedia.org/wiki/Binary_prefix -const ( - // Decimal - - KB = 1000 - MB = 1000 * KB - GB = 1000 * MB - TB = 1000 * GB - PB = 1000 * TB - - // Binary - - KiB = 1024 - MiB = 1024 * KiB - GiB = 1024 * MiB - TiB = 1024 * GiB - PiB = 1024 * TiB -) - -type unitMap map[string]int64 - -var ( - decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} - binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} - sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[iI]?[bB]?$`) -) - -var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} -var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} - -func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { - i := 0 - unitsLimit := len(_map) - 1 - for size >= base && i < unitsLimit { - size = size / base - i++ - } - return size, _map[i] -} - -// CustomSize returns a human-readable approximation of a size -// using custom format. -func CustomSize(format string, size float64, base float64, _map []string) string { - size, unit := getSizeAndUnit(size, base, _map) - return fmt.Sprintf(format, size, unit) -} - -// HumanSizeWithPrecision allows the size to be in any precision, -// instead of 4 digit precision used in units.HumanSize. -func HumanSizeWithPrecision(size float64, precision int) string { - size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs) - return fmt.Sprintf("%.*g%s", precision, size, unit) -} - -// HumanSize returns a human-readable approximation of a size -// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). -func HumanSize(size float64) string { - return HumanSizeWithPrecision(size, 4) -} - -// BytesSize returns a human-readable size in bytes, kibibytes, -// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). -func BytesSize(size float64) string { - return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs) -} - -// FromHumanSize returns an integer from a human-readable specification of a -// size using SI standard (eg. "44kB", "17MB"). -func FromHumanSize(size string) (int64, error) { - return parseSize(size, decimalMap) -} - -// RAMInBytes parses a human-readable string representing an amount of RAM -// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and -// returns the number of bytes, or -1 if the string is unparseable. 
-// Units are case-insensitive, and the 'b' suffix is optional. -func RAMInBytes(size string) (int64, error) { - return parseSize(size, binaryMap) -} - -// Parses the human-readable size string into the amount it represents. -func parseSize(sizeStr string, uMap unitMap) (int64, error) { - matches := sizeRegex.FindStringSubmatch(sizeStr) - if len(matches) != 4 { - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) - } - - size, err := strconv.ParseFloat(matches[1], 64) - if err != nil { - return -1, err - } - - unitPrefix := strings.ToLower(matches[3]) - if mul, ok := uMap[unitPrefix]; ok { - size *= float64(mul) - } - - return int64(size), nil -} diff --git a/src/vendor/github.com/docker/go-units/ulimit.go b/src/vendor/github.com/docker/go-units/ulimit.go deleted file mode 100644 index fca0400cc..000000000 --- a/src/vendor/github.com/docker/go-units/ulimit.go +++ /dev/null @@ -1,123 +0,0 @@ -package units - -import ( - "fmt" - "strconv" - "strings" -) - -// Ulimit is a human friendly version of Rlimit. -type Ulimit struct { - Name string - Hard int64 - Soft int64 -} - -// Rlimit specifies the resource limits, such as max open files. -type Rlimit struct { - Type int `json:"type,omitempty"` - Hard uint64 `json:"hard,omitempty"` - Soft uint64 `json:"soft,omitempty"` -} - -const ( - // magic numbers for making the syscall - // some of these are defined in the syscall package, but not all. - // Also since Windows client doesn't get access to the syscall package, need to - // define these here - rlimitAs = 9 - rlimitCore = 4 - rlimitCPU = 0 - rlimitData = 2 - rlimitFsize = 1 - rlimitLocks = 10 - rlimitMemlock = 8 - rlimitMsgqueue = 12 - rlimitNice = 13 - rlimitNofile = 7 - rlimitNproc = 6 - rlimitRss = 5 - rlimitRtprio = 14 - rlimitRttime = 15 - rlimitSigpending = 11 - rlimitStack = 3 -) - -var ulimitNameMapping = map[string]int{ - //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. - "core": rlimitCore, - "cpu": rlimitCPU, - "data": rlimitData, - "fsize": rlimitFsize, - "locks": rlimitLocks, - "memlock": rlimitMemlock, - "msgqueue": rlimitMsgqueue, - "nice": rlimitNice, - "nofile": rlimitNofile, - "nproc": rlimitNproc, - "rss": rlimitRss, - "rtprio": rlimitRtprio, - "rttime": rlimitRttime, - "sigpending": rlimitSigpending, - "stack": rlimitStack, -} - -// ParseUlimit parses and returns a Ulimit from the specified string. 
-func ParseUlimit(val string) (*Ulimit, error) { - parts := strings.SplitN(val, "=", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("invalid ulimit argument: %s", val) - } - - if _, exists := ulimitNameMapping[parts[0]]; !exists { - return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) - } - - var ( - soft int64 - hard = &soft // default to soft in case no hard was set - temp int64 - err error - ) - switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { - case 2: - temp, err = strconv.ParseInt(limitVals[1], 10, 64) - if err != nil { - return nil, err - } - hard = &temp - fallthrough - case 1: - soft, err = strconv.ParseInt(limitVals[0], 10, 64) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) - } - - if *hard != -1 { - if soft == -1 { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: soft: -1 (unlimited), hard: %d", *hard) - } - if soft > *hard { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) - } - } - - return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil -} - -// GetRlimit returns the RLimit corresponding to Ulimit. -func (u *Ulimit) GetRlimit() (*Rlimit, error) { - t, exists := ulimitNameMapping[u.Name] - if !exists { - return nil, fmt.Errorf("invalid ulimit name %s", u.Name) - } - - return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil -} - -func (u *Ulimit) String() string { - return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/.gitignore b/src/vendor/github.com/emicklei/go-restful/v3/.gitignore deleted file mode 100644 index 446be09b4..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/.gitignore +++ /dev/null @@ -1,71 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -restful.html - -*.out - -tmp.prof - -go-restful.test - -examples/restful-basic-authentication - -examples/restful-encoding-filter - -examples/restful-filters - -examples/restful-hello-world - -examples/restful-resource-functions - -examples/restful-serve-static - -examples/restful-user-service - -*.DS_Store -examples/restful-user-resource - -examples/restful-multi-containers - -examples/restful-form-handling - -examples/restful-CORS-filter - -examples/restful-options-filter - -examples/restful-curly-router - -examples/restful-cpuprofiler-service - -examples/restful-pre-post-filters - -curly.prof - -examples/restful-NCSA-logging - -examples/restful-html-template - -s.html -restful-path-tail -.idea diff --git a/src/vendor/github.com/emicklei/go-restful/v3/.goconvey b/src/vendor/github.com/emicklei/go-restful/v3/.goconvey deleted file mode 100644 index 8485e986e..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/.goconvey +++ /dev/null @@ -1 +0,0 @@ -ignore \ No newline at end of file diff --git a/src/vendor/github.com/emicklei/go-restful/v3/.travis.yml b/src/vendor/github.com/emicklei/go-restful/v3/.travis.yml deleted file mode 100644 index 3a0bf5ff1..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - 1.x - -before_install: - - go test -v - -script: - - go test -race 
-coverprofile=coverage.txt -covermode=atomic - -after_success: - - bash <(curl -s https://codecov.io/bash) \ No newline at end of file diff --git a/src/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/src/vendor/github.com/emicklei/go-restful/v3/CHANGES.md deleted file mode 100644 index 38169cfd6..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ /dev/null @@ -1,372 +0,0 @@ -# Change history of go-restful - -## [v3.8.0] - 20221-06-06 - -- use exact matching of allowed domain entries, issue #489 (#493) - - this changes fixes [security] Authorization Bypass Through User-Controlled Key - by changing the behaviour of the AllowedDomains setting in the CORS filter. - To support the previous behaviour, the CORS filter type now has a AllowedDomainFunc - callback mechanism which is called when a simple domain match fails. -- add test and fix for POST without body and Content-type, issue #492 (#496) -- [Minor] Bad practice to have a mix of Receiver types. (#491) - -## [v3.7.2] - 2021-11-24 - -- restored FilterChain (#482 by SVilgelm) - - -## [v3.7.1] - 2021-10-04 - -- fix problem with contentEncodingEnabled setting (#479) - -## [v3.7.0] - 2021-09-24 - -- feat(parameter): adds additional openapi mappings (#478) - -## [v3.6.0] - 2021-09-18 - -- add support for vendor extensions (#477 thx erraggy) - -## [v3.5.2] - 2021-07-14 - -- fix removing absent route from webservice (#472) - -## [v3.5.1] - 2021-04-12 - -- fix handling no match access selected path -- remove obsolete field - -## [v3.5.0] - 2021-04-10 - -- add check for wildcard (#463) in CORS -- add access to Route from Request, issue #459 (#462) - -## [v3.4.0] - 2020-11-10 - -- Added OPTIONS to WebService - -## [v3.3.2] - 2020-01-23 - -- Fixed duplicate compression in dispatch. #449 - - -## [v3.3.1] - 2020-08-31 - -- Added check on writer to prevent compression of response twice. #447 - -## [v3.3.0] - 2020-08-19 - -- Enable content encoding on Handle and ServeHTTP (#446) -- List available representations in 406 body (#437) -- Convert to string using rune() (#443) - -## [v3.2.0] - 2020-06-21 - -- 405 Method Not Allowed must have Allow header (#436) (thx Bracken ) -- add field allowedMethodsWithoutContentType (#424) - -## [v3.1.0] - -- support describing response headers (#426) -- fix openapi examples (#425) - -v3.0.0 - -- fix: use request/response resulting from filter chain -- add Go module - Module consumer should use github.com/emicklei/go-restful/v3 as import path - -v2.10.0 - -- support for Custom Verbs (thanks Vinci Xu <277040271@qq.com>) -- fixed static example (thanks Arthur ) -- simplify code (thanks Christian Muehlhaeuser ) -- added JWT HMAC with SHA-512 authentication code example (thanks Amim Knabben ) - -v2.9.6 - -- small optimization in filter code - -v2.11.1 - -- fix WriteError return value (#415) - -v2.11.0 - -- allow prefix and suffix in path variable expression (#414) - -v2.9.6 - -- support google custome verb (#413) - -v2.9.5 - -- fix panic in Response.WriteError if err == nil - -v2.9.4 - -- fix issue #400 , parsing mime type quality -- Route Builder added option for contentEncodingEnabled (#398) - -v2.9.3 - -- Avoid return of 415 Unsupported Media Type when request body is empty (#396) - -v2.9.2 - -- Reduce allocations in per-request methods to improve performance (#395) - -v2.9.1 - -- Fix issue with default responses and invalid status code 0. 
(#393) - -v2.9.0 - -- add per Route content encoding setting (overrides container setting) - -v2.8.0 - -- add Request.QueryParameters() -- add json-iterator (via build tag) -- disable vgo module (until log is moved) - -v2.7.1 - -- add vgo module - -v2.6.1 - -- add JSONNewDecoderFunc to allow custom JSON Decoder usage (go 1.10+) - -v2.6.0 - -- Make JSR 311 routing and path param processing consistent -- Adding description to RouteBuilder.Reads() -- Update example for Swagger12 and OpenAPI - -2017-09-13 - -- added route condition functions using `.If(func)` in route building. - -2017-02-16 - -- solved issue #304, make operation names unique - -2017-01-30 - - [IMPORTANT] For swagger users, change your import statement to: - swagger "github.com/emicklei/go-restful-swagger12" - -- moved swagger 1.2 code to go-restful-swagger12 -- created TAG 2.0.0 - -2017-01-27 - -- remove defer request body close -- expose Dispatch for testing filters and Routefunctions -- swagger response model cannot be array -- created TAG 1.0.0 - -2016-12-22 - -- (API change) Remove code related to caching request content. Removes SetCacheReadEntity(doCache bool) - -2016-11-26 - -- Default change! now use CurlyRouter (was RouterJSR311) -- Default change! no more caching of request content -- Default change! do not recover from panics - -2016-09-22 - -- fix the DefaultRequestContentType feature - -2016-02-14 - -- take the qualify factor of the Accept header mediatype into account when deciding the contentype of the response -- add constructors for custom entity accessors for xml and json - -2015-09-27 - -- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency - -2015-09-25 - -- fixed problem with changing Header after WriteHeader (issue 235) - -2015-09-14 - -- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write) -- added support for custom EntityReaderWriters. - -2015-08-06 - -- add support for reading entities from compressed request content -- use sync.Pool for compressors of http response and request body -- add Description to Parameter for documentation in Swagger UI - -2015-03-20 - -- add configurable logging - -2015-03-18 - -- if not specified, the Operation is derived from the Route function - -2015-03-17 - -- expose Parameter creation functions -- make trace logger an interface -- fix OPTIONSFilter -- customize rendering of ServiceError -- JSR311 router now handles wildcards -- add Notes to Route - -2014-11-27 - -- (api add) PrettyPrint per response. (as proposed in #167) - -2014-11-12 - -- (api add) ApiVersion(.) for documentation in Swagger UI - -2014-11-10 - -- (api change) struct fields tagged with "description" show up in Swagger UI - -2014-10-31 - -- (api change) ReturnsError -> Returns -- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder -- fix swagger nested structs -- sort Swagger response messages by code - -2014-10-23 - -- (api add) ReturnsError allows you to document Http codes in swagger -- fixed problem with greedy CurlyRouter -- (api add) Access-Control-Max-Age in CORS -- add tracing functionality (injectable) for debugging purposes -- support JSON parse 64bit int -- fix empty parameters for swagger -- WebServicesUrl is now optional for swagger -- fixed duplicate AccessControlAllowOrigin in CORS -- (api change) expose ServeMux in container -- (api add) added AllowedDomains in CORS -- (api add) ParameterNamed for detailed documentation - -2014-04-16 - -- (api add) expose constructor of Request for testing. 
- -2014-06-27 - -- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification). -- (api add) SetCacheReadEntity allow scontrol over whether or not the request body is being cached (default true for compatibility reasons). - -2014-07-03 - -- (api add) CORS can be configured with a list of allowed domains - -2014-03-12 - -- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter) - -2014-02-26 - -- (api add) Request now provides information about the matched Route, see method SelectedRoutePath - -2014-02-17 - -- (api change) renamed parameter constants (go-lint checks) - -2014-01-10 - -- (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier - -2014-01-07 - -- (api change) Write* methods in Response now return the error or nil. -- added example of serving HTML from a Go template. -- fixed comparing Allowed headers in CORS (is now case-insensitive) - -2013-11-13 - -- (api add) Response knows how many bytes are written to the response body. - -2013-10-29 - -- (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information. - -2013-10-04 - -- (api add) Response knows what HTTP status has been written -- (api add) Request can have attributes (map of string->interface, also called request-scoped variables - -2013-09-12 - -- (api change) Router interface simplified -- Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths - -2013-08-05 - - add OPTIONS support - - add CORS support - -2013-08-27 - -- fixed some reported issues (see github) -- (api change) deprecated use of WriteError; use WriteErrorString instead - -2014-04-15 - -- (fix) v1.0.1 tag: fix Issue 111: WriteErrorString - -2013-08-08 - -- (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer. -- (api add) the swagger package has be extended to have a UI per container. -- if panic is detected then a small stack trace is printed (thanks to runner-mei) -- (api add) WriteErrorString to Response - -Important API changes: - -- (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead. -- (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead. - - -2013-07-06 - -- (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature. - -2013-06-19 - -- (improve) DoNotRecover option, moved request body closer, improved ReadEntity - -2013-06-03 - -- (api change) removed Dispatcher interface, hide PathExpression -- changed receiver names of type functions to be more idiomatic Go - -2013-06-02 - -- (optimize) Cache the RegExp compilation of Paths. - -2013-05-22 - -- (api add) Added support for request/response filter functions - -2013-05-18 - - -- (api add) Added feature to change the default Http Request Dispatch function (travis cline) -- (api change) Moved Swagger Webservice to swagger package (see example restful-user) - -[2012-11-14 .. 
2013-05-18> - -- See https://github.com/emicklei/go-restful/commits - -2012-11-14 - -- Initial commit - - diff --git a/src/vendor/github.com/emicklei/go-restful/v3/LICENSE b/src/vendor/github.com/emicklei/go-restful/v3/LICENSE deleted file mode 100644 index ece7ec61e..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2012,2013 Ernest Micklei - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/src/vendor/github.com/emicklei/go-restful/v3/Makefile b/src/vendor/github.com/emicklei/go-restful/v3/Makefile deleted file mode 100644 index 16d0b80bb..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -all: test - -test: - go vet . - go test -cover -v . - -ex: - find ./examples -type f -name "*.go" | xargs -I {} go build -o /tmp/ignore {} \ No newline at end of file diff --git a/src/vendor/github.com/emicklei/go-restful/v3/README.md b/src/vendor/github.com/emicklei/go-restful/v3/README.md deleted file mode 100644 index 23166d3b4..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/README.md +++ /dev/null @@ -1,110 +0,0 @@ -go-restful -========== -package for building REST-style Web Services using Google Go - -[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful) -[![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful) -[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful) -[![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful) - -- [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples) - -REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping: - -- GET = Retrieve a representation of a resource -- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm. -- PUT = Create if you are sending the full content of the specified resource (URI). -- PUT = Update if you are updating the full content of the specified resource. 
-- DELETE = Delete if you are requesting the server to delete the resource -- PATCH = Update partial content of a resource -- OPTIONS = Get information about the communication options for the request URI - -### Usage - -#### Without Go Modules - -All versions up to `v2.*.*` (on the master) are not supporting Go modules. - -``` -import ( - restful "github.com/emicklei/go-restful" -) -``` - -#### Using Go Modules - -As of version `v3.0.0` (on the v3 branch), this package supports Go modules. - -``` -import ( - restful "github.com/emicklei/go-restful/v3" -) -``` - -### Example - -```Go -ws := new(restful.WebService) -ws. - Path("/users"). - Consumes(restful.MIME_XML, restful.MIME_JSON). - Produces(restful.MIME_JSON, restful.MIME_XML) - -ws.Route(ws.GET("/{user-id}").To(u.findUser). - Doc("get a user"). - Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")). - Writes(User{})) -... - -func (u UserResource) findUser(request *restful.Request, response *restful.Response) { - id := request.PathParameter("user-id") - ... -} -``` - -[Full API of a UserResource](https://github.com/emicklei/go-restful/blob/v3/examples/user-resource/restful-user-resource.go) - -### Features - -- Routes for request → function mapping with path parameter (e.g. {id} but also prefix_{var} and {var}_suffix) support -- Configurable router: - - (default) Fast routing algorithm that allows static elements, [google custom method](https://cloud.google.com/apis/design/custom_methods), regular expressions and dynamic parameters in the URL path (e.g. /resource/name:customVerb, /meetings/{id} or /static/{subpath:*}) - - Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions -- Request API for reading structs from JSON/XML and accessing parameters (path,query,header) -- Response API for writing structs to JSON/XML and setting headers -- Customizable encoding using EntityReaderWriter registration -- Filters for intercepting the request → response flow on Service or Route level -- Request-scoped variables using attributes -- Containers for WebServices on different HTTP endpoints -- Content encoding (gzip,deflate) of request and response payloads -- Automatic responses on OPTIONS (using a filter) -- Automatic CORS request handling (using a filter) -- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi), see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12)) -- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...) -- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...) -- Configurable (trace) logging -- Customizable gzip/deflate readers and writers using CompressorProvider registration - -## How to customize -There are several hooks to customize the behavior of the go-restful package. - -- Router algorithm -- Panic recovery -- JSON decoder -- Trace logging -- Compression -- Encoders for other serializers -- Use [jsoniter](https://github.com/json-iterator/go) by build this package using a tag, e.g. 
`go build -tags=jsoniter .` - -## Resources - -- [Example programs](./examples) -- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/) -- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/) -- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful) -- [showcase: Zazkia - tcp proxy for testing resiliency](https://github.com/emicklei/zazkia) -- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora) - -Type ```git shortlog -s``` for a full list of contributors. - -© 2012 - 2022, http://ernestmicklei.com. MIT License. Contributions are welcome. diff --git a/src/vendor/github.com/emicklei/go-restful/v3/SECURITY.md b/src/vendor/github.com/emicklei/go-restful/v3/SECURITY.md deleted file mode 100644 index 810d3b510..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/SECURITY.md +++ /dev/null @@ -1,13 +0,0 @@ -# Security Policy - -## Supported Versions - -| Version | Supported | -| ------- | ------------------ | -| v3.7.x | :white_check_mark: | -| < v3.0.1 | :x: | - -## Reporting a Vulnerability - -Create an Issue and put the label `[security]` in the title of the issue. -Valid reported security issues are expected to be solved within a week. diff --git a/src/vendor/github.com/emicklei/go-restful/v3/Srcfile b/src/vendor/github.com/emicklei/go-restful/v3/Srcfile deleted file mode 100644 index 16fd18689..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/Srcfile +++ /dev/null @@ -1 +0,0 @@ -{"SkipDirs": ["examples"]} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/bench_test.sh b/src/vendor/github.com/emicklei/go-restful/v3/bench_test.sh deleted file mode 100644 index 47ffbe4ac..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/bench_test.sh +++ /dev/null @@ -1,10 +0,0 @@ -#go test -run=none -file bench_test.go -test.bench . -cpuprofile=bench_test.out - -go test -c -./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany -./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly - -#go tool pprof go-restful.test tmp.prof -go tool pprof go-restful.test curly.prof - - diff --git a/src/vendor/github.com/emicklei/go-restful/v3/compress.go b/src/vendor/github.com/emicklei/go-restful/v3/compress.go deleted file mode 100644 index 1ff239f99..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/compress.go +++ /dev/null @@ -1,127 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bufio" - "compress/gzip" - "compress/zlib" - "errors" - "io" - "net" - "net/http" - "strings" -) - -// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting. 
-var EnableContentEncoding = false - -// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib) -type CompressingResponseWriter struct { - writer http.ResponseWriter - compressor io.WriteCloser - encoding string -} - -// Header is part of http.ResponseWriter interface -func (c *CompressingResponseWriter) Header() http.Header { - return c.writer.Header() -} - -// WriteHeader is part of http.ResponseWriter interface -func (c *CompressingResponseWriter) WriteHeader(status int) { - c.writer.WriteHeader(status) -} - -// Write is part of http.ResponseWriter interface -// It is passed through the compressor -func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) { - if c.isCompressorClosed() { - return -1, errors.New("Compressing error: tried to write data using closed compressor") - } - return c.compressor.Write(bytes) -} - -// CloseNotify is part of http.CloseNotifier interface -func (c *CompressingResponseWriter) CloseNotify() <-chan bool { - return c.writer.(http.CloseNotifier).CloseNotify() -} - -// Close the underlying compressor -func (c *CompressingResponseWriter) Close() error { - if c.isCompressorClosed() { - return errors.New("Compressing error: tried to close already closed compressor") - } - - c.compressor.Close() - if ENCODING_GZIP == c.encoding { - currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer)) - } - if ENCODING_DEFLATE == c.encoding { - currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer)) - } - // gc hint needed? - c.compressor = nil - return nil -} - -func (c *CompressingResponseWriter) isCompressorClosed() bool { - return nil == c.compressor -} - -// Hijack implements the Hijacker interface -// This is especially useful when combining Container.EnabledContentEncoding -// in combination with websockets (for instance gorilla/websocket) -func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - hijacker, ok := c.writer.(http.Hijacker) - if !ok { - return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface") - } - return hijacker.Hijack() -} - -// WantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested. -// It also inspects the httpWriter whether its content-encoding is already set (non-empty). 
-func wantsCompressedResponse(httpRequest *http.Request, httpWriter http.ResponseWriter) (bool, string) { - if contentEncoding := httpWriter.Header().Get(HEADER_ContentEncoding); contentEncoding != "" { - return false, "" - } - header := httpRequest.Header.Get(HEADER_AcceptEncoding) - gi := strings.Index(header, ENCODING_GZIP) - zi := strings.Index(header, ENCODING_DEFLATE) - // use in order of appearance - if gi == -1 { - return zi != -1, ENCODING_DEFLATE - } else if zi == -1 { - return gi != -1, ENCODING_GZIP - } else { - if gi < zi { - return true, ENCODING_GZIP - } - return true, ENCODING_DEFLATE - } -} - -// NewCompressingResponseWriter create a CompressingResponseWriter for a known encoding = {gzip,deflate} -func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) { - httpWriter.Header().Set(HEADER_ContentEncoding, encoding) - c := new(CompressingResponseWriter) - c.writer = httpWriter - var err error - if ENCODING_GZIP == encoding { - w := currentCompressorProvider.AcquireGzipWriter() - w.Reset(httpWriter) - c.compressor = w - c.encoding = ENCODING_GZIP - } else if ENCODING_DEFLATE == encoding { - w := currentCompressorProvider.AcquireZlibWriter() - w.Reset(httpWriter) - c.compressor = w - c.encoding = ENCODING_DEFLATE - } else { - return nil, errors.New("Unknown encoding:" + encoding) - } - return c, err -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/compressor_cache.go b/src/vendor/github.com/emicklei/go-restful/v3/compressor_cache.go deleted file mode 100644 index ee426010a..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/compressor_cache.go +++ /dev/null @@ -1,103 +0,0 @@ -package restful - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "compress/gzip" - "compress/zlib" -) - -// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed amount -// of writers and readers (resources). -// If a new resource is acquired and all are in use, it will return a new unmanaged resource. -type BoundedCachedCompressors struct { - gzipWriters chan *gzip.Writer - gzipReaders chan *gzip.Reader - zlibWriters chan *zlib.Writer - writersCapacity int - readersCapacity int -} - -// NewBoundedCachedCompressors returns a new, with filled cache, BoundedCachedCompressors. -func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors { - b := &BoundedCachedCompressors{ - gzipWriters: make(chan *gzip.Writer, writersCapacity), - gzipReaders: make(chan *gzip.Reader, readersCapacity), - zlibWriters: make(chan *zlib.Writer, writersCapacity), - writersCapacity: writersCapacity, - readersCapacity: readersCapacity, - } - for ix := 0; ix < writersCapacity; ix++ { - b.gzipWriters <- newGzipWriter() - b.zlibWriters <- newZlibWriter() - } - for ix := 0; ix < readersCapacity; ix++ { - b.gzipReaders <- newGzipReader() - } - return b -} - -// AcquireGzipWriter returns an resettable *gzip.Writer. Needs to be released. -func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer { - var writer *gzip.Writer - select { - case writer, _ = <-b.gzipWriters: - default: - // return a new unmanaged one - writer = newGzipWriter() - } - return writer -} - -// ReleaseGzipWriter accepts a writer (does not have to be one that was cached) -// only when the cache has room for it. It will ignore it otherwise. 
-func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) { - // forget the unmanaged ones - if len(b.gzipWriters) < b.writersCapacity { - b.gzipWriters <- w - } -} - -// AcquireGzipReader returns a *gzip.Reader. Needs to be released. -func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader { - var reader *gzip.Reader - select { - case reader, _ = <-b.gzipReaders: - default: - // return a new unmanaged one - reader = newGzipReader() - } - return reader -} - -// ReleaseGzipReader accepts a reader (does not have to be one that was cached) -// only when the cache has room for it. It will ignore it otherwise. -func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) { - // forget the unmanaged ones - if len(b.gzipReaders) < b.readersCapacity { - b.gzipReaders <- r - } -} - -// AcquireZlibWriter returns an resettable *zlib.Writer. Needs to be released. -func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer { - var writer *zlib.Writer - select { - case writer, _ = <-b.zlibWriters: - default: - // return a new unmanaged one - writer = newZlibWriter() - } - return writer -} - -// ReleaseZlibWriter accepts a writer (does not have to be one that was cached) -// only when the cache has room for it. It will ignore it otherwise. -func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) { - // forget the unmanaged ones - if len(b.zlibWriters) < b.writersCapacity { - b.zlibWriters <- w - } -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/compressor_pools.go b/src/vendor/github.com/emicklei/go-restful/v3/compressor_pools.go deleted file mode 100644 index d866ce64b..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/compressor_pools.go +++ /dev/null @@ -1,91 +0,0 @@ -package restful - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "compress/gzip" - "compress/zlib" - "sync" -) - -// SyncPoolCompessors is a CompressorProvider that use the standard sync.Pool. -type SyncPoolCompessors struct { - GzipWriterPool *sync.Pool - GzipReaderPool *sync.Pool - ZlibWriterPool *sync.Pool -} - -// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors. 
-func NewSyncPoolCompessors() *SyncPoolCompessors { - return &SyncPoolCompessors{ - GzipWriterPool: &sync.Pool{ - New: func() interface{} { return newGzipWriter() }, - }, - GzipReaderPool: &sync.Pool{ - New: func() interface{} { return newGzipReader() }, - }, - ZlibWriterPool: &sync.Pool{ - New: func() interface{} { return newZlibWriter() }, - }, - } -} - -func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer { - return s.GzipWriterPool.Get().(*gzip.Writer) -} - -func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) { - s.GzipWriterPool.Put(w) -} - -func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader { - return s.GzipReaderPool.Get().(*gzip.Reader) -} - -func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) { - s.GzipReaderPool.Put(r) -} - -func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer { - return s.ZlibWriterPool.Get().(*zlib.Writer) -} - -func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) { - s.ZlibWriterPool.Put(w) -} - -func newGzipWriter() *gzip.Writer { - // create with an empty bytes writer; it will be replaced before using the gzipWriter - writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed) - if err != nil { - panic(err.Error()) - } - return writer -} - -func newGzipReader() *gzip.Reader { - // create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader - // we can safely use currentCompressProvider because it is set on package initialization. - w := currentCompressorProvider.AcquireGzipWriter() - defer currentCompressorProvider.ReleaseGzipWriter(w) - b := new(bytes.Buffer) - w.Reset(b) - w.Flush() - w.Close() - reader, err := gzip.NewReader(bytes.NewReader(b.Bytes())) - if err != nil { - panic(err.Error()) - } - return reader -} - -func newZlibWriter() *zlib.Writer { - writer, err := zlib.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed) - if err != nil { - panic(err.Error()) - } - return writer -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/compressors.go b/src/vendor/github.com/emicklei/go-restful/v3/compressors.go deleted file mode 100644 index 9db4a8c8e..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/compressors.go +++ /dev/null @@ -1,54 +0,0 @@ -package restful - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "compress/gzip" - "compress/zlib" -) - -// CompressorProvider describes a component that can provider compressors for the std methods. -type CompressorProvider interface { - // Returns a *gzip.Writer which needs to be released later. - // Before using it, call Reset(). - AcquireGzipWriter() *gzip.Writer - - // Releases an acquired *gzip.Writer. - ReleaseGzipWriter(w *gzip.Writer) - - // Returns a *gzip.Reader which needs to be released later. - AcquireGzipReader() *gzip.Reader - - // Releases an acquired *gzip.Reader. - ReleaseGzipReader(w *gzip.Reader) - - // Returns a *zlib.Writer which needs to be released later. - // Before using it, call Reset(). - AcquireZlibWriter() *zlib.Writer - - // Releases an acquired *zlib.Writer. - ReleaseZlibWriter(w *zlib.Writer) -} - -// DefaultCompressorProvider is the actual provider of compressors (zlib or gzip). -var currentCompressorProvider CompressorProvider - -func init() { - currentCompressorProvider = NewSyncPoolCompessors() -} - -// CurrentCompressorProvider returns the current CompressorProvider. -// It is initialized using a SyncPoolCompessors. 
-func CurrentCompressorProvider() CompressorProvider { - return currentCompressorProvider -} - -// SetCompressorProvider sets the actual provider of compressors (zlib or gzip). -func SetCompressorProvider(p CompressorProvider) { - if p == nil { - panic("cannot set compressor provider to nil") - } - currentCompressorProvider = p -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/constants.go b/src/vendor/github.com/emicklei/go-restful/v3/constants.go deleted file mode 100644 index 203439c5e..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/constants.go +++ /dev/null @@ -1,30 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -const ( - MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces() - MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces() - MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default - - HEADER_Allow = "Allow" - HEADER_Accept = "Accept" - HEADER_Origin = "Origin" - HEADER_ContentType = "Content-Type" - HEADER_LastModified = "Last-Modified" - HEADER_AcceptEncoding = "Accept-Encoding" - HEADER_ContentEncoding = "Content-Encoding" - HEADER_AccessControlExposeHeaders = "Access-Control-Expose-Headers" - HEADER_AccessControlRequestMethod = "Access-Control-Request-Method" - HEADER_AccessControlRequestHeaders = "Access-Control-Request-Headers" - HEADER_AccessControlAllowMethods = "Access-Control-Allow-Methods" - HEADER_AccessControlAllowOrigin = "Access-Control-Allow-Origin" - HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials" - HEADER_AccessControlAllowHeaders = "Access-Control-Allow-Headers" - HEADER_AccessControlMaxAge = "Access-Control-Max-Age" - - ENCODING_GZIP = "gzip" - ENCODING_DEFLATE = "deflate" -) diff --git a/src/vendor/github.com/emicklei/go-restful/v3/container.go b/src/vendor/github.com/emicklei/go-restful/v3/container.go deleted file mode 100644 index dd56246dd..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/container.go +++ /dev/null @@ -1,450 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "errors" - "fmt" - "net/http" - "os" - "runtime" - "strings" - "sync" - - "github.com/emicklei/go-restful/v3/log" -) - -// Container holds a collection of WebServices and a http.ServeMux to dispatch http requests. 
-// The requests are further dispatched to routes of WebServices using a RouteSelector -type Container struct { - webServicesLock sync.RWMutex - webServices []*WebService - ServeMux *http.ServeMux - isRegisteredOnRoot bool - containerFilters []FilterFunction - doNotRecover bool // default is true - recoverHandleFunc RecoverHandleFunction - serviceErrorHandleFunc ServiceErrorHandleFunction - router RouteSelector // default is a CurlyRouter (RouterJSR311 is a slower alternative) - contentEncodingEnabled bool // default is false -} - -// NewContainer creates a new Container using a new ServeMux and default router (CurlyRouter) -func NewContainer() *Container { - return &Container{ - webServices: []*WebService{}, - ServeMux: http.NewServeMux(), - isRegisteredOnRoot: false, - containerFilters: []FilterFunction{}, - doNotRecover: true, - recoverHandleFunc: logStackOnRecover, - serviceErrorHandleFunc: writeServiceError, - router: CurlyRouter{}, - contentEncodingEnabled: false} -} - -// RecoverHandleFunction declares functions that can be used to handle a panic situation. -// The first argument is what recover() returns. The second must be used to communicate an error response. -type RecoverHandleFunction func(interface{}, http.ResponseWriter) - -// RecoverHandler changes the default function (logStackOnRecover) to be called -// when a panic is detected. DoNotRecover must be have its default value (=false). -func (c *Container) RecoverHandler(handler RecoverHandleFunction) { - c.recoverHandleFunc = handler -} - -// ServiceErrorHandleFunction declares functions that can be used to handle a service error situation. -// The first argument is the service error, the second is the request that resulted in the error and -// the third must be used to communicate an error response. -type ServiceErrorHandleFunction func(ServiceError, *Request, *Response) - -// ServiceErrorHandler changes the default function (writeServiceError) to be called -// when a ServiceError is detected. -func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) { - c.serviceErrorHandleFunc = handler -} - -// DoNotRecover controls whether panics will be caught to return HTTP 500. -// If set to true, Route functions are responsible for handling any error situation. -// Default value is true. -func (c *Container) DoNotRecover(doNot bool) { - c.doNotRecover = doNot -} - -// Router changes the default Router (currently CurlyRouter) -func (c *Container) Router(aRouter RouteSelector) { - c.router = aRouter -} - -// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. -func (c *Container) EnableContentEncoding(enabled bool) { - c.contentEncodingEnabled = enabled -} - -// Add a WebService to the Container. It will detect duplicate root paths and exit in that case. 
-func (c *Container) Add(service *WebService) *Container { - c.webServicesLock.Lock() - defer c.webServicesLock.Unlock() - - // if rootPath was not set then lazy initialize it - if len(service.rootPath) == 0 { - service.Path("/") - } - - // cannot have duplicate root paths - for _, each := range c.webServices { - if each.RootPath() == service.RootPath() { - log.Printf("WebService with duplicate root path detected:['%v']", each) - os.Exit(1) - } - } - - // If not registered on root then add specific mapping - if !c.isRegisteredOnRoot { - c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux) - } - c.webServices = append(c.webServices, service) - return c -} - -// addHandler may set a new HandleFunc for the serveMux -// this function must run inside the critical region protected by the webServicesLock. -// returns true if the function was registered on root ("/") -func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool { - pattern := fixedPrefixPath(service.RootPath()) - // check if root path registration is needed - if "/" == pattern || "" == pattern { - serveMux.HandleFunc("/", c.dispatch) - return true - } - // detect if registration already exists - alreadyMapped := false - for _, each := range c.webServices { - if each.RootPath() == service.RootPath() { - alreadyMapped = true - break - } - } - if !alreadyMapped { - serveMux.HandleFunc(pattern, c.dispatch) - if !strings.HasSuffix(pattern, "/") { - serveMux.HandleFunc(pattern+"/", c.dispatch) - } - } - return false -} - -func (c *Container) Remove(ws *WebService) error { - if c.ServeMux == http.DefaultServeMux { - errMsg := fmt.Sprintf("cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws) - log.Print(errMsg) - return errors.New(errMsg) - } - c.webServicesLock.Lock() - defer c.webServicesLock.Unlock() - // build a new ServeMux and re-register all WebServices - newServeMux := http.NewServeMux() - newServices := []*WebService{} - newIsRegisteredOnRoot := false - for _, each := range c.webServices { - if each.rootPath != ws.rootPath { - // If not registered on root then add specific mapping - if !newIsRegisteredOnRoot { - newIsRegisteredOnRoot = c.addHandler(each, newServeMux) - } - newServices = append(newServices, each) - } - } - c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot - return nil -} - -// logStackOnRecover is the default RecoverHandleFunction and is called -// when DoNotRecover is false and the recoverHandleFunc is not set for the container. -// Default implementation logs the stacktrace and writes the stacktrace on the response. -// This may be a security issue as it exposes sourcecode information. -func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) { - var buffer bytes.Buffer - buffer.WriteString(fmt.Sprintf("recover from panic situation: - %v\r\n", panicReason)) - for i := 2; ; i += 1 { - _, file, line, ok := runtime.Caller(i) - if !ok { - break - } - buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line)) - } - log.Print(buffer.String()) - httpWriter.WriteHeader(http.StatusInternalServerError) - httpWriter.Write(buffer.Bytes()) -} - -// writeServiceError is the default ServiceErrorHandleFunction and is called -// when a ServiceError is returned during route selection. 
Default implementation -// calls resp.WriteErrorString(err.Code, err.Message) -func writeServiceError(err ServiceError, req *Request, resp *Response) { - for header, values := range err.Header { - for _, value := range values { - resp.Header().Add(header, value) - } - } - resp.WriteErrorString(err.Code, err.Message) -} - -// Dispatch the incoming Http Request to a matching WebService. -func (c *Container) Dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) { - if httpWriter == nil { - panic("httpWriter cannot be nil") - } - if httpRequest == nil { - panic("httpRequest cannot be nil") - } - c.dispatch(httpWriter, httpRequest) -} - -// Dispatch the incoming Http Request to a matching WebService. -func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) { - // so we can assign a compressing one later - writer := httpWriter - - // CompressingResponseWriter should be closed after all operations are done - defer func() { - if compressWriter, ok := writer.(*CompressingResponseWriter); ok { - compressWriter.Close() - } - }() - - // Instal panic recovery unless told otherwise - if !c.doNotRecover { // catch all for 500 response - defer func() { - if r := recover(); r != nil { - c.recoverHandleFunc(r, writer) - return - } - }() - } - - // Find best match Route ; err is non nil if no match was found - var webService *WebService - var route *Route - var err error - func() { - c.webServicesLock.RLock() - defer c.webServicesLock.RUnlock() - webService, route, err = c.router.SelectRoute( - c.webServices, - httpRequest) - }() - if err != nil { - // a non-200 response (may be compressed) has already been written - // run container filters anyway ; they should not touch the response... - chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) { - switch err.(type) { - case ServiceError: - ser := err.(ServiceError) - c.serviceErrorHandleFunc(ser, req, resp) - } - // TODO - }} - chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer)) - return - } - - // Unless httpWriter is already an CompressingResponseWriter see if we need to install one - if _, isCompressing := httpWriter.(*CompressingResponseWriter); !isCompressing { - // Detect if compression is needed - // assume without compression, test for override - contentEncodingEnabled := c.contentEncodingEnabled - if route != nil && route.contentEncodingEnabled != nil { - contentEncodingEnabled = *route.contentEncodingEnabled - } - if contentEncodingEnabled { - doCompress, encoding := wantsCompressedResponse(httpRequest, httpWriter) - if doCompress { - var err error - writer, err = NewCompressingResponseWriter(httpWriter, encoding) - if err != nil { - log.Print("unable to install compressor: ", err) - httpWriter.WriteHeader(http.StatusInternalServerError) - return - } - } - } - } - - pathProcessor, routerProcessesPath := c.router.(PathProcessor) - if !routerProcessesPath { - pathProcessor = defaultPathProcessor{} - } - pathParams := pathProcessor.ExtractParameters(route, webService, httpRequest.URL.Path) - wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest, pathParams) - // pass through filters (if any) - if size := len(c.containerFilters) + len(webService.filters) + len(route.Filters); size > 0 { - // compose filter chain - allFilters := make([]FilterFunction, 0, size) - allFilters = append(allFilters, c.containerFilters...) - allFilters = append(allFilters, webService.filters...) - allFilters = append(allFilters, route.Filters...) 
- chain := FilterChain{ - Filters: allFilters, - Target: route.Function, - ParameterDocs: route.ParameterDocs, - Operation: route.Operation, - } - chain.ProcessFilter(wrappedRequest, wrappedResponse) - } else { - // no filters, handle request by route - route.Function(wrappedRequest, wrappedResponse) - } -} - -// fixedPrefixPath returns the fixed part of the partspec ; it may include template vars {} -func fixedPrefixPath(pathspec string) string { - varBegin := strings.Index(pathspec, "{") - if -1 == varBegin { - return pathspec - } - return pathspec[:varBegin] -} - -// ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server -func (c *Container) ServeHTTP(httpWriter http.ResponseWriter, httpRequest *http.Request) { - // Skip, if content encoding is disabled - if !c.contentEncodingEnabled { - c.ServeMux.ServeHTTP(httpWriter, httpRequest) - return - } - // content encoding is enabled - - // Skip, if httpWriter is already an CompressingResponseWriter - if _, ok := httpWriter.(*CompressingResponseWriter); ok { - c.ServeMux.ServeHTTP(httpWriter, httpRequest) - return - } - - writer := httpWriter - // CompressingResponseWriter should be closed after all operations are done - defer func() { - if compressWriter, ok := writer.(*CompressingResponseWriter); ok { - compressWriter.Close() - } - }() - - doCompress, encoding := wantsCompressedResponse(httpRequest, httpWriter) - if doCompress { - var err error - writer, err = NewCompressingResponseWriter(httpWriter, encoding) - if err != nil { - log.Print("unable to install compressor: ", err) - httpWriter.WriteHeader(http.StatusInternalServerError) - return - } - } - - c.ServeMux.ServeHTTP(writer, httpRequest) -} - -// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics. -func (c *Container) Handle(pattern string, handler http.Handler) { - c.ServeMux.Handle(pattern, http.HandlerFunc(func(httpWriter http.ResponseWriter, httpRequest *http.Request) { - // Skip, if httpWriter is already an CompressingResponseWriter - if _, ok := httpWriter.(*CompressingResponseWriter); ok { - handler.ServeHTTP(httpWriter, httpRequest) - return - } - - writer := httpWriter - - // CompressingResponseWriter should be closed after all operations are done - defer func() { - if compressWriter, ok := writer.(*CompressingResponseWriter); ok { - compressWriter.Close() - } - }() - - if c.contentEncodingEnabled { - doCompress, encoding := wantsCompressedResponse(httpRequest, httpWriter) - if doCompress { - var err error - writer, err = NewCompressingResponseWriter(httpWriter, encoding) - if err != nil { - log.Print("unable to install compressor: ", err) - httpWriter.WriteHeader(http.StatusInternalServerError) - return - } - } - } - - handler.ServeHTTP(writer, httpRequest) - })) -} - -// HandleWithFilter registers the handler for the given pattern. -// Container's filter chain is applied for handler. -// If a handler already exists for pattern, HandleWithFilter panics. 
-func (c *Container) HandleWithFilter(pattern string, handler http.Handler) { - f := func(httpResponse http.ResponseWriter, httpRequest *http.Request) { - if len(c.containerFilters) == 0 { - handler.ServeHTTP(httpResponse, httpRequest) - return - } - - chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) { - handler.ServeHTTP(resp, req.Request) - }} - chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse)) - } - - c.Handle(pattern, http.HandlerFunc(f)) -} - -// Filter appends a container FilterFunction. These are called before dispatching -// a http.Request to a WebService from the container -func (c *Container) Filter(filter FilterFunction) { - c.containerFilters = append(c.containerFilters, filter) -} - -// RegisteredWebServices returns the collections of added WebServices -func (c *Container) RegisteredWebServices() []*WebService { - c.webServicesLock.RLock() - defer c.webServicesLock.RUnlock() - result := make([]*WebService, len(c.webServices)) - for ix := range c.webServices { - result[ix] = c.webServices[ix] - } - return result -} - -// computeAllowedMethods returns a list of HTTP methods that are valid for a Request -func (c *Container) computeAllowedMethods(req *Request) []string { - // Go through all RegisteredWebServices() and all its Routes to collect the options - methods := []string{} - requestPath := req.Request.URL.Path - for _, ws := range c.RegisteredWebServices() { - matches := ws.pathExpr.Matcher.FindStringSubmatch(requestPath) - if matches != nil { - finalMatch := matches[len(matches)-1] - for _, rt := range ws.Routes() { - matches := rt.pathExpr.Matcher.FindStringSubmatch(finalMatch) - if matches != nil { - lastMatch := matches[len(matches)-1] - if lastMatch == "" || lastMatch == "/" { // do not include if value is neither empty nor ‘/’. - methods = append(methods, rt.Method) - } - } - } - } - } - // methods = append(methods, "OPTIONS") not sure about this - return methods -} - -// newBasicRequestResponse creates a pair of Request,Response from its http versions. -// It is basic because no parameter or (produces) content-type information is given. -func newBasicRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) { - resp := NewResponse(httpWriter) - resp.requestAccept = httpRequest.Header.Get(HEADER_Accept) - return NewRequest(httpRequest), resp -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/cors_filter.go b/src/vendor/github.com/emicklei/go-restful/v3/cors_filter.go deleted file mode 100644 index 9d18dfb7b..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/cors_filter.go +++ /dev/null @@ -1,193 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "regexp" - "strconv" - "strings" -) - -// CrossOriginResourceSharing is used to create a Container Filter that implements CORS. -// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page -// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from. -// -// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing -// http://enable-cors.org/server.html -// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request -type CrossOriginResourceSharing struct { - ExposeHeaders []string // list of Header names - - // AllowedHeaders is alist of Header names. 
Checking is case-insensitive. - // The list may contain the special wildcard string ".*" ; all is allowed - AllowedHeaders []string - - // AllowedDomains is a list of allowed values for Http Origin. - // The list may contain the special wildcard string ".*" ; all is allowed - // If empty all are allowed. - AllowedDomains []string - - // AllowedDomainFunc is optional and is a function that will do the check - // when the origin is not part of the AllowedDomains and it does not contain the wildcard ".*". - AllowedDomainFunc func(origin string) bool - - // AllowedMethods is either empty or has a list of http methods names. Checking is case-insensitive. - AllowedMethods []string - MaxAge int // number of seconds before requiring new Options request - CookiesAllowed bool - Container *Container - - allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check. -} - -// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html -// and http://www.html5rocks.com/static/images/cors_server_flowchart.png -func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *FilterChain) { - origin := req.Request.Header.Get(HEADER_Origin) - if len(origin) == 0 { - if trace { - traceLogger.Print("no Http header Origin set") - } - chain.ProcessFilter(req, resp) - return - } - if !c.isOriginAllowed(origin) { // check whether this origin is allowed - if trace { - traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns) - } - chain.ProcessFilter(req, resp) - return - } - if req.Request.Method != "OPTIONS" { - c.doActualRequest(req, resp) - chain.ProcessFilter(req, resp) - return - } - if acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod); acrm != "" { - c.doPreflightRequest(req, resp) - } else { - c.doActualRequest(req, resp) - chain.ProcessFilter(req, resp) - return - } -} - -func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response) { - c.setOptionsHeaders(req, resp) - // continue processing the response -} - -func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) { - if len(c.AllowedMethods) == 0 { - if c.Container == nil { - c.AllowedMethods = DefaultContainer.computeAllowedMethods(req) - } else { - c.AllowedMethods = c.Container.computeAllowedMethods(req) - } - } - - acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod) - if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) { - if trace { - traceLogger.Printf("Http header %s:%s is not in %v", - HEADER_AccessControlRequestMethod, - acrm, - c.AllowedMethods) - } - return - } - acrhs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders) - if len(acrhs) > 0 { - for _, each := range strings.Split(acrhs, ",") { - if !c.isValidAccessControlRequestHeader(strings.Trim(each, " ")) { - if trace { - traceLogger.Printf("Http header %s:%s is not in %v", - HEADER_AccessControlRequestHeaders, - acrhs, - c.AllowedHeaders) - } - return - } - } - } - resp.AddHeader(HEADER_AccessControlAllowMethods, strings.Join(c.AllowedMethods, ",")) - resp.AddHeader(HEADER_AccessControlAllowHeaders, acrhs) - c.setOptionsHeaders(req, resp) - - // return http 200 response, no body -} - -func (c CrossOriginResourceSharing) setOptionsHeaders(req *Request, resp *Response) { - c.checkAndSetExposeHeaders(resp) - c.setAllowOriginHeader(req, resp) - c.checkAndSetAllowCredentials(resp) - if c.MaxAge > 0 { - 
resp.AddHeader(HEADER_AccessControlMaxAge, strconv.Itoa(c.MaxAge)) - } -} - -func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool { - if len(origin) == 0 { - return false - } - lowerOrigin := strings.ToLower(origin) - if len(c.AllowedDomains) == 0 { - if c.AllowedDomainFunc != nil { - return c.AllowedDomainFunc(lowerOrigin) - } - return true - } - - // exact match on each allowed domain - for _, domain := range c.AllowedDomains { - if domain == ".*" || strings.ToLower(domain) == lowerOrigin { - return true - } - } - if c.AllowedDomainFunc != nil { - return c.AllowedDomainFunc(origin) - } - return false -} - -func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) { - origin := req.Request.Header.Get(HEADER_Origin) - if c.isOriginAllowed(origin) { - resp.AddHeader(HEADER_AccessControlAllowOrigin, origin) - } -} - -func (c CrossOriginResourceSharing) checkAndSetExposeHeaders(resp *Response) { - if len(c.ExposeHeaders) > 0 { - resp.AddHeader(HEADER_AccessControlExposeHeaders, strings.Join(c.ExposeHeaders, ",")) - } -} - -func (c CrossOriginResourceSharing) checkAndSetAllowCredentials(resp *Response) { - if c.CookiesAllowed { - resp.AddHeader(HEADER_AccessControlAllowCredentials, "true") - } -} - -func (c CrossOriginResourceSharing) isValidAccessControlRequestMethod(method string, allowedMethods []string) bool { - for _, each := range allowedMethods { - if each == method { - return true - } - } - return false -} - -func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header string) bool { - for _, each := range c.AllowedHeaders { - if strings.ToLower(each) == strings.ToLower(header) { - return true - } - if each == "*" { - return true - } - } - return false -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/coverage.sh b/src/vendor/github.com/emicklei/go-restful/v3/coverage.sh deleted file mode 100644 index e27dbf1a9..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/coverage.sh +++ /dev/null @@ -1,2 +0,0 @@ -go test -coverprofile=coverage.out -go tool cover -html=coverage.out \ No newline at end of file diff --git a/src/vendor/github.com/emicklei/go-restful/v3/curly.go b/src/vendor/github.com/emicklei/go-restful/v3/curly.go deleted file mode 100644 index ba1fc5d5f..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/curly.go +++ /dev/null @@ -1,173 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "net/http" - "regexp" - "sort" - "strings" -) - -// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets. -type CurlyRouter struct{} - -// SelectRoute is part of the Router interface and returns the best match -// for the WebService and its Route for the given Request. 
-func (c CurlyRouter) SelectRoute( - webServices []*WebService, - httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) { - - requestTokens := tokenizePath(httpRequest.URL.Path) - - detectedService := c.detectWebService(requestTokens, webServices) - if detectedService == nil { - if trace { - traceLogger.Printf("no WebService was found to match URL path:%s\n", httpRequest.URL.Path) - } - return nil, nil, NewError(http.StatusNotFound, "404: Page Not Found") - } - candidateRoutes := c.selectRoutes(detectedService, requestTokens) - if len(candidateRoutes) == 0 { - if trace { - traceLogger.Printf("no Route in WebService with path %s was found to match URL path:%s\n", detectedService.rootPath, httpRequest.URL.Path) - } - return detectedService, nil, NewError(http.StatusNotFound, "404: Page Not Found") - } - selectedRoute, err := c.detectRoute(candidateRoutes, httpRequest) - if selectedRoute == nil { - return detectedService, nil, err - } - return detectedService, selectedRoute, nil -} - -// selectRoutes return a collection of Route from a WebService that matches the path tokens from the request. -func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes { - candidates := make(sortableCurlyRoutes, 0, 8) - for _, each := range ws.routes { - matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens, each.hasCustomVerb) - if matches { - candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers? - } - } - sort.Sort(candidates) - return candidates -} - -// matchesRouteByPathTokens computes whether it matches, howmany parameters do match and what the number of static path elements are. -func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string, routeHasCustomVerb bool) (matches bool, paramCount int, staticCount int) { - if len(routeTokens) < len(requestTokens) { - // proceed in matching only if last routeToken is wildcard - count := len(routeTokens) - if count == 0 || !strings.HasSuffix(routeTokens[count-1], "*}") { - return false, 0, 0 - } - // proceed - } - for i, routeToken := range routeTokens { - if i == len(requestTokens) { - // reached end of request path - return false, 0, 0 - } - requestToken := requestTokens[i] - if routeHasCustomVerb && hasCustomVerb(routeToken){ - if !isMatchCustomVerb(routeToken, requestToken) { - return false, 0, 0 - } - staticCount++ - requestToken = removeCustomVerb(requestToken) - routeToken = removeCustomVerb(routeToken) - } - - if strings.HasPrefix(routeToken, "{") { - paramCount++ - if colon := strings.Index(routeToken, ":"); colon != -1 { - // match by regex - matchesToken, matchesRemainder := c.regularMatchesPathToken(routeToken, colon, requestToken) - if !matchesToken { - return false, 0, 0 - } - if matchesRemainder { - break - } - } - } else { // no { prefix - if requestToken != routeToken { - return false, 0, 0 - } - staticCount++ - } - } - return true, paramCount, staticCount -} - -// regularMatchesPathToken tests whether the regular expression part of routeToken matches the requestToken or all remaining tokens -// format routeToken is {someVar:someExpression}, e.g. 
{zipcode:[\d][\d][\d][\d][A-Z][A-Z]} -func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, requestToken string) (matchesToken bool, matchesRemainder bool) { - regPart := routeToken[colon+1 : len(routeToken)-1] - if regPart == "*" { - if trace { - traceLogger.Printf("wildcard parameter detected in route token %s that matches %s\n", routeToken, requestToken) - } - return true, true - } - matched, err := regexp.MatchString(regPart, requestToken) - return (matched && err == nil), false -} - -var jsr311Router = RouterJSR311{} - -// detectRoute selectes from a list of Route the first match by inspecting both the Accept and Content-Type -// headers of the Request. See also RouterJSR311 in jsr311.go -func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpRequest *http.Request) (*Route, error) { - // tracing is done inside detectRoute - return jsr311Router.detectRoute(candidateRoutes.routes(), httpRequest) -} - -// detectWebService returns the best matching webService given the list of path tokens. -// see also computeWebserviceScore -func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService { - var best *WebService - score := -1 - for _, each := range webServices { - matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens) - if matches && (eachScore > score) { - best = each - score = eachScore - } - } - return best -} - -// computeWebserviceScore returns whether tokens match and -// the weighted score of the longest matching consecutive tokens from the beginning. -func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) { - if len(tokens) > len(requestTokens) { - return false, 0 - } - score := 0 - for i := 0; i < len(tokens); i++ { - each := requestTokens[i] - other := tokens[i] - if len(each) == 0 && len(other) == 0 { - score++ - continue - } - if len(other) > 0 && strings.HasPrefix(other, "{") { - // no empty match - if len(each) == 0 { - return false, score - } - score += 1 - } else { - // not a parameter - if each != other { - return false, score - } - score += (len(tokens) - i) * 10 //fuzzy - } - } - return true, score -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/curly_route.go b/src/vendor/github.com/emicklei/go-restful/v3/curly_route.go deleted file mode 100644 index 403dd3be9..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/curly_route.go +++ /dev/null @@ -1,54 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -// curlyRoute exits for sorting Routes by the CurlyRouter based on number of parameters and number of static path elements. -type curlyRoute struct { - route Route - paramCount int - staticCount int -} - -// sortableCurlyRoutes orders by most parameters and path elements first. 
-type sortableCurlyRoutes []curlyRoute - -func (s *sortableCurlyRoutes) add(route curlyRoute) { - *s = append(*s, route) -} - -func (s sortableCurlyRoutes) routes() (routes []Route) { - routes = make([]Route, 0, len(s)) - for _, each := range s { - routes = append(routes, each.route) // TODO change return type - } - return routes -} - -func (s sortableCurlyRoutes) Len() int { - return len(s) -} -func (s sortableCurlyRoutes) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s sortableCurlyRoutes) Less(i, j int) bool { - a := s[j] - b := s[i] - - // primary key - if a.staticCount < b.staticCount { - return true - } - if a.staticCount > b.staticCount { - return false - } - // secundary key - if a.paramCount < b.paramCount { - return true - } - if a.paramCount > b.paramCount { - return false - } - return a.route.Path < b.route.Path -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/custom_verb.go b/src/vendor/github.com/emicklei/go-restful/v3/custom_verb.go deleted file mode 100644 index bfc17efde..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/custom_verb.go +++ /dev/null @@ -1,29 +0,0 @@ -package restful - -import ( - "fmt" - "regexp" -) - -var ( - customVerbReg = regexp.MustCompile(":([A-Za-z]+)$") -) - -func hasCustomVerb(routeToken string) bool { - return customVerbReg.MatchString(routeToken) -} - -func isMatchCustomVerb(routeToken string, pathToken string) bool { - rs := customVerbReg.FindStringSubmatch(routeToken) - if len(rs) < 2 { - return false - } - - customVerb := rs[1] - specificVerbReg := regexp.MustCompile(fmt.Sprintf(":%s$", customVerb)) - return specificVerbReg.MatchString(pathToken) -} - -func removeCustomVerb(str string) string { - return customVerbReg.ReplaceAllString(str, "") -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/doc.go b/src/vendor/github.com/emicklei/go-restful/v3/doc.go deleted file mode 100644 index 69b13057d..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/doc.go +++ /dev/null @@ -1,185 +0,0 @@ -/* -Package restful , a lean package for creating REST-style WebServices without magic. - -WebServices and Routes - -A WebService has a collection of Route objects that dispatch incoming Http Requests to a function calls. -Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes. -WebServices must be added to a container (see below) in order to handler Http requests from a server. - -A Route is defined by a HTTP method, an URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept). -This package has the logic to find the best matching Route and if found, call its Function. - - ws := new(restful.WebService) - ws. - Path("/users"). - Consumes(restful.MIME_JSON, restful.MIME_XML). - Produces(restful.MIME_JSON, restful.MIME_XML) - - ws.Route(ws.GET("/{user-id}").To(u.findUser)) // u is a UserResource - - ... - - // GET http://localhost:8080/users/1 - func (u UserResource) findUser(request *restful.Request, response *restful.Response) { - id := request.PathParameter("user-id") - ... - } - -The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response. - -See the example https://github.com/emicklei/go-restful/blob/v3/examples/user-resource/restful-user-resource.go with a full implementation. 
- -Regular expression matching Routes - -A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path. -For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters. -Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax) -This feature requires the use of a CurlyRouter. - -Containers - -A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests. -Using the statements "restful.Add(...) and restful.Filter(...)" will register WebServices and Filters to the Default Container. -The Default container of go-restful uses the http.DefaultServeMux. -You can create your own Container and create a new http.Server for that particular container. - - container := restful.NewContainer() - server := &http.Server{Addr: ":8081", Handler: container} - -Filters - -A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses. -You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc. -In the restful package there are three hooks into the request,response flow where filters can be added. -Each filter must define a FilterFunction: - - func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain) - -Use the following statement to pass the request,response pair to the next filter or RouteFunction - - chain.ProcessFilter(req, resp) - -Container Filters - -These are processed before any registered WebService. - - // install a (global) filter for the default container (processed before any webservice) - restful.Filter(globalLogging) - -WebService Filters - -These are processed before any Route of a WebService. - - // install a webservice filter (processed before any route) - ws.Filter(webserviceLogging).Filter(measureTime) - - -Route Filters - -These are processed before calling the function associated with the Route. - - // install 2 chained route filters (processed before calling findUser) - ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser)) - -See the example https://github.com/emicklei/go-restful/blob/v3/examples/filters/restful-filters.go with full implementations. - -Response Encoding - -Two encodings are supported: gzip and deflate. To enable this for all responses: - - restful.DefaultContainer.EnableContentEncoding(true) - -If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding. -Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route. - -See the example https://github.com/emicklei/go-restful/blob/v3/examples/encoding/restful-encoding-filter.go - -OPTIONS support - -By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request. - - Filter(OPTIONSFilter()) - -CORS - -By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests. - - cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer} - Filter(cors.Filter) - -Error Handling - -Unexpected things happen. 
If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why. -For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation. - - 400: Bad Request - -If path or query parameters are not valid (content or type) then use http.StatusBadRequest. - - 404: Not Found - -Despite a valid URI, the resource requested may not be available - - 500: Internal Server Error - -If the application logic could not process the request (or write the response) then use http.StatusInternalServerError. - - 405: Method Not Allowed - -The request has a valid URL but the method (GET,PUT,POST,...) is not allowed. - - 406: Not Acceptable - -The request does not have or has an unknown Accept Header set for this operation. - - 415: Unsupported Media Type - -The request does not have or has an unknown Content-Type Header set for this operation. - -ServiceError - -In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response. - -Performance options - -This package has several options that affect the performance of your service. It is important to understand them and how you can change it. - - restful.DefaultContainer.DoNotRecover(false) - -DoNotRecover controls whether panics will be caught to return HTTP 500. -If set to false, the container will recover from panics. -Default value is true - - restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20)) - -If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool. -Because writers are expensive structures, performance is even more improved when using a preloaded cache. You can also inject your own implementation. - -Trouble shooting - -This package has the means to produce detail logging of the complete Http request matching process and filter invocation. -Enabling this feature requires you to set an implementation of restful.StdLogger (e.g. log.Logger) instance such as: - - restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile)) - -Logging - -The restful.SetLogger() method allows you to override the logger used by the package. By default restful -uses the standard library `log` package and logs to stdout. Different logging packages are supported as -long as they conform to `StdLogger` interface defined in the `log` sub-package, writing an adapter for your -preferred package is simple. - -Resources - -[project]: https://github.com/emicklei/go-restful - -[examples]: https://github.com/emicklei/go-restful/blob/master/examples - -[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/ - -[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape - -(c) 2012-2015, http://ernestmicklei.com. MIT License -*/ -package restful diff --git a/src/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go b/src/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go deleted file mode 100644 index 66dfc824f..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go +++ /dev/null @@ -1,162 +0,0 @@ -package restful - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "encoding/xml" - "strings" - "sync" -) - -// EntityReaderWriter can read and write values using an encoding such as JSON,XML. 
-type EntityReaderWriter interface { - // Read a serialized version of the value from the request. - // The Request may have a decompressing reader. Depends on Content-Encoding. - Read(req *Request, v interface{}) error - - // Write a serialized version of the value on the response. - // The Response may have a compressing writer. Depends on Accept-Encoding. - // status should be a valid Http Status code - Write(resp *Response, status int, v interface{}) error -} - -// entityAccessRegistry is a singleton -var entityAccessRegistry = &entityReaderWriters{ - protection: new(sync.RWMutex), - accessors: map[string]EntityReaderWriter{}, -} - -// entityReaderWriters associates MIME to an EntityReaderWriter -type entityReaderWriters struct { - protection *sync.RWMutex - accessors map[string]EntityReaderWriter -} - -func init() { - RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON)) - RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML)) -} - -// RegisterEntityAccessor add/overrides the ReaderWriter for encoding content with this MIME type. -func RegisterEntityAccessor(mime string, erw EntityReaderWriter) { - entityAccessRegistry.protection.Lock() - defer entityAccessRegistry.protection.Unlock() - entityAccessRegistry.accessors[mime] = erw -} - -// NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content. -// This package is already initialized with such an accessor using the MIME_JSON contentType. -func NewEntityAccessorJSON(contentType string) EntityReaderWriter { - return entityJSONAccess{ContentType: contentType} -} - -// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content. -// This package is already initialized with such an accessor using the MIME_XML contentType. -func NewEntityAccessorXML(contentType string) EntityReaderWriter { - return entityXMLAccess{ContentType: contentType} -} - -// accessorAt returns the registered ReaderWriter for this MIME type. -func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) { - r.protection.RLock() - defer r.protection.RUnlock() - er, ok := r.accessors[mime] - if !ok { - // retry with reverse lookup - // more expensive but we are in an exceptional situation anyway - for k, v := range r.accessors { - if strings.Contains(mime, k) { - return v, true - } - } - } - return er, ok -} - -// entityXMLAccess is a EntityReaderWriter for XML encoding -type entityXMLAccess struct { - // This is used for setting the Content-Type header when writing - ContentType string -} - -// Read unmarshalls the value from XML -func (e entityXMLAccess) Read(req *Request, v interface{}) error { - return xml.NewDecoder(req.Request.Body).Decode(v) -} - -// Write marshalls the value to JSON and set the Content-Type Header. -func (e entityXMLAccess) Write(resp *Response, status int, v interface{}) error { - return writeXML(resp, status, e.ContentType, v) -} - -// writeXML marshalls the value to JSON and set the Content-Type Header. 
-func writeXML(resp *Response, status int, contentType string, v interface{}) error { - if v == nil { - resp.WriteHeader(status) - // do not write a nil representation - return nil - } - if resp.prettyPrint { - // pretty output must be created and written explicitly - output, err := xml.MarshalIndent(v, " ", " ") - if err != nil { - return err - } - resp.Header().Set(HEADER_ContentType, contentType) - resp.WriteHeader(status) - _, err = resp.Write([]byte(xml.Header)) - if err != nil { - return err - } - _, err = resp.Write(output) - return err - } - // not-so-pretty - resp.Header().Set(HEADER_ContentType, contentType) - resp.WriteHeader(status) - return xml.NewEncoder(resp).Encode(v) -} - -// entityJSONAccess is a EntityReaderWriter for JSON encoding -type entityJSONAccess struct { - // This is used for setting the Content-Type header when writing - ContentType string -} - -// Read unmarshalls the value from JSON -func (e entityJSONAccess) Read(req *Request, v interface{}) error { - decoder := NewDecoder(req.Request.Body) - decoder.UseNumber() - return decoder.Decode(v) -} - -// Write marshalls the value to JSON and set the Content-Type Header. -func (e entityJSONAccess) Write(resp *Response, status int, v interface{}) error { - return writeJSON(resp, status, e.ContentType, v) -} - -// write marshalls the value to JSON and set the Content-Type Header. -func writeJSON(resp *Response, status int, contentType string, v interface{}) error { - if v == nil { - resp.WriteHeader(status) - // do not write a nil representation - return nil - } - if resp.prettyPrint { - // pretty output must be created and written explicitly - output, err := MarshalIndent(v, "", " ") - if err != nil { - return err - } - resp.Header().Set(HEADER_ContentType, contentType) - resp.WriteHeader(status) - _, err = resp.Write(output) - return err - } - // not-so-pretty - resp.Header().Set(HEADER_ContentType, contentType) - resp.WriteHeader(status) - return NewEncoder(resp).Encode(v) -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/extensions.go b/src/vendor/github.com/emicklei/go-restful/v3/extensions.go deleted file mode 100644 index 5023fa049..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/extensions.go +++ /dev/null @@ -1,21 +0,0 @@ -package restful - -// Copyright 2021 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -// ExtensionProperties provides storage of vendor extensions for entities -type ExtensionProperties struct { - // Extensions vendor extensions used to describe extra functionality - // (https://swagger.io/docs/specification/2-0/swagger-extensions/) - Extensions map[string]interface{} -} - -// AddExtension adds or updates a key=value pair to the extension map. -func (ep *ExtensionProperties) AddExtension(key string, value interface{}) { - if ep.Extensions == nil { - ep.Extensions = map[string]interface{}{key: value} - } else { - ep.Extensions[key] = value - } -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/filter.go b/src/vendor/github.com/emicklei/go-restful/v3/filter.go deleted file mode 100644 index fd88c536c..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/filter.go +++ /dev/null @@ -1,37 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. 
- -// FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction. -type FilterChain struct { - Filters []FilterFunction // ordered list of FilterFunction - Index int // index into filters that is currently in progress - Target RouteFunction // function to call after passing all filters - ParameterDocs []*Parameter // the parameter docs for the route - Operation string // the name of the operation -} - -// ProcessFilter passes the request,response pair through the next of Filters. -// Each filter can decide to proceed to the next Filter or handle the Response itself. -func (f *FilterChain) ProcessFilter(request *Request, response *Response) { - if f.Index < len(f.Filters) { - f.Index++ - f.Filters[f.Index-1](request, response, f) - } else { - f.Target(request, response) - } -} - -// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction -type FilterFunction func(*Request, *Response, *FilterChain) - -// NoBrowserCacheFilter is a filter function to set HTTP headers that disable browser caching -// See examples/restful-no-cache-filter.go for usage -func NoBrowserCacheFilter(req *Request, resp *Response, chain *FilterChain) { - resp.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") // HTTP 1.1. - resp.Header().Set("Pragma", "no-cache") // HTTP 1.0. - resp.Header().Set("Expires", "0") // Proxies. - chain.ProcessFilter(req, resp) -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/json.go b/src/vendor/github.com/emicklei/go-restful/v3/json.go deleted file mode 100644 index 871165166..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/json.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !jsoniter - -package restful - -import "encoding/json" - -var ( - MarshalIndent = json.MarshalIndent - NewDecoder = json.NewDecoder - NewEncoder = json.NewEncoder -) diff --git a/src/vendor/github.com/emicklei/go-restful/v3/jsoniter.go b/src/vendor/github.com/emicklei/go-restful/v3/jsoniter.go deleted file mode 100644 index 11b8f8ae7..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/jsoniter.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build jsoniter - -package restful - -import "github.com/json-iterator/go" - -var ( - json = jsoniter.ConfigCompatibleWithStandardLibrary - MarshalIndent = json.MarshalIndent - NewDecoder = json.NewDecoder - NewEncoder = json.NewEncoder -) diff --git a/src/vendor/github.com/emicklei/go-restful/v3/jsr311.go b/src/vendor/github.com/emicklei/go-restful/v3/jsr311.go deleted file mode 100644 index 07a0c91e9..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/jsr311.go +++ /dev/null @@ -1,326 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "errors" - "fmt" - "net/http" - "sort" - "strings" -) - -// RouterJSR311 implements the flow for matching Requests to Routes (and consequently Resource Functions) -// as specified by the JSR311 http://jsr311.java.net/nonav/releases/1.1/spec/spec.html. -// RouterJSR311 implements the Router interface. -// Concept of locators is not implemented. -type RouterJSR311 struct{} - -// SelectRoute is part of the Router interface and returns the best match -// for the WebService and its Route for the given Request. 
-func (r RouterJSR311) SelectRoute( - webServices []*WebService, - httpRequest *http.Request) (selectedService *WebService, selectedRoute *Route, err error) { - - // Identify the root resource class (WebService) - dispatcher, finalMatch, err := r.detectDispatcher(httpRequest.URL.Path, webServices) - if err != nil { - return nil, nil, NewError(http.StatusNotFound, "") - } - // Obtain the set of candidate methods (Routes) - routes := r.selectRoutes(dispatcher, finalMatch) - if len(routes) == 0 { - return dispatcher, nil, NewError(http.StatusNotFound, "404: Page Not Found") - } - - // Identify the method (Route) that will handle the request - route, ok := r.detectRoute(routes, httpRequest) - return dispatcher, route, ok -} - -// ExtractParameters is used to obtain the path parameters from the route using the same matching -// engine as the JSR 311 router. -func (r RouterJSR311) ExtractParameters(route *Route, webService *WebService, urlPath string) map[string]string { - webServiceExpr := webService.pathExpr - webServiceMatches := webServiceExpr.Matcher.FindStringSubmatch(urlPath) - pathParameters := r.extractParams(webServiceExpr, webServiceMatches) - routeExpr := route.pathExpr - routeMatches := routeExpr.Matcher.FindStringSubmatch(webServiceMatches[len(webServiceMatches)-1]) - routeParams := r.extractParams(routeExpr, routeMatches) - for key, value := range routeParams { - pathParameters[key] = value - } - return pathParameters -} - -func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) map[string]string { - params := map[string]string{} - for i := 1; i < len(matches); i++ { - if len(pathExpr.VarNames) >= i { - params[pathExpr.VarNames[i-1]] = matches[i] - } - } - return params -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 -func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) { - candidates := make([]*Route, 0, 8) - for i, each := range routes { - ok := true - for _, fn := range each.If { - if !fn(httpRequest) { - ok = false - break - } - } - if ok { - candidates = append(candidates, &routes[i]) - } - } - if len(candidates) == 0 { - if trace { - traceLogger.Printf("no Route found (from %d) that passes conditional checks", len(routes)) - } - return nil, NewError(http.StatusNotFound, "404: Not Found") - } - - // http method - previous := candidates - candidates = candidates[:0] - for _, each := range previous { - if httpRequest.Method == each.Method { - candidates = append(candidates, each) - } - } - if len(candidates) == 0 { - if trace { - traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(previous), httpRequest.Method) - } - allowed := []string{} - allowedLoop: - for _, candidate := range previous { - for _, method := range allowed { - if method == candidate.Method { - continue allowedLoop - } - } - allowed = append(allowed, candidate.Method) - } - header := http.Header{"Allow": []string{strings.Join(allowed, ", ")}} - return nil, NewErrorWithHeader(http.StatusMethodNotAllowed, "405: Method Not Allowed", header) - } - - // content-type - contentType := httpRequest.Header.Get(HEADER_ContentType) - previous = candidates - candidates = candidates[:0] - for _, each := range previous { - if each.matchesContentType(contentType) { - candidates = append(candidates, each) - } - } - if len(candidates) == 0 { - if trace { - traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType) - } - if httpRequest.ContentLength > 
0 { - return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") - } - } - - // accept - previous = candidates - candidates = candidates[:0] - accept := httpRequest.Header.Get(HEADER_Accept) - if len(accept) == 0 { - accept = "*/*" - } - for _, each := range previous { - if each.matchesAccept(accept) { - candidates = append(candidates, each) - } - } - if len(candidates) == 0 { - if trace { - traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(previous), accept) - } - available := []string{} - for _, candidate := range previous { - available = append(available, candidate.Produces...) - } - // if POST,PUT,PATCH without body - method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length") - if (method == http.MethodPost || - method == http.MethodPut || - method == http.MethodPatch) && length == "" { - return nil, NewError( - http.StatusUnsupportedMediaType, - fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")), - ) - } - return nil, NewError( - http.StatusNotAcceptable, - fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")), - ) - } - // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil - return candidates[0], nil -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 -// n/m > n/* > */* -func (r RouterJSR311) bestMatchByMedia(routes []Route, contentType string, accept string) *Route { - // TODO - return &routes[0] -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 2) -func (r RouterJSR311) selectRoutes(dispatcher *WebService, pathRemainder string) []Route { - filtered := &sortableRouteCandidates{} - for _, each := range dispatcher.Routes() { - pathExpr := each.pathExpr - matches := pathExpr.Matcher.FindStringSubmatch(pathRemainder) - if matches != nil { - lastMatch := matches[len(matches)-1] - if len(lastMatch) == 0 || lastMatch == "/" { // do not include if value is neither empty nor ‘/’. 
- filtered.candidates = append(filtered.candidates, - routeCandidate{each, len(matches) - 1, pathExpr.LiteralCount, pathExpr.VarCount}) - } - } - } - if len(filtered.candidates) == 0 { - if trace { - traceLogger.Printf("WebService on path %s has no routes that match URL path remainder:%s\n", dispatcher.rootPath, pathRemainder) - } - return []Route{} - } - sort.Sort(sort.Reverse(filtered)) - - // select other routes from candidates whoes expression matches rmatch - matchingRoutes := []Route{filtered.candidates[0].route} - for c := 1; c < len(filtered.candidates); c++ { - each := filtered.candidates[c] - if each.route.pathExpr.Matcher.MatchString(pathRemainder) { - matchingRoutes = append(matchingRoutes, each.route) - } - } - return matchingRoutes -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 1) -func (r RouterJSR311) detectDispatcher(requestPath string, dispatchers []*WebService) (*WebService, string, error) { - filtered := &sortableDispatcherCandidates{} - for _, each := range dispatchers { - matches := each.pathExpr.Matcher.FindStringSubmatch(requestPath) - if matches != nil { - filtered.candidates = append(filtered.candidates, - dispatcherCandidate{each, matches[len(matches)-1], len(matches), each.pathExpr.LiteralCount, each.pathExpr.VarCount}) - } - } - if len(filtered.candidates) == 0 { - if trace { - traceLogger.Printf("no WebService was found to match URL path:%s\n", requestPath) - } - return nil, "", errors.New("not found") - } - sort.Sort(sort.Reverse(filtered)) - return filtered.candidates[0].dispatcher, filtered.candidates[0].finalMatch, nil -} - -// Types and functions to support the sorting of Routes - -type routeCandidate struct { - route Route - matchesCount int // the number of capturing groups - literalCount int // the number of literal characters (means those not resulting from template variable substitution) - nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. 
not ‘([^ /]+?)’) -} - -func (r routeCandidate) expressionToMatch() string { - return r.route.pathExpr.Source -} - -func (r routeCandidate) String() string { - return fmt.Sprintf("(m=%d,l=%d,n=%d)", r.matchesCount, r.literalCount, r.nonDefaultCount) -} - -type sortableRouteCandidates struct { - candidates []routeCandidate -} - -func (rcs *sortableRouteCandidates) Len() int { - return len(rcs.candidates) -} -func (rcs *sortableRouteCandidates) Swap(i, j int) { - rcs.candidates[i], rcs.candidates[j] = rcs.candidates[j], rcs.candidates[i] -} -func (rcs *sortableRouteCandidates) Less(i, j int) bool { - ci := rcs.candidates[i] - cj := rcs.candidates[j] - // primary key - if ci.literalCount < cj.literalCount { - return true - } - if ci.literalCount > cj.literalCount { - return false - } - // secundary key - if ci.matchesCount < cj.matchesCount { - return true - } - if ci.matchesCount > cj.matchesCount { - return false - } - // tertiary key - if ci.nonDefaultCount < cj.nonDefaultCount { - return true - } - if ci.nonDefaultCount > cj.nonDefaultCount { - return false - } - // quaternary key ("source" is interpreted as Path) - return ci.route.Path < cj.route.Path -} - -// Types and functions to support the sorting of Dispatchers - -type dispatcherCandidate struct { - dispatcher *WebService - finalMatch string - matchesCount int // the number of capturing groups - literalCount int // the number of literal characters (means those not resulting from template variable substitution) - nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ‘([^ /]+?)’) -} -type sortableDispatcherCandidates struct { - candidates []dispatcherCandidate -} - -func (dc *sortableDispatcherCandidates) Len() int { - return len(dc.candidates) -} -func (dc *sortableDispatcherCandidates) Swap(i, j int) { - dc.candidates[i], dc.candidates[j] = dc.candidates[j], dc.candidates[i] -} -func (dc *sortableDispatcherCandidates) Less(i, j int) bool { - ci := dc.candidates[i] - cj := dc.candidates[j] - // primary key - if ci.matchesCount < cj.matchesCount { - return true - } - if ci.matchesCount > cj.matchesCount { - return false - } - // secundary key - if ci.literalCount < cj.literalCount { - return true - } - if ci.literalCount > cj.literalCount { - return false - } - // tertiary key - return ci.nonDefaultCount < cj.nonDefaultCount -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/log/log.go b/src/vendor/github.com/emicklei/go-restful/v3/log/log.go deleted file mode 100644 index 6cd44c7a5..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/log/log.go +++ /dev/null @@ -1,34 +0,0 @@ -package log - -import ( - stdlog "log" - "os" -) - -// StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger -type StdLogger interface { - Print(v ...interface{}) - Printf(format string, v ...interface{}) -} - -var Logger StdLogger - -func init() { - // default Logger - SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile)) -} - -// SetLogger sets the logger for this package -func SetLogger(customLogger StdLogger) { - Logger = customLogger -} - -// Print delegates to the Logger -func Print(v ...interface{}) { - Logger.Print(v...) -} - -// Printf delegates to the Logger -func Printf(format string, v ...interface{}) { - Logger.Printf(format, v...) 
-} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/logger.go b/src/vendor/github.com/emicklei/go-restful/v3/logger.go deleted file mode 100644 index 29202726f..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/logger.go +++ /dev/null @@ -1,32 +0,0 @@ -package restful - -// Copyright 2014 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. -import ( - "github.com/emicklei/go-restful/v3/log" -) - -var trace bool = false -var traceLogger log.StdLogger - -func init() { - traceLogger = log.Logger // use the package logger by default -} - -// TraceLogger enables detailed logging of Http request matching and filter invocation. Default no logger is set. -// You may call EnableTracing() directly to enable trace logging to the package-wide logger. -func TraceLogger(logger log.StdLogger) { - traceLogger = logger - EnableTracing(logger != nil) -} - -// SetLogger exposes the setter for the global logger on the top-level package -func SetLogger(customLogger log.StdLogger) { - log.SetLogger(customLogger) -} - -// EnableTracing can be used to Trace logging on and off. -func EnableTracing(enabled bool) { - trace = enabled -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/mime.go b/src/vendor/github.com/emicklei/go-restful/v3/mime.go deleted file mode 100644 index 33014471b..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/mime.go +++ /dev/null @@ -1,50 +0,0 @@ -package restful - -import ( - "strconv" - "strings" -) - -type mime struct { - media string - quality float64 -} - -// insertMime adds a mime to a list and keeps it sorted by quality. -func insertMime(l []mime, e mime) []mime { - for i, each := range l { - // if current mime has lower quality then insert before - if e.quality > each.quality { - left := append([]mime{}, l[0:i]...) - return append(append(left, e), l[i:]...) - } - } - return append(l, e) -} - -const qFactorWeightingKey = "q" - -// sortedMimes returns a list of mime sorted (desc) by its specified quality. -// e.g. text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3 -func sortedMimes(accept string) (sorted []mime) { - for _, each := range strings.Split(accept, ",") { - typeAndQuality := strings.Split(strings.Trim(each, " "), ";") - if len(typeAndQuality) == 1 { - sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0}) - } else { - // take factor - qAndWeight := strings.Split(typeAndQuality[1], "=") - if len(qAndWeight) == 2 && strings.Trim(qAndWeight[0], " ") == qFactorWeightingKey { - f, err := strconv.ParseFloat(qAndWeight[1], 64) - if err != nil { - traceLogger.Printf("unable to parse quality in %s, %v", each, err) - } else { - sorted = insertMime(sorted, mime{typeAndQuality[0], f}) - } - } else { - sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0}) - } - } - } - return -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/options_filter.go b/src/vendor/github.com/emicklei/go-restful/v3/options_filter.go deleted file mode 100644 index 5c1b34251..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/options_filter.go +++ /dev/null @@ -1,34 +0,0 @@ -package restful - -import "strings" - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. 
- -// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method -// and provides the response with a set of allowed methods for the request URL Path. -// As for any filter, you can also install it for a particular WebService within a Container. -// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS). -func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterChain) { - if "OPTIONS" != req.Request.Method { - chain.ProcessFilter(req, resp) - return - } - - archs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders) - methods := strings.Join(c.computeAllowedMethods(req), ",") - origin := req.Request.Header.Get(HEADER_Origin) - - resp.AddHeader(HEADER_Allow, methods) - resp.AddHeader(HEADER_AccessControlAllowOrigin, origin) - resp.AddHeader(HEADER_AccessControlAllowHeaders, archs) - resp.AddHeader(HEADER_AccessControlAllowMethods, methods) -} - -// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method -// and provides the response with a set of allowed methods for the request URL Path. -// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS). -func OPTIONSFilter() FilterFunction { - return DefaultContainer.OPTIONSFilter -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/parameter.go b/src/vendor/github.com/emicklei/go-restful/v3/parameter.go deleted file mode 100644 index 0e658af5f..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/parameter.go +++ /dev/null @@ -1,234 +0,0 @@ -package restful - -import "sort" - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -const ( - // PathParameterKind = indicator of Request parameter type "path" - PathParameterKind = iota - - // QueryParameterKind = indicator of Request parameter type "query" - QueryParameterKind - - // BodyParameterKind = indicator of Request parameter type "body" - BodyParameterKind - - // HeaderParameterKind = indicator of Request parameter type "header" - HeaderParameterKind - - // FormParameterKind = indicator of Request parameter type "form" - FormParameterKind - - // CollectionFormatCSV comma separated values `foo,bar` - CollectionFormatCSV = CollectionFormat("csv") - - // CollectionFormatSSV space separated values `foo bar` - CollectionFormatSSV = CollectionFormat("ssv") - - // CollectionFormatTSV tab separated values `foo\tbar` - CollectionFormatTSV = CollectionFormat("tsv") - - // CollectionFormatPipes pipe separated values `foo|bar` - CollectionFormatPipes = CollectionFormat("pipes") - - // CollectionFormatMulti corresponds to multiple parameter instances instead of multiple values for a single - // instance `foo=bar&foo=baz`. This is valid only for QueryParameters and FormParameters - CollectionFormatMulti = CollectionFormat("multi") -) - -type CollectionFormat string - -func (cf CollectionFormat) String() string { - return string(cf) -} - -// Parameter is for documententing the parameter used in a Http Request -// ParameterData kinds are Path,Query and Body -type Parameter struct { - data *ParameterData -} - -// ParameterData represents the state of a Parameter. -// It is made public to make it accessible to e.g. the Swagger package. -type ParameterData struct { - ExtensionProperties - Name, Description, DataType, DataFormat string - Kind int - Required bool - // AllowableValues is deprecated. 
Use PossibleValues instead - AllowableValues map[string]string - PossibleValues []string - AllowMultiple bool - AllowEmptyValue bool - DefaultValue string - CollectionFormat string - Pattern string - Minimum *float64 - Maximum *float64 - MinLength *int64 - MaxLength *int64 - MinItems *int64 - MaxItems *int64 - UniqueItems bool -} - -// Data returns the state of the Parameter -func (p *Parameter) Data() ParameterData { - return *p.data -} - -// Kind returns the parameter type indicator (see const for valid values) -func (p *Parameter) Kind() int { - return p.data.Kind -} - -func (p *Parameter) bePath() *Parameter { - p.data.Kind = PathParameterKind - return p -} -func (p *Parameter) beQuery() *Parameter { - p.data.Kind = QueryParameterKind - return p -} -func (p *Parameter) beBody() *Parameter { - p.data.Kind = BodyParameterKind - return p -} - -func (p *Parameter) beHeader() *Parameter { - p.data.Kind = HeaderParameterKind - return p -} - -func (p *Parameter) beForm() *Parameter { - p.data.Kind = FormParameterKind - return p -} - -// Required sets the required field and returns the receiver -func (p *Parameter) Required(required bool) *Parameter { - p.data.Required = required - return p -} - -// AllowMultiple sets the allowMultiple field and returns the receiver -func (p *Parameter) AllowMultiple(multiple bool) *Parameter { - p.data.AllowMultiple = multiple - return p -} - -// AddExtension adds or updates a key=value pair to the extension map -func (p *Parameter) AddExtension(key string, value interface{}) *Parameter { - p.data.AddExtension(key, value) - return p -} - -// AllowEmptyValue sets the AllowEmptyValue field and returns the receiver -func (p *Parameter) AllowEmptyValue(multiple bool) *Parameter { - p.data.AllowEmptyValue = multiple - return p -} - -// AllowableValues is deprecated. Use PossibleValues instead. Both will be set. 
-func (p *Parameter) AllowableValues(values map[string]string) *Parameter { - p.data.AllowableValues = values - - allowableSortedKeys := make([]string, 0, len(values)) - for k := range values { - allowableSortedKeys = append(allowableSortedKeys, k) - } - sort.Strings(allowableSortedKeys) - - p.data.PossibleValues = make([]string, 0, len(values)) - for _, k := range allowableSortedKeys { - p.data.PossibleValues = append(p.data.PossibleValues, values[k]) - } - return p -} - -// PossibleValues sets the possible values field and returns the receiver -func (p *Parameter) PossibleValues(values []string) *Parameter { - p.data.PossibleValues = values - return p -} - -// DataType sets the dataType field and returns the receiver -func (p *Parameter) DataType(typeName string) *Parameter { - p.data.DataType = typeName - return p -} - -// DataFormat sets the dataFormat field for Swagger UI -func (p *Parameter) DataFormat(formatName string) *Parameter { - p.data.DataFormat = formatName - return p -} - -// DefaultValue sets the default value field and returns the receiver -func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter { - p.data.DefaultValue = stringRepresentation - return p -} - -// Description sets the description value field and returns the receiver -func (p *Parameter) Description(doc string) *Parameter { - p.data.Description = doc - return p -} - -// CollectionFormat sets the collection format for an array type -func (p *Parameter) CollectionFormat(format CollectionFormat) *Parameter { - p.data.CollectionFormat = format.String() - return p -} - -// Pattern sets the pattern field and returns the receiver -func (p *Parameter) Pattern(pattern string) *Parameter { - p.data.Pattern = pattern - return p -} - -// Minimum sets the minimum field and returns the receiver -func (p *Parameter) Minimum(minimum float64) *Parameter { - p.data.Minimum = &minimum - return p -} - -// Maximum sets the maximum field and returns the receiver -func (p *Parameter) Maximum(maximum float64) *Parameter { - p.data.Maximum = &maximum - return p -} - -// MinLength sets the minLength field and returns the receiver -func (p *Parameter) MinLength(minLength int64) *Parameter { - p.data.MinLength = &minLength - return p -} - -// MaxLength sets the maxLength field and returns the receiver -func (p *Parameter) MaxLength(maxLength int64) *Parameter { - p.data.MaxLength = &maxLength - return p -} - -// MinItems sets the minItems field and returns the receiver -func (p *Parameter) MinItems(minItems int64) *Parameter { - p.data.MinItems = &minItems - return p -} - -// MaxItems sets the maxItems field and returns the receiver -func (p *Parameter) MaxItems(maxItems int64) *Parameter { - p.data.MaxItems = &maxItems - return p -} - -// UniqueItems sets the uniqueItems field and returns the receiver -func (p *Parameter) UniqueItems(uniqueItems bool) *Parameter { - p.data.UniqueItems = uniqueItems - return p -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/path_expression.go b/src/vendor/github.com/emicklei/go-restful/v3/path_expression.go deleted file mode 100644 index 95a9a2545..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/path_expression.go +++ /dev/null @@ -1,74 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. 
- -import ( - "bytes" - "fmt" - "regexp" - "strings" -) - -// PathExpression holds a compiled path expression (RegExp) needed to match against -// Http request paths and to extract path parameter values. -type pathExpression struct { - LiteralCount int // the number of literal characters (means those not resulting from template variable substitution) - VarNames []string // the names of parameters (enclosed by {}) in the path - VarCount int // the number of named parameters (enclosed by {}) in the path - Matcher *regexp.Regexp - Source string // Path as defined by the RouteBuilder - tokens []string -} - -// NewPathExpression creates a PathExpression from the input URL path. -// Returns an error if the path is invalid. -func newPathExpression(path string) (*pathExpression, error) { - expression, literalCount, varNames, varCount, tokens := templateToRegularExpression(path) - compiled, err := regexp.Compile(expression) - if err != nil { - return nil, err - } - return &pathExpression{literalCount, varNames, varCount, compiled, expression, tokens}, nil -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3 -func templateToRegularExpression(template string) (expression string, literalCount int, varNames []string, varCount int, tokens []string) { - var buffer bytes.Buffer - buffer.WriteString("^") - //tokens = strings.Split(template, "/") - tokens = tokenizePath(template) - for _, each := range tokens { - if each == "" { - continue - } - buffer.WriteString("/") - if strings.HasPrefix(each, "{") { - // check for regular expression in variable - colon := strings.Index(each, ":") - var varName string - if colon != -1 { - // extract expression - varName = strings.TrimSpace(each[1:colon]) - paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1]) - if paramExpr == "*" { // special case - buffer.WriteString("(.*)") - } else { - buffer.WriteString(fmt.Sprintf("(%s)", paramExpr)) // between colon and closing moustache - } - } else { - // plain var - varName = strings.TrimSpace(each[1 : len(each)-1]) - buffer.WriteString("([^/]+?)") - } - varNames = append(varNames, varName) - varCount += 1 - } else { - literalCount += len(each) - encoded := each // TODO URI encode - buffer.WriteString(regexp.QuoteMeta(encoded)) - } - } - return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varNames, varCount, tokens -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/path_processor.go b/src/vendor/github.com/emicklei/go-restful/v3/path_processor.go deleted file mode 100644 index 141573245..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/path_processor.go +++ /dev/null @@ -1,74 +0,0 @@ -package restful - -import ( - "bytes" - "strings" -) - -// Copyright 2018 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -// PathProcessor is extra behaviour that a Router can provide to extract path parameters from the path. -// If a Router does not implement this interface then the default behaviour will be used. 
-type PathProcessor interface { - // ExtractParameters gets the path parameters defined in the route and webService from the urlPath - ExtractParameters(route *Route, webService *WebService, urlPath string) map[string]string -} - -type defaultPathProcessor struct{} - -// Extract the parameters from the request url path -func (d defaultPathProcessor) ExtractParameters(r *Route, _ *WebService, urlPath string) map[string]string { - urlParts := tokenizePath(urlPath) - pathParameters := map[string]string{} - for i, key := range r.pathParts { - var value string - if i >= len(urlParts) { - value = "" - } else { - value = urlParts[i] - } - if r.hasCustomVerb && hasCustomVerb(key) { - key = removeCustomVerb(key) - value = removeCustomVerb(value) - } - - if strings.Index(key, "{") > -1 { // path-parameter - if colon := strings.Index(key, ":"); colon != -1 { - // extract by regex - regPart := key[colon+1 : len(key)-1] - keyPart := key[1:colon] - if regPart == "*" { - pathParameters[keyPart] = untokenizePath(i, urlParts) - break - } else { - pathParameters[keyPart] = value - } - } else { - // without enclosing {} - startIndex := strings.Index(key, "{") - endKeyIndex := strings.Index(key, "}") - - suffixLength := len(key) - endKeyIndex - 1 - endValueIndex := len(value) - suffixLength - - pathParameters[key[startIndex+1:endKeyIndex]] = value[startIndex:endValueIndex] - } - } - } - return pathParameters -} - -// Untokenize back into an URL path using the slash separator -func untokenizePath(offset int, parts []string) string { - var buffer bytes.Buffer - for p := offset; p < len(parts); p++ { - buffer.WriteString(parts[p]) - // do not end - if p < len(parts)-1 { - buffer.WriteString("/") - } - } - return buffer.String() -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/request.go b/src/vendor/github.com/emicklei/go-restful/v3/request.go deleted file mode 100644 index 5725a0759..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/request.go +++ /dev/null @@ -1,132 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "compress/zlib" - "net/http" -) - -var defaultRequestContentType string - -// Request is a wrapper for a http Request that provides convenience methods -type Request struct { - Request *http.Request - pathParameters map[string]string - attributes map[string]interface{} // for storing request-scoped values - selectedRoute *Route // is nil when no route was matched -} - -func NewRequest(httpRequest *http.Request) *Request { - return &Request{ - Request: httpRequest, - pathParameters: map[string]string{}, - attributes: map[string]interface{}{}, - } // empty parameters, attributes -} - -// If ContentType is missing or */* is given then fall back to this type, otherwise -// a "Unable to unmarshal content of type:" response is returned. 
-// Valid values are restful.MIME_JSON and restful.MIME_XML -// Example: -// restful.DefaultRequestContentType(restful.MIME_JSON) -func DefaultRequestContentType(mime string) { - defaultRequestContentType = mime -} - -// PathParameter accesses the Path parameter value by its name -func (r *Request) PathParameter(name string) string { - return r.pathParameters[name] -} - -// PathParameters accesses the Path parameter values -func (r *Request) PathParameters() map[string]string { - return r.pathParameters -} - -// QueryParameter returns the (first) Query parameter value by its name -func (r *Request) QueryParameter(name string) string { - return r.Request.FormValue(name) -} - -// QueryParameters returns the all the query parameters values by name -func (r *Request) QueryParameters(name string) []string { - return r.Request.URL.Query()[name] -} - -// BodyParameter parses the body of the request (once for typically a POST or a PUT) and returns the value of the given name or an error. -func (r *Request) BodyParameter(name string) (string, error) { - err := r.Request.ParseForm() - if err != nil { - return "", err - } - return r.Request.PostFormValue(name), nil -} - -// HeaderParameter returns the HTTP Header value of a Header name or empty if missing -func (r *Request) HeaderParameter(name string) string { - return r.Request.Header.Get(name) -} - -// ReadEntity checks the Accept header and reads the content into the entityPointer. -func (r *Request) ReadEntity(entityPointer interface{}) (err error) { - contentType := r.Request.Header.Get(HEADER_ContentType) - contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding) - - // check if the request body needs decompression - if ENCODING_GZIP == contentEncoding { - gzipReader := currentCompressorProvider.AcquireGzipReader() - defer currentCompressorProvider.ReleaseGzipReader(gzipReader) - gzipReader.Reset(r.Request.Body) - r.Request.Body = gzipReader - } else if ENCODING_DEFLATE == contentEncoding { - zlibReader, err := zlib.NewReader(r.Request.Body) - if err != nil { - return err - } - r.Request.Body = zlibReader - } - - // lookup the EntityReader, use defaultRequestContentType if needed and provided - entityReader, ok := entityAccessRegistry.accessorAt(contentType) - if !ok { - if len(defaultRequestContentType) != 0 { - entityReader, ok = entityAccessRegistry.accessorAt(defaultRequestContentType) - } - if !ok { - return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType) - } - } - return entityReader.Read(r, entityPointer) -} - -// SetAttribute adds or replaces the attribute with the given value. -func (r *Request) SetAttribute(name string, value interface{}) { - r.attributes[name] = value -} - -// Attribute returns the value associated to the given name. Returns nil if absent. -func (r Request) Attribute(name string) interface{} { - return r.attributes[name] -} - -// SelectedRoutePath root path + route path that matched the request, e.g. /meetings/{id}/attendees -// If no route was matched then return an empty string. -func (r Request) SelectedRoutePath() string { - if r.selectedRoute == nil { - return "" - } - // skip creating an accessor - return r.selectedRoute.Path -} - -// SelectedRoute returns a reader to access the selected Route by the container -// Returns nil if no route was matched. 
-func (r Request) SelectedRoute() RouteReader { - if r.selectedRoute == nil { - return nil - } - return routeAccessor{route: r.selectedRoute} -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/response.go b/src/vendor/github.com/emicklei/go-restful/v3/response.go deleted file mode 100644 index 8f0b56aa2..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/response.go +++ /dev/null @@ -1,256 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bufio" - "errors" - "net" - "net/http" -) - -// DefaultResponseMimeType is DEPRECATED, use DefaultResponseContentType(mime) -var DefaultResponseMimeType string - -//PrettyPrintResponses controls the indentation feature of XML and JSON serialization -var PrettyPrintResponses = true - -// Response is a wrapper on the actual http ResponseWriter -// It provides several convenience methods to prepare and write response content. -type Response struct { - http.ResponseWriter - requestAccept string // mime-type what the Http Request says it wants to receive - routeProduces []string // mime-types what the Route says it can produce - statusCode int // HTTP status code that has been written explicitly (if zero then net/http has written 200) - contentLength int // number of bytes written for the response body - prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses. - err error // err property is kept when WriteError is called - hijacker http.Hijacker // if underlying ResponseWriter supports it -} - -// NewResponse creates a new response based on a http ResponseWriter. -func NewResponse(httpWriter http.ResponseWriter) *Response { - hijacker, _ := httpWriter.(http.Hijacker) - return &Response{ResponseWriter: httpWriter, routeProduces: []string{}, statusCode: http.StatusOK, prettyPrint: PrettyPrintResponses, hijacker: hijacker} -} - -// DefaultResponseContentType set a default. -// If Accept header matching fails, fall back to this type. -// Valid values are restful.MIME_JSON and restful.MIME_XML -// Example: -// restful.DefaultResponseContentType(restful.MIME_JSON) -func DefaultResponseContentType(mime string) { - DefaultResponseMimeType = mime -} - -// InternalServerError writes the StatusInternalServerError header. -// DEPRECATED, use WriteErrorString(http.StatusInternalServerError,reason) -func (r Response) InternalServerError() Response { - r.WriteHeader(http.StatusInternalServerError) - return r -} - -// Hijack implements the http.Hijacker interface. This expands -// the Response to fulfill http.Hijacker if the underlying -// http.ResponseWriter supports it. -func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) { - if r.hijacker == nil { - return nil, nil, errors.New("http.Hijacker not implemented by underlying http.ResponseWriter") - } - return r.hijacker.Hijack() -} - -// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output. -func (r *Response) PrettyPrint(bePretty bool) { - r.prettyPrint = bePretty -} - -// AddHeader is a shortcut for .Header().Add(header,value) -func (r Response) AddHeader(header string, value string) Response { - r.Header().Add(header, value) - return r -} - -// SetRequestAccepts tells the response what Mime-type(s) the HTTP request said it wants to accept. Exposed for testing. 
-func (r *Response) SetRequestAccepts(mime string) { - r.requestAccept = mime -} - -// EntityWriter returns the registered EntityWriter that the entity (requested resource) -// can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say. -// If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable. -func (r *Response) EntityWriter() (EntityReaderWriter, bool) { - sorted := sortedMimes(r.requestAccept) - for _, eachAccept := range sorted { - for _, eachProduce := range r.routeProduces { - if eachProduce == eachAccept.media { - if w, ok := entityAccessRegistry.accessorAt(eachAccept.media); ok { - return w, true - } - } - } - if eachAccept.media == "*/*" { - for _, each := range r.routeProduces { - if w, ok := entityAccessRegistry.accessorAt(each); ok { - return w, true - } - } - } - } - // if requestAccept is empty - writer, ok := entityAccessRegistry.accessorAt(r.requestAccept) - if !ok { - // if not registered then fallback to the defaults (if set) - if DefaultResponseMimeType == MIME_JSON { - return entityAccessRegistry.accessorAt(MIME_JSON) - } - if DefaultResponseMimeType == MIME_XML { - return entityAccessRegistry.accessorAt(MIME_XML) - } - // Fallback to whatever the route says it can produce. - // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - for _, each := range r.routeProduces { - if w, ok := entityAccessRegistry.accessorAt(each); ok { - return w, true - } - } - if trace { - traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept) - } - } - return writer, ok -} - -// WriteEntity calls WriteHeaderAndEntity with Http Status OK (200) -func (r *Response) WriteEntity(value interface{}) error { - return r.WriteHeaderAndEntity(http.StatusOK, value) -} - -// WriteHeaderAndEntity marshals the value using the representation denoted by the Accept Header and the registered EntityWriters. -// If no Accept header is specified (or */*) then respond with the Content-Type as specified by the first in the Route.Produces. -// If an Accept header is specified then respond with the Content-Type as specified by the first in the Route.Produces that is matched with the Accept header. -// If the value is nil then no response is send except for the Http status. You may want to call WriteHeader(http.StatusNotFound) instead. -// If there is no writer available that can represent the value in the requested MIME type then Http Status NotAcceptable is written. -// Current implementation ignores any q-parameters in the Accept Header. -// Returns an error if the value could not be written on the response. -func (r *Response) WriteHeaderAndEntity(status int, value interface{}) error { - writer, ok := r.EntityWriter() - if !ok { - r.WriteHeader(http.StatusNotAcceptable) - return nil - } - return writer.Write(r, status, value) -} - -// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value) -// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter. -func (r *Response) WriteAsXml(value interface{}) error { - return writeXML(r, http.StatusOK, MIME_XML, value) -} - -// WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value) -// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter. 
-func (r *Response) WriteHeaderAndXml(status int, value interface{}) error { - return writeXML(r, status, MIME_XML, value) -} - -// WriteAsJson is a convenience method for writing a value in json. -// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter. -func (r *Response) WriteAsJson(value interface{}) error { - return writeJSON(r, http.StatusOK, MIME_JSON, value) -} - -// WriteJson is a convenience method for writing a value in Json with a given Content-Type. -// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter. -func (r *Response) WriteJson(value interface{}, contentType string) error { - return writeJSON(r, http.StatusOK, contentType, value) -} - -// WriteHeaderAndJson is a convenience method for writing the status and a value in Json with a given Content-Type. -// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter. -func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType string) error { - return writeJSON(r, status, contentType, value) -} - -// WriteError writes the http status and the error string on the response. err can be nil. -// Return an error if writing was not successful. -func (r *Response) WriteError(httpStatus int, err error) (writeErr error) { - r.err = err - if err == nil { - writeErr = r.WriteErrorString(httpStatus, "") - } else { - writeErr = r.WriteErrorString(httpStatus, err.Error()) - } - return writeErr -} - -// WriteServiceError is a convenience method for a responding with a status and a ServiceError -func (r *Response) WriteServiceError(httpStatus int, err ServiceError) error { - r.err = err - return r.WriteHeaderAndEntity(httpStatus, err) -} - -// WriteErrorString is a convenience method for an error status with the actual error -func (r *Response) WriteErrorString(httpStatus int, errorReason string) error { - if r.err == nil { - // if not called from WriteError - r.err = errors.New(errorReason) - } - r.WriteHeader(httpStatus) - if _, err := r.Write([]byte(errorReason)); err != nil { - return err - } - return nil -} - -// Flush implements http.Flusher interface, which sends any buffered data to the client. -func (r *Response) Flush() { - if f, ok := r.ResponseWriter.(http.Flusher); ok { - f.Flush() - } else if trace { - traceLogger.Printf("ResponseWriter %v doesn't support Flush", r) - } -} - -// WriteHeader is overridden to remember the Status Code that has been written. -// Changes to the Header of the response have no effect after this. -func (r *Response) WriteHeader(httpStatus int) { - r.statusCode = httpStatus - r.ResponseWriter.WriteHeader(httpStatus) -} - -// StatusCode returns the code that has been written using WriteHeader. -func (r Response) StatusCode() int { - if 0 == r.statusCode { - // no status code has been written yet; assume OK - return http.StatusOK - } - return r.statusCode -} - -// Write writes the data to the connection as part of an HTTP reply. -// Write is part of http.ResponseWriter interface. -func (r *Response) Write(bytes []byte) (int, error) { - written, err := r.ResponseWriter.Write(bytes) - r.contentLength += written - return written, err -} - -// ContentLength returns the number of bytes written for the response content. -// Note that this value is only correct if all data is written through the Response using its Write* methods. -// Data written directly using the underlying http.ResponseWriter is not accounted for. 
-func (r Response) ContentLength() int { - return r.contentLength -} - -// CloseNotify is part of http.CloseNotifier interface -func (r Response) CloseNotify() <-chan bool { - return r.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -// Error returns the err created by WriteError -func (r Response) Error() error { - return r.err -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/route.go b/src/vendor/github.com/emicklei/go-restful/v3/route.go deleted file mode 100644 index 193f4a6b0..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/route.go +++ /dev/null @@ -1,178 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "net/http" - "strings" -) - -// RouteFunction declares the signature of a function that can be bound to a Route. -type RouteFunction func(*Request, *Response) - -// RouteSelectionConditionFunction declares the signature of a function that -// can be used to add extra conditional logic when selecting whether the route -// matches the HTTP request. -type RouteSelectionConditionFunction func(httpRequest *http.Request) bool - -// Route binds a HTTP Method,Path,Consumes combination to a RouteFunction. -type Route struct { - ExtensionProperties - Method string - Produces []string - Consumes []string - Path string // webservice root path + described path - Function RouteFunction - Filters []FilterFunction - If []RouteSelectionConditionFunction - - // cached values for dispatching - relativePath string - pathParts []string - pathExpr *pathExpression // cached compilation of relativePath as RegExp - - // documentation - Doc string - Notes string - Operation string - ParameterDocs []*Parameter - ResponseErrors map[int]ResponseError - DefaultResponse *ResponseError - ReadSample, WriteSample interface{} // structs that model an example request or response payload - - // Extra information used to store custom information about the route. - Metadata map[string]interface{} - - // marks a route as deprecated - Deprecated bool - - //Overrides the container.contentEncodingEnabled - contentEncodingEnabled *bool - - // indicate route path has custom verb - hasCustomVerb bool - - // if a request does not include a content-type header then - // depending on the method, it may return a 415 Unsupported Media - // Must have uppercase HTTP Method names such as GET,HEAD,OPTIONS,... - allowedMethodsWithoutContentType []string -} - -// Initialize for Route -func (r *Route) postBuild() { - r.pathParts = tokenizePath(r.Path) - r.hasCustomVerb = hasCustomVerb(r.Path) -} - -// Create Request and Response from their http versions -func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request, pathParams map[string]string) (*Request, *Response) { - wrappedRequest := NewRequest(httpRequest) - wrappedRequest.pathParameters = pathParams - wrappedRequest.selectedRoute = r - wrappedResponse := NewResponse(httpWriter) - wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept) - wrappedResponse.routeProduces = r.Produces - return wrappedRequest, wrappedResponse -} - -func stringTrimSpaceCutset(r rune) bool { - return r == ' ' -} - -// Return whether the mimeType matches to what this Route can produce. 
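The Request and Response wrappers deleted above are the read and write halves of a handler. As a minimal sketch of how they are used together (the Note type and the /notes route are invented for illustration and are not part of this change), a RouteFunction typically reads the entity with ReadEntity, reports failures with WriteError, and writes the result with WriteHeaderAndEntity:

```go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

// Note is a hypothetical payload type used only for this sketch.
type Note struct {
	ID   string `json:"id"`
	Text string `json:"text"`
}

// createNote reads the request body with ReadEntity (which also handles the
// gzip/deflate Content-Encoding cases shown above), reports decoding problems
// with WriteError, and answers with WriteHeaderAndEntity.
func createNote(req *restful.Request, resp *restful.Response) {
	var note Note
	if err := req.ReadEntity(&note); err != nil {
		resp.WriteError(http.StatusBadRequest, err)
		return
	}
	note.ID = req.PathParameter("id")
	resp.WriteHeaderAndEntity(http.StatusCreated, note)
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/notes").Consumes(restful.MIME_JSON).Produces(restful.MIME_JSON)
	ws.Route(ws.PUT("/{id}").To(createNote))
	restful.Add(ws)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```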
-func (r Route) matchesAccept(mimeTypesWithQuality string) bool { - remaining := mimeTypesWithQuality - for { - var mimeType string - if end := strings.Index(remaining, ","); end == -1 { - mimeType, remaining = remaining, "" - } else { - mimeType, remaining = remaining[:end], remaining[end+1:] - } - if quality := strings.Index(mimeType, ";"); quality != -1 { - mimeType = mimeType[:quality] - } - mimeType = strings.TrimFunc(mimeType, stringTrimSpaceCutset) - if mimeType == "*/*" { - return true - } - for _, producibleType := range r.Produces { - if producibleType == "*/*" || producibleType == mimeType { - return true - } - } - if len(remaining) == 0 { - return false - } - } -} - -// Return whether this Route can consume content with a type specified by mimeTypes (can be empty). -func (r Route) matchesContentType(mimeTypes string) bool { - - if len(r.Consumes) == 0 { - // did not specify what it can consume ; any media type (“*/*”) is assumed - return true - } - - if len(mimeTypes) == 0 { - // idempotent methods with (most-likely or guaranteed) empty content match missing Content-Type - m := r.Method - // if route specifies less or non-idempotent methods then use that - if len(r.allowedMethodsWithoutContentType) > 0 { - for _, each := range r.allowedMethodsWithoutContentType { - if m == each { - return true - } - } - } else { - if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" { - return true - } - } - // proceed with default - mimeTypes = MIME_OCTET - } - - remaining := mimeTypes - for { - var mimeType string - if end := strings.Index(remaining, ","); end == -1 { - mimeType, remaining = remaining, "" - } else { - mimeType, remaining = remaining[:end], remaining[end+1:] - } - if quality := strings.Index(mimeType, ";"); quality != -1 { - mimeType = mimeType[:quality] - } - mimeType = strings.TrimFunc(mimeType, stringTrimSpaceCutset) - for _, consumeableType := range r.Consumes { - if consumeableType == "*/*" || consumeableType == mimeType { - return true - } - } - if len(remaining) == 0 { - return false - } - } -} - -// Tokenize an URL path using the slash separator ; the result does not have empty tokens -func tokenizePath(path string) []string { - if "/" == path { - return nil - } - return strings.Split(strings.Trim(path, "/"), "/") -} - -// for debugging -func (r *Route) String() string { - return r.Method + " " + r.Path -} - -// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. Overrides the container.contentEncodingEnabled value. -func (r *Route) EnableContentEncoding(enabled bool) { - r.contentEncodingEnabled = &enabled -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/route_builder.go b/src/vendor/github.com/emicklei/go-restful/v3/route_builder.go deleted file mode 100644 index 23641b6dd..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/route_builder.go +++ /dev/null @@ -1,376 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "fmt" - "os" - "reflect" - "runtime" - "strings" - "sync/atomic" - - "github.com/emicklei/go-restful/v3/log" -) - -// RouteBuilder is a helper to construct Routes.
-type RouteBuilder struct { - rootPath string - currentPath string - produces []string - consumes []string - httpMethod string // required - function RouteFunction // required - filters []FilterFunction - conditions []RouteSelectionConditionFunction - allowedMethodsWithoutContentType []string // see Route - - typeNameHandleFunc TypeNameHandleFunction // required - - // documentation - doc string - notes string - operation string - readSample, writeSample interface{} - parameters []*Parameter - errorMap map[int]ResponseError - defaultResponse *ResponseError - metadata map[string]interface{} - extensions map[string]interface{} - deprecated bool - contentEncodingEnabled *bool -} - -// Do evaluates each argument with the RouteBuilder itself. -// This allows you to follow DRY principles without breaking the fluent programming style. -// Example: -// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500)) -// -// func Returns500(b *RouteBuilder) { -// b.Returns(500, "Internal Server Error", restful.ServiceError{}) -// } -func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder { - for _, each := range oneArgBlocks { - each(b) - } - return b -} - -// To bind the route to a function. -// If this route is matched with the incoming Http Request then call this function with the *Request,*Response pair. Required. -func (b *RouteBuilder) To(function RouteFunction) *RouteBuilder { - b.function = function - return b -} - -// Method specifies what HTTP method to match. Required. -func (b *RouteBuilder) Method(method string) *RouteBuilder { - b.httpMethod = method - return b -} - -// Produces specifies what MIME types can be produced ; the matched one will appear in the Content-Type Http header. -func (b *RouteBuilder) Produces(mimeTypes ...string) *RouteBuilder { - b.produces = mimeTypes - return b -} - -// Consumes specifies what MIME types can be consumes ; the Accept Http header must matched any of these -func (b *RouteBuilder) Consumes(mimeTypes ...string) *RouteBuilder { - b.consumes = mimeTypes - return b -} - -// Path specifies the relative (w.r.t WebService root path) URL path to match. Default is "/". -func (b *RouteBuilder) Path(subPath string) *RouteBuilder { - b.currentPath = subPath - return b -} - -// Doc tells what this route is all about. Optional. -func (b *RouteBuilder) Doc(documentation string) *RouteBuilder { - b.doc = documentation - return b -} - -// Notes is a verbose explanation of the operation behavior. Optional. -func (b *RouteBuilder) Notes(notes string) *RouteBuilder { - b.notes = notes - return b -} - -// Reads tells what resource type will be read from the request payload. Optional. -// A parameter of type "body" is added ,required is set to true and the dataType is set to the qualified name of the sample's type. -func (b *RouteBuilder) Reads(sample interface{}, optionalDescription ...string) *RouteBuilder { - fn := b.typeNameHandleFunc - if fn == nil { - fn = reflectTypeName - } - typeAsName := fn(sample) - description := "" - if len(optionalDescription) > 0 { - description = optionalDescription[0] - } - b.readSample = sample - bodyParameter := &Parameter{&ParameterData{Name: "body", Description: description}} - bodyParameter.beBody() - bodyParameter.Required(true) - bodyParameter.DataType(typeAsName) - b.Param(bodyParameter) - return b -} - -// ParameterNamed returns a Parameter already known to the RouteBuilder. Returns nil if not. -// Use this to modify or extend information for the Parameter (through its Data()). 
-func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) { - for _, each := range b.parameters { - if each.Data().Name == name { - return each - } - } - return p -} - -// Writes tells what resource type will be written as the response payload. Optional. -func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder { - b.writeSample = sample - return b -} - -// Param allows you to document the parameters of the Route. It adds a new Parameter (does not check for duplicates). -func (b *RouteBuilder) Param(parameter *Parameter) *RouteBuilder { - if b.parameters == nil { - b.parameters = []*Parameter{} - } - b.parameters = append(b.parameters, parameter) - return b -} - -// Operation allows you to document what the actual method/function call is of the Route. -// Unless called, the operation name is derived from the RouteFunction set using To(..). -func (b *RouteBuilder) Operation(name string) *RouteBuilder { - b.operation = name - return b -} - -// ReturnsError is deprecated, use Returns instead. -func (b *RouteBuilder) ReturnsError(code int, message string, model interface{}) *RouteBuilder { - log.Print("ReturnsError is deprecated, use Returns instead.") - return b.Returns(code, message, model) -} - -// Returns allows you to document what responses (errors or regular) can be expected. -// The model parameter is optional ; either pass a struct instance or use nil if not applicable. -func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder { - err := ResponseError{ - Code: code, - Message: message, - Model: model, - IsDefault: false, // this field is deprecated, use default response instead. - } - // lazy init because there is no NewRouteBuilder (yet) - if b.errorMap == nil { - b.errorMap = map[int]ResponseError{} - } - b.errorMap[code] = err - return b -} - -// ReturnsWithHeaders is similar to Returns, but can specify response headers -func (b *RouteBuilder) ReturnsWithHeaders(code int, message string, model interface{}, headers map[string]Header) *RouteBuilder { - b.Returns(code, message, model) - err := b.errorMap[code] - err.Headers = headers - b.errorMap[code] = err - return b -} - -// DefaultReturns is a special Returns call that sets the default of the response. -func (b *RouteBuilder) DefaultReturns(message string, model interface{}) *RouteBuilder { - b.defaultResponse = &ResponseError{ - Message: message, - Model: model, - } - return b -} - -// Metadata adds or updates a key=value pair to the metadata map. -func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder { - if b.metadata == nil { - b.metadata = map[string]interface{}{} - } - b.metadata[key] = value - return b -} - -// AddExtension adds or updates a key=value pair to the extensions map. -func (b *RouteBuilder) AddExtension(key string, value interface{}) *RouteBuilder { - if b.extensions == nil { - b.extensions = map[string]interface{}{} - } - b.extensions[key] = value - return b -} - -// Deprecate sets the value of deprecated to true. Deprecated routes have a special UI treatment to warn against use -func (b *RouteBuilder) Deprecate() *RouteBuilder { - b.deprecated = true - return b -} - -// AllowedMethodsWithoutContentType overrides the default list GET,HEAD,OPTIONS,DELETE,TRACE -// If a request does not include a content-type header then -// depending on the method, it may return a 415 Unsupported Media. -// Must have uppercase HTTP Method names such as GET,HEAD,OPTIONS,... 
-func (b *RouteBuilder) AllowedMethodsWithoutContentType(methods []string) *RouteBuilder { - b.allowedMethodsWithoutContentType = methods - return b -} - -// ResponseError represents a response; not necessarily an error. -type ResponseError struct { - ExtensionProperties - Code int - Message string - Model interface{} - Headers map[string]Header - IsDefault bool -} - -// Header describes a header for a response of the API -// -// For more information: http://goo.gl/8us55a#headerObject -type Header struct { - *Items - Description string -} - -// Items describe swagger simple schemas for headers -type Items struct { - Type string - Format string - Items *Items - CollectionFormat string - Default interface{} -} - -func (b *RouteBuilder) servicePath(path string) *RouteBuilder { - b.rootPath = path - return b -} - -// Filter appends a FilterFunction to the end of filters for this Route to build. -func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder { - b.filters = append(b.filters, filter) - return b -} - -// If sets a condition function that controls matching the Route based on custom logic. -// The condition function is provided the HTTP request and should return true if the route -// should be considered. -// -// Efficiency note: the condition function is called before checking the method, produces, and -// consumes criteria, so that the correct HTTP status code can be returned. -// -// Lifecycle note: no filter functions have been called prior to calling the condition function, -// so the condition function should not depend on any context that might be set up by container -// or route filters. -func (b *RouteBuilder) If(condition RouteSelectionConditionFunction) *RouteBuilder { - b.conditions = append(b.conditions, condition) - return b -} - -// ContentEncodingEnabled allows you to override the Containers value for auto-compressing this route response. -func (b *RouteBuilder) ContentEncodingEnabled(enabled bool) *RouteBuilder { - b.contentEncodingEnabled = &enabled - return b -} - -// If no specific Route path then set to rootPath -// If no specific Produces then set to rootProduces -// If no specific Consumes then set to rootConsumes -func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) { - if len(b.produces) == 0 { - b.produces = rootProduces - } - if len(b.consumes) == 0 { - b.consumes = rootConsumes - } -} - -// typeNameHandler sets the function that will convert types to strings in the parameter -// and model definitions. 
-func (b *RouteBuilder) typeNameHandler(handler TypeNameHandleFunction) *RouteBuilder { - b.typeNameHandleFunc = handler - return b -} - -// Build creates a new Route using the specification details collected by the RouteBuilder -func (b *RouteBuilder) Build() Route { - pathExpr, err := newPathExpression(b.currentPath) - if err != nil { - log.Printf("Invalid path:%s because:%v", b.currentPath, err) - os.Exit(1) - } - if b.function == nil { - log.Printf("No function specified for route:" + b.currentPath) - os.Exit(1) - } - operationName := b.operation - if len(operationName) == 0 && b.function != nil { - // extract from definition - operationName = nameOfFunction(b.function) - } - route := Route{ - Method: b.httpMethod, - Path: concatPath(b.rootPath, b.currentPath), - Produces: b.produces, - Consumes: b.consumes, - Function: b.function, - Filters: b.filters, - If: b.conditions, - relativePath: b.currentPath, - pathExpr: pathExpr, - Doc: b.doc, - Notes: b.notes, - Operation: operationName, - ParameterDocs: b.parameters, - ResponseErrors: b.errorMap, - DefaultResponse: b.defaultResponse, - ReadSample: b.readSample, - WriteSample: b.writeSample, - Metadata: b.metadata, - Deprecated: b.deprecated, - contentEncodingEnabled: b.contentEncodingEnabled, - allowedMethodsWithoutContentType: b.allowedMethodsWithoutContentType, - } - route.Extensions = b.extensions - route.postBuild() - return route -} - -func concatPath(path1, path2 string) string { - return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/") -} - -var anonymousFuncCount int32 - -// nameOfFunction returns the short name of the function f for documentation. -// It uses a runtime feature for debugging ; its value may change for later Go versions. -func nameOfFunction(f interface{}) string { - fun := runtime.FuncForPC(reflect.ValueOf(f).Pointer()) - tokenized := strings.Split(fun.Name(), ".") - last := tokenized[len(tokenized)-1] - last = strings.TrimSuffix(last, ")·fm") // < Go 1.5 - last = strings.TrimSuffix(last, ")-fm") // Go 1.5 - last = strings.TrimSuffix(last, "·fm") // < Go 1.5 - last = strings.TrimSuffix(last, "-fm") // Go 1.5 - if last == "func1" { // this could mean conflicts in API docs - val := atomic.AddInt32(&anonymousFuncCount, 1) - last = "func" + fmt.Sprintf("%d", val) - atomic.StoreInt32(&anonymousFuncCount, val) - } - return last -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/route_reader.go b/src/vendor/github.com/emicklei/go-restful/v3/route_reader.go deleted file mode 100644 index c9f4ee75f..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/route_reader.go +++ /dev/null @@ -1,66 +0,0 @@ -package restful - -// Copyright 2021 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. 
- -type RouteReader interface { - Method() string - Consumes() []string - Path() string - Doc() string - Notes() string - Operation() string - ParameterDocs() []*Parameter - // Returns a copy - Metadata() map[string]interface{} - Deprecated() bool -} - -type routeAccessor struct { - route *Route -} - -func (r routeAccessor) Method() string { - return r.route.Method -} -func (r routeAccessor) Consumes() []string { - return r.route.Consumes[:] -} -func (r routeAccessor) Path() string { - return r.route.Path -} -func (r routeAccessor) Doc() string { - return r.route.Doc -} -func (r routeAccessor) Notes() string { - return r.route.Notes -} -func (r routeAccessor) Operation() string { - return r.route.Operation -} -func (r routeAccessor) ParameterDocs() []*Parameter { - return r.route.ParameterDocs[:] -} - -// Returns a copy -func (r routeAccessor) Metadata() map[string]interface{} { - return copyMap(r.route.Metadata) -} -func (r routeAccessor) Deprecated() bool { - return r.route.Deprecated -} - -// https://stackoverflow.com/questions/23057785/how-to-copy-a-map -func copyMap(m map[string]interface{}) map[string]interface{} { - cp := make(map[string]interface{}) - for k, v := range m { - vm, ok := v.(map[string]interface{}) - if ok { - cp[k] = copyMap(vm) - } else { - cp[k] = v - } - } - return cp -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/router.go b/src/vendor/github.com/emicklei/go-restful/v3/router.go deleted file mode 100644 index 19078af1c..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/router.go +++ /dev/null @@ -1,20 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import "net/http" - -// A RouteSelector finds the best matching Route given the input HTTP Request -// RouteSelectors can optionally also implement the PathProcessor interface to also calculate the -// path parameters after the route has been selected. -type RouteSelector interface { - - // SelectRoute finds a Route given the input HTTP Request and a list of WebServices. - // It returns a selected Route and its containing WebService or an error indicating - // a problem. - SelectRoute( - webServices []*WebService, - httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/service_error.go b/src/vendor/github.com/emicklei/go-restful/v3/service_error.go deleted file mode 100644 index a41575469..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/service_error.go +++ /dev/null @@ -1,32 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "fmt" - "net/http" -) - -// ServiceError is a transport object to pass information about a non-Http error occurred in a WebService while processing a request. 
-type ServiceError struct { - Code int - Message string - Header http.Header -} - -// NewError returns a ServiceError using the code and reason -func NewError(code int, message string) ServiceError { - return ServiceError{Code: code, Message: message} -} - -// NewErrorWithHeader returns a ServiceError using the code, reason and header -func NewErrorWithHeader(code int, message string, header http.Header) ServiceError { - return ServiceError{Code: code, Message: message, Header: header} -} - -// Error returns a text representation of the service error -func (s ServiceError) Error() string { - return fmt.Sprintf("[ServiceError:%v] %v", s.Code, s.Message) -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/web_service.go b/src/vendor/github.com/emicklei/go-restful/v3/web_service.go deleted file mode 100644 index 0bf5d1e5f..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/web_service.go +++ /dev/null @@ -1,293 +0,0 @@ -package restful - -import ( - "errors" - "os" - "reflect" - "sync" - - "github.com/emicklei/go-restful/v3/log" -) - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -// WebService holds a collection of Route values that bind a Http Method + URL Path to a function. -type WebService struct { - rootPath string - pathExpr *pathExpression // cached compilation of rootPath as RegExp - routes []Route - produces []string - consumes []string - pathParameters []*Parameter - filters []FilterFunction - documentation string - apiVersion string - - typeNameHandleFunc TypeNameHandleFunction - - dynamicRoutes bool - - // protects 'routes' if dynamic routes are enabled - routesLock sync.RWMutex -} - -func (w *WebService) SetDynamicRoutes(enable bool) { - w.dynamicRoutes = enable -} - -// TypeNameHandleFunction declares functions that can handle translating the name of a sample object -// into the restful documentation for the service. -type TypeNameHandleFunction func(sample interface{}) string - -// TypeNameHandler sets the function that will convert types to strings in the parameter -// and model definitions. If not set, the web service will invoke -// reflect.TypeOf(object).String(). -func (w *WebService) TypeNameHandler(handler TypeNameHandleFunction) *WebService { - w.typeNameHandleFunc = handler - return w -} - -// reflectTypeName is the default TypeNameHandleFunction and for a given object -// returns the name that Go identifies it with (e.g. "string" or "v1.Object") via -// the reflection API. -func reflectTypeName(sample interface{}) string { - return reflect.TypeOf(sample).String() -} - -// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it. -func (w *WebService) compilePathExpression() { - compiled, err := newPathExpression(w.rootPath) - if err != nil { - log.Printf("invalid path:%s because:%v", w.rootPath, err) - os.Exit(1) - } - w.pathExpr = compiled -} - -// ApiVersion sets the API version for documentation purposes. -func (w *WebService) ApiVersion(apiVersion string) *WebService { - w.apiVersion = apiVersion - return w -} - -// Version returns the API version for documentation purposes. -func (w *WebService) Version() string { return w.apiVersion } - -// Path specifies the root URL template path of the WebService. -// All Routes will be relative to this path. 
-func (w *WebService) Path(root string) *WebService { - w.rootPath = root - if len(w.rootPath) == 0 { - w.rootPath = "/" - } - w.compilePathExpression() - return w -} - -// Param adds a PathParameter to document parameters used in the root path. -func (w *WebService) Param(parameter *Parameter) *WebService { - if w.pathParameters == nil { - w.pathParameters = []*Parameter{} - } - w.pathParameters = append(w.pathParameters, parameter) - return w -} - -// PathParameter creates a new Parameter of kind Path for documentation purposes. -// It is initialized as required with string as its DataType. -func (w *WebService) PathParameter(name, description string) *Parameter { - return PathParameter(name, description) -} - -// PathParameter creates a new Parameter of kind Path for documentation purposes. -// It is initialized as required with string as its DataType. -func PathParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: true, DataType: "string"}} - p.bePath() - return p -} - -// QueryParameter creates a new Parameter of kind Query for documentation purposes. -// It is initialized as not required with string as its DataType. -func (w *WebService) QueryParameter(name, description string) *Parameter { - return QueryParameter(name, description) -} - -// QueryParameter creates a new Parameter of kind Query for documentation purposes. -// It is initialized as not required with string as its DataType. -func QueryParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string", CollectionFormat: CollectionFormatCSV.String()}} - p.beQuery() - return p -} - -// BodyParameter creates a new Parameter of kind Body for documentation purposes. -// It is initialized as required without a DataType. -func (w *WebService) BodyParameter(name, description string) *Parameter { - return BodyParameter(name, description) -} - -// BodyParameter creates a new Parameter of kind Body for documentation purposes. -// It is initialized as required without a DataType. -func BodyParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: true}} - p.beBody() - return p -} - -// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes. -// It is initialized as not required with string as its DataType. -func (w *WebService) HeaderParameter(name, description string) *Parameter { - return HeaderParameter(name, description) -} - -// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes. -// It is initialized as not required with string as its DataType. -func HeaderParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} - p.beHeader() - return p -} - -// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes. -// It is initialized as required with string as its DataType. -func (w *WebService) FormParameter(name, description string) *Parameter { - return FormParameter(name, description) -} - -// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes. -// It is initialized as required with string as its DataType. 
-func FormParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} - p.beForm() - return p -} - -// Route creates a new Route using the RouteBuilder and add to the ordered list of Routes. -func (w *WebService) Route(builder *RouteBuilder) *WebService { - w.routesLock.Lock() - defer w.routesLock.Unlock() - builder.copyDefaults(w.produces, w.consumes) - w.routes = append(w.routes, builder.Build()) - return w -} - -// RemoveRoute removes the specified route, looks for something that matches 'path' and 'method' -func (w *WebService) RemoveRoute(path, method string) error { - if !w.dynamicRoutes { - return errors.New("dynamic routes are not enabled.") - } - w.routesLock.Lock() - defer w.routesLock.Unlock() - newRoutes := []Route{} - for _, route := range w.routes { - if route.Method == method && route.Path == path { - continue - } - newRoutes = append(newRoutes, route) - } - w.routes = newRoutes - return nil -} - -// Method creates a new RouteBuilder and initialize its http method -func (w *WebService) Method(httpMethod string) *RouteBuilder { - return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method(httpMethod) -} - -// Produces specifies that this WebService can produce one or more MIME types. -// Http requests must have one of these values set for the Accept header. -func (w *WebService) Produces(contentTypes ...string) *WebService { - w.produces = contentTypes - return w -} - -// Consumes specifies that this WebService can consume one or more MIME types. -// Http requests must have one of these values set for the Content-Type header. -func (w *WebService) Consumes(accepts ...string) *WebService { - w.consumes = accepts - return w -} - -// Routes returns the Routes associated with this WebService -func (w *WebService) Routes() []Route { - if !w.dynamicRoutes { - return w.routes - } - // Make a copy of the array to prevent concurrency problems - w.routesLock.RLock() - defer w.routesLock.RUnlock() - result := make([]Route, len(w.routes)) - for ix := range w.routes { - result[ix] = w.routes[ix] - } - return result -} - -// RootPath returns the RootPath associated with this WebService. Default "/" -func (w *WebService) RootPath() string { - return w.rootPath -} - -// PathParameters return the path parameter names for (shared among its Routes) -func (w *WebService) PathParameters() []*Parameter { - return w.pathParameters -} - -// Filter adds a filter function to the chain of filters applicable to all its Routes -func (w *WebService) Filter(filter FilterFunction) *WebService { - w.filters = append(w.filters, filter) - return w -} - -// Doc is used to set the documentation of this service. -func (w *WebService) Doc(plainText string) *WebService { - w.documentation = plainText - return w -} - -// Documentation returns it. 
-func (w *WebService) Documentation() string { - return w.documentation -} - -/* - Convenience methods -*/ - -// HEAD is a shortcut for .Method("HEAD").Path(subPath) -func (w *WebService) HEAD(subPath string) *RouteBuilder { - return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("HEAD").Path(subPath) -} - -// GET is a shortcut for .Method("GET").Path(subPath) -func (w *WebService) GET(subPath string) *RouteBuilder { - return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("GET").Path(subPath) -} - -// POST is a shortcut for .Method("POST").Path(subPath) -func (w *WebService) POST(subPath string) *RouteBuilder { - return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("POST").Path(subPath) -} - -// PUT is a shortcut for .Method("PUT").Path(subPath) -func (w *WebService) PUT(subPath string) *RouteBuilder { - return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PUT").Path(subPath) -} - -// PATCH is a shortcut for .Method("PATCH").Path(subPath) -func (w *WebService) PATCH(subPath string) *RouteBuilder { - return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PATCH").Path(subPath) -} - -// DELETE is a shortcut for .Method("DELETE").Path(subPath) -func (w *WebService) DELETE(subPath string) *RouteBuilder { - return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("DELETE").Path(subPath) -} - -// OPTIONS is a shortcut for .Method("OPTIONS").Path(subPath) -func (w *WebService) OPTIONS(subPath string) *RouteBuilder { - return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("OPTIONS").Path(subPath) -} diff --git a/src/vendor/github.com/emicklei/go-restful/v3/web_service_container.go b/src/vendor/github.com/emicklei/go-restful/v3/web_service_container.go deleted file mode 100644 index c9d31b06c..000000000 --- a/src/vendor/github.com/emicklei/go-restful/v3/web_service_container.go +++ /dev/null @@ -1,39 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "net/http" -) - -// DefaultContainer is a restful.Container that uses http.DefaultServeMux -var DefaultContainer *Container - -func init() { - DefaultContainer = NewContainer() - DefaultContainer.ServeMux = http.DefaultServeMux -} - -// If set the true then panics will not be caught to return HTTP 500. -// In that case, Route functions are responsible for handling any error situation. -// Default value is false = recover from panics. This has performance implications. -// OBSOLETE ; use restful.DefaultContainer.DoNotRecover(true) -var DoNotRecover = false - -// Add registers a new WebService add it to the DefaultContainer. -func Add(service *WebService) { - DefaultContainer.Add(service) -} - -// Filter appends a container FilterFunction from the DefaultContainer. -// These are called before dispatching a http.Request to a WebService. 
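Taken together, the WebService, RouteBuilder and DefaultContainer code being removed here forms go-restful's fluent registration API. The following sketch is illustrative only (listUsers and logFilter are hypothetical names, not from this patch): it shows a WebService rooted at a path, a per-service filter, a documented route with a query parameter, and registration on the DefaultContainer via restful.Add.

```go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

// listUsers is a hypothetical handler used only to wire up the example route.
func listUsers(req *restful.Request, resp *restful.Response) {
	resp.WriteEntity([]string{req.QueryParameter("role")})
}

// logFilter is a WebService-level FilterFunction; it runs before every route of ws.
func logFilter(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
	log.Printf("%s %s", req.Request.Method, req.Request.URL.Path)
	chain.ProcessFilter(req, resp)
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/users").
		Doc("manage users").
		Consumes(restful.MIME_JSON).
		Produces(restful.MIME_JSON)
	ws.Filter(logFilter)

	ws.Route(ws.GET("/").To(listUsers).
		Doc("list users").
		Param(ws.QueryParameter("role", "filter by role").DataType("string")).
		Writes([]string{}).
		Returns(http.StatusOK, "OK", []string{}))

	restful.Add(ws) // register on the DefaultContainer (http.DefaultServeMux)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```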
-func Filter(filter FilterFunction) { - DefaultContainer.Filter(filter) -} - -// RegisteredWebServices returns the collections of WebServices from the DefaultContainer -func RegisteredWebServices() []*WebService { - return DefaultContainer.RegisteredWebServices() -} diff --git a/src/vendor/github.com/evanphx/json-patch/.gitignore b/src/vendor/github.com/evanphx/json-patch/.gitignore deleted file mode 100644 index b7ed7f956..000000000 --- a/src/vendor/github.com/evanphx/json-patch/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -# editor and IDE paraphernalia -.idea -.vscode - -# macOS paraphernalia -.DS_Store diff --git a/src/vendor/github.com/evanphx/json-patch/LICENSE b/src/vendor/github.com/evanphx/json-patch/LICENSE deleted file mode 100644 index df76d7d77..000000000 --- a/src/vendor/github.com/evanphx/json-patch/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2014, Evan Phoenix -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. -* Neither the name of the Evan Phoenix nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/vendor/github.com/evanphx/json-patch/README.md b/src/vendor/github.com/evanphx/json-patch/README.md deleted file mode 100644 index 28e351693..000000000 --- a/src/vendor/github.com/evanphx/json-patch/README.md +++ /dev/null @@ -1,317 +0,0 @@ -# JSON-Patch -`jsonpatch` is a library which provides functionality for both applying -[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as -well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). - -[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) -[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) -[![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch) - -# Get It! 
- -**Latest and greatest**: -```bash -go get -u github.com/evanphx/json-patch/v5 -``` - -**Stable Versions**: -* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5` -* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` - -(previous versions below `v3` are unavailable) - -# Use It! -* [Create and apply a merge patch](#create-and-apply-a-merge-patch) -* [Create and apply a JSON Patch](#create-and-apply-a-json-patch) -* [Comparing JSON documents](#comparing-json-documents) -* [Combine merge patches](#combine-merge-patches) - - -# Configuration - -* There is a global configuration variable `jsonpatch.SupportNegativeIndices`. - This defaults to `true` and enables the non-standard practice of allowing - negative indices to mean indices starting at the end of an array. This - functionality can be disabled by setting `jsonpatch.SupportNegativeIndices = - false`. - -* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`, - which limits the total size increase in bytes caused by "copy" operations in a - patch. It defaults to 0, which means there is no limit. - -These global variables control the behavior of `jsonpatch.Apply`. - -An alternative to `jsonpatch.Apply` is `jsonpatch.ApplyWithOptions` whose behavior -is controlled by an `options` parameter of type `*jsonpatch.ApplyOptions`. - -Structure `jsonpatch.ApplyOptions` includes the configuration options above -and adds two new options: `AllowMissingPathOnRemove` and `EnsurePathExistsOnAdd`. - -When `AllowMissingPathOnRemove` is set to `true`, `jsonpatch.ApplyWithOptions` will ignore -`remove` operations whose `path` points to a non-existent location in the JSON document. -`AllowMissingPathOnRemove` defaults to `false` which will lead to `jsonpatch.ApplyWithOptions` -returning an error when hitting a missing `path` on `remove`. - -When `EnsurePathExistsOnAdd` is set to `true`, `jsonpatch.ApplyWithOptions` will make sure -that `add` operations produce all the `path` elements that are missing from the target object. - -Use `jsonpatch.NewApplyOptions` to create an instance of `jsonpatch.ApplyOptions` -whose values are populated from the global configuration variables. - -## Create and apply a merge patch -Given both an original JSON document and a modified JSON document, you can create -a [Merge Patch](https://tools.ietf.org/html/rfc7396) document. - -It can describe the changes needed to convert from the original to the -modified JSON document. - -Once you have a merge patch, you can apply it to other JSON documents using the -`jsonpatch.MergePatch(document, patch)` function. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - // Let's create a merge patch from these two documents... - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - target := []byte(`{"name": "Jane", "age": 24}`) - - patch, err := jsonpatch.CreateMergePatch(original, target) - if err != nil { - panic(err) - } - - // Now lets apply the patch against a different JSON document... 
- - alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`) - modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch) - - fmt.Printf("patch document: %s\n", patch) - fmt.Printf("updated alternative doc: %s\n", modifiedAlternative) -} -``` - -When ran, you get the following output: - -```bash -$ go run main.go -patch document: {"height":null,"name":"Jane"} -updated alternative doc: {"age":28,"name":"Jane"} -``` - -## Create and apply a JSON Patch -You can create patch objects using `DecodePatch([]byte)`, which can then -be applied against JSON documents. - -The following is an example of creating a patch from two operations, and -applying it against a JSON document. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - patchJSON := []byte(`[ - {"op": "replace", "path": "/name", "value": "Jane"}, - {"op": "remove", "path": "/height"} - ]`) - - patch, err := jsonpatch.DecodePatch(patchJSON) - if err != nil { - panic(err) - } - - modified, err := patch.Apply(original) - if err != nil { - panic(err) - } - - fmt.Printf("Original document: %s\n", original) - fmt.Printf("Modified document: %s\n", modified) -} -``` - -When ran, you get the following output: - -```bash -$ go run main.go -Original document: {"name": "John", "age": 24, "height": 3.21} -Modified document: {"age":24,"name":"Jane"} -``` - -## Comparing JSON documents -Due to potential whitespace and ordering differences, one cannot simply compare -JSON strings or byte-arrays directly. - -As such, you can instead use `jsonpatch.Equal(document1, document2)` to -determine if two JSON documents are _structurally_ equal. This ignores -whitespace differences, and key-value ordering. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - similar := []byte(` - { - "age": 24, - "height": 3.21, - "name": "John" - } - `) - different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`) - - if jsonpatch.Equal(original, similar) { - fmt.Println(`"original" is structurally equal to "similar"`) - } - - if !jsonpatch.Equal(original, different) { - fmt.Println(`"original" is _not_ structurally equal to "different"`) - } -} -``` - -When ran, you get the following output: -```bash -$ go run main.go -"original" is structurally equal to "similar" -"original" is _not_ structurally equal to "different" -``` - -## Combine merge patches -Given two JSON merge patch documents, it is possible to combine them into a -single merge patch which can describe both set of changes. - -The resulting merge patch can be used such that applying it results in a -document structurally similar as merging each merge patch to the document -in succession. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - - nameAndHeight := []byte(`{"height":null,"name":"Jane"}`) - ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`) - - // Let's combine these merge patch documents... 
- combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes) - if err != nil { - panic(err) - } - - // Apply each patch individual against the original document - withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight) - if err != nil { - panic(err) - } - - withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes) - if err != nil { - panic(err) - } - - // Apply the combined patch against the original document - - withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch) - if err != nil { - panic(err) - } - - // Do both result in the same thing? They should! - if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) { - fmt.Println("Both JSON documents are structurally the same!") - } - - fmt.Printf("combined merge patch: %s", combinedPatch) -} -``` - -When ran, you get the following output: -```bash -$ go run main.go -Both JSON documents are structurally the same! -combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"} -``` - -# CLI for comparing JSON documents -You can install the commandline program `json-patch`. - -This program can take multiple JSON patch documents as arguments, -and fed a JSON document from `stdin`. It will apply the patch(es) against -the document and output the modified doc. - -**patch.1.json** -```json -[ - {"op": "replace", "path": "/name", "value": "Jane"}, - {"op": "remove", "path": "/height"} -] -``` - -**patch.2.json** -```json -[ - {"op": "add", "path": "/address", "value": "123 Main St"}, - {"op": "replace", "path": "/age", "value": "21"} -] -``` - -**document.json** -```json -{ - "name": "John", - "age": 24, - "height": 3.21 -} -``` - -You can then run: - -```bash -$ go install github.com/evanphx/json-patch/cmd/json-patch -$ cat document.json | json-patch -p patch.1.json -p patch.2.json -{"address":"123 Main St","age":"21","name":"Jane"} -``` - -# Help It! -Contributions are welcomed! Leave [an issue](https://github.com/evanphx/json-patch/issues) -or [create a PR](https://github.com/evanphx/json-patch/compare). - - -Before creating a pull request, we'd ask that you make sure tests are passing -and that you have added new tests when applicable. - -Contributors can run tests using: - -```bash -go test -cover ./... -``` - -Builds for pull requests are tested automatically -using [TravisCI](https://travis-ci.org/evanphx/json-patch). diff --git a/src/vendor/github.com/evanphx/json-patch/errors.go b/src/vendor/github.com/evanphx/json-patch/errors.go deleted file mode 100644 index 75304b443..000000000 --- a/src/vendor/github.com/evanphx/json-patch/errors.go +++ /dev/null @@ -1,38 +0,0 @@ -package jsonpatch - -import "fmt" - -// AccumulatedCopySizeError is an error type returned when the accumulated size -// increase caused by copy operations in a patch operation has exceeded the -// limit. -type AccumulatedCopySizeError struct { - limit int64 - accumulated int64 -} - -// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError. -func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError { - return &AccumulatedCopySizeError{limit: l, accumulated: a} -} - -// Error implements the error interface. -func (a *AccumulatedCopySizeError) Error() string { - return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit) -} - -// ArraySizeError is an error type returned when the array size has exceeded -// the limit. 
-type ArraySizeError struct { - limit int - size int -} - -// NewArraySizeError returns an ArraySizeError. -func NewArraySizeError(l, s int) *ArraySizeError { - return &ArraySizeError{limit: l, size: s} -} - -// Error implements the error interface. -func (a *ArraySizeError) Error() string { - return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit) -} diff --git a/src/vendor/github.com/evanphx/json-patch/merge.go b/src/vendor/github.com/evanphx/json-patch/merge.go deleted file mode 100644 index ad88d4018..000000000 --- a/src/vendor/github.com/evanphx/json-patch/merge.go +++ /dev/null @@ -1,389 +0,0 @@ -package jsonpatch - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" -) - -func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { - curDoc, err := cur.intoDoc() - - if err != nil { - pruneNulls(patch) - return patch - } - - patchDoc, err := patch.intoDoc() - - if err != nil { - return patch - } - - mergeDocs(curDoc, patchDoc, mergeMerge) - - return cur -} - -func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { - for k, v := range *patch { - if v == nil { - if mergeMerge { - (*doc)[k] = nil - } else { - delete(*doc, k) - } - } else { - cur, ok := (*doc)[k] - - if !ok || cur == nil { - if !mergeMerge { - pruneNulls(v) - } - - (*doc)[k] = v - } else { - (*doc)[k] = merge(cur, v, mergeMerge) - } - } - } -} - -func pruneNulls(n *lazyNode) { - sub, err := n.intoDoc() - - if err == nil { - pruneDocNulls(sub) - } else { - ary, err := n.intoAry() - - if err == nil { - pruneAryNulls(ary) - } - } -} - -func pruneDocNulls(doc *partialDoc) *partialDoc { - for k, v := range *doc { - if v == nil { - delete(*doc, k) - } else { - pruneNulls(v) - } - } - - return doc -} - -func pruneAryNulls(ary *partialArray) *partialArray { - newAry := []*lazyNode{} - - for _, v := range *ary { - if v != nil { - pruneNulls(v) - } - newAry = append(newAry, v) - } - - *ary = newAry - - return ary -} - -var ErrBadJSONDoc = fmt.Errorf("Invalid JSON Document") -var ErrBadJSONPatch = fmt.Errorf("Invalid JSON Patch") -var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") - -// MergeMergePatches merges two merge patches together, such that -// applying this resulting merged merge patch to a document yields the same -// as merging each merge patch to the document in succession. -func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) { - return doMergePatch(patch1Data, patch2Data, true) -} - -// MergePatch merges the patchData into the docData. 
-func MergePatch(docData, patchData []byte) ([]byte, error) { - return doMergePatch(docData, patchData, false) -} - -func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { - doc := &partialDoc{} - - docErr := json.Unmarshal(docData, doc) - - patch := &partialDoc{} - - patchErr := json.Unmarshal(patchData, patch) - - if _, ok := docErr.(*json.SyntaxError); ok { - return nil, ErrBadJSONDoc - } - - if _, ok := patchErr.(*json.SyntaxError); ok { - return nil, ErrBadJSONPatch - } - - if docErr == nil && *doc == nil { - return nil, ErrBadJSONDoc - } - - if patchErr == nil && *patch == nil { - return nil, ErrBadJSONPatch - } - - if docErr != nil || patchErr != nil { - // Not an error, just not a doc, so we turn straight into the patch - if patchErr == nil { - if mergeMerge { - doc = patch - } else { - doc = pruneDocNulls(patch) - } - } else { - patchAry := &partialArray{} - patchErr = json.Unmarshal(patchData, patchAry) - - if patchErr != nil { - return nil, ErrBadJSONPatch - } - - pruneAryNulls(patchAry) - - out, patchErr := json.Marshal(patchAry) - - if patchErr != nil { - return nil, ErrBadJSONPatch - } - - return out, nil - } - } else { - mergeDocs(doc, patch, mergeMerge) - } - - return json.Marshal(doc) -} - -// resemblesJSONArray indicates whether the byte-slice "appears" to be -// a JSON array or not. -// False-positives are possible, as this function does not check the internal -// structure of the array. It only checks that the outer syntax is present and -// correct. -func resemblesJSONArray(input []byte) bool { - input = bytes.TrimSpace(input) - - hasPrefix := bytes.HasPrefix(input, []byte("[")) - hasSuffix := bytes.HasSuffix(input, []byte("]")) - - return hasPrefix && hasSuffix -} - -// CreateMergePatch will return a merge patch document capable of converting -// the original document(s) to the modified document(s). -// The parameters can be bytes of either two JSON Documents, or two arrays of -// JSON documents. -// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 -func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { - originalResemblesArray := resemblesJSONArray(originalJSON) - modifiedResemblesArray := resemblesJSONArray(modifiedJSON) - - // Do both byte-slices seem like JSON arrays? - if originalResemblesArray && modifiedResemblesArray { - return createArrayMergePatch(originalJSON, modifiedJSON) - } - - // Are both byte-slices are not arrays? Then they are likely JSON objects... - if !originalResemblesArray && !modifiedResemblesArray { - return createObjectMergePatch(originalJSON, modifiedJSON) - } - - // None of the above? Then return an error because of mismatched types. - return nil, errBadMergeTypes -} - -// createObjectMergePatch will return a merge-patch document capable of -// converting the original document to the modified document. 
-func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { - originalDoc := map[string]interface{}{} - modifiedDoc := map[string]interface{}{} - - err := json.Unmarshal(originalJSON, &originalDoc) - if err != nil { - return nil, ErrBadJSONDoc - } - - err = json.Unmarshal(modifiedJSON, &modifiedDoc) - if err != nil { - return nil, ErrBadJSONDoc - } - - dest, err := getDiff(originalDoc, modifiedDoc) - if err != nil { - return nil, err - } - - return json.Marshal(dest) -} - -// createArrayMergePatch will return an array of merge-patch documents capable -// of converting the original document to the modified document for each -// pair of JSON documents provided in the arrays. -// Arrays of mismatched sizes will result in an error. -func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { - originalDocs := []json.RawMessage{} - modifiedDocs := []json.RawMessage{} - - err := json.Unmarshal(originalJSON, &originalDocs) - if err != nil { - return nil, ErrBadJSONDoc - } - - err = json.Unmarshal(modifiedJSON, &modifiedDocs) - if err != nil { - return nil, ErrBadJSONDoc - } - - total := len(originalDocs) - if len(modifiedDocs) != total { - return nil, ErrBadJSONDoc - } - - result := []json.RawMessage{} - for i := 0; i < len(originalDocs); i++ { - original := originalDocs[i] - modified := modifiedDocs[i] - - patch, err := createObjectMergePatch(original, modified) - if err != nil { - return nil, err - } - - result = append(result, json.RawMessage(patch)) - } - - return json.Marshal(result) -} - -// Returns true if the array matches (must be json types). -// As is idiomatic for go, an empty array is not the same as a nil array. -func matchesArray(a, b []interface{}) bool { - if len(a) != len(b) { - return false - } - if (a == nil && b != nil) || (a != nil && b == nil) { - return false - } - for i := range a { - if !matchesValue(a[i], b[i]) { - return false - } - } - return true -} - -// Returns true if the values matches (must be json types) -// The types of the values must match, otherwise it will always return false -// If two map[string]interface{} are given, all elements must match. -func matchesValue(av, bv interface{}) bool { - if reflect.TypeOf(av) != reflect.TypeOf(bv) { - return false - } - switch at := av.(type) { - case string: - bt := bv.(string) - if bt == at { - return true - } - case float64: - bt := bv.(float64) - if bt == at { - return true - } - case bool: - bt := bv.(bool) - if bt == at { - return true - } - case nil: - // Both nil, fine. - return true - case map[string]interface{}: - bt := bv.(map[string]interface{}) - if len(bt) != len(at) { - return false - } - for key := range bt { - av, aOK := at[key] - bv, bOK := bt[key] - if aOK != bOK { - return false - } - if !matchesValue(av, bv) { - return false - } - } - return true - case []interface{}: - bt := bv.([]interface{}) - return matchesArray(at, bt) - } - return false -} - -// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. 
-func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { - into := map[string]interface{}{} - for key, bv := range b { - av, ok := a[key] - // value was added - if !ok { - into[key] = bv - continue - } - // If types have changed, replace completely - if reflect.TypeOf(av) != reflect.TypeOf(bv) { - into[key] = bv - continue - } - // Types are the same, compare values - switch at := av.(type) { - case map[string]interface{}: - bt := bv.(map[string]interface{}) - dst := make(map[string]interface{}, len(bt)) - dst, err := getDiff(at, bt) - if err != nil { - return nil, err - } - if len(dst) > 0 { - into[key] = dst - } - case string, float64, bool: - if !matchesValue(av, bv) { - into[key] = bv - } - case []interface{}: - bt := bv.([]interface{}) - if !matchesArray(at, bt) { - into[key] = bv - } - case nil: - switch bv.(type) { - case nil: - // Both nil, fine. - default: - into[key] = bv - } - default: - panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) - } - } - // Now add all deleted values as nil - for key := range a { - _, found := b[key] - if !found { - into[key] = nil - } - } - return into, nil -} diff --git a/src/vendor/github.com/evanphx/json-patch/patch.go b/src/vendor/github.com/evanphx/json-patch/patch.go deleted file mode 100644 index 4bce5936d..000000000 --- a/src/vendor/github.com/evanphx/json-patch/patch.go +++ /dev/null @@ -1,809 +0,0 @@ -package jsonpatch - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "strings" - - "github.com/pkg/errors" -) - -const ( - eRaw = iota - eDoc - eAry -) - -var ( - // SupportNegativeIndices decides whether to support non-standard practice of - // allowing negative indices to mean indices starting at the end of an array. - // Default to true. - SupportNegativeIndices bool = true - // AccumulatedCopySizeLimit limits the total size increase in bytes caused by - // "copy" operations in a patch. - AccumulatedCopySizeLimit int64 = 0 -) - -var ( - ErrTestFailed = errors.New("test failed") - ErrMissing = errors.New("missing value") - ErrUnknownType = errors.New("unknown object type") - ErrInvalid = errors.New("invalid state detected") - ErrInvalidIndex = errors.New("invalid index referenced") -) - -type lazyNode struct { - raw *json.RawMessage - doc partialDoc - ary partialArray - which int -} - -// Operation is a single JSON-Patch step, such as a single 'add' operation. -type Operation map[string]*json.RawMessage - -// Patch is an ordered collection of Operations. 
-type Patch []Operation - -type partialDoc map[string]*lazyNode -type partialArray []*lazyNode - -type container interface { - get(key string) (*lazyNode, error) - set(key string, val *lazyNode) error - add(key string, val *lazyNode) error - remove(key string) error -} - -func newLazyNode(raw *json.RawMessage) *lazyNode { - return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} -} - -func (n *lazyNode) MarshalJSON() ([]byte, error) { - switch n.which { - case eRaw: - return json.Marshal(n.raw) - case eDoc: - return json.Marshal(n.doc) - case eAry: - return json.Marshal(n.ary) - default: - return nil, ErrUnknownType - } -} - -func (n *lazyNode) UnmarshalJSON(data []byte) error { - dest := make(json.RawMessage, len(data)) - copy(dest, data) - n.raw = &dest - n.which = eRaw - return nil -} - -func deepCopy(src *lazyNode) (*lazyNode, int, error) { - if src == nil { - return nil, 0, nil - } - a, err := src.MarshalJSON() - if err != nil { - return nil, 0, err - } - sz := len(a) - ra := make(json.RawMessage, sz) - copy(ra, a) - return newLazyNode(&ra), sz, nil -} - -func (n *lazyNode) intoDoc() (*partialDoc, error) { - if n.which == eDoc { - return &n.doc, nil - } - - if n.raw == nil { - return nil, ErrInvalid - } - - err := json.Unmarshal(*n.raw, &n.doc) - - if err != nil { - return nil, err - } - - n.which = eDoc - return &n.doc, nil -} - -func (n *lazyNode) intoAry() (*partialArray, error) { - if n.which == eAry { - return &n.ary, nil - } - - if n.raw == nil { - return nil, ErrInvalid - } - - err := json.Unmarshal(*n.raw, &n.ary) - - if err != nil { - return nil, err - } - - n.which = eAry - return &n.ary, nil -} - -func (n *lazyNode) compact() []byte { - buf := &bytes.Buffer{} - - if n.raw == nil { - return nil - } - - err := json.Compact(buf, *n.raw) - - if err != nil { - return *n.raw - } - - return buf.Bytes() -} - -func (n *lazyNode) tryDoc() bool { - if n.raw == nil { - return false - } - - err := json.Unmarshal(*n.raw, &n.doc) - - if err != nil { - return false - } - - n.which = eDoc - return true -} - -func (n *lazyNode) tryAry() bool { - if n.raw == nil { - return false - } - - err := json.Unmarshal(*n.raw, &n.ary) - - if err != nil { - return false - } - - n.which = eAry - return true -} - -func (n *lazyNode) equal(o *lazyNode) bool { - if n.which == eRaw { - if !n.tryDoc() && !n.tryAry() { - if o.which != eRaw { - return false - } - - return bytes.Equal(n.compact(), o.compact()) - } - } - - if n.which == eDoc { - if o.which == eRaw { - if !o.tryDoc() { - return false - } - } - - if o.which != eDoc { - return false - } - - if len(n.doc) != len(o.doc) { - return false - } - - for k, v := range n.doc { - ov, ok := o.doc[k] - - if !ok { - return false - } - - if (v == nil) != (ov == nil) { - return false - } - - if v == nil && ov == nil { - continue - } - - if !v.equal(ov) { - return false - } - } - - return true - } - - if o.which != eAry && !o.tryAry() { - return false - } - - if len(n.ary) != len(o.ary) { - return false - } - - for idx, val := range n.ary { - if !val.equal(o.ary[idx]) { - return false - } - } - - return true -} - -// Kind reads the "op" field of the Operation. -func (o Operation) Kind() string { - if obj, ok := o["op"]; ok && obj != nil { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown" - } - - return op - } - - return "unknown" -} - -// Path reads the "path" field of the Operation. 
-func (o Operation) Path() (string, error) { - if obj, ok := o["path"]; ok && obj != nil { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown", err - } - - return op, nil - } - - return "unknown", errors.Wrapf(ErrMissing, "operation missing path field") -} - -// From reads the "from" field of the Operation. -func (o Operation) From() (string, error) { - if obj, ok := o["from"]; ok && obj != nil { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown", err - } - - return op, nil - } - - return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field") -} - -func (o Operation) value() *lazyNode { - if obj, ok := o["value"]; ok { - return newLazyNode(obj) - } - - return nil -} - -// ValueInterface decodes the operation value into an interface. -func (o Operation) ValueInterface() (interface{}, error) { - if obj, ok := o["value"]; ok && obj != nil { - var v interface{} - - err := json.Unmarshal(*obj, &v) - - if err != nil { - return nil, err - } - - return v, nil - } - - return nil, errors.Wrapf(ErrMissing, "operation, missing value field") -} - -func isArray(buf []byte) bool { -Loop: - for _, c := range buf { - switch c { - case ' ': - case '\n': - case '\t': - continue - case '[': - return true - default: - break Loop - } - } - - return false -} - -func findObject(pd *container, path string) (container, string) { - doc := *pd - - split := strings.Split(path, "/") - - if len(split) < 2 { - return nil, "" - } - - parts := split[1 : len(split)-1] - - key := split[len(split)-1] - - var err error - - for _, part := range parts { - - next, ok := doc.get(decodePatchKey(part)) - - if next == nil || ok != nil { - return nil, "" - } - - if isArray(*next.raw) { - doc, err = next.intoAry() - - if err != nil { - return nil, "" - } - } else { - doc, err = next.intoDoc() - - if err != nil { - return nil, "" - } - } - } - - return doc, decodePatchKey(key) -} - -func (d *partialDoc) set(key string, val *lazyNode) error { - (*d)[key] = val - return nil -} - -func (d *partialDoc) add(key string, val *lazyNode) error { - (*d)[key] = val - return nil -} - -func (d *partialDoc) get(key string) (*lazyNode, error) { - return (*d)[key], nil -} - -func (d *partialDoc) remove(key string) error { - _, ok := (*d)[key] - if !ok { - return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key) - } - - delete(*d, key) - return nil -} - -// set should only be used to implement the "replace" operation, so "key" must -// be an already existing index in "d". 
-func (d *partialArray) set(key string, val *lazyNode) error { - idx, err := strconv.Atoi(key) - if err != nil { - return err - } - - if idx < 0 { - if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - if idx < -len(*d) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - idx += len(*d) - } - - (*d)[idx] = val - return nil -} - -func (d *partialArray) add(key string, val *lazyNode) error { - if key == "-" { - *d = append(*d, val) - return nil - } - - idx, err := strconv.Atoi(key) - if err != nil { - return errors.Wrapf(err, "value was not a proper array index: '%s'", key) - } - - sz := len(*d) + 1 - - ary := make([]*lazyNode, sz) - - cur := *d - - if idx >= len(ary) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - - if idx < 0 { - if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - if idx < -len(ary) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - idx += len(ary) - } - - copy(ary[0:idx], cur[0:idx]) - ary[idx] = val - copy(ary[idx+1:], cur[idx:]) - - *d = ary - return nil -} - -func (d *partialArray) get(key string) (*lazyNode, error) { - idx, err := strconv.Atoi(key) - - if err != nil { - return nil, err - } - - if idx < 0 { - if !SupportNegativeIndices { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - if idx < -len(*d) { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - idx += len(*d) - } - - if idx >= len(*d) { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - - return (*d)[idx], nil -} - -func (d *partialArray) remove(key string) error { - idx, err := strconv.Atoi(key) - if err != nil { - return err - } - - cur := *d - - if idx >= len(cur) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - - if idx < 0 { - if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - if idx < -len(cur) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - idx += len(cur) - } - - ary := make([]*lazyNode, len(cur)-1) - - copy(ary[0:idx], cur[0:idx]) - copy(ary[idx:], cur[idx+1:]) - - *d = ary - return nil - -} - -func (p Patch) add(doc *container, op Operation) error { - path, err := op.Path() - if err != nil { - return errors.Wrapf(ErrMissing, "add operation failed to decode path") - } - - con, key := findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) - } - - err = con.add(key, op.value()) - if err != nil { - return errors.Wrapf(err, "error in add for path: '%s'", path) - } - - return nil -} - -func (p Patch) remove(doc *container, op Operation) error { - path, err := op.Path() - if err != nil { - return errors.Wrapf(ErrMissing, "remove operation failed to decode path") - } - - con, key := findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) - } - - err = con.remove(key) - if err != nil { - return errors.Wrapf(err, "error in remove for path: '%s'", path) - } - - return nil -} - -func (p Patch) replace(doc *container, op Operation) error { - path, err := op.Path() - if err != nil { - return errors.Wrapf(err, "replace 
operation failed to decode path") - } - - con, key := findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path) - } - - _, ok := con.get(key) - if ok != nil { - return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path) - } - - err = con.set(key, op.value()) - if err != nil { - return errors.Wrapf(err, "error in remove for path: '%s'", path) - } - - return nil -} - -func (p Patch) move(doc *container, op Operation) error { - from, err := op.From() - if err != nil { - return errors.Wrapf(err, "move operation failed to decode from") - } - - con, key := findObject(doc, from) - - if con == nil { - return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) - } - - val, err := con.get(key) - if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", key) - } - - err = con.remove(key) - if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", key) - } - - path, err := op.Path() - if err != nil { - return errors.Wrapf(err, "move operation failed to decode path") - } - - con, key = findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) - } - - err = con.add(key, val) - if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", path) - } - - return nil -} - -func (p Patch) test(doc *container, op Operation) error { - path, err := op.Path() - if err != nil { - return errors.Wrapf(err, "test operation failed to decode path") - } - - con, key := findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) - } - - val, err := con.get(key) - if err != nil { - return errors.Wrapf(err, "error in test for path: '%s'", path) - } - - if val == nil { - if op.value().raw == nil { - return nil - } - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) - } else if op.value() == nil { - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) - } - - if val.equal(op.value()) { - return nil - } - - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) -} - -func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error { - from, err := op.From() - if err != nil { - return errors.Wrapf(err, "copy operation failed to decode from") - } - - con, key := findObject(doc, from) - - if con == nil { - return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from) - } - - val, err := con.get(key) - if err != nil { - return errors.Wrapf(err, "error in copy for from: '%s'", from) - } - - path, err := op.Path() - if err != nil { - return errors.Wrapf(ErrMissing, "copy operation failed to decode path") - } - - con, key = findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) - } - - valCopy, sz, err := deepCopy(val) - if err != nil { - return errors.Wrapf(err, "error while performing deep copy") - } - - (*accumulatedCopySize) += int64(sz) - if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit { - return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize) - } - - err = con.add(key, valCopy) - if err != nil { - return errors.Wrapf(err, "error while adding value during copy") - } - - return nil -} - -// 
Equal indicates if 2 JSON documents have the same structural equality. -func Equal(a, b []byte) bool { - ra := make(json.RawMessage, len(a)) - copy(ra, a) - la := newLazyNode(&ra) - - rb := make(json.RawMessage, len(b)) - copy(rb, b) - lb := newLazyNode(&rb) - - return la.equal(lb) -} - -// DecodePatch decodes the passed JSON document as an RFC 6902 patch. -func DecodePatch(buf []byte) (Patch, error) { - var p Patch - - err := json.Unmarshal(buf, &p) - - if err != nil { - return nil, err - } - - return p, nil -} - -// Apply mutates a JSON document according to the patch, and returns the new -// document. -func (p Patch) Apply(doc []byte) ([]byte, error) { - return p.ApplyIndent(doc, "") -} - -// ApplyIndent mutates a JSON document according to the patch, and returns the new -// document indented. -func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { - if len(doc) == 0 { - return doc, nil - } - - var pd container - if doc[0] == '[' { - pd = &partialArray{} - } else { - pd = &partialDoc{} - } - - err := json.Unmarshal(doc, pd) - - if err != nil { - return nil, err - } - - err = nil - - var accumulatedCopySize int64 - - for _, op := range p { - switch op.Kind() { - case "add": - err = p.add(&pd, op) - case "remove": - err = p.remove(&pd, op) - case "replace": - err = p.replace(&pd, op) - case "move": - err = p.move(&pd, op) - case "test": - err = p.test(&pd, op) - case "copy": - err = p.copy(&pd, op, &accumulatedCopySize) - default: - err = fmt.Errorf("Unexpected kind: %s", op.Kind()) - } - - if err != nil { - return nil, err - } - } - - if indent != "" { - return json.MarshalIndent(pd, "", indent) - } - - return json.Marshal(pd) -} - -// From http://tools.ietf.org/html/rfc6901#section-4 : -// -// Evaluation of each reference token begins by decoding any escaped -// character sequence. This is performed by first transforming any -// occurrence of the sequence '~1' to '/', and then transforming any -// occurrence of the sequence '~0' to '~'. - -var ( - rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") -) - -func decodePatchKey(k string) string { - return rfc6901Decoder.Replace(k) -} diff --git a/src/vendor/github.com/go-errors/errors/.travis.yml b/src/vendor/github.com/go-errors/errors/.travis.yml deleted file mode 100644 index 9d00fdd5d..000000000 --- a/src/vendor/github.com/go-errors/errors/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go - -go: - - "1.8.x" - - "1.10.x" diff --git a/src/vendor/github.com/go-errors/errors/LICENSE.MIT b/src/vendor/github.com/go-errors/errors/LICENSE.MIT deleted file mode 100644 index c9a5b2eeb..000000000 --- a/src/vendor/github.com/go-errors/errors/LICENSE.MIT +++ /dev/null @@ -1,7 +0,0 @@ -Copyright (c) 2015 Conrad Irwin - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/src/vendor/github.com/go-errors/errors/README.md b/src/vendor/github.com/go-errors/errors/README.md deleted file mode 100644 index 5d4f1873d..000000000 --- a/src/vendor/github.com/go-errors/errors/README.md +++ /dev/null @@ -1,66 +0,0 @@ -go-errors/errors -================ - -[![Build Status](https://travis-ci.org/go-errors/errors.svg?branch=master)](https://travis-ci.org/go-errors/errors) - -Package errors adds stacktrace support to errors in go. - -This is particularly useful when you want to understand the state of execution -when an error was returned unexpectedly. - -It provides the type \*Error which implements the standard golang error -interface, so you can use this library interchangably with code that is -expecting a normal error return. - -Usage ------ - -Full documentation is available on -[godoc](https://godoc.org/github.com/go-errors/errors), but here's a simple -example: - -```go -package crashy - -import "github.com/go-errors/errors" - -var Crashed = errors.Errorf("oh dear") - -func Crash() error { - return errors.New(Crashed) -} -``` - -This can be called as follows: - -```go -package main - -import ( - "crashy" - "fmt" - "github.com/go-errors/errors" -) - -func main() { - err := crashy.Crash() - if err != nil { - if errors.Is(err, crashy.Crashed) { - fmt.Println(err.(*errors.Error).ErrorStack()) - } else { - panic(err) - } - } -} -``` - -Meta-fu -------- - -This package was original written to allow reporting to -[Bugsnag](https://bugsnag.com/) from -[bugsnag-go](https://github.com/bugsnag/bugsnag-go), but after I found similar -packages by Facebook and Dropbox, it was moved to one canonical location so -everyone can benefit. - -This package is licensed under the MIT license, see LICENSE.MIT for details. 
diff --git a/src/vendor/github.com/go-errors/errors/cover.out b/src/vendor/github.com/go-errors/errors/cover.out deleted file mode 100644 index ab18b0519..000000000 --- a/src/vendor/github.com/go-errors/errors/cover.out +++ /dev/null @@ -1,89 +0,0 @@ -mode: set -github.com/go-errors/errors/stackframe.go:27.51,30.25 2 1 -github.com/go-errors/errors/stackframe.go:33.2,38.8 3 1 -github.com/go-errors/errors/stackframe.go:30.25,32.3 1 0 -github.com/go-errors/errors/stackframe.go:43.47,44.31 1 1 -github.com/go-errors/errors/stackframe.go:47.2,47.48 1 1 -github.com/go-errors/errors/stackframe.go:44.31,46.3 1 1 -github.com/go-errors/errors/stackframe.go:52.42,56.16 3 1 -github.com/go-errors/errors/stackframe.go:60.2,60.60 1 1 -github.com/go-errors/errors/stackframe.go:56.16,58.3 1 0 -github.com/go-errors/errors/stackframe.go:64.55,67.16 2 1 -github.com/go-errors/errors/stackframe.go:71.2,72.61 2 1 -github.com/go-errors/errors/stackframe.go:76.2,76.66 1 1 -github.com/go-errors/errors/stackframe.go:67.16,69.3 1 0 -github.com/go-errors/errors/stackframe.go:72.61,74.3 1 0 -github.com/go-errors/errors/stackframe.go:79.56,91.63 3 1 -github.com/go-errors/errors/stackframe.go:95.2,95.53 1 1 -github.com/go-errors/errors/stackframe.go:100.2,101.18 2 1 -github.com/go-errors/errors/stackframe.go:91.63,94.3 2 1 -github.com/go-errors/errors/stackframe.go:95.53,98.3 2 1 -github.com/go-errors/errors/error.go:70.32,73.23 2 1 -github.com/go-errors/errors/error.go:80.2,85.3 3 1 -github.com/go-errors/errors/error.go:74.2,75.10 1 1 -github.com/go-errors/errors/error.go:76.2,77.28 1 1 -github.com/go-errors/errors/error.go:92.43,95.23 2 1 -github.com/go-errors/errors/error.go:104.2,109.3 3 1 -github.com/go-errors/errors/error.go:96.2,97.11 1 1 -github.com/go-errors/errors/error.go:98.2,99.10 1 1 -github.com/go-errors/errors/error.go:100.2,101.28 1 1 -github.com/go-errors/errors/error.go:115.39,117.19 1 1 -github.com/go-errors/errors/error.go:121.2,121.29 1 1 -github.com/go-errors/errors/error.go:125.2,125.43 1 1 -github.com/go-errors/errors/error.go:129.2,129.14 1 1 -github.com/go-errors/errors/error.go:117.19,119.3 1 1 -github.com/go-errors/errors/error.go:121.29,123.3 1 1 -github.com/go-errors/errors/error.go:125.43,127.3 1 1 -github.com/go-errors/errors/error.go:135.53,137.2 1 1 -github.com/go-errors/errors/error.go:140.34,142.2 1 1 -github.com/go-errors/errors/error.go:146.34,149.42 2 1 -github.com/go-errors/errors/error.go:153.2,153.20 1 1 -github.com/go-errors/errors/error.go:149.42,151.3 1 1 -github.com/go-errors/errors/error.go:158.39,160.2 1 1 -github.com/go-errors/errors/error.go:164.46,165.23 1 1 -github.com/go-errors/errors/error.go:173.2,173.19 1 1 -github.com/go-errors/errors/error.go:165.23,168.32 2 1 -github.com/go-errors/errors/error.go:168.32,170.4 1 1 -github.com/go-errors/errors/error.go:177.37,178.42 1 1 -github.com/go-errors/errors/error.go:181.2,181.41 1 1 -github.com/go-errors/errors/error.go:178.42,180.3 1 1 -github.com/go-errors/errors/parse_panic.go:10.39,12.2 1 1 -github.com/go-errors/errors/parse_panic.go:16.46,24.34 5 1 -github.com/go-errors/errors/parse_panic.go:70.2,70.43 1 1 -github.com/go-errors/errors/parse_panic.go:73.2,73.55 1 0 -github.com/go-errors/errors/parse_panic.go:24.34,27.23 2 1 -github.com/go-errors/errors/parse_panic.go:27.23,28.42 1 1 -github.com/go-errors/errors/parse_panic.go:28.42,31.5 2 1 -github.com/go-errors/errors/parse_panic.go:31.6,33.5 1 0 -github.com/go-errors/errors/parse_panic.go:35.5,35.29 1 1 -github.com/go-errors/errors/parse_panic.go:35.29,36.86 1 1 
-github.com/go-errors/errors/parse_panic.go:36.86,38.5 1 1 -github.com/go-errors/errors/parse_panic.go:40.5,40.32 1 1 -github.com/go-errors/errors/parse_panic.go:40.32,41.18 1 1 -github.com/go-errors/errors/parse_panic.go:45.4,46.46 2 1 -github.com/go-errors/errors/parse_panic.go:51.4,53.23 2 1 -github.com/go-errors/errors/parse_panic.go:57.4,58.18 2 1 -github.com/go-errors/errors/parse_panic.go:62.4,63.17 2 1 -github.com/go-errors/errors/parse_panic.go:41.18,43.10 2 1 -github.com/go-errors/errors/parse_panic.go:46.46,49.5 2 1 -github.com/go-errors/errors/parse_panic.go:53.23,55.5 1 0 -github.com/go-errors/errors/parse_panic.go:58.18,60.5 1 0 -github.com/go-errors/errors/parse_panic.go:63.17,65.10 2 1 -github.com/go-errors/errors/parse_panic.go:70.43,72.3 1 1 -github.com/go-errors/errors/parse_panic.go:80.85,82.29 2 1 -github.com/go-errors/errors/parse_panic.go:85.2,85.15 1 1 -github.com/go-errors/errors/parse_panic.go:88.2,90.63 2 1 -github.com/go-errors/errors/parse_panic.go:94.2,94.53 1 1 -github.com/go-errors/errors/parse_panic.go:99.2,101.36 2 1 -github.com/go-errors/errors/parse_panic.go:105.2,106.15 2 1 -github.com/go-errors/errors/parse_panic.go:109.2,112.49 3 1 -github.com/go-errors/errors/parse_panic.go:116.2,117.16 2 1 -github.com/go-errors/errors/parse_panic.go:121.2,126.8 1 1 -github.com/go-errors/errors/parse_panic.go:82.29,84.3 1 0 -github.com/go-errors/errors/parse_panic.go:85.15,87.3 1 1 -github.com/go-errors/errors/parse_panic.go:90.63,93.3 2 1 -github.com/go-errors/errors/parse_panic.go:94.53,97.3 2 1 -github.com/go-errors/errors/parse_panic.go:101.36,103.3 1 0 -github.com/go-errors/errors/parse_panic.go:106.15,108.3 1 0 -github.com/go-errors/errors/parse_panic.go:112.49,114.3 1 1 -github.com/go-errors/errors/parse_panic.go:117.16,119.3 1 0 diff --git a/src/vendor/github.com/go-errors/errors/error.go b/src/vendor/github.com/go-errors/errors/error.go deleted file mode 100644 index 60062a437..000000000 --- a/src/vendor/github.com/go-errors/errors/error.go +++ /dev/null @@ -1,217 +0,0 @@ -// Package errors provides errors that have stack-traces. -// -// This is particularly useful when you want to understand the -// state of execution when an error was returned unexpectedly. -// -// It provides the type *Error which implements the standard -// golang error interface, so you can use this library interchangably -// with code that is expecting a normal error return. -// -// For example: -// -// package crashy -// -// import "github.com/go-errors/errors" -// -// var Crashed = errors.Errorf("oh dear") -// -// func Crash() error { -// return errors.New(Crashed) -// } -// -// This can be called as follows: -// -// package main -// -// import ( -// "crashy" -// "fmt" -// "github.com/go-errors/errors" -// ) -// -// func main() { -// err := crashy.Crash() -// if err != nil { -// if errors.Is(err, crashy.Crashed) { -// fmt.Println(err.(*errors.Error).ErrorStack()) -// } else { -// panic(err) -// } -// } -// } -// -// This package was original written to allow reporting to Bugsnag, -// but after I found similar packages by Facebook and Dropbox, it -// was moved to one canonical location so everyone can benefit. -package errors - -import ( - "bytes" - "fmt" - "reflect" - "runtime" -) - -// The maximum number of stackframes on any error. -var MaxStackDepth = 50 - -// Error is an error with an attached stacktrace. It can be used -// wherever the builtin error interface is expected. 
-type Error struct { - Err error - stack []uintptr - frames []StackFrame - prefix string -} - -// New makes an Error from the given value. If that value is already an -// error then it will be used directly, if not, it will be passed to -// fmt.Errorf("%v"). The stacktrace will point to the line of code that -// called New. -func New(e interface{}) *Error { - var err error - - switch e := e.(type) { - case error: - err = e - default: - err = fmt.Errorf("%v", e) - } - - stack := make([]uintptr, MaxStackDepth) - length := runtime.Callers(2, stack[:]) - return &Error{ - Err: err, - stack: stack[:length], - } -} - -// Wrap makes an Error from the given value. If that value is already an -// error then it will be used directly, if not, it will be passed to -// fmt.Errorf("%v"). The skip parameter indicates how far up the stack -// to start the stacktrace. 0 is from the current call, 1 from its caller, etc. -func Wrap(e interface{}, skip int) *Error { - var err error - - switch e := e.(type) { - case *Error: - return e - case error: - err = e - default: - err = fmt.Errorf("%v", e) - } - - stack := make([]uintptr, MaxStackDepth) - length := runtime.Callers(2+skip, stack[:]) - return &Error{ - Err: err, - stack: stack[:length], - } -} - -// WrapPrefix makes an Error from the given value. If that value is already an -// error then it will be used directly, if not, it will be passed to -// fmt.Errorf("%v"). The prefix parameter is used to add a prefix to the -// error message when calling Error(). The skip parameter indicates how far -// up the stack to start the stacktrace. 0 is from the current call, -// 1 from its caller, etc. -func WrapPrefix(e interface{}, prefix string, skip int) *Error { - - err := Wrap(e, 1+skip) - - if err.prefix != "" { - prefix = fmt.Sprintf("%s: %s", prefix, err.prefix) - } - - return &Error{ - Err: err.Err, - stack: err.stack, - prefix: prefix, - } - -} - -// Is detects whether the error is equal to a given error. Errors -// are considered equal by this function if they are the same object, -// or if they both contain the same error inside an errors.Error. -func Is(e error, original error) bool { - - if e == original { - return true - } - - if e, ok := e.(*Error); ok { - return Is(e.Err, original) - } - - if original, ok := original.(*Error); ok { - return Is(e, original.Err) - } - - return false -} - -// Errorf creates a new error with the given message. You can use it -// as a drop-in replacement for fmt.Errorf() to provide descriptive -// errors in return values. -func Errorf(format string, a ...interface{}) *Error { - return Wrap(fmt.Errorf(format, a...), 1) -} - -// Error returns the underlying error's message. -func (err *Error) Error() string { - - msg := err.Err.Error() - if err.prefix != "" { - msg = fmt.Sprintf("%s: %s", err.prefix, msg) - } - - return msg -} - -// Stack returns the callstack formatted the same way that go does -// in runtime/debug.Stack() -func (err *Error) Stack() []byte { - buf := bytes.Buffer{} - - for _, frame := range err.StackFrames() { - buf.WriteString(frame.String()) - } - - return buf.Bytes() -} - -// Callers satisfies the bugsnag ErrorWithCallerS() interface -// so that the stack can be read out. -func (err *Error) Callers() []uintptr { - return err.stack -} - -// ErrorStack returns a string that contains both the -// error message and the callstack. 
-func (err *Error) ErrorStack() string { - return err.TypeName() + " " + err.Error() + "\n" + string(err.Stack()) -} - -// StackFrames returns an array of frames containing information about the -// stack. -func (err *Error) StackFrames() []StackFrame { - if err.frames == nil { - err.frames = make([]StackFrame, len(err.stack)) - - for i, pc := range err.stack { - err.frames[i] = NewStackFrame(pc) - } - } - - return err.frames -} - -// TypeName returns the type this error. e.g. *errors.stringError. -func (err *Error) TypeName() string { - if _, ok := err.Err.(uncaughtPanic); ok { - return "panic" - } - return reflect.TypeOf(err.Err).String() -} diff --git a/src/vendor/github.com/go-errors/errors/parse_panic.go b/src/vendor/github.com/go-errors/errors/parse_panic.go deleted file mode 100644 index cc37052d7..000000000 --- a/src/vendor/github.com/go-errors/errors/parse_panic.go +++ /dev/null @@ -1,127 +0,0 @@ -package errors - -import ( - "strconv" - "strings" -) - -type uncaughtPanic struct{ message string } - -func (p uncaughtPanic) Error() string { - return p.message -} - -// ParsePanic allows you to get an error object from the output of a go program -// that panicked. This is particularly useful with https://github.com/mitchellh/panicwrap. -func ParsePanic(text string) (*Error, error) { - lines := strings.Split(text, "\n") - - state := "start" - - var message string - var stack []StackFrame - - for i := 0; i < len(lines); i++ { - line := lines[i] - - if state == "start" { - if strings.HasPrefix(line, "panic: ") { - message = strings.TrimPrefix(line, "panic: ") - state = "seek" - } else { - return nil, Errorf("bugsnag.panicParser: Invalid line (no prefix): %s", line) - } - - } else if state == "seek" { - if strings.HasPrefix(line, "goroutine ") && strings.HasSuffix(line, "[running]:") { - state = "parsing" - } - - } else if state == "parsing" { - if line == "" { - state = "done" - break - } - createdBy := false - if strings.HasPrefix(line, "created by ") { - line = strings.TrimPrefix(line, "created by ") - createdBy = true - } - - i++ - - if i >= len(lines) { - return nil, Errorf("bugsnag.panicParser: Invalid line (unpaired): %s", line) - } - - frame, err := parsePanicFrame(line, lines[i], createdBy) - if err != nil { - return nil, err - } - - stack = append(stack, *frame) - if createdBy { - state = "done" - break - } - } - } - - if state == "done" || state == "parsing" { - return &Error{Err: uncaughtPanic{message}, frames: stack}, nil - } - return nil, Errorf("could not parse panic: %v", text) -} - -// The lines we're passing look like this: -// -// main.(*foo).destruct(0xc208067e98) -// /0/go/src/github.com/bugsnag/bugsnag-go/pan/main.go:22 +0x151 -func parsePanicFrame(name string, line string, createdBy bool) (*StackFrame, error) { - idx := strings.LastIndex(name, "(") - if idx == -1 && !createdBy { - return nil, Errorf("bugsnag.panicParser: Invalid line (no call): %s", name) - } - if idx != -1 { - name = name[:idx] - } - pkg := "" - - if lastslash := strings.LastIndex(name, "/"); lastslash >= 0 { - pkg += name[:lastslash] + "/" - name = name[lastslash+1:] - } - if period := strings.Index(name, "."); period >= 0 { - pkg += name[:period] - name = name[period+1:] - } - - name = strings.Replace(name, "·", ".", -1) - - if !strings.HasPrefix(line, "\t") { - return nil, Errorf("bugsnag.panicParser: Invalid line (no tab): %s", line) - } - - idx = strings.LastIndex(line, ":") - if idx == -1 { - return nil, Errorf("bugsnag.panicParser: Invalid line (no line number): %s", line) - } - file := 
line[1:idx] - - number := line[idx+1:] - if idx = strings.Index(number, " +"); idx > -1 { - number = number[:idx] - } - - lno, err := strconv.ParseInt(number, 10, 32) - if err != nil { - return nil, Errorf("bugsnag.panicParser: Invalid line (bad line number): %s", line) - } - - return &StackFrame{ - File: file, - LineNumber: int(lno), - Package: pkg, - Name: name, - }, nil -} diff --git a/src/vendor/github.com/go-errors/errors/stackframe.go b/src/vendor/github.com/go-errors/errors/stackframe.go deleted file mode 100644 index 750ab9a52..000000000 --- a/src/vendor/github.com/go-errors/errors/stackframe.go +++ /dev/null @@ -1,102 +0,0 @@ -package errors - -import ( - "bytes" - "fmt" - "io/ioutil" - "runtime" - "strings" -) - -// A StackFrame contains all necessary information about to generate a line -// in a callstack. -type StackFrame struct { - // The path to the file containing this ProgramCounter - File string - // The LineNumber in that file - LineNumber int - // The Name of the function that contains this ProgramCounter - Name string - // The Package that contains this function - Package string - // The underlying ProgramCounter - ProgramCounter uintptr -} - -// NewStackFrame popoulates a stack frame object from the program counter. -func NewStackFrame(pc uintptr) (frame StackFrame) { - - frame = StackFrame{ProgramCounter: pc} - if frame.Func() == nil { - return - } - frame.Package, frame.Name = packageAndName(frame.Func()) - - // pc -1 because the program counters we use are usually return addresses, - // and we want to show the line that corresponds to the function call - frame.File, frame.LineNumber = frame.Func().FileLine(pc - 1) - return - -} - -// Func returns the function that contained this frame. -func (frame *StackFrame) Func() *runtime.Func { - if frame.ProgramCounter == 0 { - return nil - } - return runtime.FuncForPC(frame.ProgramCounter) -} - -// String returns the stackframe formatted in the same way as go does -// in runtime/debug.Stack() -func (frame *StackFrame) String() string { - str := fmt.Sprintf("%s:%d (0x%x)\n", frame.File, frame.LineNumber, frame.ProgramCounter) - - source, err := frame.SourceLine() - if err != nil { - return str - } - - return str + fmt.Sprintf("\t%s: %s\n", frame.Name, source) -} - -// SourceLine gets the line of code (from File and Line) of the original source if possible. -func (frame *StackFrame) SourceLine() (string, error) { - data, err := ioutil.ReadFile(frame.File) - - if err != nil { - return "", New(err) - } - - lines := bytes.Split(data, []byte{'\n'}) - if frame.LineNumber <= 0 || frame.LineNumber >= len(lines) { - return "???", nil - } - // -1 because line-numbers are 1 based, but our array is 0 based - return string(bytes.Trim(lines[frame.LineNumber-1], " \t")), nil -} - -func packageAndName(fn *runtime.Func) (string, string) { - name := fn.Name() - pkg := "" - - // The name includes the path name to the package, which is unnecessary - // since the file name is already included. Plus, it has center dots. - // That is, we see - // runtime/debug.*T·ptrmethod - // and want - // *T.ptrmethod - // Since the package path might contains dots (e.g. code.google.com/...), - // we first remove the path prefix if there is one. 
- if lastslash := strings.LastIndex(name, "/"); lastslash >= 0 { - pkg += name[:lastslash] + "/" - name = name[lastslash+1:] - } - if period := strings.Index(name, "."); period >= 0 { - pkg += name[:period] - name = name[period+1:] - } - - name = strings.Replace(name, "·", ".", -1) - return pkg, name -} diff --git a/src/vendor/github.com/google/btree/.travis.yml b/src/vendor/github.com/google/btree/.travis.yml deleted file mode 100644 index 4f2ee4d97..000000000 --- a/src/vendor/github.com/google/btree/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/src/vendor/github.com/google/btree/LICENSE b/src/vendor/github.com/google/btree/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/src/vendor/github.com/google/btree/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/src/vendor/github.com/google/btree/README.md b/src/vendor/github.com/google/btree/README.md deleted file mode 100644 index 6062a4dac..000000000 --- a/src/vendor/github.com/google/btree/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# BTree implementation for Go - -![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master) - -This package provides an in-memory B-Tree implementation for Go, useful as -an ordered, mutable data structure. - -The API is based off of the wonderful -http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to -act as a drop-in replacement for gollrb trees. - -See http://godoc.org/github.com/google/btree for documentation. diff --git a/src/vendor/github.com/google/btree/btree.go b/src/vendor/github.com/google/btree/btree.go deleted file mode 100644 index b83acdbc6..000000000 --- a/src/vendor/github.com/google/btree/btree.go +++ /dev/null @@ -1,890 +0,0 @@ -// Copyright 2014 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package btree implements in-memory B-Trees of arbitrary degree. -// -// btree implements an in-memory B-Tree for use as an ordered data structure. -// It is not meant for persistent storage solutions. -// -// It has a flatter structure than an equivalent red-black or other binary tree, -// which in some cases yields better memory usage and/or performance. -// See some discussion on the matter here: -// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html -// Note, though, that this project is in no way related to the C++ B-Tree -// implementation written about there. -// -// Within this tree, each node contains a slice of items and a (possibly nil) -// slice of children. 
For basic numeric values or raw structs, this can cause -// efficiency differences when compared to equivalent C++ template code that -// stores values in arrays within the node: -// * Due to the overhead of storing values as interfaces (each -// value needs to be stored as the value itself, then 2 words for the -// interface pointing to that value and its type), resulting in higher -// memory use. -// * Since interfaces can point to values anywhere in memory, values are -// most likely not stored in contiguous blocks, resulting in a higher -// number of cache misses. -// These issues don't tend to matter, though, when working with strings or other -// heap-allocated structures, since C++-equivalent structures also must store -// pointers and also distribute their values across the heap. -// -// This implementation is designed to be a drop-in replacement to gollrb.LLRB -// trees, (http://github.com/petar/gollrb), an excellent and probably the most -// widely used ordered tree implementation in the Go ecosystem currently. -// Its functions, therefore, exactly mirror those of -// llrb.LLRB where possible. Unlike gollrb, though, we currently don't -// support storing multiple equivalent values. -package btree - -import ( - "fmt" - "io" - "sort" - "strings" - "sync" -) - -// Item represents a single object in the tree. -type Item interface { - // Less tests whether the current item is less than the given argument. - // - // This must provide a strict weak ordering. - // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only - // hold one of either a or b in the tree). - Less(than Item) bool -} - -const ( - DefaultFreeListSize = 32 -) - -var ( - nilItems = make(items, 16) - nilChildren = make(children, 16) -) - -// FreeList represents a free list of btree nodes. By default each -// BTree has its own FreeList, but multiple BTrees can share the same -// FreeList. -// Two Btrees using the same freelist are safe for concurrent write access. -type FreeList struct { - mu sync.Mutex - freelist []*node -} - -// NewFreeList creates a new free list. -// size is the maximum size of the returned free list. -func NewFreeList(size int) *FreeList { - return &FreeList{freelist: make([]*node, 0, size)} -} - -func (f *FreeList) newNode() (n *node) { - f.mu.Lock() - index := len(f.freelist) - 1 - if index < 0 { - f.mu.Unlock() - return new(node) - } - n = f.freelist[index] - f.freelist[index] = nil - f.freelist = f.freelist[:index] - f.mu.Unlock() - return -} - -// freeNode adds the given node to the list, returning true if it was added -// and false if it was discarded. -func (f *FreeList) freeNode(n *node) (out bool) { - f.mu.Lock() - if len(f.freelist) < cap(f.freelist) { - f.freelist = append(f.freelist, n) - out = true - } - f.mu.Unlock() - return -} - -// ItemIterator allows callers of Ascend* to iterate in-order over portions of -// the tree. When this function returns false, iteration will stop and the -// associated Ascend* function will immediately return. -type ItemIterator func(i Item) bool - -// New creates a new B-Tree with the given degree. -// -// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items -// and 2-4 children). -func New(degree int) *BTree { - return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize)) -} - -// NewWithFreeList creates a new B-Tree that uses the given node free list. 
-func NewWithFreeList(degree int, f *FreeList) *BTree { - if degree <= 1 { - panic("bad degree") - } - return &BTree{ - degree: degree, - cow: ©OnWriteContext{freelist: f}, - } -} - -// items stores items in a node. -type items []Item - -// insertAt inserts a value into the given index, pushing all subsequent values -// forward. -func (s *items) insertAt(index int, item Item) { - *s = append(*s, nil) - if index < len(*s) { - copy((*s)[index+1:], (*s)[index:]) - } - (*s)[index] = item -} - -// removeAt removes a value at a given index, pulling all subsequent values -// back. -func (s *items) removeAt(index int) Item { - item := (*s)[index] - copy((*s)[index:], (*s)[index+1:]) - (*s)[len(*s)-1] = nil - *s = (*s)[:len(*s)-1] - return item -} - -// pop removes and returns the last element in the list. -func (s *items) pop() (out Item) { - index := len(*s) - 1 - out = (*s)[index] - (*s)[index] = nil - *s = (*s)[:index] - return -} - -// truncate truncates this instance at index so that it contains only the -// first index items. index must be less than or equal to length. -func (s *items) truncate(index int) { - var toClear items - *s, toClear = (*s)[:index], (*s)[index:] - for len(toClear) > 0 { - toClear = toClear[copy(toClear, nilItems):] - } -} - -// find returns the index where the given item should be inserted into this -// list. 'found' is true if the item already exists in the list at the given -// index. -func (s items) find(item Item) (index int, found bool) { - i := sort.Search(len(s), func(i int) bool { - return item.Less(s[i]) - }) - if i > 0 && !s[i-1].Less(item) { - return i - 1, true - } - return i, false -} - -// children stores child nodes in a node. -type children []*node - -// insertAt inserts a value into the given index, pushing all subsequent values -// forward. -func (s *children) insertAt(index int, n *node) { - *s = append(*s, nil) - if index < len(*s) { - copy((*s)[index+1:], (*s)[index:]) - } - (*s)[index] = n -} - -// removeAt removes a value at a given index, pulling all subsequent values -// back. -func (s *children) removeAt(index int) *node { - n := (*s)[index] - copy((*s)[index:], (*s)[index+1:]) - (*s)[len(*s)-1] = nil - *s = (*s)[:len(*s)-1] - return n -} - -// pop removes and returns the last element in the list. -func (s *children) pop() (out *node) { - index := len(*s) - 1 - out = (*s)[index] - (*s)[index] = nil - *s = (*s)[:index] - return -} - -// truncate truncates this instance at index so that it contains only the -// first index children. index must be less than or equal to length. -func (s *children) truncate(index int) { - var toClear children - *s, toClear = (*s)[:index], (*s)[index:] - for len(toClear) > 0 { - toClear = toClear[copy(toClear, nilChildren):] - } -} - -// node is an internal node in a tree. 
-// -// It must at all times maintain the invariant that either -// * len(children) == 0, len(items) unconstrained -// * len(children) == len(items) + 1 -type node struct { - items items - children children - cow *copyOnWriteContext -} - -func (n *node) mutableFor(cow *copyOnWriteContext) *node { - if n.cow == cow { - return n - } - out := cow.newNode() - if cap(out.items) >= len(n.items) { - out.items = out.items[:len(n.items)] - } else { - out.items = make(items, len(n.items), cap(n.items)) - } - copy(out.items, n.items) - // Copy children - if cap(out.children) >= len(n.children) { - out.children = out.children[:len(n.children)] - } else { - out.children = make(children, len(n.children), cap(n.children)) - } - copy(out.children, n.children) - return out -} - -func (n *node) mutableChild(i int) *node { - c := n.children[i].mutableFor(n.cow) - n.children[i] = c - return c -} - -// split splits the given node at the given index. The current node shrinks, -// and this function returns the item that existed at that index and a new node -// containing all items/children after it. -func (n *node) split(i int) (Item, *node) { - item := n.items[i] - next := n.cow.newNode() - next.items = append(next.items, n.items[i+1:]...) - n.items.truncate(i) - if len(n.children) > 0 { - next.children = append(next.children, n.children[i+1:]...) - n.children.truncate(i + 1) - } - return item, next -} - -// maybeSplitChild checks if a child should be split, and if so splits it. -// Returns whether or not a split occurred. -func (n *node) maybeSplitChild(i, maxItems int) bool { - if len(n.children[i].items) < maxItems { - return false - } - first := n.mutableChild(i) - item, second := first.split(maxItems / 2) - n.items.insertAt(i, item) - n.children.insertAt(i+1, second) - return true -} - -// insert inserts an item into the subtree rooted at this node, making sure -// no nodes in the subtree exceed maxItems items. Should an equivalent item be -// be found/replaced by insert, it will be returned. -func (n *node) insert(item Item, maxItems int) Item { - i, found := n.items.find(item) - if found { - out := n.items[i] - n.items[i] = item - return out - } - if len(n.children) == 0 { - n.items.insertAt(i, item) - return nil - } - if n.maybeSplitChild(i, maxItems) { - inTree := n.items[i] - switch { - case item.Less(inTree): - // no change, we want first split node - case inTree.Less(item): - i++ // we want second split node - default: - out := n.items[i] - n.items[i] = item - return out - } - } - return n.mutableChild(i).insert(item, maxItems) -} - -// get finds the given key in the subtree and returns it. -func (n *node) get(key Item) Item { - i, found := n.items.find(key) - if found { - return n.items[i] - } else if len(n.children) > 0 { - return n.children[i].get(key) - } - return nil -} - -// min returns the first item in the subtree. -func min(n *node) Item { - if n == nil { - return nil - } - for len(n.children) > 0 { - n = n.children[0] - } - if len(n.items) == 0 { - return nil - } - return n.items[0] -} - -// max returns the last item in the subtree. -func max(n *node) Item { - if n == nil { - return nil - } - for len(n.children) > 0 { - n = n.children[len(n.children)-1] - } - if len(n.items) == 0 { - return nil - } - return n.items[len(n.items)-1] -} - -// toRemove details what item to remove in a node.remove call. 
-type toRemove int - -const ( - removeItem toRemove = iota // removes the given item - removeMin // removes smallest item in the subtree - removeMax // removes largest item in the subtree -) - -// remove removes an item from the subtree rooted at this node. -func (n *node) remove(item Item, minItems int, typ toRemove) Item { - var i int - var found bool - switch typ { - case removeMax: - if len(n.children) == 0 { - return n.items.pop() - } - i = len(n.items) - case removeMin: - if len(n.children) == 0 { - return n.items.removeAt(0) - } - i = 0 - case removeItem: - i, found = n.items.find(item) - if len(n.children) == 0 { - if found { - return n.items.removeAt(i) - } - return nil - } - default: - panic("invalid type") - } - // If we get to here, we have children. - if len(n.children[i].items) <= minItems { - return n.growChildAndRemove(i, item, minItems, typ) - } - child := n.mutableChild(i) - // Either we had enough items to begin with, or we've done some - // merging/stealing, because we've got enough now and we're ready to return - // stuff. - if found { - // The item exists at index 'i', and the child we've selected can give us a - // predecessor, since if we've gotten here it's got > minItems items in it. - out := n.items[i] - // We use our special-case 'remove' call with typ=maxItem to pull the - // predecessor of item i (the rightmost leaf of our immediate left child) - // and set it into where we pulled the item from. - n.items[i] = child.remove(nil, minItems, removeMax) - return out - } - // Final recursive call. Once we're here, we know that the item isn't in this - // node and that the child is big enough to remove from. - return child.remove(item, minItems, typ) -} - -// growChildAndRemove grows child 'i' to make sure it's possible to remove an -// item from it while keeping it at minItems, then calls remove to actually -// remove it. -// -// Most documentation says we have to do two sets of special casing: -// 1) item is in this node -// 2) item is in child -// In both cases, we need to handle the two subcases: -// A) node has enough values that it can spare one -// B) node doesn't have enough values -// For the latter, we have to check: -// a) left sibling has node to spare -// b) right sibling has node to spare -// c) we must merge -// To simplify our code here, we handle cases #1 and #2 the same: -// If a node doesn't have enough items, we make sure it does (using a,b,c). -// We then simply redo our remove call, and the second time (regardless of -// whether we're in case 1 or 2), we'll have enough items and can guarantee -// that we hit case A. 
-func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item { - if i > 0 && len(n.children[i-1].items) > minItems { - // Steal from left child - child := n.mutableChild(i) - stealFrom := n.mutableChild(i - 1) - stolenItem := stealFrom.items.pop() - child.items.insertAt(0, n.items[i-1]) - n.items[i-1] = stolenItem - if len(stealFrom.children) > 0 { - child.children.insertAt(0, stealFrom.children.pop()) - } - } else if i < len(n.items) && len(n.children[i+1].items) > minItems { - // steal from right child - child := n.mutableChild(i) - stealFrom := n.mutableChild(i + 1) - stolenItem := stealFrom.items.removeAt(0) - child.items = append(child.items, n.items[i]) - n.items[i] = stolenItem - if len(stealFrom.children) > 0 { - child.children = append(child.children, stealFrom.children.removeAt(0)) - } - } else { - if i >= len(n.items) { - i-- - } - child := n.mutableChild(i) - // merge with right child - mergeItem := n.items.removeAt(i) - mergeChild := n.children.removeAt(i + 1) - child.items = append(child.items, mergeItem) - child.items = append(child.items, mergeChild.items...) - child.children = append(child.children, mergeChild.children...) - n.cow.freeNode(mergeChild) - } - return n.remove(item, minItems, typ) -} - -type direction int - -const ( - descend = direction(-1) - ascend = direction(+1) -) - -// iterate provides a simple method for iterating over elements in the tree. -// -// When ascending, the 'start' should be less than 'stop' and when descending, -// the 'start' should be greater than 'stop'. Setting 'includeStart' to true -// will force the iterator to include the first item when it equals 'start', -// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a -// "greaterThan" or "lessThan" queries. -func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) { - var ok, found bool - var index int - switch dir { - case ascend: - if start != nil { - index, _ = n.items.find(start) - } - for i := index; i < len(n.items); i++ { - if len(n.children) > 0 { - if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - if !includeStart && !hit && start != nil && !start.Less(n.items[i]) { - hit = true - continue - } - hit = true - if stop != nil && !n.items[i].Less(stop) { - return hit, false - } - if !iter(n.items[i]) { - return hit, false - } - } - if len(n.children) > 0 { - if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - case descend: - if start != nil { - index, found = n.items.find(start) - if !found { - index = index - 1 - } - } else { - index = len(n.items) - 1 - } - for i := index; i >= 0; i-- { - if start != nil && !n.items[i].Less(start) { - if !includeStart || hit || start.Less(n.items[i]) { - continue - } - } - if len(n.children) > 0 { - if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - if stop != nil && !stop.Less(n.items[i]) { - return hit, false // continue - } - hit = true - if !iter(n.items[i]) { - return hit, false - } - } - if len(n.children) > 0 { - if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - } - return hit, true -} - -// Used for testing/debugging purposes. 
-func (n *node) print(w io.Writer, level int) { - fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) - for _, c := range n.children { - c.print(w, level+1) - } -} - -// BTree is an implementation of a B-Tree. -// -// BTree stores Item instances in an ordered structure, allowing easy insertion, -// removal, and iteration. -// -// Write operations are not safe for concurrent mutation by multiple -// goroutines, but Read operations are. -type BTree struct { - degree int - length int - root *node - cow *copyOnWriteContext -} - -// copyOnWriteContext pointers determine node ownership... a tree with a write -// context equivalent to a node's write context is allowed to modify that node. -// A tree whose write context does not match a node's is not allowed to modify -// it, and must create a new, writable copy (IE: it's a Clone). -// -// When doing any write operation, we maintain the invariant that the current -// node's context is equal to the context of the tree that requested the write. -// We do this by, before we descend into any node, creating a copy with the -// correct context if the contexts don't match. -// -// Since the node we're currently visiting on any write has the requesting -// tree's context, that node is modifiable in place. Children of that node may -// not share context, but before we descend into them, we'll make a mutable -// copy. -type copyOnWriteContext struct { - freelist *FreeList -} - -// Clone clones the btree, lazily. Clone should not be called concurrently, -// but the original tree (t) and the new tree (t2) can be used concurrently -// once the Clone call completes. -// -// The internal tree structure of b is marked read-only and shared between t and -// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes -// whenever one of b's original nodes would have been modified. Read operations -// should have no performance degredation. Write operations for both t and t2 -// will initially experience minor slow-downs caused by additional allocs and -// copies due to the aforementioned copy-on-write logic, but should converge to -// the original performance characteristics of the original tree. -func (t *BTree) Clone() (t2 *BTree) { - // Create two entirely new copy-on-write contexts. - // This operation effectively creates three trees: - // the original, shared nodes (old b.cow) - // the new b.cow nodes - // the new out.cow nodes - cow1, cow2 := *t.cow, *t.cow - out := *t - t.cow = &cow1 - out.cow = &cow2 - return &out -} - -// maxItems returns the max number of items to allow per node. -func (t *BTree) maxItems() int { - return t.degree*2 - 1 -} - -// minItems returns the min number of items to allow per node (ignored for the -// root node). -func (t *BTree) minItems() int { - return t.degree - 1 -} - -func (c *copyOnWriteContext) newNode() (n *node) { - n = c.freelist.newNode() - n.cow = c - return -} - -type freeType int - -const ( - ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist) - ftStored // node was stored in the freelist for later use - ftNotOwned // node was ignored by COW, since it's owned by another one -) - -// freeNode frees a node within a given COW context, if it's owned by that -// context. It returns what happened to the node (see freeType const -// documentation). 
-func (c *copyOnWriteContext) freeNode(n *node) freeType { - if n.cow == c { - // clear to allow GC - n.items.truncate(0) - n.children.truncate(0) - n.cow = nil - if c.freelist.freeNode(n) { - return ftStored - } else { - return ftFreelistFull - } - } else { - return ftNotOwned - } -} - -// ReplaceOrInsert adds the given item to the tree. If an item in the tree -// already equals the given one, it is removed from the tree and returned. -// Otherwise, nil is returned. -// -// nil cannot be added to the tree (will panic). -func (t *BTree) ReplaceOrInsert(item Item) Item { - if item == nil { - panic("nil item being added to BTree") - } - if t.root == nil { - t.root = t.cow.newNode() - t.root.items = append(t.root.items, item) - t.length++ - return nil - } else { - t.root = t.root.mutableFor(t.cow) - if len(t.root.items) >= t.maxItems() { - item2, second := t.root.split(t.maxItems() / 2) - oldroot := t.root - t.root = t.cow.newNode() - t.root.items = append(t.root.items, item2) - t.root.children = append(t.root.children, oldroot, second) - } - } - out := t.root.insert(item, t.maxItems()) - if out == nil { - t.length++ - } - return out -} - -// Delete removes an item equal to the passed in item from the tree, returning -// it. If no such item exists, returns nil. -func (t *BTree) Delete(item Item) Item { - return t.deleteItem(item, removeItem) -} - -// DeleteMin removes the smallest item in the tree and returns it. -// If no such item exists, returns nil. -func (t *BTree) DeleteMin() Item { - return t.deleteItem(nil, removeMin) -} - -// DeleteMax removes the largest item in the tree and returns it. -// If no such item exists, returns nil. -func (t *BTree) DeleteMax() Item { - return t.deleteItem(nil, removeMax) -} - -func (t *BTree) deleteItem(item Item, typ toRemove) Item { - if t.root == nil || len(t.root.items) == 0 { - return nil - } - t.root = t.root.mutableFor(t.cow) - out := t.root.remove(item, t.minItems(), typ) - if len(t.root.items) == 0 && len(t.root.children) > 0 { - oldroot := t.root - t.root = t.root.children[0] - t.cow.freeNode(oldroot) - } - if out != nil { - t.length-- - } - return out -} - -// AscendRange calls the iterator for every value in the tree within the range -// [greaterOrEqual, lessThan), until iterator returns false. -func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator) -} - -// AscendLessThan calls the iterator for every value in the tree within the range -// [first, pivot), until iterator returns false. -func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, nil, pivot, false, false, iterator) -} - -// AscendGreaterOrEqual calls the iterator for every value in the tree within -// the range [pivot, last], until iterator returns false. -func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, pivot, nil, true, false, iterator) -} - -// Ascend calls the iterator for every value in the tree within the range -// [first, last], until iterator returns false. -func (t *BTree) Ascend(iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, nil, nil, false, false, iterator) -} - -// DescendRange calls the iterator for every value in the tree within the range -// [lessOrEqual, greaterThan), until iterator returns false. 
-func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator) -} - -// DescendLessOrEqual calls the iterator for every value in the tree within the range -// [pivot, first], until iterator returns false. -func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, pivot, nil, true, false, iterator) -} - -// DescendGreaterThan calls the iterator for every value in the tree within -// the range [last, pivot), until iterator returns false. -func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, nil, pivot, false, false, iterator) -} - -// Descend calls the iterator for every value in the tree within the range -// [last, first], until iterator returns false. -func (t *BTree) Descend(iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, nil, nil, false, false, iterator) -} - -// Get looks for the key item in the tree, returning it. It returns nil if -// unable to find that item. -func (t *BTree) Get(key Item) Item { - if t.root == nil { - return nil - } - return t.root.get(key) -} - -// Min returns the smallest item in the tree, or nil if the tree is empty. -func (t *BTree) Min() Item { - return min(t.root) -} - -// Max returns the largest item in the tree, or nil if the tree is empty. -func (t *BTree) Max() Item { - return max(t.root) -} - -// Has returns true if the given key is in the tree. -func (t *BTree) Has(key Item) bool { - return t.Get(key) != nil -} - -// Len returns the number of items currently in the tree. -func (t *BTree) Len() int { - return t.length -} - -// Clear removes all items from the btree. If addNodesToFreelist is true, -// t's nodes are added to its freelist as part of this call, until the freelist -// is full. Otherwise, the root node is simply dereferenced and the subtree -// left to Go's normal GC processes. -// -// This can be much faster -// than calling Delete on all elements, because that requires finding/removing -// each element in the tree and updating the tree accordingly. It also is -// somewhat faster than creating a new tree to replace the old one, because -// nodes from the old tree are reclaimed into the freelist for use by the new -// one, instead of being lost to the garbage collector. -// -// This call takes: -// O(1): when addNodesToFreelist is false, this is a single operation. -// O(1): when the freelist is already full, it breaks out immediately -// O(freelist size): when the freelist is empty and the nodes are all owned -// by this tree, nodes are added to the freelist until full. -// O(tree size): when all nodes are owned by another tree, all nodes are -// iterated over looking for nodes to add to the freelist, and due to -// ownership, none are. -func (t *BTree) Clear(addNodesToFreelist bool) { - if t.root != nil && addNodesToFreelist { - t.root.reset(t.cow) - } - t.root, t.length = nil, 0 -} - -// reset returns a subtree to the freelist. It breaks out immediately if the -// freelist is full, since the only benefit of iterating is to fill that -// freelist up. Returns true if parent reset call should continue. 
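For reference, a minimal usage sketch of the google/btree API removed in this hunk; the degree and the inserted values are illustrative, and only identifiers defined in the deleted btree.go (New, ReplaceOrInsert, Get, Has, Len, AscendRange, Clone, Delete, Int) are called:

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	tr := btree.New(2) // degree 2 => a 2-3-4 tree
	for i := 0; i < 10; i++ {
		tr.ReplaceOrInsert(btree.Int(i)) // Int implements the Item interface
	}
	fmt.Println(tr.Len())               // 10
	fmt.Println(tr.Get(btree.Int(3)))   // 3
	fmt.Println(tr.Has(btree.Int(42)))  // false

	// Iterate in order over the half-open range [3, 7).
	tr.AscendRange(btree.Int(3), btree.Int(7), func(i btree.Item) bool {
		fmt.Println(i)
		return true // keep iterating
	})

	// Clone makes a lazy, copy-on-write copy; tr and cp can then diverge.
	cp := tr.Clone()
	cp.Delete(btree.Int(0))
	fmt.Println(tr.Len(), cp.Len()) // 10 9
}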
-func (n *node) reset(c *copyOnWriteContext) bool { - for _, child := range n.children { - if !child.reset(c) { - return false - } - } - return c.freeNode(n) != ftFreelistFull -} - -// Int implements the Item interface for integers. -type Int int - -// Less returns true if int(a) < int(b). -func (a Int) Less(b Item) bool { - return a < b.(Int) -} diff --git a/src/vendor/github.com/google/gnostic/LICENSE b/src/vendor/github.com/google/gnostic/LICENSE deleted file mode 100644 index 6b0b1270f..000000000 --- a/src/vendor/github.com/google/gnostic/LICENSE +++ /dev/null @@ -1,203 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/src/vendor/github.com/google/gnostic/compiler/README.md b/src/vendor/github.com/google/gnostic/compiler/README.md deleted file mode 100644 index ee9783d23..000000000 --- a/src/vendor/github.com/google/gnostic/compiler/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# Compiler support code - -This directory contains compiler support code used by Gnostic and Gnostic -extensions. diff --git a/src/vendor/github.com/google/gnostic/compiler/context.go b/src/vendor/github.com/google/gnostic/compiler/context.go deleted file mode 100644 index 1bfe96121..000000000 --- a/src/vendor/github.com/google/gnostic/compiler/context.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -import ( - yaml "gopkg.in/yaml.v3" -) - -// Context contains state of the compiler as it traverses a document. 
-type Context struct { - Parent *Context - Name string - Node *yaml.Node - ExtensionHandlers *[]ExtensionHandler -} - -// NewContextWithExtensions returns a new object representing the compiler state -func NewContextWithExtensions(name string, node *yaml.Node, parent *Context, extensionHandlers *[]ExtensionHandler) *Context { - return &Context{Name: name, Node: node, Parent: parent, ExtensionHandlers: extensionHandlers} -} - -// NewContext returns a new object representing the compiler state -func NewContext(name string, node *yaml.Node, parent *Context) *Context { - if parent != nil { - return &Context{Name: name, Node: node, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers} - } - return &Context{Name: name, Parent: parent, ExtensionHandlers: nil} -} - -// Description returns a text description of the compiler state -func (context *Context) Description() string { - name := context.Name - if context.Parent != nil { - name = context.Parent.Description() + "." + name - } - return name -} diff --git a/src/vendor/github.com/google/gnostic/compiler/error.go b/src/vendor/github.com/google/gnostic/compiler/error.go deleted file mode 100644 index 6f40515d6..000000000 --- a/src/vendor/github.com/google/gnostic/compiler/error.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -import "fmt" - -// Error represents compiler errors and their location in the document. -type Error struct { - Context *Context - Message string -} - -// NewError creates an Error. -func NewError(context *Context, message string) *Error { - return &Error{Context: context, Message: message} -} - -func (err *Error) locationDescription() string { - if err.Context.Node != nil { - return fmt.Sprintf("[%d,%d] %s", err.Context.Node.Line, err.Context.Node.Column, err.Context.Description()) - } - return err.Context.Description() -} - -// Error returns the string value of an Error. -func (err *Error) Error() string { - if err.Context == nil { - return err.Message - } - return err.locationDescription() + " " + err.Message -} - -// ErrorGroup is a container for groups of Error values. -type ErrorGroup struct { - Errors []error -} - -// NewErrorGroupOrNil returns a new ErrorGroup for a slice of errors or nil if the slice is empty. -func NewErrorGroupOrNil(errors []error) error { - if len(errors) == 0 { - return nil - } else if len(errors) == 1 { - return errors[0] - } else { - return &ErrorGroup{Errors: errors} - } -} - -func (group *ErrorGroup) Error() string { - result := "" - for i, err := range group.Errors { - if i > 0 { - result += "\n" - } - result += err.Error() - } - return result -} diff --git a/src/vendor/github.com/google/gnostic/compiler/extensions.go b/src/vendor/github.com/google/gnostic/compiler/extensions.go deleted file mode 100644 index 5b5a916d2..000000000 --- a/src/vendor/github.com/google/gnostic/compiler/extensions.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2017 Google LLC. 
All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -import ( - "bytes" - "fmt" - "os/exec" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" - yaml "gopkg.in/yaml.v3" - - extensions "github.com/google/gnostic/extensions" -) - -// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. -type ExtensionHandler struct { - Name string -} - -// CallExtension calls a binary extension handler. -func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *any.Any, err error) { - if context == nil || context.ExtensionHandlers == nil { - return false, nil, nil - } - handled = false - for _, handler := range *(context.ExtensionHandlers) { - response, err = handler.handle(in, extensionName) - if response == nil { - continue - } else { - handled = true - break - } - } - return handled, response, err -} - -func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*any.Any, error) { - if extensionHandlers.Name != "" { - yamlData, _ := yaml.Marshal(in) - request := &extensions.ExtensionHandlerRequest{ - CompilerVersion: &extensions.Version{ - Major: 0, - Minor: 1, - Patch: 0, - }, - Wrapper: &extensions.Wrapper{ - Version: "unknown", // TODO: set this to the type/version of spec being parsed. - Yaml: string(yamlData), - ExtensionName: extensionName, - }, - } - requestBytes, _ := proto.Marshal(request) - cmd := exec.Command(extensionHandlers.Name) - cmd.Stdin = bytes.NewReader(requestBytes) - output, err := cmd.Output() - if err != nil { - return nil, err - } - response := &extensions.ExtensionHandlerResponse{} - err = proto.Unmarshal(output, response) - if err != nil || !response.Handled { - return nil, err - } - if len(response.Errors) != 0 { - return nil, fmt.Errorf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Errors, ",")) - } - return response.Value, nil - } - return nil, nil -} diff --git a/src/vendor/github.com/google/gnostic/compiler/helpers.go b/src/vendor/github.com/google/gnostic/compiler/helpers.go deleted file mode 100644 index 97ffaa513..000000000 --- a/src/vendor/github.com/google/gnostic/compiler/helpers.go +++ /dev/null @@ -1,397 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
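For reference, a minimal sketch of how the extension mechanism deleted above (ExtensionHandler / CallExtension) is invoked; the YAML snippet and the handler binary name "gnostic-x-sample" are placeholders, and CallExtension will only return a handled response if such a binary exists on PATH:

package main

import (
	"fmt"

	"github.com/google/gnostic/compiler"
	"gopkg.in/yaml.v3"
)

func main() {
	// A vendor extension as it might appear in an OpenAPI document.
	var doc yaml.Node
	_ = yaml.Unmarshal([]byte("x-sample: {flag: true}\n"), &doc)
	value := doc.Content[0].Content[1] // value node of the x-sample entry

	handlers := []compiler.ExtensionHandler{{Name: "gnostic-x-sample"}}
	ctx := compiler.NewContextWithExtensions("$root", &doc, nil, &handlers)

	handled, resp, err := compiler.CallExtension(ctx, value, "x-sample")
	fmt.Println(handled, resp, err)
}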
- -package compiler - -import ( - "fmt" - "regexp" - "sort" - "strconv" - - "gopkg.in/yaml.v3" - - "github.com/google/gnostic/jsonschema" -) - -// compiler helper functions, usually called from generated code - -// UnpackMap gets a *yaml.Node if possible. -func UnpackMap(in *yaml.Node) (*yaml.Node, bool) { - if in == nil { - return nil, false - } - return in, true -} - -// SortedKeysForMap returns the sorted keys of a yamlv2.MapSlice. -func SortedKeysForMap(m *yaml.Node) []string { - keys := make([]string, 0) - if m.Kind == yaml.MappingNode { - for i := 0; i < len(m.Content); i += 2 { - keys = append(keys, m.Content[i].Value) - } - } - sort.Strings(keys) - return keys -} - -// MapHasKey returns true if a yamlv2.MapSlice contains a specified key. -func MapHasKey(m *yaml.Node, key string) bool { - if m == nil { - return false - } - if m.Kind == yaml.MappingNode { - for i := 0; i < len(m.Content); i += 2 { - itemKey := m.Content[i].Value - if key == itemKey { - return true - } - } - } - return false -} - -// MapValueForKey gets the value of a map value for a specified key. -func MapValueForKey(m *yaml.Node, key string) *yaml.Node { - if m == nil { - return nil - } - if m.Kind == yaml.MappingNode { - for i := 0; i < len(m.Content); i += 2 { - itemKey := m.Content[i].Value - if key == itemKey { - return m.Content[i+1] - } - } - } - return nil -} - -// ConvertInterfaceArrayToStringArray converts an array of interfaces to an array of strings, if possible. -func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string { - stringArray := make([]string, 0) - for _, item := range interfaceArray { - v, ok := item.(string) - if ok { - stringArray = append(stringArray, v) - } - } - return stringArray -} - -// SequenceNodeForNode returns a node if it is a SequenceNode. -func SequenceNodeForNode(node *yaml.Node) (*yaml.Node, bool) { - if node.Kind != yaml.SequenceNode { - return nil, false - } - return node, true -} - -// BoolForScalarNode returns the bool value of a node. -func BoolForScalarNode(node *yaml.Node) (bool, bool) { - if node == nil { - return false, false - } - if node.Kind == yaml.DocumentNode { - return BoolForScalarNode(node.Content[0]) - } - if node.Kind != yaml.ScalarNode { - return false, false - } - if node.Tag != "!!bool" { - return false, false - } - v, err := strconv.ParseBool(node.Value) - if err != nil { - return false, false - } - return v, true -} - -// IntForScalarNode returns the integer value of a node. -func IntForScalarNode(node *yaml.Node) (int64, bool) { - if node == nil { - return 0, false - } - if node.Kind == yaml.DocumentNode { - return IntForScalarNode(node.Content[0]) - } - if node.Kind != yaml.ScalarNode { - return 0, false - } - if node.Tag != "!!int" { - return 0, false - } - v, err := strconv.ParseInt(node.Value, 10, 64) - if err != nil { - return 0, false - } - return v, true -} - -// FloatForScalarNode returns the float value of a node. -func FloatForScalarNode(node *yaml.Node) (float64, bool) { - if node == nil { - return 0.0, false - } - if node.Kind == yaml.DocumentNode { - return FloatForScalarNode(node.Content[0]) - } - if node.Kind != yaml.ScalarNode { - return 0.0, false - } - if (node.Tag != "!!int") && (node.Tag != "!!float") { - return 0.0, false - } - v, err := strconv.ParseFloat(node.Value, 64) - if err != nil { - return 0.0, false - } - return v, true -} - -// StringForScalarNode returns the string value of a node. 
-func StringForScalarNode(node *yaml.Node) (string, bool) { - if node == nil { - return "", false - } - if node.Kind == yaml.DocumentNode { - return StringForScalarNode(node.Content[0]) - } - switch node.Kind { - case yaml.ScalarNode: - switch node.Tag { - case "!!int": - return node.Value, true - case "!!str": - return node.Value, true - case "!!timestamp": - return node.Value, true - case "!!null": - return "", true - default: - return "", false - } - default: - return "", false - } -} - -// StringArrayForSequenceNode converts a sequence node to an array of strings, if possible. -func StringArrayForSequenceNode(node *yaml.Node) []string { - stringArray := make([]string, 0) - for _, item := range node.Content { - v, ok := StringForScalarNode(item) - if ok { - stringArray = append(stringArray, v) - } - } - return stringArray -} - -// MissingKeysInMap identifies which keys from a list of required keys are not in a map. -func MissingKeysInMap(m *yaml.Node, requiredKeys []string) []string { - missingKeys := make([]string, 0) - for _, k := range requiredKeys { - if !MapHasKey(m, k) { - missingKeys = append(missingKeys, k) - } - } - return missingKeys -} - -// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns. -func InvalidKeysInMap(m *yaml.Node, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string { - invalidKeys := make([]string, 0) - if m == nil || m.Kind != yaml.MappingNode { - return invalidKeys - } - for i := 0; i < len(m.Content); i += 2 { - key := m.Content[i].Value - found := false - // does the key match an allowed key? - for _, allowedKey := range allowedKeys { - if key == allowedKey { - found = true - break - } - } - if !found { - // does the key match an allowed pattern? - for _, allowedPattern := range allowedPatterns { - if allowedPattern.MatchString(key) { - found = true - break - } - } - if !found { - invalidKeys = append(invalidKeys, key) - } - } - } - return invalidKeys -} - -// NewNullNode creates a new Null node. -func NewNullNode() *yaml.Node { - node := &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!null", - } - return node -} - -// NewMappingNode creates a new Mapping node. -func NewMappingNode() *yaml.Node { - return &yaml.Node{ - Kind: yaml.MappingNode, - Content: make([]*yaml.Node, 0), - } -} - -// NewSequenceNode creates a new Sequence node. -func NewSequenceNode() *yaml.Node { - node := &yaml.Node{ - Kind: yaml.SequenceNode, - Content: make([]*yaml.Node, 0), - } - return node -} - -// NewScalarNodeForString creates a new node to hold a string. -func NewScalarNodeForString(s string) *yaml.Node { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!str", - Value: s, - } -} - -// NewSequenceNodeForStringArray creates a new node to hold an array of strings. -func NewSequenceNodeForStringArray(strings []string) *yaml.Node { - node := &yaml.Node{ - Kind: yaml.SequenceNode, - Content: make([]*yaml.Node, 0), - } - for _, s := range strings { - node.Content = append(node.Content, NewScalarNodeForString(s)) - } - return node -} - -// NewScalarNodeForBool creates a new node to hold a bool. -func NewScalarNodeForBool(b bool) *yaml.Node { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!bool", - Value: fmt.Sprintf("%t", b), - } -} - -// NewScalarNodeForFloat creates a new node to hold a float. -func NewScalarNodeForFloat(f float64) *yaml.Node { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!float", - Value: fmt.Sprintf("%g", f), - } -} - -// NewScalarNodeForInt creates a new node to hold an integer. 
-func NewScalarNodeForInt(i int64) *yaml.Node { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!int", - Value: fmt.Sprintf("%d", i), - } -} - -// PluralProperties returns the string "properties" pluralized. -func PluralProperties(count int) string { - if count == 1 { - return "property" - } - return "properties" -} - -// StringArrayContainsValue returns true if a string array contains a specified value. -func StringArrayContainsValue(array []string, value string) bool { - for _, item := range array { - if item == value { - return true - } - } - return false -} - -// StringArrayContainsValues returns true if a string array contains all of a list of specified values. -func StringArrayContainsValues(array []string, values []string) bool { - for _, value := range values { - if !StringArrayContainsValue(array, value) { - return false - } - } - return true -} - -// StringValue returns the string value of an item. -func StringValue(item interface{}) (value string, ok bool) { - value, ok = item.(string) - if ok { - return value, ok - } - intValue, ok := item.(int) - if ok { - return strconv.Itoa(intValue), true - } - return "", false -} - -// Description returns a human-readable represention of an item. -func Description(item interface{}) string { - value, ok := item.(*yaml.Node) - if ok { - return jsonschema.Render(value) - } - return fmt.Sprintf("%+v", item) -} - -// Display returns a description of a node for use in error messages. -func Display(node *yaml.Node) string { - switch node.Kind { - case yaml.ScalarNode: - switch node.Tag { - case "!!str": - return fmt.Sprintf("%s (string)", node.Value) - } - } - return fmt.Sprintf("%+v (%T)", node, node) -} - -// Marshal creates a yaml version of a structure in our preferred style -func Marshal(in *yaml.Node) []byte { - clearStyle(in) - //bytes, _ := yaml.Marshal(&yaml.Node{Kind: yaml.DocumentNode, Content: []*yaml.Node{in}}) - bytes, _ := yaml.Marshal(in) - - return bytes -} - -func clearStyle(node *yaml.Node) { - node.Style = 0 - for _, c := range node.Content { - clearStyle(c) - } -} diff --git a/src/vendor/github.com/google/gnostic/compiler/main.go b/src/vendor/github.com/google/gnostic/compiler/main.go deleted file mode 100644 index ce9fcc456..000000000 --- a/src/vendor/github.com/google/gnostic/compiler/main.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package compiler provides support functions to generated compiler code. -package compiler diff --git a/src/vendor/github.com/google/gnostic/compiler/reader.go b/src/vendor/github.com/google/gnostic/compiler/reader.go deleted file mode 100644 index be0e8b40c..000000000 --- a/src/vendor/github.com/google/gnostic/compiler/reader.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
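The compiler/helpers.go functions removed above (MapHasKey, MapValueForKey, SortedKeysForMap, and friends) all rely on the same yaml.v3 detail: a MappingNode stores keys and values interleaved in Content, so each pair sits at indexes i and i+1. A small, hedged standalone sketch of that traversal pattern, using only gopkg.in/yaml.v3; the sample document is illustrative.

package main

import (
	"fmt"
	"log"

	yaml "gopkg.in/yaml.v3"
)

// valueForKey walks a yaml.v3 mapping node the way the deleted
// MapValueForKey helper does: keys at Content[i], values at Content[i+1].
func valueForKey(m *yaml.Node, key string) *yaml.Node {
	if m == nil || m.Kind != yaml.MappingNode {
		return nil
	}
	for i := 0; i < len(m.Content); i += 2 {
		if m.Content[i].Value == key {
			return m.Content[i+1]
		}
	}
	return nil
}

func main() {
	var doc yaml.Node
	if err := yaml.Unmarshal([]byte("title: sample\nversion: 3"), &doc); err != nil {
		log.Fatal(err)
	}
	// Unmarshal wraps the mapping in a DocumentNode; unwrap it first,
	// as the deleted scalar helpers do for DocumentNode inputs.
	root := doc.Content[0]
	if v := valueForKey(root, "title"); v != nil {
		fmt.Println(v.Value) // prints "sample"
	}
}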
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -import ( - "fmt" - "io/ioutil" - "log" - "net/http" - "net/url" - "path/filepath" - "strings" - "sync" - - yaml "gopkg.in/yaml.v3" -) - -var verboseReader = false - -var fileCache map[string][]byte -var infoCache map[string]*yaml.Node - -var fileCacheEnable = true -var infoCacheEnable = true - -// These locks are used to synchronize accesses to the fileCache and infoCache -// maps (above). They are global state and can throw thread-related errors -// when modified from separate goroutines. The general strategy is to protect -// all public functions in this file with mutex Lock() calls. As a result, to -// avoid deadlock, these public functions should not call other public -// functions, so some public functions have private equivalents. -// In the future, we might consider replacing the maps with sync.Map and -// eliminating these mutexes. -var fileCacheMutex sync.Mutex -var infoCacheMutex sync.Mutex - -func initializeFileCache() { - if fileCache == nil { - fileCache = make(map[string][]byte, 0) - } -} - -func initializeInfoCache() { - if infoCache == nil { - infoCache = make(map[string]*yaml.Node, 0) - } -} - -// EnableFileCache turns on file caching. -func EnableFileCache() { - fileCacheMutex.Lock() - defer fileCacheMutex.Unlock() - fileCacheEnable = true -} - -// EnableInfoCache turns on parsed info caching. -func EnableInfoCache() { - infoCacheMutex.Lock() - defer infoCacheMutex.Unlock() - infoCacheEnable = true -} - -// DisableFileCache turns off file caching. -func DisableFileCache() { - fileCacheMutex.Lock() - defer fileCacheMutex.Unlock() - fileCacheEnable = false -} - -// DisableInfoCache turns off parsed info caching. -func DisableInfoCache() { - infoCacheMutex.Lock() - defer infoCacheMutex.Unlock() - infoCacheEnable = false -} - -// RemoveFromFileCache removes an entry from the file cache. -func RemoveFromFileCache(fileurl string) { - fileCacheMutex.Lock() - defer fileCacheMutex.Unlock() - if !fileCacheEnable { - return - } - initializeFileCache() - delete(fileCache, fileurl) -} - -// RemoveFromInfoCache removes an entry from the info cache. -func RemoveFromInfoCache(filename string) { - infoCacheMutex.Lock() - defer infoCacheMutex.Unlock() - if !infoCacheEnable { - return - } - initializeInfoCache() - delete(infoCache, filename) -} - -// GetInfoCache returns the info cache map. -func GetInfoCache() map[string]*yaml.Node { - infoCacheMutex.Lock() - defer infoCacheMutex.Unlock() - if infoCache == nil { - initializeInfoCache() - } - return infoCache -} - -// ClearFileCache clears the file cache. -func ClearFileCache() { - fileCacheMutex.Lock() - defer fileCacheMutex.Unlock() - fileCache = make(map[string][]byte, 0) -} - -// ClearInfoCache clears the info cache. -func ClearInfoCache() { - infoCacheMutex.Lock() - defer infoCacheMutex.Unlock() - infoCache = make(map[string]*yaml.Node) -} - -// ClearCaches clears all caches. -func ClearCaches() { - ClearFileCache() - ClearInfoCache() -} - -// FetchFile gets a specified file from the local filesystem or a remote location. 
-func FetchFile(fileurl string) ([]byte, error) { - fileCacheMutex.Lock() - defer fileCacheMutex.Unlock() - return fetchFile(fileurl) -} - -func fetchFile(fileurl string) ([]byte, error) { - var bytes []byte - initializeFileCache() - if fileCacheEnable { - bytes, ok := fileCache[fileurl] - if ok { - if verboseReader { - log.Printf("Cache hit %s", fileurl) - } - return bytes, nil - } - if verboseReader { - log.Printf("Fetching %s", fileurl) - } - } - response, err := http.Get(fileurl) - if err != nil { - return nil, err - } - defer response.Body.Close() - if response.StatusCode != 200 { - return nil, fmt.Errorf("Error downloading %s: %s", fileurl, response.Status) - } - bytes, err = ioutil.ReadAll(response.Body) - if fileCacheEnable && err == nil { - fileCache[fileurl] = bytes - } - return bytes, err -} - -// ReadBytesForFile reads the bytes of a file. -func ReadBytesForFile(filename string) ([]byte, error) { - fileCacheMutex.Lock() - defer fileCacheMutex.Unlock() - return readBytesForFile(filename) -} - -func readBytesForFile(filename string) ([]byte, error) { - // is the filename a url? - fileurl, _ := url.Parse(filename) - if fileurl.Scheme != "" { - // yes, fetch it - bytes, err := fetchFile(filename) - if err != nil { - return nil, err - } - return bytes, nil - } - // no, it's a local filename - bytes, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - return bytes, nil -} - -// ReadInfoFromBytes unmarshals a file as a *yaml.Node. -func ReadInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) { - infoCacheMutex.Lock() - defer infoCacheMutex.Unlock() - return readInfoFromBytes(filename, bytes) -} - -func readInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) { - initializeInfoCache() - if infoCacheEnable { - cachedInfo, ok := infoCache[filename] - if ok { - if verboseReader { - log.Printf("Cache hit info for file %s", filename) - } - return cachedInfo, nil - } - if verboseReader { - log.Printf("Reading info for file %s", filename) - } - } - var info yaml.Node - err := yaml.Unmarshal(bytes, &info) - if err != nil { - return nil, err - } - if infoCacheEnable && len(filename) > 0 { - infoCache[filename] = &info - } - return &info, nil -} - -// ReadInfoForRef reads a file and return the fragment needed to resolve a $ref. 
-func ReadInfoForRef(basefile string, ref string) (*yaml.Node, error) { - fileCacheMutex.Lock() - defer fileCacheMutex.Unlock() - infoCacheMutex.Lock() - defer infoCacheMutex.Unlock() - initializeInfoCache() - if infoCacheEnable { - info, ok := infoCache[ref] - if ok { - if verboseReader { - log.Printf("Cache hit for ref %s#%s", basefile, ref) - } - return info, nil - } - if verboseReader { - log.Printf("Reading info for ref %s#%s", basefile, ref) - } - } - basedir, _ := filepath.Split(basefile) - parts := strings.Split(ref, "#") - var filename string - if parts[0] != "" { - filename = parts[0] - if _, err := url.ParseRequestURI(parts[0]); err != nil { - // It is not an URL, so the file is local - filename = basedir + parts[0] - } - } else { - filename = basefile - } - bytes, err := readBytesForFile(filename) - if err != nil { - return nil, err - } - info, err := readInfoFromBytes(filename, bytes) - if info != nil && info.Kind == yaml.DocumentNode { - info = info.Content[0] - } - if err != nil { - log.Printf("File error: %v\n", err) - } else { - if info == nil { - return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) - } - if len(parts) > 1 { - path := strings.Split(parts[1], "/") - for i, key := range path { - if i > 0 { - m := info - if true { - found := false - for i := 0; i < len(m.Content); i += 2 { - if m.Content[i].Value == key { - info = m.Content[i+1] - found = true - } - } - if !found { - infoCache[ref] = nil - return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) - } - } - } - } - } - } - if infoCacheEnable { - infoCache[ref] = info - } - return info, nil -} diff --git a/src/vendor/github.com/google/gnostic/extensions/README.md b/src/vendor/github.com/google/gnostic/extensions/README.md deleted file mode 100644 index 4b5d63e58..000000000 --- a/src/vendor/github.com/google/gnostic/extensions/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Extensions - -**Extension Support is experimental.** - -This directory contains support code for building Gnostic extensio handlers and -associated examples. - -Extension handlers can be used to compile vendor or specification extensions -into protocol buffer structures. - -Like plugins, extension handlers are built as separate executables. Extension -bodies are written to extension handlers as serialized -ExtensionHandlerRequests. diff --git a/src/vendor/github.com/google/gnostic/extensions/extension.pb.go b/src/vendor/github.com/google/gnostic/extensions/extension.pb.go deleted file mode 100644 index a6a4ccca6..000000000 --- a/src/vendor/github.com/google/gnostic/extensions/extension.pb.go +++ /dev/null @@ -1,461 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
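The deleted compiler/reader.go guards its two package-level caches with explicit mutexes and notes that sync.Map could eventually replace them. A brief, hedged sketch of what that alternative might look like for the file cache; the function and variable names here are illustrative, not gnostic's API, and concurrent first-time callers may each fetch once before the cache is populated.

package main

import (
	"fmt"
	"io"
	"net/http"
	"sync"
)

// fileCache maps URL -> []byte; sync.Map handles concurrent access
// without the explicit fileCacheMutex used in the deleted reader.go.
var fileCache sync.Map

// fetchFile returns cached bytes when present, otherwise downloads and caches them.
func fetchFile(fileurl string) ([]byte, error) {
	if cached, ok := fileCache.Load(fileurl); ok {
		return cached.([]byte), nil
	}
	resp, err := http.Get(fileurl)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("error downloading %s: %s", fileurl, resp.Status)
	}
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	fileCache.Store(fileurl, data)
	return data, nil
}

func main() {
	// Illustrative URL only.
	if data, err := fetchFile("https://example.com/openapi.yaml"); err == nil {
		fmt.Println(len(data), "bytes")
	}
}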
-// versions: -// protoc-gen-go v1.26.0 -// protoc v3.18.1 -// source: extensions/extension.proto - -package gnostic_extension_v1 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The version number of Gnostic. -type Version struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Major int32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` - Minor int32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` - Patch int32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"` - // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should - // be empty for mainline stable releases. - Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"` -} - -func (x *Version) Reset() { - *x = Version{} - if protoimpl.UnsafeEnabled { - mi := &file_extensions_extension_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Version) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Version) ProtoMessage() {} - -func (x *Version) ProtoReflect() protoreflect.Message { - mi := &file_extensions_extension_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Version.ProtoReflect.Descriptor instead. -func (*Version) Descriptor() ([]byte, []int) { - return file_extensions_extension_proto_rawDescGZIP(), []int{0} -} - -func (x *Version) GetMajor() int32 { - if x != nil { - return x.Major - } - return 0 -} - -func (x *Version) GetMinor() int32 { - if x != nil { - return x.Minor - } - return 0 -} - -func (x *Version) GetPatch() int32 { - if x != nil { - return x.Patch - } - return 0 -} - -func (x *Version) GetSuffix() string { - if x != nil { - return x.Suffix - } - return "" -} - -// An encoded Request is written to the ExtensionHandler's stdin. -type ExtensionHandlerRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The extension to process. - Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper,proto3" json:"wrapper,omitempty"` - // The version number of Gnostic. 
- CompilerVersion *Version `protobuf:"bytes,2,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"` -} - -func (x *ExtensionHandlerRequest) Reset() { - *x = ExtensionHandlerRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_extensions_extension_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExtensionHandlerRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExtensionHandlerRequest) ProtoMessage() {} - -func (x *ExtensionHandlerRequest) ProtoReflect() protoreflect.Message { - mi := &file_extensions_extension_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExtensionHandlerRequest.ProtoReflect.Descriptor instead. -func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { - return file_extensions_extension_proto_rawDescGZIP(), []int{1} -} - -func (x *ExtensionHandlerRequest) GetWrapper() *Wrapper { - if x != nil { - return x.Wrapper - } - return nil -} - -func (x *ExtensionHandlerRequest) GetCompilerVersion() *Version { - if x != nil { - return x.CompilerVersion - } - return nil -} - -// The extensions writes an encoded ExtensionHandlerResponse to stdout. -type ExtensionHandlerResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // true if the extension is handled by the extension handler; false otherwise - Handled bool `protobuf:"varint,1,opt,name=handled,proto3" json:"handled,omitempty"` - // Error message(s). If non-empty, the extension handling failed. - // The extension handler process should exit with status code zero - // even if it reports an error in this way. - // - // This should be used to indicate errors which prevent the extension from - // operating as intended. Errors which indicate a problem in gnostic - // itself -- such as the input Document being unparseable -- should be - // reported by writing a message to stderr and exiting with a non-zero - // status code. - Errors []string `protobuf:"bytes,2,rep,name=errors,proto3" json:"errors,omitempty"` - // text output - Value *anypb.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *ExtensionHandlerResponse) Reset() { - *x = ExtensionHandlerResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_extensions_extension_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExtensionHandlerResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExtensionHandlerResponse) ProtoMessage() {} - -func (x *ExtensionHandlerResponse) ProtoReflect() protoreflect.Message { - mi := &file_extensions_extension_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExtensionHandlerResponse.ProtoReflect.Descriptor instead. 
-func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { - return file_extensions_extension_proto_rawDescGZIP(), []int{2} -} - -func (x *ExtensionHandlerResponse) GetHandled() bool { - if x != nil { - return x.Handled - } - return false -} - -func (x *ExtensionHandlerResponse) GetErrors() []string { - if x != nil { - return x.Errors - } - return nil -} - -func (x *ExtensionHandlerResponse) GetValue() *anypb.Any { - if x != nil { - return x.Value - } - return nil -} - -type Wrapper struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // version of the OpenAPI specification in which this extension was written. - Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - // Name of the extension. - ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName,proto3" json:"extension_name,omitempty"` - // YAML-formatted extension value. - Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"` -} - -func (x *Wrapper) Reset() { - *x = Wrapper{} - if protoimpl.UnsafeEnabled { - mi := &file_extensions_extension_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Wrapper) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Wrapper) ProtoMessage() {} - -func (x *Wrapper) ProtoReflect() protoreflect.Message { - mi := &file_extensions_extension_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Wrapper.ProtoReflect.Descriptor instead. -func (*Wrapper) Descriptor() ([]byte, []int) { - return file_extensions_extension_proto_rawDescGZIP(), []int{3} -} - -func (x *Wrapper) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -func (x *Wrapper) GetExtensionName() string { - if x != nil { - return x.ExtensionName - } - return "" -} - -func (x *Wrapper) GetYaml() string { - if x != nil { - return x.Yaml - } - return "" -} - -var File_extensions_extension_proto protoreflect.FileDescriptor - -var file_extensions_extension_proto_rawDesc = []byte{ - 0x0a, 0x1a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, - 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, - 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, - 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, - 0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, - 0x69, 0x78, 0x22, 0x9c, 0x01, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x12, 0x37, - 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x52, 0x07, - 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69, - 0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x22, 0x78, 0x0a, 0x18, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x61, - 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, - 0x07, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, - 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, - 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5e, 0x0a, 0x07, 0x57, - 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x42, 0x4d, 0x0a, 0x0e, 0x6f, - 0x72, 0x67, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x47, - 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, - 0x01, 0x5a, 0x21, 0x2e, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3b, - 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x5f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x47, 0x4e, 0x58, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_extensions_extension_proto_rawDescOnce sync.Once - file_extensions_extension_proto_rawDescData = file_extensions_extension_proto_rawDesc -) - -func file_extensions_extension_proto_rawDescGZIP() []byte { - file_extensions_extension_proto_rawDescOnce.Do(func() { - file_extensions_extension_proto_rawDescData = protoimpl.X.CompressGZIP(file_extensions_extension_proto_rawDescData) - }) - return file_extensions_extension_proto_rawDescData -} - -var file_extensions_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_extensions_extension_proto_goTypes = []interface{}{ - (*Version)(nil), // 0: gnostic.extension.v1.Version - (*ExtensionHandlerRequest)(nil), // 1: gnostic.extension.v1.ExtensionHandlerRequest - (*ExtensionHandlerResponse)(nil), // 2: gnostic.extension.v1.ExtensionHandlerResponse - (*Wrapper)(nil), // 3: 
gnostic.extension.v1.Wrapper - (*anypb.Any)(nil), // 4: google.protobuf.Any -} -var file_extensions_extension_proto_depIdxs = []int32{ - 3, // 0: gnostic.extension.v1.ExtensionHandlerRequest.wrapper:type_name -> gnostic.extension.v1.Wrapper - 0, // 1: gnostic.extension.v1.ExtensionHandlerRequest.compiler_version:type_name -> gnostic.extension.v1.Version - 4, // 2: gnostic.extension.v1.ExtensionHandlerResponse.value:type_name -> google.protobuf.Any - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_extensions_extension_proto_init() } -func file_extensions_extension_proto_init() { - if File_extensions_extension_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_extensions_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Version); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_extensions_extension_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionHandlerRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_extensions_extension_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionHandlerResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_extensions_extension_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Wrapper); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_extensions_extension_proto_rawDesc, - NumEnums: 0, - NumMessages: 4, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_extensions_extension_proto_goTypes, - DependencyIndexes: file_extensions_extension_proto_depIdxs, - MessageInfos: file_extensions_extension_proto_msgTypes, - }.Build() - File_extensions_extension_proto = out.File - file_extensions_extension_proto_rawDesc = nil - file_extensions_extension_proto_goTypes = nil - file_extensions_extension_proto_depIdxs = nil -} diff --git a/src/vendor/github.com/google/gnostic/extensions/extension.proto b/src/vendor/github.com/google/gnostic/extensions/extension.proto deleted file mode 100644 index 875137c1a..000000000 --- a/src/vendor/github.com/google/gnostic/extensions/extension.proto +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package gnostic.extension.v1; - -import "google/protobuf/any.proto"; - -// This option lets the proto compiler generate Java code inside the package -// name (see below) instead of inside an outer class. It creates a simpler -// developer experience by reducing one-level of name nesting and be -// consistent with most programming languages that don't support outer classes. -option java_multiple_files = true; - -// The Java outer classname should be the filename in UpperCamelCase. This -// class is only used to hold proto descriptor, so developers don't need to -// work with it directly. -option java_outer_classname = "GnosticExtension"; - -// The Java package name must be proto package name with proper prefix. -option java_package = "org.gnostic.v1"; - -// A reasonable prefix for the Objective-C symbols generated from the package. -// It should at a minimum be 3 characters long, all uppercase, and convention -// is to use an abbreviation of the package name. Something short, but -// hopefully unique enough to not conflict with things that may come along in -// the future. 'GPB' is reserved for the protocol buffer implementation itself. -// -// "Gnostic Extension" -option objc_class_prefix = "GNX"; - -// The Go package name. -option go_package = "./extensions;gnostic_extension_v1"; - -// The version number of Gnostic. -message Version { - int32 major = 1; - int32 minor = 2; - int32 patch = 3; - // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should - // be empty for mainline stable releases. - string suffix = 4; -} - -// An encoded Request is written to the ExtensionHandler's stdin. -message ExtensionHandlerRequest { - - // The extension to process. - Wrapper wrapper = 1; - - // The version number of Gnostic. - Version compiler_version = 2; -} - -// The extensions writes an encoded ExtensionHandlerResponse to stdout. -message ExtensionHandlerResponse { - - // true if the extension is handled by the extension handler; false otherwise - bool handled = 1; - - // Error message(s). If non-empty, the extension handling failed. - // The extension handler process should exit with status code zero - // even if it reports an error in this way. - // - // This should be used to indicate errors which prevent the extension from - // operating as intended. Errors which indicate a problem in gnostic - // itself -- such as the input Document being unparseable -- should be - // reported by writing a message to stderr and exiting with a non-zero - // status code. - repeated string errors = 2; - - // text output - google.protobuf.Any value = 3; -} - -message Wrapper { - // version of the OpenAPI specification in which this extension was written. - string version = 1; - - // Name of the extension. - string extension_name = 2; - - // YAML-formatted extension value. - string yaml = 3; -} diff --git a/src/vendor/github.com/google/gnostic/extensions/extensions.go b/src/vendor/github.com/google/gnostic/extensions/extensions.go deleted file mode 100644 index ec8afd009..000000000 --- a/src/vendor/github.com/google/gnostic/extensions/extensions.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package gnostic_extension_v1 - -import ( - "io/ioutil" - "log" - "os" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" -) - -type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) - -// Main implements the main program of an extension handler. -func Main(handler extensionHandler) { - // unpack the request - data, err := ioutil.ReadAll(os.Stdin) - if err != nil { - log.Println("File error:", err.Error()) - os.Exit(1) - } - if len(data) == 0 { - log.Println("No input data.") - os.Exit(1) - } - request := &ExtensionHandlerRequest{} - err = proto.Unmarshal(data, request) - if err != nil { - log.Println("Input error:", err.Error()) - os.Exit(1) - } - // call the handler - handled, output, err := handler(request.Wrapper.ExtensionName, request.Wrapper.Yaml) - // respond with the output of the handler - response := &ExtensionHandlerResponse{ - Handled: false, // default assumption - Errors: make([]string, 0), - } - if err != nil { - response.Errors = append(response.Errors, err.Error()) - } else if handled { - response.Handled = true - response.Value, err = ptypes.MarshalAny(output) - if err != nil { - response.Errors = append(response.Errors, err.Error()) - } - } - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) -} diff --git a/src/vendor/github.com/google/gnostic/jsonschema/README.md b/src/vendor/github.com/google/gnostic/jsonschema/README.md deleted file mode 100644 index 6793c5179..000000000 --- a/src/vendor/github.com/google/gnostic/jsonschema/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# jsonschema - -This directory contains code for reading, writing, and manipulating JSON -schemas. diff --git a/src/vendor/github.com/google/gnostic/jsonschema/base.go b/src/vendor/github.com/google/gnostic/jsonschema/base.go deleted file mode 100644 index 0af8b148b..000000000 --- a/src/vendor/github.com/google/gnostic/jsonschema/base.go +++ /dev/null @@ -1,84 +0,0 @@ - -// THIS FILE IS AUTOMATICALLY GENERATED. 
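The extensions/README.md and the Main helper removed above describe the handler side of the protocol: a handler binary reads a serialized ExtensionHandlerRequest on stdin, decides whether it handles the named extension, and writes an ExtensionHandlerResponse to stdout. A minimal, hedged handler sketch built on the vendored Main helper; the "x-sample" extension name and the StringValue payload are purely illustrative.

package main

import (
	"github.com/golang/protobuf/proto"
	gnostic_extension_v1 "github.com/google/gnostic/extensions"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// handle is called once per extension occurrence with the extension's
// name and its raw YAML value.
func handle(name string, yamlInput string) (bool, proto.Message, error) {
	if name != "x-sample" {
		// Returning false tells gnostic this handler does not cover the extension.
		return false, nil, nil
	}
	// Echo the YAML back as a simple protobuf payload, just for illustration.
	return true, wrapperspb.String(yamlInput), nil
}

func main() {
	// Main reads the request from stdin and writes the response to stdout.
	gnostic_extension_v1.Main(handle)
}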
- -package jsonschema - -import ( - "encoding/base64" -) - -func baseSchemaBytes() ([]byte, error){ - return base64.StdEncoding.DecodeString( -`ewogICAgImlkIjogImh0dHA6Ly9qc29uLXNjaGVtYS5vcmcvZHJhZnQtMDQvc2NoZW1hIyIsCiAgICAi -JHNjaGVtYSI6ICJodHRwOi8vanNvbi1zY2hlbWEub3JnL2RyYWZ0LTA0L3NjaGVtYSMiLAogICAgImRl -c2NyaXB0aW9uIjogIkNvcmUgc2NoZW1hIG1ldGEtc2NoZW1hIiwKICAgICJkZWZpbml0aW9ucyI6IHsK -ICAgICAgICAic2NoZW1hQXJyYXkiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImFycmF5IiwKICAgICAg -ICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjIiB9CiAg -ICAgICAgfSwKICAgICAgICAicG9zaXRpdmVJbnRlZ2VyIjogewogICAgICAgICAgICAidHlwZSI6ICJp -bnRlZ2VyIiwKICAgICAgICAgICAgIm1pbmltdW0iOiAwCiAgICAgICAgfSwKICAgICAgICAicG9zaXRp -dmVJbnRlZ2VyRGVmYXVsdDAiOiB7CiAgICAgICAgICAgICJhbGxPZiI6IFsgeyAiJHJlZiI6ICIjL2Rl -ZmluaXRpb25zL3Bvc2l0aXZlSW50ZWdlciIgfSwgeyAiZGVmYXVsdCI6IDAgfSBdCiAgICAgICAgfSwK -ICAgICAgICAic2ltcGxlVHlwZXMiOiB7CiAgICAgICAgICAgICJlbnVtIjogWyAiYXJyYXkiLCAiYm9v -bGVhbiIsICJpbnRlZ2VyIiwgIm51bGwiLCAibnVtYmVyIiwgIm9iamVjdCIsICJzdHJpbmciIF0KICAg -ICAgICB9LAogICAgICAgICJzdHJpbmdBcnJheSI6IHsKICAgICAgICAgICAgInR5cGUiOiAiYXJyYXki -LAogICAgICAgICAgICAiaXRlbXMiOiB7ICJ0eXBlIjogInN0cmluZyIgfSwKICAgICAgICAgICAgIm1p -bkl0ZW1zIjogMSwKICAgICAgICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0KICAgIH0s -CiAgICAidHlwZSI6ICJvYmplY3QiLAogICAgInByb3BlcnRpZXMiOiB7CiAgICAgICAgImlkIjogewog -ICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAogICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAg -ICAgICB9LAogICAgICAgICIkc2NoZW1hIjogewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAog -ICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAgICAgICB9LAogICAgICAgICJ0aXRsZSI6IHsKICAg -ICAgICAgICAgInR5cGUiOiAic3RyaW5nIgogICAgICAgIH0sCiAgICAgICAgImRlc2NyaXB0aW9uIjog -ewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciCiAgICAgICAgfSwKICAgICAgICAiZGVmYXVsdCI6 -IHt9LAogICAgICAgICJtdWx0aXBsZU9mIjogewogICAgICAgICAgICAidHlwZSI6ICJudW1iZXIiLAog -ICAgICAgICAgICAibWluaW11bSI6IDAsCiAgICAgICAgICAgICJleGNsdXNpdmVNaW5pbXVtIjogdHJ1 -ZQogICAgICAgIH0sCiAgICAgICAgIm1heGltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJl -ciIKICAgICAgICB9LAogICAgICAgICJleGNsdXNpdmVNYXhpbXVtIjogewogICAgICAgICAgICAidHlw -ZSI6ICJib29sZWFuIiwKICAgICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAg -ICAgIm1pbmltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJlciIKICAgICAgICB9LAogICAg -ICAgICJleGNsdXNpdmVNaW5pbXVtIjogewogICAgICAgICAgICAidHlwZSI6ICJib29sZWFuIiwKICAg -ICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAgICAgIm1heExlbmd0aCI6IHsg -IiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pbkxlbmd0 -aCI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAg -ICAgICAicGF0dGVybiI6IHsKICAgICAgICAgICAgInR5cGUiOiAic3RyaW5nIiwKICAgICAgICAgICAg -ImZvcm1hdCI6ICJyZWdleCIKICAgICAgICB9LAogICAgICAgICJhZGRpdGlvbmFsSXRlbXMiOiB7CiAg -ICAgICAgICAgICJhbnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgInR5cGUiOiAiYm9vbGVhbiIgfSwK -ICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfQogICAgICAgICAgICBdLAogICAgICAgICAgICAi -ZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAiaXRlbXMiOiB7CiAgICAgICAgICAgICJhbnlP -ZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgICAgIHsgIiRy -ZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIgfQogICAgICAgICAgICBdLAogICAgICAgICAg -ICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAibWF4SXRlbXMiOiB7ICIkcmVmIjogIiMv -ZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyIiB9LAogICAgICAgICJtaW5JdGVtcyI6IHsgIiRyZWYi -OiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAgICAgICAidW5pcXVl -SXRlbXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImJvb2xlYW4iLAogICAgICAgICAgICAiZGVmYXVs 
-dCI6IGZhbHNlCiAgICAgICAgfSwKICAgICAgICAibWF4UHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIy9k -ZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pblByb3BlcnRpZXMiOiB7ICIk -cmVmIjogIiMvZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyRGVmYXVsdDAiIH0sCiAgICAgICAgInJl -cXVpcmVkIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3N0cmluZ0FycmF5IiB9LAogICAgICAgICJh -ZGRpdGlvbmFsUHJvcGVydGllcyI6IHsKICAgICAgICAgICAgImFueU9mIjogWwogICAgICAgICAgICAg -ICAgeyAidHlwZSI6ICJib29sZWFuIiB9LAogICAgICAgICAgICAgICAgeyAiJHJlZiI6ICIjIiB9CiAg -ICAgICAgICAgIF0sCiAgICAgICAgICAgICJkZWZhdWx0Ijoge30KICAgICAgICB9LAogICAgICAgICJk -ZWZpbml0aW9ucyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2JqZWN0IiwKICAgICAgICAgICAgImFk -ZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9LAogICAgICAgICAgICAiZGVmYXVsdCI6 -IHt9CiAgICAgICAgfSwKICAgICAgICAicHJvcGVydGllcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAi -b2JqZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9 -LAogICAgICAgICAgICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAicGF0dGVyblByb3Bl -cnRpZXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm9iamVjdCIsCiAgICAgICAgICAgICJhZGRpdGlv -bmFsUHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgImRlZmF1bHQiOiB7fQog -ICAgICAgIH0sCiAgICAgICAgImRlcGVuZGVuY2llcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2Jq -ZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogewogICAgICAgICAgICAgICAg -ImFueU9mIjogWwogICAgICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAg -ICAgICAgICB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc3RyaW5nQXJyYXkiIH0KICAgICAgICAgICAg -ICAgIF0KICAgICAgICAgICAgfQogICAgICAgIH0sCiAgICAgICAgImVudW0iOiB7CiAgICAgICAgICAg -ICJ0eXBlIjogImFycmF5IiwKICAgICAgICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgInVu -aXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0sCiAgICAgICAgInR5cGUiOiB7CiAgICAgICAgICAgICJh -bnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zaW1wbGVUeXBl -cyIgfSwKICAgICAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICAgICAidHlwZSI6ICJhcnJheSIs -CiAgICAgICAgICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NpbXBs -ZVR5cGVzIiB9LAogICAgICAgICAgICAgICAgICAgICJtaW5JdGVtcyI6IDEsCiAgICAgICAgICAgICAg -ICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgICAgICAgICAgfQogICAgICAgICAgICBdCiAg -ICAgICAgfSwKICAgICAgICAiYWxsT2YiOiB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc2NoZW1hQXJy -YXkiIH0sCiAgICAgICAgImFueU9mIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NjaGVtYUFycmF5 -IiB9LAogICAgICAgICJvbmVPZiI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIg -fSwKICAgICAgICAibm90IjogeyAiJHJlZiI6ICIjIiB9CiAgICB9LAogICAgImRlcGVuZGVuY2llcyI6 -IHsKICAgICAgICAiZXhjbHVzaXZlTWF4aW11bSI6IFsgIm1heGltdW0iIF0sCiAgICAgICAgImV4Y2x1 -c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)} \ No newline at end of file diff --git a/src/vendor/github.com/google/gnostic/jsonschema/display.go b/src/vendor/github.com/google/gnostic/jsonschema/display.go deleted file mode 100644 index 028a760a9..000000000 --- a/src/vendor/github.com/google/gnostic/jsonschema/display.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package jsonschema - -import ( - "fmt" - "strings" -) - -// -// DISPLAY -// The following methods display Schemas. -// - -// Description returns a string representation of a string or string array. -func (s *StringOrStringArray) Description() string { - if s.String != nil { - return *s.String - } - if s.StringArray != nil { - return strings.Join(*s.StringArray, ", ") - } - return "" -} - -// Returns a string representation of a Schema. -func (schema *Schema) String() string { - return schema.describeSchema("") -} - -// Helper: Returns a string representation of a Schema indented by a specified string. -func (schema *Schema) describeSchema(indent string) string { - result := "" - if schema.Schema != nil { - result += indent + "$schema: " + *(schema.Schema) + "\n" - } - if schema.ID != nil { - result += indent + "id: " + *(schema.ID) + "\n" - } - if schema.MultipleOf != nil { - result += indent + fmt.Sprintf("multipleOf: %+v\n", *(schema.MultipleOf)) - } - if schema.Maximum != nil { - result += indent + fmt.Sprintf("maximum: %+v\n", *(schema.Maximum)) - } - if schema.ExclusiveMaximum != nil { - result += indent + fmt.Sprintf("exclusiveMaximum: %+v\n", *(schema.ExclusiveMaximum)) - } - if schema.Minimum != nil { - result += indent + fmt.Sprintf("minimum: %+v\n", *(schema.Minimum)) - } - if schema.ExclusiveMinimum != nil { - result += indent + fmt.Sprintf("exclusiveMinimum: %+v\n", *(schema.ExclusiveMinimum)) - } - if schema.MaxLength != nil { - result += indent + fmt.Sprintf("maxLength: %+v\n", *(schema.MaxLength)) - } - if schema.MinLength != nil { - result += indent + fmt.Sprintf("minLength: %+v\n", *(schema.MinLength)) - } - if schema.Pattern != nil { - result += indent + fmt.Sprintf("pattern: %+v\n", *(schema.Pattern)) - } - if schema.AdditionalItems != nil { - s := schema.AdditionalItems.Schema - if s != nil { - result += indent + "additionalItems:\n" - result += s.describeSchema(indent + " ") - } else { - b := *(schema.AdditionalItems.Boolean) - result += indent + fmt.Sprintf("additionalItems: %+v\n", b) - } - } - if schema.Items != nil { - result += indent + "items:\n" - items := schema.Items - if items.SchemaArray != nil { - for i, s := range *(items.SchemaArray) { - result += indent + " " + fmt.Sprintf("%d", i) + ":\n" - result += s.describeSchema(indent + " " + " ") - } - } else if items.Schema != nil { - result += items.Schema.describeSchema(indent + " " + " ") - } - } - if schema.MaxItems != nil { - result += indent + fmt.Sprintf("maxItems: %+v\n", *(schema.MaxItems)) - } - if schema.MinItems != nil { - result += indent + fmt.Sprintf("minItems: %+v\n", *(schema.MinItems)) - } - if schema.UniqueItems != nil { - result += indent + fmt.Sprintf("uniqueItems: %+v\n", *(schema.UniqueItems)) - } - if schema.MaxProperties != nil { - result += indent + fmt.Sprintf("maxProperties: %+v\n", *(schema.MaxProperties)) - } - if schema.MinProperties != nil { - result += indent + fmt.Sprintf("minProperties: %+v\n", *(schema.MinProperties)) - } - if schema.Required != nil { - result += indent + fmt.Sprintf("required: %+v\n", *(schema.Required)) - } - if schema.AdditionalProperties != nil { - s := schema.AdditionalProperties.Schema - if s != nil { - result += indent + "additionalProperties:\n" - result += s.describeSchema(indent + " ") - } else { - b := *(schema.AdditionalProperties.Boolean) - result += indent + fmt.Sprintf("additionalProperties: %+v\n", b) - } - } - if schema.Properties != nil { - result += indent + "properties:\n" - for _, pair := range *(schema.Properties) { - name := pair.Name 
- s := pair.Value - result += indent + " " + name + ":\n" - result += s.describeSchema(indent + " " + " ") - } - } - if schema.PatternProperties != nil { - result += indent + "patternProperties:\n" - for _, pair := range *(schema.PatternProperties) { - name := pair.Name - s := pair.Value - result += indent + " " + name + ":\n" - result += s.describeSchema(indent + " " + " ") - } - } - if schema.Dependencies != nil { - result += indent + "dependencies:\n" - for _, pair := range *(schema.Dependencies) { - name := pair.Name - schemaOrStringArray := pair.Value - s := schemaOrStringArray.Schema - if s != nil { - result += indent + " " + name + ":\n" - result += s.describeSchema(indent + " " + " ") - } else { - a := schemaOrStringArray.StringArray - if a != nil { - result += indent + " " + name + ":\n" - for _, s2 := range *a { - result += indent + " " + " " + s2 + "\n" - } - } - } - - } - } - if schema.Enumeration != nil { - result += indent + "enumeration:\n" - for _, value := range *(schema.Enumeration) { - if value.String != nil { - result += indent + " " + fmt.Sprintf("%+v\n", *value.String) - } else { - result += indent + " " + fmt.Sprintf("%+v\n", *value.Bool) - } - } - } - if schema.Type != nil { - result += indent + fmt.Sprintf("type: %+v\n", schema.Type.Description()) - } - if schema.AllOf != nil { - result += indent + "allOf:\n" - for _, s := range *(schema.AllOf) { - result += s.describeSchema(indent + " ") - result += indent + "-\n" - } - } - if schema.AnyOf != nil { - result += indent + "anyOf:\n" - for _, s := range *(schema.AnyOf) { - result += s.describeSchema(indent + " ") - result += indent + "-\n" - } - } - if schema.OneOf != nil { - result += indent + "oneOf:\n" - for _, s := range *(schema.OneOf) { - result += s.describeSchema(indent + " ") - result += indent + "-\n" - } - } - if schema.Not != nil { - result += indent + "not:\n" - result += schema.Not.describeSchema(indent + " ") - } - if schema.Definitions != nil { - result += indent + "definitions:\n" - for _, pair := range *(schema.Definitions) { - name := pair.Name - s := pair.Value - result += indent + " " + name + ":\n" - result += s.describeSchema(indent + " " + " ") - } - } - if schema.Title != nil { - result += indent + "title: " + *(schema.Title) + "\n" - } - if schema.Description != nil { - result += indent + "description: " + *(schema.Description) + "\n" - } - if schema.Default != nil { - result += indent + "default:\n" - result += indent + fmt.Sprintf(" %+v\n", *(schema.Default)) - } - if schema.Format != nil { - result += indent + "format: " + *(schema.Format) + "\n" - } - if schema.Ref != nil { - result += indent + "$ref: " + *(schema.Ref) + "\n" - } - return result -} diff --git a/src/vendor/github.com/google/gnostic/jsonschema/models.go b/src/vendor/github.com/google/gnostic/jsonschema/models.go deleted file mode 100644 index 4781bdc5f..000000000 --- a/src/vendor/github.com/google/gnostic/jsonschema/models.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package jsonschema supports the reading, writing, and manipulation -// of JSON Schemas. -package jsonschema - -import "gopkg.in/yaml.v3" - -// The Schema struct models a JSON Schema and, because schemas are -// defined hierarchically, contains many references to itself. -// All fields are pointers and are nil if the associated values -// are not specified. -type Schema struct { - Schema *string // $schema - ID *string // id keyword used for $ref resolution scope - Ref *string // $ref, i.e. JSON Pointers - - // http://json-schema.org/latest/json-schema-validation.html - // 5.1. Validation keywords for numeric instances (number and integer) - MultipleOf *SchemaNumber - Maximum *SchemaNumber - ExclusiveMaximum *bool - Minimum *SchemaNumber - ExclusiveMinimum *bool - - // 5.2. Validation keywords for strings - MaxLength *int64 - MinLength *int64 - Pattern *string - - // 5.3. Validation keywords for arrays - AdditionalItems *SchemaOrBoolean - Items *SchemaOrSchemaArray - MaxItems *int64 - MinItems *int64 - UniqueItems *bool - - // 5.4. Validation keywords for objects - MaxProperties *int64 - MinProperties *int64 - Required *[]string - AdditionalProperties *SchemaOrBoolean - Properties *[]*NamedSchema - PatternProperties *[]*NamedSchema - Dependencies *[]*NamedSchemaOrStringArray - - // 5.5. Validation keywords for any instance type - Enumeration *[]SchemaEnumValue - Type *StringOrStringArray - AllOf *[]*Schema - AnyOf *[]*Schema - OneOf *[]*Schema - Not *Schema - Definitions *[]*NamedSchema - - // 6. Metadata keywords - Title *string - Description *string - Default *yaml.Node - - // 7. Semantic validation with "format" - Format *string -} - -// These helper structs represent "combination" types that generally can -// have values of one type or another. All are used to represent parts -// of Schemas. - -// SchemaNumber represents a value that can be either an Integer or a Float. -type SchemaNumber struct { - Integer *int64 - Float *float64 -} - -// NewSchemaNumberWithInteger creates and returns a new object -func NewSchemaNumberWithInteger(i int64) *SchemaNumber { - result := &SchemaNumber{} - result.Integer = &i - return result -} - -// NewSchemaNumberWithFloat creates and returns a new object -func NewSchemaNumberWithFloat(f float64) *SchemaNumber { - result := &SchemaNumber{} - result.Float = &f - return result -} - -// SchemaOrBoolean represents a value that can be either a Schema or a Boolean. -type SchemaOrBoolean struct { - Schema *Schema - Boolean *bool -} - -// NewSchemaOrBooleanWithSchema creates and returns a new object -func NewSchemaOrBooleanWithSchema(s *Schema) *SchemaOrBoolean { - result := &SchemaOrBoolean{} - result.Schema = s - return result -} - -// NewSchemaOrBooleanWithBoolean creates and returns a new object -func NewSchemaOrBooleanWithBoolean(b bool) *SchemaOrBoolean { - result := &SchemaOrBoolean{} - result.Boolean = &b - return result -} - -// StringOrStringArray represents a value that can be either -// a String or an Array of Strings. 
-type StringOrStringArray struct { - String *string - StringArray *[]string -} - -// NewStringOrStringArrayWithString creates and returns a new object -func NewStringOrStringArrayWithString(s string) *StringOrStringArray { - result := &StringOrStringArray{} - result.String = &s - return result -} - -// NewStringOrStringArrayWithStringArray creates and returns a new object -func NewStringOrStringArrayWithStringArray(a []string) *StringOrStringArray { - result := &StringOrStringArray{} - result.StringArray = &a - return result -} - -// SchemaOrStringArray represents a value that can be either -// a Schema or an Array of Strings. -type SchemaOrStringArray struct { - Schema *Schema - StringArray *[]string -} - -// SchemaOrSchemaArray represents a value that can be either -// a Schema or an Array of Schemas. -type SchemaOrSchemaArray struct { - Schema *Schema - SchemaArray *[]*Schema -} - -// NewSchemaOrSchemaArrayWithSchema creates and returns a new object -func NewSchemaOrSchemaArrayWithSchema(s *Schema) *SchemaOrSchemaArray { - result := &SchemaOrSchemaArray{} - result.Schema = s - return result -} - -// NewSchemaOrSchemaArrayWithSchemaArray creates and returns a new object -func NewSchemaOrSchemaArrayWithSchemaArray(a []*Schema) *SchemaOrSchemaArray { - result := &SchemaOrSchemaArray{} - result.SchemaArray = &a - return result -} - -// SchemaEnumValue represents a value that can be part of an -// enumeration in a Schema. -type SchemaEnumValue struct { - String *string - Bool *bool -} - -// NamedSchema is a name-value pair that is used to emulate maps -// with ordered keys. -type NamedSchema struct { - Name string - Value *Schema -} - -// NewNamedSchema creates and returns a new object -func NewNamedSchema(name string, value *Schema) *NamedSchema { - return &NamedSchema{Name: name, Value: value} -} - -// NamedSchemaOrStringArray is a name-value pair that is used -// to emulate maps with ordered keys. -type NamedSchemaOrStringArray struct { - Name string - Value *SchemaOrStringArray -} - -// Access named subschemas by name - -func namedSchemaArrayElementWithName(array *[]*NamedSchema, name string) *Schema { - if array == nil { - return nil - } - for _, pair := range *array { - if pair.Name == name { - return pair.Value - } - } - return nil -} - -// PropertyWithName returns the selected element. -func (s *Schema) PropertyWithName(name string) *Schema { - return namedSchemaArrayElementWithName(s.Properties, name) -} - -// PatternPropertyWithName returns the selected element. -func (s *Schema) PatternPropertyWithName(name string) *Schema { - return namedSchemaArrayElementWithName(s.PatternProperties, name) -} - -// DefinitionWithName returns the selected element. -func (s *Schema) DefinitionWithName(name string) *Schema { - return namedSchemaArrayElementWithName(s.Definitions, name) -} - -// AddProperty adds a named property. -func (s *Schema) AddProperty(name string, property *Schema) { - *s.Properties = append(*s.Properties, NewNamedSchema(name, property)) -} diff --git a/src/vendor/github.com/google/gnostic/jsonschema/operations.go b/src/vendor/github.com/google/gnostic/jsonschema/operations.go deleted file mode 100644 index ba8dd4a91..000000000 --- a/src/vendor/github.com/google/gnostic/jsonschema/operations.go +++ /dev/null @@ -1,394 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
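The jsonschema/models.go code removed above models every keyword as a pointer (nil means "not specified") and uses slices of NamedSchema pairs instead of maps so property order is preserved. A short, hedged usage sketch of that idiom against the vendored package; the "digest" and "mediaType" property names are illustrative only.

package main

import (
	"fmt"

	"github.com/google/gnostic/jsonschema"
)

func main() {
	// Every field is a pointer, so optional keywords stay nil until set.
	title := "sample"
	typ := jsonschema.NewStringOrStringArrayWithString("object")

	// Properties is a *[]*NamedSchema rather than a map, preserving key order.
	props := []*jsonschema.NamedSchema{
		jsonschema.NewNamedSchema("digest", &jsonschema.Schema{
			Type: jsonschema.NewStringOrStringArrayWithString("string"),
		}),
	}

	schema := &jsonschema.Schema{
		Title:      &title,
		Type:       typ,
		Properties: &props,
	}

	// AddProperty appends to the ordered pair list (Properties must be non-nil).
	schema.AddProperty("mediaType", &jsonschema.Schema{
		Type: jsonschema.NewStringOrStringArrayWithString("string"),
	})

	fmt.Println(schema.PropertyWithName("digest") != nil) // true
	fmt.Print(schema)                                     // String() renders the schema as indented text
}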
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jsonschema - -import ( - "fmt" - "log" - "strings" -) - -// -// OPERATIONS -// The following methods perform operations on Schemas. -// - -// IsEmpty returns true if no members of the Schema are specified. -func (schema *Schema) IsEmpty() bool { - return (schema.Schema == nil) && - (schema.ID == nil) && - (schema.MultipleOf == nil) && - (schema.Maximum == nil) && - (schema.ExclusiveMaximum == nil) && - (schema.Minimum == nil) && - (schema.ExclusiveMinimum == nil) && - (schema.MaxLength == nil) && - (schema.MinLength == nil) && - (schema.Pattern == nil) && - (schema.AdditionalItems == nil) && - (schema.Items == nil) && - (schema.MaxItems == nil) && - (schema.MinItems == nil) && - (schema.UniqueItems == nil) && - (schema.MaxProperties == nil) && - (schema.MinProperties == nil) && - (schema.Required == nil) && - (schema.AdditionalProperties == nil) && - (schema.Properties == nil) && - (schema.PatternProperties == nil) && - (schema.Dependencies == nil) && - (schema.Enumeration == nil) && - (schema.Type == nil) && - (schema.AllOf == nil) && - (schema.AnyOf == nil) && - (schema.OneOf == nil) && - (schema.Not == nil) && - (schema.Definitions == nil) && - (schema.Title == nil) && - (schema.Description == nil) && - (schema.Default == nil) && - (schema.Format == nil) && - (schema.Ref == nil) -} - -// IsEqual returns true if two schemas are equal. -func (schema *Schema) IsEqual(schema2 *Schema) bool { - return schema.String() == schema2.String() -} - -// SchemaOperation represents a function that can be applied to a Schema. -type SchemaOperation func(schema *Schema, context string) - -// Applies a specified function to a Schema and all of the Schemas that it contains. 
-func (schema *Schema) applyToSchemas(operation SchemaOperation, context string) { - - if schema.AdditionalItems != nil { - s := schema.AdditionalItems.Schema - if s != nil { - s.applyToSchemas(operation, "AdditionalItems") - } - } - - if schema.Items != nil { - if schema.Items.SchemaArray != nil { - for _, s := range *(schema.Items.SchemaArray) { - s.applyToSchemas(operation, "Items.SchemaArray") - } - } else if schema.Items.Schema != nil { - schema.Items.Schema.applyToSchemas(operation, "Items.Schema") - } - } - - if schema.AdditionalProperties != nil { - s := schema.AdditionalProperties.Schema - if s != nil { - s.applyToSchemas(operation, "AdditionalProperties") - } - } - - if schema.Properties != nil { - for _, pair := range *(schema.Properties) { - s := pair.Value - s.applyToSchemas(operation, "Properties") - } - } - if schema.PatternProperties != nil { - for _, pair := range *(schema.PatternProperties) { - s := pair.Value - s.applyToSchemas(operation, "PatternProperties") - } - } - - if schema.Dependencies != nil { - for _, pair := range *(schema.Dependencies) { - schemaOrStringArray := pair.Value - s := schemaOrStringArray.Schema - if s != nil { - s.applyToSchemas(operation, "Dependencies") - } - } - } - - if schema.AllOf != nil { - for _, s := range *(schema.AllOf) { - s.applyToSchemas(operation, "AllOf") - } - } - if schema.AnyOf != nil { - for _, s := range *(schema.AnyOf) { - s.applyToSchemas(operation, "AnyOf") - } - } - if schema.OneOf != nil { - for _, s := range *(schema.OneOf) { - s.applyToSchemas(operation, "OneOf") - } - } - if schema.Not != nil { - schema.Not.applyToSchemas(operation, "Not") - } - - if schema.Definitions != nil { - for _, pair := range *(schema.Definitions) { - s := pair.Value - s.applyToSchemas(operation, "Definitions") - } - } - - operation(schema, context) -} - -// CopyProperties copies all non-nil properties from the source Schema to the schema Schema. 
-func (schema *Schema) CopyProperties(source *Schema) { - if source.Schema != nil { - schema.Schema = source.Schema - } - if source.ID != nil { - schema.ID = source.ID - } - if source.MultipleOf != nil { - schema.MultipleOf = source.MultipleOf - } - if source.Maximum != nil { - schema.Maximum = source.Maximum - } - if source.ExclusiveMaximum != nil { - schema.ExclusiveMaximum = source.ExclusiveMaximum - } - if source.Minimum != nil { - schema.Minimum = source.Minimum - } - if source.ExclusiveMinimum != nil { - schema.ExclusiveMinimum = source.ExclusiveMinimum - } - if source.MaxLength != nil { - schema.MaxLength = source.MaxLength - } - if source.MinLength != nil { - schema.MinLength = source.MinLength - } - if source.Pattern != nil { - schema.Pattern = source.Pattern - } - if source.AdditionalItems != nil { - schema.AdditionalItems = source.AdditionalItems - } - if source.Items != nil { - schema.Items = source.Items - } - if source.MaxItems != nil { - schema.MaxItems = source.MaxItems - } - if source.MinItems != nil { - schema.MinItems = source.MinItems - } - if source.UniqueItems != nil { - schema.UniqueItems = source.UniqueItems - } - if source.MaxProperties != nil { - schema.MaxProperties = source.MaxProperties - } - if source.MinProperties != nil { - schema.MinProperties = source.MinProperties - } - if source.Required != nil { - schema.Required = source.Required - } - if source.AdditionalProperties != nil { - schema.AdditionalProperties = source.AdditionalProperties - } - if source.Properties != nil { - schema.Properties = source.Properties - } - if source.PatternProperties != nil { - schema.PatternProperties = source.PatternProperties - } - if source.Dependencies != nil { - schema.Dependencies = source.Dependencies - } - if source.Enumeration != nil { - schema.Enumeration = source.Enumeration - } - if source.Type != nil { - schema.Type = source.Type - } - if source.AllOf != nil { - schema.AllOf = source.AllOf - } - if source.AnyOf != nil { - schema.AnyOf = source.AnyOf - } - if source.OneOf != nil { - schema.OneOf = source.OneOf - } - if source.Not != nil { - schema.Not = source.Not - } - if source.Definitions != nil { - schema.Definitions = source.Definitions - } - if source.Title != nil { - schema.Title = source.Title - } - if source.Description != nil { - schema.Description = source.Description - } - if source.Default != nil { - schema.Default = source.Default - } - if source.Format != nil { - schema.Format = source.Format - } - if source.Ref != nil { - schema.Ref = source.Ref - } -} - -// TypeIs returns true if the Type of a Schema includes the specified type -func (schema *Schema) TypeIs(typeName string) bool { - if schema.Type != nil { - // the schema Type is either a string or an array of strings - if schema.Type.String != nil { - return (*(schema.Type.String) == typeName) - } else if schema.Type.StringArray != nil { - for _, n := range *(schema.Type.StringArray) { - if n == typeName { - return true - } - } - } - } - return false -} - -// ResolveRefs resolves "$ref" elements in a Schema and its children. -// But if a reference refers to an object type, is inside a oneOf, or contains a oneOf, -// the reference is kept and we expect downstream tools to separately model these -// referenced schemas. 
-func (schema *Schema) ResolveRefs() { - rootSchema := schema - count := 1 - for count > 0 { - count = 0 - schema.applyToSchemas( - func(schema *Schema, context string) { - if schema.Ref != nil { - resolvedRef, err := rootSchema.resolveJSONPointer(*(schema.Ref)) - if err != nil { - log.Printf("%+v", err) - } else if resolvedRef.TypeIs("object") { - // don't substitute for objects, we'll model the referenced schema with a class - } else if context == "OneOf" { - // don't substitute for references inside oneOf declarations - } else if resolvedRef.OneOf != nil { - // don't substitute for references that contain oneOf declarations - } else if resolvedRef.AdditionalProperties != nil { - // don't substitute for references that look like objects - } else { - schema.Ref = nil - schema.CopyProperties(resolvedRef) - count++ - } - } - }, "") - } -} - -// resolveJSONPointer resolves JSON pointers. -// This current implementation is very crude and custom for OpenAPI 2.0 schemas. -// It panics for any pointer that it is unable to resolve. -func (schema *Schema) resolveJSONPointer(ref string) (result *Schema, err error) { - parts := strings.Split(ref, "#") - if len(parts) == 2 { - documentName := parts[0] + "#" - if documentName == "#" && schema.ID != nil { - documentName = *(schema.ID) - } - path := parts[1] - document := schemas[documentName] - pathParts := strings.Split(path, "/") - - // we currently do a very limited (hard-coded) resolution of certain paths and log errors for missed cases - if len(pathParts) == 1 { - return document, nil - } else if len(pathParts) == 3 { - switch pathParts[1] { - case "definitions": - dictionary := document.Definitions - for _, pair := range *dictionary { - if pair.Name == pathParts[2] { - result = pair.Value - } - } - case "properties": - dictionary := document.Properties - for _, pair := range *dictionary { - if pair.Name == pathParts[2] { - result = pair.Value - } - } - default: - break - } - } - } - if result == nil { - return nil, fmt.Errorf("unresolved pointer: %+v", ref) - } - return result, nil -} - -// ResolveAllOfs replaces "allOf" elements by merging their properties into the parent Schema. -func (schema *Schema) ResolveAllOfs() { - schema.applyToSchemas( - func(schema *Schema, context string) { - if schema.AllOf != nil { - for _, allOf := range *(schema.AllOf) { - schema.CopyProperties(allOf) - } - schema.AllOf = nil - } - }, "resolveAllOfs") -} - -// ResolveAnyOfs replaces all "anyOf" elements with "oneOf". 
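Editorial sketch, not part of the patch: a typical chaining of the resolution helpers in this file. The input file name is hypothetical, and it is assumed to declare a top-level "id", since the crude JSON-pointer resolver above registers and looks up documents by their id.

package main

import (
	"fmt"
	"log"

	"github.com/google/gnostic/jsonschema"
)

func main() {
	// Hypothetical file; it should carry an "id" so resolveJSONPointer can find it.
	schema, err := jsonschema.NewSchemaFromFile("my-schema.json")
	if err != nil {
		log.Fatal(err)
	}
	schema.ResolveRefs()   // substitute resolvable, non-object $ref targets in place
	schema.ResolveAllOfs() // merge allOf members into their parent schemas
	schema.ResolveAnyOfs() // rewrite anyOf as oneOf
	fmt.Println(schema.JSONString())
}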
-func (schema *Schema) ResolveAnyOfs() { - schema.applyToSchemas( - func(schema *Schema, context string) { - if schema.AnyOf != nil { - schema.OneOf = schema.AnyOf - schema.AnyOf = nil - } - }, "resolveAnyOfs") -} - -// return a pointer to a copy of a passed-in string -func stringptr(input string) (output *string) { - return &input -} - -// CopyOfficialSchemaProperty copies a named property from the official JSON Schema definition -func (schema *Schema) CopyOfficialSchemaProperty(name string) { - *schema.Properties = append(*schema.Properties, - NewNamedSchema(name, - &Schema{Ref: stringptr("http://json-schema.org/draft-04/schema#/properties/" + name)})) -} - -// CopyOfficialSchemaProperties copies named properties from the official JSON Schema definition -func (schema *Schema) CopyOfficialSchemaProperties(names []string) { - for _, name := range names { - schema.CopyOfficialSchemaProperty(name) - } -} diff --git a/src/vendor/github.com/google/gnostic/jsonschema/reader.go b/src/vendor/github.com/google/gnostic/jsonschema/reader.go deleted file mode 100644 index b8583d466..000000000 --- a/src/vendor/github.com/google/gnostic/jsonschema/reader.go +++ /dev/null @@ -1,442 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:generate go run generate-base.go - -package jsonschema - -import ( - "fmt" - "io/ioutil" - "strconv" - - "gopkg.in/yaml.v3" -) - -// This is a global map of all known Schemas. -// It is initialized when the first Schema is created and inserted. -var schemas map[string]*Schema - -// NewBaseSchema builds a schema object from an embedded json representation. -func NewBaseSchema() (schema *Schema, err error) { - b, err := baseSchemaBytes() - if err != nil { - return nil, err - } - var node yaml.Node - err = yaml.Unmarshal(b, &node) - if err != nil { - return nil, err - } - return NewSchemaFromObject(&node), nil -} - -// NewSchemaFromFile reads a schema from a file. -// Currently this assumes that schemas are stored in the source distribution of this project. -func NewSchemaFromFile(filename string) (schema *Schema, err error) { - file, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - var node yaml.Node - err = yaml.Unmarshal(file, &node) - if err != nil { - return nil, err - } - return NewSchemaFromObject(&node), nil -} - -// NewSchemaFromObject constructs a schema from a parsed JSON object. -// Due to the complexity of the schema representation, this is a -// custom reader and not the standard Go JSON reader (encoding/json). 
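Editorial sketch, not part of the patch: because NewSchemaFromObject takes a yaml.Node, in-memory JSON or YAML can be parsed the same way NewSchemaFromFile does it above, by unmarshalling into a yaml.Node first (JSON is accepted by gopkg.in/yaml.v3 as a YAML subset).

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"

	"github.com/google/gnostic/jsonschema"
)

func main() {
	data := []byte(`{"type": "object", "required": ["name"]}`)

	var node yaml.Node
	if err := yaml.Unmarshal(data, &node); err != nil {
		panic(err)
	}

	// NewSchemaFromObject unwraps the DocumentNode itself and walks the mapping.
	schema := jsonschema.NewSchemaFromObject(&node)
	fmt.Println(schema.JSONString())
}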
-func NewSchemaFromObject(jsonData *yaml.Node) *Schema { - switch jsonData.Kind { - case yaml.DocumentNode: - return NewSchemaFromObject(jsonData.Content[0]) - case yaml.MappingNode: - schema := &Schema{} - - for i := 0; i < len(jsonData.Content); i += 2 { - k := jsonData.Content[i].Value - v := jsonData.Content[i+1] - - switch k { - case "$schema": - schema.Schema = schema.stringValue(v) - case "id": - schema.ID = schema.stringValue(v) - - case "multipleOf": - schema.MultipleOf = schema.numberValue(v) - case "maximum": - schema.Maximum = schema.numberValue(v) - case "exclusiveMaximum": - schema.ExclusiveMaximum = schema.boolValue(v) - case "minimum": - schema.Minimum = schema.numberValue(v) - case "exclusiveMinimum": - schema.ExclusiveMinimum = schema.boolValue(v) - - case "maxLength": - schema.MaxLength = schema.intValue(v) - case "minLength": - schema.MinLength = schema.intValue(v) - case "pattern": - schema.Pattern = schema.stringValue(v) - - case "additionalItems": - schema.AdditionalItems = schema.schemaOrBooleanValue(v) - case "items": - schema.Items = schema.schemaOrSchemaArrayValue(v) - case "maxItems": - schema.MaxItems = schema.intValue(v) - case "minItems": - schema.MinItems = schema.intValue(v) - case "uniqueItems": - schema.UniqueItems = schema.boolValue(v) - - case "maxProperties": - schema.MaxProperties = schema.intValue(v) - case "minProperties": - schema.MinProperties = schema.intValue(v) - case "required": - schema.Required = schema.arrayOfStringsValue(v) - case "additionalProperties": - schema.AdditionalProperties = schema.schemaOrBooleanValue(v) - case "properties": - schema.Properties = schema.mapOfSchemasValue(v) - case "patternProperties": - schema.PatternProperties = schema.mapOfSchemasValue(v) - case "dependencies": - schema.Dependencies = schema.mapOfSchemasOrStringArraysValue(v) - - case "enum": - schema.Enumeration = schema.arrayOfEnumValuesValue(v) - - case "type": - schema.Type = schema.stringOrStringArrayValue(v) - case "allOf": - schema.AllOf = schema.arrayOfSchemasValue(v) - case "anyOf": - schema.AnyOf = schema.arrayOfSchemasValue(v) - case "oneOf": - schema.OneOf = schema.arrayOfSchemasValue(v) - case "not": - schema.Not = NewSchemaFromObject(v) - case "definitions": - schema.Definitions = schema.mapOfSchemasValue(v) - - case "title": - schema.Title = schema.stringValue(v) - case "description": - schema.Description = schema.stringValue(v) - - case "default": - schema.Default = v - - case "format": - schema.Format = schema.stringValue(v) - case "$ref": - schema.Ref = schema.stringValue(v) - default: - fmt.Printf("UNSUPPORTED (%s)\n", k) - } - } - - // insert schema in global map - if schema.ID != nil { - if schemas == nil { - schemas = make(map[string]*Schema, 0) - } - schemas[*(schema.ID)] = schema - } - return schema - - default: - fmt.Printf("schemaValue: unexpected node %+v\n", jsonData) - return nil - } - - return nil -} - -// -// BUILDERS -// The following methods build elements of Schemas from interface{} values. -// Each returns nil if it is unable to build the desired element. -// - -// Gets the string value of an interface{} value if possible. -func (schema *Schema) stringValue(v *yaml.Node) *string { - switch v.Kind { - case yaml.ScalarNode: - return &v.Value - default: - fmt.Printf("stringValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets the numeric value of an interface{} value if possible. 
-func (schema *Schema) numberValue(v *yaml.Node) *SchemaNumber { - number := &SchemaNumber{} - switch v.Kind { - case yaml.ScalarNode: - switch v.Tag { - case "!!float": - v2, _ := strconv.ParseFloat(v.Value, 64) - number.Float = &v2 - return number - case "!!int": - v2, _ := strconv.ParseInt(v.Value, 10, 64) - number.Integer = &v2 - return number - default: - fmt.Printf("stringValue: unexpected node %+v\n", v) - } - default: - fmt.Printf("stringValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets the integer value of an interface{} value if possible. -func (schema *Schema) intValue(v *yaml.Node) *int64 { - switch v.Kind { - case yaml.ScalarNode: - switch v.Tag { - case "!!float": - v2, _ := strconv.ParseFloat(v.Value, 64) - v3 := int64(v2) - return &v3 - case "!!int": - v2, _ := strconv.ParseInt(v.Value, 10, 64) - return &v2 - default: - fmt.Printf("intValue: unexpected node %+v\n", v) - } - default: - fmt.Printf("intValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets the bool value of an interface{} value if possible. -func (schema *Schema) boolValue(v *yaml.Node) *bool { - switch v.Kind { - case yaml.ScalarNode: - switch v.Tag { - case "!!bool": - v2, _ := strconv.ParseBool(v.Value) - return &v2 - default: - fmt.Printf("boolValue: unexpected node %+v\n", v) - } - default: - fmt.Printf("boolValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets a map of Schemas from an interface{} value if possible. -func (schema *Schema) mapOfSchemasValue(v *yaml.Node) *[]*NamedSchema { - switch v.Kind { - case yaml.MappingNode: - m := make([]*NamedSchema, 0) - for i := 0; i < len(v.Content); i += 2 { - k2 := v.Content[i].Value - v2 := v.Content[i+1] - pair := &NamedSchema{Name: k2, Value: NewSchemaFromObject(v2)} - m = append(m, pair) - } - return &m - default: - fmt.Printf("mapOfSchemasValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets an array of Schemas from an interface{} value if possible. -func (schema *Schema) arrayOfSchemasValue(v *yaml.Node) *[]*Schema { - switch v.Kind { - case yaml.SequenceNode: - m := make([]*Schema, 0) - for _, v2 := range v.Content { - switch v2.Kind { - case yaml.MappingNode: - s := NewSchemaFromObject(v2) - m = append(m, s) - default: - fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v2) - } - } - return &m - case yaml.MappingNode: - m := make([]*Schema, 0) - s := NewSchemaFromObject(v) - m = append(m, s) - return &m - default: - fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets a Schema or an array of Schemas from an interface{} value if possible. -func (schema *Schema) schemaOrSchemaArrayValue(v *yaml.Node) *SchemaOrSchemaArray { - switch v.Kind { - case yaml.SequenceNode: - m := make([]*Schema, 0) - for _, v2 := range v.Content { - switch v2.Kind { - case yaml.MappingNode: - s := NewSchemaFromObject(v2) - m = append(m, s) - default: - fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v2) - } - } - return &SchemaOrSchemaArray{SchemaArray: &m} - case yaml.MappingNode: - s := NewSchemaFromObject(v) - return &SchemaOrSchemaArray{Schema: s} - default: - fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets an array of strings from an interface{} value if possible. 
-func (schema *Schema) arrayOfStringsValue(v *yaml.Node) *[]string { - switch v.Kind { - case yaml.ScalarNode: - a := []string{v.Value} - return &a - case yaml.SequenceNode: - a := make([]string, 0) - for _, v2 := range v.Content { - switch v2.Kind { - case yaml.ScalarNode: - a = append(a, v2.Value) - default: - fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2) - } - } - return &a - default: - fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets a string or an array of strings from an interface{} value if possible. -func (schema *Schema) stringOrStringArrayValue(v *yaml.Node) *StringOrStringArray { - switch v.Kind { - case yaml.ScalarNode: - s := &StringOrStringArray{} - s.String = &v.Value - return s - case yaml.SequenceNode: - a := make([]string, 0) - for _, v2 := range v.Content { - switch v2.Kind { - case yaml.ScalarNode: - a = append(a, v2.Value) - default: - fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2) - } - } - s := &StringOrStringArray{} - s.StringArray = &a - return s - default: - fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets an array of enum values from an interface{} value if possible. -func (schema *Schema) arrayOfEnumValuesValue(v *yaml.Node) *[]SchemaEnumValue { - a := make([]SchemaEnumValue, 0) - switch v.Kind { - case yaml.SequenceNode: - for _, v2 := range v.Content { - switch v2.Kind { - case yaml.ScalarNode: - switch v2.Tag { - case "!!str": - a = append(a, SchemaEnumValue{String: &v2.Value}) - case "!!bool": - v3, _ := strconv.ParseBool(v2.Value) - a = append(a, SchemaEnumValue{Bool: &v3}) - default: - fmt.Printf("arrayOfEnumValuesValue: unexpected type %s\n", v2.Tag) - } - default: - fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v2) - } - } - default: - fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v) - } - return &a -} - -// Gets a map of schemas or string arrays from an interface{} value if possible. -func (schema *Schema) mapOfSchemasOrStringArraysValue(v *yaml.Node) *[]*NamedSchemaOrStringArray { - m := make([]*NamedSchemaOrStringArray, 0) - switch v.Kind { - case yaml.MappingNode: - for i := 0; i < len(v.Content); i += 2 { - k2 := v.Content[i].Value - v2 := v.Content[i+1] - switch v2.Kind { - case yaml.SequenceNode: - a := make([]string, 0) - for _, v3 := range v2.Content { - switch v3.Kind { - case yaml.ScalarNode: - a = append(a, v3.Value) - default: - fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v3) - } - } - s := &SchemaOrStringArray{} - s.StringArray = &a - pair := &NamedSchemaOrStringArray{Name: k2, Value: s} - m = append(m, pair) - default: - fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v2) - } - } - default: - fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v) - } - return &m -} - -// Gets a schema or a boolean value from an interface{} value if possible. 
-func (schema *Schema) schemaOrBooleanValue(v *yaml.Node) *SchemaOrBoolean { - schemaOrBoolean := &SchemaOrBoolean{} - switch v.Kind { - case yaml.ScalarNode: - v2, _ := strconv.ParseBool(v.Value) - schemaOrBoolean.Boolean = &v2 - case yaml.MappingNode: - schemaOrBoolean.Schema = NewSchemaFromObject(v) - default: - fmt.Printf("schemaOrBooleanValue: unexpected node %+v\n", v) - } - return schemaOrBoolean -} diff --git a/src/vendor/github.com/google/gnostic/jsonschema/schema.json b/src/vendor/github.com/google/gnostic/jsonschema/schema.json deleted file mode 100644 index 85eb502a6..000000000 --- a/src/vendor/github.com/google/gnostic/jsonschema/schema.json +++ /dev/null @@ -1,150 +0,0 @@ -{ - "id": "http://json-schema.org/draft-04/schema#", - "$schema": "http://json-schema.org/draft-04/schema#", - "description": "Core schema meta-schema", - "definitions": { - "schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$ref": "#" } - }, - "positiveInteger": { - "type": "integer", - "minimum": 0 - }, - "positiveIntegerDefault0": { - "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] - }, - "simpleTypes": { - "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "minItems": 1, - "uniqueItems": true - } - }, - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uri" - }, - "$schema": { - "type": "string", - "format": "uri" - }, - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": {}, - "multipleOf": { - "type": "number", - "minimum": 0, - "exclusiveMinimum": true - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "boolean", - "default": false - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "boolean", - "default": false - }, - "maxLength": { "$ref": "#/definitions/positiveInteger" }, - "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "pattern": { - "type": "string", - "format": "regex" - }, - "additionalItems": { - "anyOf": [ - { "type": "boolean" }, - { "$ref": "#" } - ], - "default": {} - }, - "items": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/schemaArray" } - ], - "default": {} - }, - "maxItems": { "$ref": "#/definitions/positiveInteger" }, - "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "maxProperties": { "$ref": "#/definitions/positiveInteger" }, - "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "required": { "$ref": "#/definitions/stringArray" }, - "additionalProperties": { - "anyOf": [ - { "type": "boolean" }, - { "$ref": "#" } - ], - "default": {} - }, - "definitions": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "properties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "patternProperties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "dependencies": { - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/stringArray" } - ] - } - }, - "enum": { - "type": "array", - "minItems": 1, - "uniqueItems": true - }, - "type": { - "anyOf": [ - { "$ref": "#/definitions/simpleTypes" }, - { - "type": "array", - "items": { "$ref": "#/definitions/simpleTypes" }, - "minItems": 1, - "uniqueItems": true - } - ] - }, - "allOf": { 
"$ref": "#/definitions/schemaArray" }, - "anyOf": { "$ref": "#/definitions/schemaArray" }, - "oneOf": { "$ref": "#/definitions/schemaArray" }, - "not": { "$ref": "#" } - }, - "dependencies": { - "exclusiveMaximum": [ "maximum" ], - "exclusiveMinimum": [ "minimum" ] - }, - "default": {} -} diff --git a/src/vendor/github.com/google/gnostic/jsonschema/writer.go b/src/vendor/github.com/google/gnostic/jsonschema/writer.go deleted file mode 100644 index 340dc5f93..000000000 --- a/src/vendor/github.com/google/gnostic/jsonschema/writer.go +++ /dev/null @@ -1,369 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jsonschema - -import ( - "fmt" - - "gopkg.in/yaml.v3" -) - -const indentation = " " - -func renderMappingNode(node *yaml.Node, indent string) (result string) { - result = "{\n" - innerIndent := indent + indentation - for i := 0; i < len(node.Content); i += 2 { - // first print the key - key := node.Content[i].Value - result += fmt.Sprintf("%s\"%+v\": ", innerIndent, key) - // then the value - value := node.Content[i+1] - switch value.Kind { - case yaml.ScalarNode: - result += "\"" + value.Value + "\"" - case yaml.MappingNode: - result += renderMappingNode(value, innerIndent) - case yaml.SequenceNode: - result += renderSequenceNode(value, innerIndent) - default: - result += fmt.Sprintf("???MapItem(Key:%+v, Value:%T)", value, value) - } - if i < len(node.Content)-2 { - result += "," - } - result += "\n" - } - - result += indent + "}" - return result -} - -func renderSequenceNode(node *yaml.Node, indent string) (result string) { - result = "[\n" - innerIndent := indent + indentation - for i := 0; i < len(node.Content); i++ { - item := node.Content[i] - switch item.Kind { - case yaml.ScalarNode: - result += innerIndent + "\"" + item.Value + "\"" - case yaml.MappingNode: - result += innerIndent + renderMappingNode(item, innerIndent) + "" - default: - result += innerIndent + fmt.Sprintf("???ArrayItem(%+v)", item) - } - if i < len(node.Content)-1 { - result += "," - } - result += "\n" - } - result += indent + "]" - return result -} - -func renderStringArray(array []string, indent string) (result string) { - result = "[\n" - innerIndent := indent + indentation - for i, item := range array { - result += innerIndent + "\"" + item + "\"" - if i < len(array)-1 { - result += "," - } - result += "\n" - } - result += indent + "]" - return result -} - -// Render renders a yaml.Node as JSON -func Render(node *yaml.Node) string { - if node.Kind == yaml.DocumentNode { - if len(node.Content) == 1 { - return Render(node.Content[0]) - } - } else if node.Kind == yaml.MappingNode { - return renderMappingNode(node, "") + "\n" - } else if node.Kind == yaml.SequenceNode { - return renderSequenceNode(node, "") + "\n" - } - return "" -} - -func (object *SchemaNumber) nodeValue() *yaml.Node { - if object.Integer != nil { - return nodeForInt64(*object.Integer) - } else if object.Float != nil { - return nodeForFloat64(*object.Float) - } else { - 
return nil - } -} - -func (object *SchemaOrBoolean) nodeValue() *yaml.Node { - if object.Schema != nil { - return object.Schema.nodeValue() - } else if object.Boolean != nil { - return nodeForBoolean(*object.Boolean) - } else { - return nil - } -} - -func nodeForStringArray(array []string) *yaml.Node { - content := make([]*yaml.Node, 0) - for _, item := range array { - content = append(content, nodeForString(item)) - } - return nodeForSequence(content) -} - -func nodeForSchemaArray(array []*Schema) *yaml.Node { - content := make([]*yaml.Node, 0) - for _, item := range array { - content = append(content, item.nodeValue()) - } - return nodeForSequence(content) -} - -func (object *StringOrStringArray) nodeValue() *yaml.Node { - if object.String != nil { - return nodeForString(*object.String) - } else if object.StringArray != nil { - return nodeForStringArray(*(object.StringArray)) - } else { - return nil - } -} - -func (object *SchemaOrStringArray) nodeValue() *yaml.Node { - if object.Schema != nil { - return object.Schema.nodeValue() - } else if object.StringArray != nil { - return nodeForStringArray(*(object.StringArray)) - } else { - return nil - } -} - -func (object *SchemaOrSchemaArray) nodeValue() *yaml.Node { - if object.Schema != nil { - return object.Schema.nodeValue() - } else if object.SchemaArray != nil { - return nodeForSchemaArray(*(object.SchemaArray)) - } else { - return nil - } -} - -func (object *SchemaEnumValue) nodeValue() *yaml.Node { - if object.String != nil { - return nodeForString(*object.String) - } else if object.Bool != nil { - return nodeForBoolean(*object.Bool) - } else { - return nil - } -} - -func nodeForNamedSchemaArray(array *[]*NamedSchema) *yaml.Node { - content := make([]*yaml.Node, 0) - for _, pair := range *(array) { - content = appendPair(content, pair.Name, pair.Value.nodeValue()) - } - return nodeForMapping(content) -} - -func nodeForNamedSchemaOrStringArray(array *[]*NamedSchemaOrStringArray) *yaml.Node { - content := make([]*yaml.Node, 0) - for _, pair := range *(array) { - content = appendPair(content, pair.Name, pair.Value.nodeValue()) - } - return nodeForMapping(content) -} - -func nodeForSchemaEnumArray(array *[]SchemaEnumValue) *yaml.Node { - content := make([]*yaml.Node, 0) - for _, item := range *array { - content = append(content, item.nodeValue()) - } - return nodeForSequence(content) -} - -func nodeForMapping(content []*yaml.Node) *yaml.Node { - return &yaml.Node{ - Kind: yaml.MappingNode, - Content: content, - } -} - -func nodeForSequence(content []*yaml.Node) *yaml.Node { - return &yaml.Node{ - Kind: yaml.SequenceNode, - Content: content, - } -} - -func nodeForString(value string) *yaml.Node { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!str", - Value: value, - } -} - -func nodeForBoolean(value bool) *yaml.Node { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!bool", - Value: fmt.Sprintf("%t", value), - } -} - -func nodeForInt64(value int64) *yaml.Node { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!int", - Value: fmt.Sprintf("%d", value), - } -} - -func nodeForFloat64(value float64) *yaml.Node { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!float", - Value: fmt.Sprintf("%f", value), - } -} - -func appendPair(nodes []*yaml.Node, name string, value *yaml.Node) []*yaml.Node { - nodes = append(nodes, nodeForString(name)) - nodes = append(nodes, value) - return nodes -} - -func (schema *Schema) nodeValue() *yaml.Node { - n := &yaml.Node{Kind: yaml.MappingNode} - content := make([]*yaml.Node, 0) - if 
schema.Title != nil { - content = appendPair(content, "title", nodeForString(*schema.Title)) - } - if schema.ID != nil { - content = appendPair(content, "id", nodeForString(*schema.ID)) - } - if schema.Schema != nil { - content = appendPair(content, "$schema", nodeForString(*schema.Schema)) - } - if schema.Type != nil { - content = appendPair(content, "type", schema.Type.nodeValue()) - } - if schema.Items != nil { - content = appendPair(content, "items", schema.Items.nodeValue()) - } - if schema.Description != nil { - content = appendPair(content, "description", nodeForString(*schema.Description)) - } - if schema.Required != nil { - content = appendPair(content, "required", nodeForStringArray(*schema.Required)) - } - if schema.AdditionalProperties != nil { - content = appendPair(content, "additionalProperties", schema.AdditionalProperties.nodeValue()) - } - if schema.PatternProperties != nil { - content = appendPair(content, "patternProperties", nodeForNamedSchemaArray(schema.PatternProperties)) - } - if schema.Properties != nil { - content = appendPair(content, "properties", nodeForNamedSchemaArray(schema.Properties)) - } - if schema.Dependencies != nil { - content = appendPair(content, "dependencies", nodeForNamedSchemaOrStringArray(schema.Dependencies)) - } - if schema.Ref != nil { - content = appendPair(content, "$ref", nodeForString(*schema.Ref)) - } - if schema.MultipleOf != nil { - content = appendPair(content, "multipleOf", schema.MultipleOf.nodeValue()) - } - if schema.Maximum != nil { - content = appendPair(content, "maximum", schema.Maximum.nodeValue()) - } - if schema.ExclusiveMaximum != nil { - content = appendPair(content, "exclusiveMaximum", nodeForBoolean(*schema.ExclusiveMaximum)) - } - if schema.Minimum != nil { - content = appendPair(content, "minimum", schema.Minimum.nodeValue()) - } - if schema.ExclusiveMinimum != nil { - content = appendPair(content, "exclusiveMinimum", nodeForBoolean(*schema.ExclusiveMinimum)) - } - if schema.MaxLength != nil { - content = appendPair(content, "maxLength", nodeForInt64(*schema.MaxLength)) - } - if schema.MinLength != nil { - content = appendPair(content, "minLength", nodeForInt64(*schema.MinLength)) - } - if schema.Pattern != nil { - content = appendPair(content, "pattern", nodeForString(*schema.Pattern)) - } - if schema.AdditionalItems != nil { - content = appendPair(content, "additionalItems", schema.AdditionalItems.nodeValue()) - } - if schema.MaxItems != nil { - content = appendPair(content, "maxItems", nodeForInt64(*schema.MaxItems)) - } - if schema.MinItems != nil { - content = appendPair(content, "minItems", nodeForInt64(*schema.MinItems)) - } - if schema.UniqueItems != nil { - content = appendPair(content, "uniqueItems", nodeForBoolean(*schema.UniqueItems)) - } - if schema.MaxProperties != nil { - content = appendPair(content, "maxProperties", nodeForInt64(*schema.MaxProperties)) - } - if schema.MinProperties != nil { - content = appendPair(content, "minProperties", nodeForInt64(*schema.MinProperties)) - } - if schema.Enumeration != nil { - content = appendPair(content, "enum", nodeForSchemaEnumArray(schema.Enumeration)) - } - if schema.AllOf != nil { - content = appendPair(content, "allOf", nodeForSchemaArray(*schema.AllOf)) - } - if schema.AnyOf != nil { - content = appendPair(content, "anyOf", nodeForSchemaArray(*schema.AnyOf)) - } - if schema.OneOf != nil { - content = appendPair(content, "oneOf", nodeForSchemaArray(*schema.OneOf)) - } - if schema.Not != nil { - content = appendPair(content, "not", schema.Not.nodeValue()) 
- } - if schema.Definitions != nil { - content = appendPair(content, "definitions", nodeForNamedSchemaArray(schema.Definitions)) - } - if schema.Default != nil { - // m = append(m, yaml.MapItem{Key: "default", Value: *schema.Default}) - } - if schema.Format != nil { - content = appendPair(content, "format", nodeForString(*schema.Format)) - } - n.Content = content - return n -} - -// JSONString returns a json representation of a schema. -func (schema *Schema) JSONString() string { - node := schema.nodeValue() - return Render(node) -} diff --git a/src/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go b/src/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go deleted file mode 100644 index 0f1790766..000000000 --- a/src/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go +++ /dev/null @@ -1,8820 +0,0 @@ -// Copyright 2020 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// THIS FILE IS AUTOMATICALLY GENERATED. - -package openapi_v2 - -import ( - "fmt" - "regexp" - "strings" - - "gopkg.in/yaml.v3" - - "github.com/google/gnostic/compiler" -) - -// Version returns the package name (and OpenAPI version). -func Version() string { - return "openapi_v2" -} - -// NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not. -func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*AdditionalPropertiesItem, error) { - errors := make([]error, 0) - x := &AdditionalPropertiesItem{} - matched := false - // Schema schema = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewSchema(m, compiler.NewContext("schema", m, context)) - if matchingError == nil { - x.Oneof = &AdditionalPropertiesItem_Schema{Schema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // bool boolean = 2; - boolValue, ok := compiler.BoolForScalarNode(in) - if ok { - x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue} - matched = true - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } else { - message := fmt.Sprintf("contains an invalid AdditionalPropertiesItem") - err := compiler.NewError(context, message) - errors = []error{err} - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewAny creates an object of type Any if possible, returning an error if not. -func NewAny(in *yaml.Node, context *compiler.Context) (*Any, error) { - errors := make([]error, 0) - x := &Any{} - bytes := compiler.Marshal(in) - x.Yaml = string(bytes) - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewApiKeySecurity creates an object of type ApiKeySecurity if possible, returning an error if not. 
-func NewApiKeySecurity(in *yaml.Node, context *compiler.Context) (*ApiKeySecurity, error) { - errors := make([]error, 0) - x := &ApiKeySecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"in", "name", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "in", "name", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [apiKey] - if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 2; - v2 := compiler.MapValueForKey(m, "name") - if v2 != nil { - x.Name, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 3; - v3 := compiler.MapValueForKey(m, "in") - if v3 != nil { - x.In, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [header query] - if ok && !compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 4; - v4 := compiler.MapValueForKey(m, "description") - if v4 != nil { - x.Description, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 5; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, 
err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewBasicAuthenticationSecurity creates an object of type BasicAuthenticationSecurity if possible, returning an error if not. -func NewBasicAuthenticationSecurity(in *yaml.Node, context *compiler.Context) (*BasicAuthenticationSecurity, error) { - errors := make([]error, 0) - x := &BasicAuthenticationSecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [basic] - if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 2; - v2 := compiler.MapValueForKey(m, "description") - if v2 != nil { - x.Description, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 3; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewBodyParameter creates an object of type BodyParameter if possible, returning an error if not. 
-func NewBodyParameter(in *yaml.Node, context *compiler.Context) (*BodyParameter, error) { - errors := make([]error, 0) - x := &BodyParameter{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"in", "name", "schema"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "in", "name", "required", "schema"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 2; - v2 := compiler.MapValueForKey(m, "name") - if v2 != nil { - x.Name, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 3; - v3 := compiler.MapValueForKey(m, "in") - if v3 != nil { - x.In, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [body] - if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool required = 4; - v4 := compiler.MapValueForKey(m, "required") - if v4 != nil { - x.Required, ok = compiler.BoolForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Schema schema = 5; - v5 := compiler.MapValueForKey(m, "schema") - if v5 != nil { - var err error - x.Schema, err = NewSchema(v5, compiler.NewContext("schema", v5, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = 
append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewContact creates an object of type Contact if possible, returning an error if not. -func NewContact(in *yaml.Node, context *compiler.Context) (*Contact, error) { - errors := make([]error, 0) - x := &Contact{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"email", "name", "url"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string url = 2; - v2 := compiler.MapValueForKey(m, "url") - if v2 != nil { - x.Url, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string email = 3; - v3 := compiler.MapValueForKey(m, "email") - if v3 != nil { - x.Email, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for email: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 4; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewDefault creates an object of type Default if possible, returning an error if not. 
-func NewDefault(in *yaml.Node, context *compiler.Context) (*Default, error) { - errors := make([]error, 0) - x := &Default{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedAny additional_properties = 1; - // MAP: Any - x.AdditionalProperties = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewDefinitions creates an object of type Definitions if possible, returning an error if not. -func NewDefinitions(in *yaml.Node, context *compiler.Context) (*Definitions, error) { - errors := make([]error, 0) - x := &Definitions{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedSchema additional_properties = 1; - // MAP: Schema - x.AdditionalProperties = make([]*NamedSchema, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedSchema{} - pair.Name = k - var err error - pair.Value, err = NewSchema(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewDocument creates an object of type Document if possible, returning an error if not. 
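Editorial sketch, not part of the patch: driving the generated NewDocument constructor by hand. Unwrapping the yaml DocumentNode manually and passing a nil parent *compiler.Context at the root are assumptions made for this illustration; upstream gnostic normally wraps this wiring in its ParseDocument helper.

package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v3"

	"github.com/google/gnostic/compiler"
	openapi_v2 "github.com/google/gnostic/openapiv2"
)

func main() {
	spec := []byte(`
swagger: "2.0"
info: {title: demo, version: "1.0"}
paths: {}
`)
	var node yaml.Node
	if err := yaml.Unmarshal(spec, &node); err != nil {
		log.Fatal(err)
	}
	root := node.Content[0] // the top-level mapping node

	// nil parent context at the root is assumed to be acceptable here.
	doc, err := openapi_v2.NewDocument(root, compiler.NewContext("$root", root, nil))
	if err != nil {
		// Errors are aggregated: missing required keys, invalid keys, bad enum values, etc.
		log.Fatal(err)
	}
	fmt.Println(doc.Info.Title)
}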
-func NewDocument(in *yaml.Node, context *compiler.Context) (*Document, error) { - errors := make([]error, 0) - x := &Document{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"info", "paths", "swagger"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"basePath", "consumes", "definitions", "externalDocs", "host", "info", "parameters", "paths", "produces", "responses", "schemes", "security", "securityDefinitions", "swagger", "tags"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string swagger = 1; - v1 := compiler.MapValueForKey(m, "swagger") - if v1 != nil { - x.Swagger, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for swagger: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [2.0] - if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) { - message := fmt.Sprintf("has unexpected value for swagger: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Info info = 2; - v2 := compiler.MapValueForKey(m, "info") - if v2 != nil { - var err error - x.Info, err = NewInfo(v2, compiler.NewContext("info", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - // string host = 3; - v3 := compiler.MapValueForKey(m, "host") - if v3 != nil { - x.Host, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for host: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string base_path = 4; - v4 := compiler.MapValueForKey(m, "basePath") - if v4 != nil { - x.BasePath, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for basePath: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string schemes = 5; - v5 := compiler.MapValueForKey(m, "schemes") - if v5 != nil { - v, ok := compiler.SequenceNodeForNode(v5) - if ok { - x.Schemes = compiler.StringArrayForSequenceNode(v) - } else { - message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [http https ws wss] - if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { - message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string consumes = 6; - v6 := compiler.MapValueForKey(m, "consumes") - if v6 != nil { - v, ok := compiler.SequenceNodeForNode(v6) - if ok { - x.Consumes = compiler.StringArrayForSequenceNode(v) - } else { - message := 
fmt.Sprintf("has unexpected value for consumes: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string produces = 7; - v7 := compiler.MapValueForKey(m, "produces") - if v7 != nil { - v, ok := compiler.SequenceNodeForNode(v7) - if ok { - x.Produces = compiler.StringArrayForSequenceNode(v) - } else { - message := fmt.Sprintf("has unexpected value for produces: %s", compiler.Display(v7)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Paths paths = 8; - v8 := compiler.MapValueForKey(m, "paths") - if v8 != nil { - var err error - x.Paths, err = NewPaths(v8, compiler.NewContext("paths", v8, context)) - if err != nil { - errors = append(errors, err) - } - } - // Definitions definitions = 9; - v9 := compiler.MapValueForKey(m, "definitions") - if v9 != nil { - var err error - x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", v9, context)) - if err != nil { - errors = append(errors, err) - } - } - // ParameterDefinitions parameters = 10; - v10 := compiler.MapValueForKey(m, "parameters") - if v10 != nil { - var err error - x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", v10, context)) - if err != nil { - errors = append(errors, err) - } - } - // ResponseDefinitions responses = 11; - v11 := compiler.MapValueForKey(m, "responses") - if v11 != nil { - var err error - x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", v11, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated SecurityRequirement security = 12; - v12 := compiler.MapValueForKey(m, "security") - if v12 != nil { - // repeated SecurityRequirement - x.Security = make([]*SecurityRequirement, 0) - a, ok := compiler.SequenceNodeForNode(v12) - if ok { - for _, item := range a.Content { - y, err := NewSecurityRequirement(item, compiler.NewContext("security", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Security = append(x.Security, y) - } - } - } - // SecurityDefinitions security_definitions = 13; - v13 := compiler.MapValueForKey(m, "securityDefinitions") - if v13 != nil { - var err error - x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", v13, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated Tag tags = 14; - v14 := compiler.MapValueForKey(m, "tags") - if v14 != nil { - // repeated Tag - x.Tags = make([]*Tag, 0) - a, ok := compiler.SequenceNodeForNode(v14) - if ok { - for _, item := range a.Content { - y, err := NewTag(item, compiler.NewContext("tags", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Tags = append(x.Tags, y) - } - } - } - // ExternalDocs external_docs = 15; - v15 := compiler.MapValueForKey(m, "externalDocs") - if v15 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", v15, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 16; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - 
result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewExamples creates an object of type Examples if possible, returning an error if not. -func NewExamples(in *yaml.Node, context *compiler.Context) (*Examples, error) { - errors := make([]error, 0) - x := &Examples{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedAny additional_properties = 1; - // MAP: Any - x.AdditionalProperties = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not. -func NewExternalDocs(in *yaml.Node, context *compiler.Context) (*ExternalDocs, error) { - errors := make([]error, 0) - x := &ExternalDocs{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"url"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "url"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string url = 2; - v2 := compiler.MapValueForKey(m, "url") - if v2 != nil { - x.Url, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 3; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := 
m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewFileSchema creates an object of type FileSchema if possible, returning an error if not. -func NewFileSchema(in *yaml.Node, context *compiler.Context) (*FileSchema, error) { - errors := make([]error, 0) - x := &FileSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"default", "description", "example", "externalDocs", "format", "readOnly", "required", "title", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string format = 1; - v1 := compiler.MapValueForKey(m, "format") - if v1 != nil { - x.Format, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string title = 2; - v2 := compiler.MapValueForKey(m, "title") - if v2 != nil { - x.Title, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 4; - v4 := compiler.MapValueForKey(m, "default") - if v4 != nil { - var err error - x.Default, err = NewAny(v4, compiler.NewContext("default", v4, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated string required = 5; - v5 := compiler.MapValueForKey(m, "required") - if v5 != nil { - v, ok := compiler.SequenceNodeForNode(v5) - if ok { - x.Required = compiler.StringArrayForSequenceNode(v) - } else { - message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 6; - v6 := compiler.MapValueForKey(m, "type") - if v6 != nil { - x.Type, ok = compiler.StringForScalarNode(v6) - if !ok { - message := fmt.Sprintf("has 
unexpected value for type: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [file] - if ok && !compiler.StringArrayContainsValue([]string{"file"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool read_only = 7; - v7 := compiler.MapValueForKey(m, "readOnly") - if v7 != nil { - x.ReadOnly, ok = compiler.BoolForScalarNode(v7) - if !ok { - message := fmt.Sprintf("has unexpected value for readOnly: %s", compiler.Display(v7)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ExternalDocs external_docs = 8; - v8 := compiler.MapValueForKey(m, "externalDocs") - if v8 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v8, compiler.NewContext("externalDocs", v8, context)) - if err != nil { - errors = append(errors, err) - } - } - // Any example = 9; - v9 := compiler.MapValueForKey(m, "example") - if v9 != nil { - var err error - x.Example, err = NewAny(v9, compiler.NewContext("example", v9, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 10; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewFormDataParameterSubSchema creates an object of type FormDataParameterSubSchema if possible, returning an error if not. 
-func NewFormDataParameterSubSchema(in *yaml.Node, context *compiler.Context) (*FormDataParameterSubSchema, error) { - errors := make([]error, 0) - x := &FormDataParameterSubSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // bool required = 1; - v1 := compiler.MapValueForKey(m, "required") - if v1 != nil { - x.Required, ok = compiler.BoolForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 2; - v2 := compiler.MapValueForKey(m, "in") - if v2 != nil { - x.In, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [formData] - if ok && !compiler.StringArrayContainsValue([]string{"formData"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 4; - v4 := compiler.MapValueForKey(m, "name") - if v4 != nil { - x.Name, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool allow_empty_value = 5; - v5 := compiler.MapValueForKey(m, "allowEmptyValue") - if v5 != nil { - x.AllowEmptyValue, ok = compiler.BoolForScalarNode(v5) - if !ok { - message := fmt.Sprintf("has unexpected value for allowEmptyValue: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 6; - v6 := compiler.MapValueForKey(m, "type") - if v6 != nil { - x.Type, ok = compiler.StringForScalarNode(v6) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number boolean integer array file] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array", "file"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 7; - v7 := 
compiler.MapValueForKey(m, "format") - if v7 != nil { - x.Format, ok = compiler.StringForScalarNode(v7) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v7)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 8; - v8 := compiler.MapValueForKey(m, "items") - if v8 != nil { - var err error - x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", v8, context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 9; - v9 := compiler.MapValueForKey(m, "collectionFormat") - if v9 != nil { - x.CollectionFormat, ok = compiler.StringForScalarNode(v9) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes multi] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 10; - v10 := compiler.MapValueForKey(m, "default") - if v10 != nil { - var err error - x.Default, err = NewAny(v10, compiler.NewContext("default", v10, context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 11; - v11 := compiler.MapValueForKey(m, "maximum") - if v11 != nil { - v, ok := compiler.FloatForScalarNode(v11) - if ok { - x.Maximum = v - } else { - message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v11)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 12; - v12 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v12 != nil { - x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v12) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v12)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 13; - v13 := compiler.MapValueForKey(m, "minimum") - if v13 != nil { - v, ok := compiler.FloatForScalarNode(v13) - if ok { - x.Minimum = v - } else { - message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v13)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 14; - v14 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v14 != nil { - x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v14) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v14)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 15; - v15 := compiler.MapValueForKey(m, "maxLength") - if v15 != nil { - t, ok := compiler.IntForScalarNode(v15) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v15)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 16; - v16 := compiler.MapValueForKey(m, "minLength") - if v16 != nil { - t, ok := compiler.IntForScalarNode(v16) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v16)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 17; - v17 := compiler.MapValueForKey(m, 
"pattern") - if v17 != nil { - x.Pattern, ok = compiler.StringForScalarNode(v17) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v17)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 18; - v18 := compiler.MapValueForKey(m, "maxItems") - if v18 != nil { - t, ok := compiler.IntForScalarNode(v18) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v18)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 19; - v19 := compiler.MapValueForKey(m, "minItems") - if v19 != nil { - t, ok := compiler.IntForScalarNode(v19) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v19)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 20; - v20 := compiler.MapValueForKey(m, "uniqueItems") - if v20 != nil { - x.UniqueItems, ok = compiler.BoolForScalarNode(v20) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v20)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 21; - v21 := compiler.MapValueForKey(m, "enum") - if v21 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := compiler.SequenceNodeForNode(v21) - if ok { - for _, item := range a.Content { - y, err := NewAny(item, compiler.NewContext("enum", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 22; - v22 := compiler.MapValueForKey(m, "multipleOf") - if v22 != nil { - v, ok := compiler.FloatForScalarNode(v22) - if ok { - x.MultipleOf = v - } else { - message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v22)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 23; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewHeader creates an object of type Header if possible, returning an error if not. 
-func NewHeader(in *yaml.Node, context *compiler.Context) (*Header, error) { - errors := make([]error, 0) - x := &Header{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number integer boolean array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 2; - v2 := compiler.MapValueForKey(m, "format") - if v2 != nil { - x.Format, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 3; - v3 := compiler.MapValueForKey(m, "items") - if v3 != nil { - var err error - x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", v3, context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 4; - v4 := compiler.MapValueForKey(m, "collectionFormat") - if v4 != nil { - x.CollectionFormat, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 5; - v5 := compiler.MapValueForKey(m, "default") - if v5 != nil { - var err error - x.Default, err = NewAny(v5, compiler.NewContext("default", v5, context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 6; - v6 := compiler.MapValueForKey(m, "maximum") - if v6 != nil { - v, ok := compiler.FloatForScalarNode(v6) - if ok { - x.Maximum = v - } else { - message := fmt.Sprintf("has unexpected value for 
maximum: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 7; - v7 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v7 != nil { - x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v7) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v7)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 8; - v8 := compiler.MapValueForKey(m, "minimum") - if v8 != nil { - v, ok := compiler.FloatForScalarNode(v8) - if ok { - x.Minimum = v - } else { - message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v8)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 9; - v9 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v9 != nil { - x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v9) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v9)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 10; - v10 := compiler.MapValueForKey(m, "maxLength") - if v10 != nil { - t, ok := compiler.IntForScalarNode(v10) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v10)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 11; - v11 := compiler.MapValueForKey(m, "minLength") - if v11 != nil { - t, ok := compiler.IntForScalarNode(v11) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v11)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 12; - v12 := compiler.MapValueForKey(m, "pattern") - if v12 != nil { - x.Pattern, ok = compiler.StringForScalarNode(v12) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v12)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 13; - v13 := compiler.MapValueForKey(m, "maxItems") - if v13 != nil { - t, ok := compiler.IntForScalarNode(v13) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v13)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 14; - v14 := compiler.MapValueForKey(m, "minItems") - if v14 != nil { - t, ok := compiler.IntForScalarNode(v14) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v14)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 15; - v15 := compiler.MapValueForKey(m, "uniqueItems") - if v15 != nil { - x.UniqueItems, ok = compiler.BoolForScalarNode(v15) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v15)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 16; - v16 := compiler.MapValueForKey(m, "enum") - if v16 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := compiler.SequenceNodeForNode(v16) - if ok { - for _, item := range a.Content { - y, err := NewAny(item, compiler.NewContext("enum", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 17; - v17 := 
compiler.MapValueForKey(m, "multipleOf") - if v17 != nil { - v, ok := compiler.FloatForScalarNode(v17) - if ok { - x.MultipleOf = v - } else { - message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v17)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 18; - v18 := compiler.MapValueForKey(m, "description") - if v18 != nil { - x.Description, ok = compiler.StringForScalarNode(v18) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v18)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 19; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewHeaderParameterSubSchema creates an object of type HeaderParameterSubSchema if possible, returning an error if not. -func NewHeaderParameterSubSchema(in *yaml.Node, context *compiler.Context) (*HeaderParameterSubSchema, error) { - errors := make([]error, 0) - x := &HeaderParameterSubSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // bool required = 1; - v1 := compiler.MapValueForKey(m, "required") - if v1 != nil { - x.Required, ok = compiler.BoolForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 2; - v2 := compiler.MapValueForKey(m, "in") - if v2 != nil { - x.In, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [header] - if ok && !compiler.StringArrayContainsValue([]string{"header"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != 
nil { - x.Description, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 4; - v4 := compiler.MapValueForKey(m, "name") - if v4 != nil { - x.Name, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 5; - v5 := compiler.MapValueForKey(m, "type") - if v5 != nil { - x.Type, ok = compiler.StringForScalarNode(v5) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number boolean integer array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 6; - v6 := compiler.MapValueForKey(m, "format") - if v6 != nil { - x.Format, ok = compiler.StringForScalarNode(v6) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 7; - v7 := compiler.MapValueForKey(m, "items") - if v7 != nil { - var err error - x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", v7, context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 8; - v8 := compiler.MapValueForKey(m, "collectionFormat") - if v8 != nil { - x.CollectionFormat, ok = compiler.StringForScalarNode(v8) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 9; - v9 := compiler.MapValueForKey(m, "default") - if v9 != nil { - var err error - x.Default, err = NewAny(v9, compiler.NewContext("default", v9, context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 10; - v10 := compiler.MapValueForKey(m, "maximum") - if v10 != nil { - v, ok := compiler.FloatForScalarNode(v10) - if ok { - x.Maximum = v - } else { - message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v10)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 11; - v11 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v11 != nil { - x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v11) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v11)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 12; - v12 := compiler.MapValueForKey(m, "minimum") - if v12 != nil { - v, ok := compiler.FloatForScalarNode(v12) - if ok { - x.Minimum = v - } else { - message := fmt.Sprintf("has unexpected value for minimum: %s", 
compiler.Display(v12)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 13; - v13 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v13 != nil { - x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v13) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v13)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 14; - v14 := compiler.MapValueForKey(m, "maxLength") - if v14 != nil { - t, ok := compiler.IntForScalarNode(v14) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v14)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 15; - v15 := compiler.MapValueForKey(m, "minLength") - if v15 != nil { - t, ok := compiler.IntForScalarNode(v15) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v15)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 16; - v16 := compiler.MapValueForKey(m, "pattern") - if v16 != nil { - x.Pattern, ok = compiler.StringForScalarNode(v16) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v16)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 17; - v17 := compiler.MapValueForKey(m, "maxItems") - if v17 != nil { - t, ok := compiler.IntForScalarNode(v17) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v17)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 18; - v18 := compiler.MapValueForKey(m, "minItems") - if v18 != nil { - t, ok := compiler.IntForScalarNode(v18) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v18)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 19; - v19 := compiler.MapValueForKey(m, "uniqueItems") - if v19 != nil { - x.UniqueItems, ok = compiler.BoolForScalarNode(v19) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v19)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 20; - v20 := compiler.MapValueForKey(m, "enum") - if v20 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := compiler.SequenceNodeForNode(v20) - if ok { - for _, item := range a.Content { - y, err := NewAny(item, compiler.NewContext("enum", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 21; - v21 := compiler.MapValueForKey(m, "multipleOf") - if v21 != nil { - v, ok := compiler.FloatForScalarNode(v21) - if ok { - x.MultipleOf = v - } else { - message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v21)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 22; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := 
compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewHeaders creates an object of type Headers if possible, returning an error if not. -func NewHeaders(in *yaml.Node, context *compiler.Context) (*Headers, error) { - errors := make([]error, 0) - x := &Headers{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedHeader additional_properties = 1; - // MAP: Header - x.AdditionalProperties = make([]*NamedHeader, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedHeader{} - pair.Name = k - var err error - pair.Value, err = NewHeader(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewInfo creates an object of type Info if possible, returning an error if not. -func NewInfo(in *yaml.Node, context *compiler.Context) (*Info, error) { - errors := make([]error, 0) - x := &Info{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"title", "version"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"contact", "description", "license", "termsOfService", "title", "version"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string title = 1; - v1 := compiler.MapValueForKey(m, "title") - if v1 != nil { - x.Title, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string version = 2; - v2 := compiler.MapValueForKey(m, "version") - if v2 != nil { - x.Version, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for version: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // 
string terms_of_service = 4; - v4 := compiler.MapValueForKey(m, "termsOfService") - if v4 != nil { - x.TermsOfService, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for termsOfService: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Contact contact = 5; - v5 := compiler.MapValueForKey(m, "contact") - if v5 != nil { - var err error - x.Contact, err = NewContact(v5, compiler.NewContext("contact", v5, context)) - if err != nil { - errors = append(errors, err) - } - } - // License license = 6; - v6 := compiler.MapValueForKey(m, "license") - if v6 != nil { - var err error - x.License, err = NewLicense(v6, compiler.NewContext("license", v6, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 7; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewItemsItem creates an object of type ItemsItem if possible, returning an error if not. -func NewItemsItem(in *yaml.Node, context *compiler.Context) (*ItemsItem, error) { - errors := make([]error, 0) - x := &ItemsItem{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value for item array: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - x.Schema = make([]*Schema, 0) - y, err := NewSchema(m, compiler.NewContext("", m, context)) - if err != nil { - return nil, err - } - x.Schema = append(x.Schema, y) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewJsonReference creates an object of type JsonReference if possible, returning an error if not. 
-func NewJsonReference(in *yaml.Node, context *compiler.Context) (*JsonReference, error) { - errors := make([]error, 0) - x := &JsonReference{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"$ref"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string _ref = 1; - v1 := compiler.MapValueForKey(m, "$ref") - if v1 != nil { - x.XRef, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 2; - v2 := compiler.MapValueForKey(m, "description") - if v2 != nil { - x.Description, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewLicense creates an object of type License if possible, returning an error if not. -func NewLicense(in *yaml.Node, context *compiler.Context) (*License, error) { - errors := make([]error, 0) - x := &License{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"name"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"name", "url"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string url = 2; - v2 := compiler.MapValueForKey(m, "url") - if v2 != nil { - x.Url, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 3; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = 
string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedAny creates an object of type NamedAny if possible, returning an error if not. -func NewNamedAny(in *yaml.Node, context *compiler.Context) (*NamedAny, error) { - errors := make([]error, 0) - x := &NamedAny{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewAny(v2, compiler.NewContext("value", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedHeader creates an object of type NamedHeader if possible, returning an error if not. -func NewNamedHeader(in *yaml.Node, context *compiler.Context) (*NamedHeader, error) { - errors := make([]error, 0) - x := &NamedHeader{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Header value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewHeader(v2, compiler.NewContext("value", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedParameter creates an object of type NamedParameter if possible, returning an error if not. 
-func NewNamedParameter(in *yaml.Node, context *compiler.Context) (*NamedParameter, error) { - errors := make([]error, 0) - x := &NamedParameter{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Parameter value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewParameter(v2, compiler.NewContext("value", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedPathItem creates an object of type NamedPathItem if possible, returning an error if not. -func NewNamedPathItem(in *yaml.Node, context *compiler.Context) (*NamedPathItem, error) { - errors := make([]error, 0) - x := &NamedPathItem{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PathItem value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewPathItem(v2, compiler.NewContext("value", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedResponse creates an object of type NamedResponse if possible, returning an error if not. 
-func NewNamedResponse(in *yaml.Node, context *compiler.Context) (*NamedResponse, error) { - errors := make([]error, 0) - x := &NamedResponse{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Response value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewResponse(v2, compiler.NewContext("value", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedResponseValue creates an object of type NamedResponseValue if possible, returning an error if not. -func NewNamedResponseValue(in *yaml.Node, context *compiler.Context) (*NamedResponseValue, error) { - errors := make([]error, 0) - x := &NamedResponseValue{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ResponseValue value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewResponseValue(v2, compiler.NewContext("value", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedSchema creates an object of type NamedSchema if possible, returning an error if not. 
-func NewNamedSchema(in *yaml.Node, context *compiler.Context) (*NamedSchema, error) { - errors := make([]error, 0) - x := &NamedSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Schema value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewSchema(v2, compiler.NewContext("value", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedSecurityDefinitionsItem creates an object of type NamedSecurityDefinitionsItem if possible, returning an error if not. -func NewNamedSecurityDefinitionsItem(in *yaml.Node, context *compiler.Context) (*NamedSecurityDefinitionsItem, error) { - errors := make([]error, 0) - x := &NamedSecurityDefinitionsItem{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // SecurityDefinitionsItem value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewSecurityDefinitionsItem(v2, compiler.NewContext("value", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedString creates an object of type NamedString if possible, returning an error if not. 
-func NewNamedString(in *yaml.Node, context *compiler.Context) (*NamedString, error) { - errors := make([]error, 0) - x := &NamedString{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - x.Value, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for value: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedStringArray creates an object of type NamedStringArray if possible, returning an error if not. -func NewNamedStringArray(in *yaml.Node, context *compiler.Context) (*NamedStringArray, error) { - errors := make([]error, 0) - x := &NamedStringArray{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // StringArray value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewStringArray(v2, compiler.NewContext("value", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNonBodyParameter creates an object of type NonBodyParameter if possible, returning an error if not. 
-func NewNonBodyParameter(in *yaml.Node, context *compiler.Context) (*NonBodyParameter, error) { - errors := make([]error, 0) - x := &NonBodyParameter{} - matched := false - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"in", "name", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // HeaderParameterSubSchema header_parameter_sub_schema = 1; - { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewHeaderParameterSubSchema(m, compiler.NewContext("headerParameterSubSchema", m, context)) - if matchingError == nil { - x.Oneof = &NonBodyParameter_HeaderParameterSubSchema{HeaderParameterSubSchema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - // FormDataParameterSubSchema form_data_parameter_sub_schema = 2; - { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewFormDataParameterSubSchema(m, compiler.NewContext("formDataParameterSubSchema", m, context)) - if matchingError == nil { - x.Oneof = &NonBodyParameter_FormDataParameterSubSchema{FormDataParameterSubSchema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - // QueryParameterSubSchema query_parameter_sub_schema = 3; - { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewQueryParameterSubSchema(m, compiler.NewContext("queryParameterSubSchema", m, context)) - if matchingError == nil { - x.Oneof = &NonBodyParameter_QueryParameterSubSchema{QueryParameterSubSchema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - // PathParameterSubSchema path_parameter_sub_schema = 4; - { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewPathParameterSubSchema(m, compiler.NewContext("pathParameterSubSchema", m, context)) - if matchingError == nil { - x.Oneof = &NonBodyParameter_PathParameterSubSchema{PathParameterSubSchema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } else { - message := fmt.Sprintf("contains an invalid NonBodyParameter") - err := compiler.NewError(context, message) - errors = []error{err} - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOauth2AccessCodeSecurity creates an object of type Oauth2AccessCodeSecurity if possible, returning an error if not. 
-func NewOauth2AccessCodeSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2AccessCodeSecurity, error) { - errors := make([]error, 0) - x := &Oauth2AccessCodeSecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"authorizationUrl", "flow", "tokenUrl", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "tokenUrl", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [oauth2] - if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string flow = 2; - v2 := compiler.MapValueForKey(m, "flow") - if v2 != nil { - x.Flow, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [accessCode] - if ok && !compiler.StringArrayContainsValue([]string{"accessCode"}, x.Flow) { - message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Oauth2Scopes scopes = 3; - v3 := compiler.MapValueForKey(m, "scopes") - if v3 != nil { - var err error - x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", v3, context)) - if err != nil { - errors = append(errors, err) - } - } - // string authorization_url = 4; - v4 := compiler.MapValueForKey(m, "authorizationUrl") - if v4 != nil { - x.AuthorizationUrl, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for authorizationUrl: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string token_url = 5; - v5 := compiler.MapValueForKey(m, "tokenUrl") - if v5 != nil { - x.TokenUrl, ok = compiler.StringForScalarNode(v5) - if !ok { - message := fmt.Sprintf("has unexpected value for tokenUrl: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 6; - v6 := compiler.MapValueForKey(m, "description") - if v6 != nil { - x.Description, ok = compiler.StringForScalarNode(v6) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // 
repeated NamedAny vendor_extension = 7; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOauth2ApplicationSecurity creates an object of type Oauth2ApplicationSecurity if possible, returning an error if not. -func NewOauth2ApplicationSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2ApplicationSecurity, error) { - errors := make([]error, 0) - x := &Oauth2ApplicationSecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"flow", "tokenUrl", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [oauth2] - if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string flow = 2; - v2 := compiler.MapValueForKey(m, "flow") - if v2 != nil { - x.Flow, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [application] - if ok && !compiler.StringArrayContainsValue([]string{"application"}, x.Flow) { - message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Oauth2Scopes scopes = 3; - v3 := compiler.MapValueForKey(m, "scopes") - if v3 != nil { - var err error - x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", v3, context)) - if err != nil { - errors = append(errors, err) - } - } - // string token_url = 4; - v4 := 
compiler.MapValueForKey(m, "tokenUrl") - if v4 != nil { - x.TokenUrl, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for tokenUrl: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 5; - v5 := compiler.MapValueForKey(m, "description") - if v5 != nil { - x.Description, ok = compiler.StringForScalarNode(v5) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOauth2ImplicitSecurity creates an object of type Oauth2ImplicitSecurity if possible, returning an error if not. -func NewOauth2ImplicitSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2ImplicitSecurity, error) { - errors := make([]error, 0) - x := &Oauth2ImplicitSecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"authorizationUrl", "flow", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [oauth2] - if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string flow = 2; - v2 := compiler.MapValueForKey(m, "flow") - if v2 != nil { - x.Flow, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) - errors = append(errors, 
compiler.NewError(context, message)) - } - // check for valid enum values - // [implicit] - if ok && !compiler.StringArrayContainsValue([]string{"implicit"}, x.Flow) { - message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Oauth2Scopes scopes = 3; - v3 := compiler.MapValueForKey(m, "scopes") - if v3 != nil { - var err error - x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", v3, context)) - if err != nil { - errors = append(errors, err) - } - } - // string authorization_url = 4; - v4 := compiler.MapValueForKey(m, "authorizationUrl") - if v4 != nil { - x.AuthorizationUrl, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for authorizationUrl: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 5; - v5 := compiler.MapValueForKey(m, "description") - if v5 != nil { - x.Description, ok = compiler.StringForScalarNode(v5) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOauth2PasswordSecurity creates an object of type Oauth2PasswordSecurity if possible, returning an error if not. 
-func NewOauth2PasswordSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2PasswordSecurity, error) { - errors := make([]error, 0) - x := &Oauth2PasswordSecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"flow", "tokenUrl", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [oauth2] - if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string flow = 2; - v2 := compiler.MapValueForKey(m, "flow") - if v2 != nil { - x.Flow, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [password] - if ok && !compiler.StringArrayContainsValue([]string{"password"}, x.Flow) { - message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Oauth2Scopes scopes = 3; - v3 := compiler.MapValueForKey(m, "scopes") - if v3 != nil { - var err error - x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", v3, context)) - if err != nil { - errors = append(errors, err) - } - } - // string token_url = 4; - v4 := compiler.MapValueForKey(m, "tokenUrl") - if v4 != nil { - x.TokenUrl, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for tokenUrl: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 5; - v5 := compiler.MapValueForKey(m, "description") - if v5 != nil { - x.Description, ok = compiler.StringForScalarNode(v5) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if 
handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOauth2Scopes creates an object of type Oauth2Scopes if possible, returning an error if not. -func NewOauth2Scopes(in *yaml.Node, context *compiler.Context) (*Oauth2Scopes, error) { - errors := make([]error, 0) - x := &Oauth2Scopes{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedString additional_properties = 1; - // MAP: string - x.AdditionalProperties = make([]*NamedString, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedString{} - pair.Name = k - pair.Value, _ = compiler.StringForScalarNode(v) - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOperation creates an object of type Operation if possible, returning an error if not. -func NewOperation(in *yaml.Node, context *compiler.Context) (*Operation, error) { - errors := make([]error, 0) - x := &Operation{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"responses"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"consumes", "deprecated", "description", "externalDocs", "operationId", "parameters", "produces", "responses", "schemes", "security", "summary", "tags"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // repeated string tags = 1; - v1 := compiler.MapValueForKey(m, "tags") - if v1 != nil { - v, ok := compiler.SequenceNodeForNode(v1) - if ok { - x.Tags = compiler.StringArrayForSequenceNode(v) - } else { - message := fmt.Sprintf("has unexpected value for tags: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string summary = 2; - v2 := compiler.MapValueForKey(m, "summary") - if v2 != nil { - x.Summary, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for summary: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) - errors = append(errors, 
compiler.NewError(context, message)) - } - } - // ExternalDocs external_docs = 4; - v4 := compiler.MapValueForKey(m, "externalDocs") - if v4 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", v4, context)) - if err != nil { - errors = append(errors, err) - } - } - // string operation_id = 5; - v5 := compiler.MapValueForKey(m, "operationId") - if v5 != nil { - x.OperationId, ok = compiler.StringForScalarNode(v5) - if !ok { - message := fmt.Sprintf("has unexpected value for operationId: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string produces = 6; - v6 := compiler.MapValueForKey(m, "produces") - if v6 != nil { - v, ok := compiler.SequenceNodeForNode(v6) - if ok { - x.Produces = compiler.StringArrayForSequenceNode(v) - } else { - message := fmt.Sprintf("has unexpected value for produces: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string consumes = 7; - v7 := compiler.MapValueForKey(m, "consumes") - if v7 != nil { - v, ok := compiler.SequenceNodeForNode(v7) - if ok { - x.Consumes = compiler.StringArrayForSequenceNode(v) - } else { - message := fmt.Sprintf("has unexpected value for consumes: %s", compiler.Display(v7)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated ParametersItem parameters = 8; - v8 := compiler.MapValueForKey(m, "parameters") - if v8 != nil { - // repeated ParametersItem - x.Parameters = make([]*ParametersItem, 0) - a, ok := compiler.SequenceNodeForNode(v8) - if ok { - for _, item := range a.Content { - y, err := NewParametersItem(item, compiler.NewContext("parameters", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Parameters = append(x.Parameters, y) - } - } - } - // Responses responses = 9; - v9 := compiler.MapValueForKey(m, "responses") - if v9 != nil { - var err error - x.Responses, err = NewResponses(v9, compiler.NewContext("responses", v9, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated string schemes = 10; - v10 := compiler.MapValueForKey(m, "schemes") - if v10 != nil { - v, ok := compiler.SequenceNodeForNode(v10) - if ok { - x.Schemes = compiler.StringArrayForSequenceNode(v) - } else { - message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v10)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [http https ws wss] - if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { - message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v10)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool deprecated = 11; - v11 := compiler.MapValueForKey(m, "deprecated") - if v11 != nil { - x.Deprecated, ok = compiler.BoolForScalarNode(v11) - if !ok { - message := fmt.Sprintf("has unexpected value for deprecated: %s", compiler.Display(v11)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated SecurityRequirement security = 12; - v12 := compiler.MapValueForKey(m, "security") - if v12 != nil { - // repeated SecurityRequirement - x.Security = make([]*SecurityRequirement, 0) - a, ok := compiler.SequenceNodeForNode(v12) - if ok { - for _, item := range a.Content { - y, err := NewSecurityRequirement(item, compiler.NewContext("security", item, context)) - if err != nil { - errors = append(errors, err) - } 
- x.Security = append(x.Security, y) - } - } - } - // repeated NamedAny vendor_extension = 13; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewParameter creates an object of type Parameter if possible, returning an error if not. -func NewParameter(in *yaml.Node, context *compiler.Context) (*Parameter, error) { - errors := make([]error, 0) - x := &Parameter{} - matched := false - // BodyParameter body_parameter = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewBodyParameter(m, compiler.NewContext("bodyParameter", m, context)) - if matchingError == nil { - x.Oneof = &Parameter_BodyParameter{BodyParameter: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // NonBodyParameter non_body_parameter = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewNonBodyParameter(m, compiler.NewContext("nonBodyParameter", m, context)) - if matchingError == nil { - x.Oneof = &Parameter_NonBodyParameter{NonBodyParameter: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } else { - message := fmt.Sprintf("contains an invalid Parameter") - err := compiler.NewError(context, message) - errors = []error{err} - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewParameterDefinitions creates an object of type ParameterDefinitions if possible, returning an error if not. -func NewParameterDefinitions(in *yaml.Node, context *compiler.Context) (*ParameterDefinitions, error) { - errors := make([]error, 0) - x := &ParameterDefinitions{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedParameter additional_properties = 1; - // MAP: Parameter - x.AdditionalProperties = make([]*NamedParameter, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedParameter{} - pair.Name = k - var err error - pair.Value, err = NewParameter(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewParametersItem creates an object of type ParametersItem if possible, returning an error if not. 
-func NewParametersItem(in *yaml.Node, context *compiler.Context) (*ParametersItem, error) { - errors := make([]error, 0) - x := &ParametersItem{} - matched := false - // Parameter parameter = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewParameter(m, compiler.NewContext("parameter", m, context)) - if matchingError == nil { - x.Oneof = &ParametersItem_Parameter{Parameter: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // JsonReference json_reference = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", m, context)) - if matchingError == nil { - x.Oneof = &ParametersItem_JsonReference{JsonReference: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } else { - message := fmt.Sprintf("contains an invalid ParametersItem") - err := compiler.NewError(context, message) - errors = []error{err} - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPathItem creates an object of type PathItem if possible, returning an error if not. -func NewPathItem(in *yaml.Node, context *compiler.Context) (*PathItem, error) { - errors := make([]error, 0) - x := &PathItem{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"$ref", "delete", "get", "head", "options", "parameters", "patch", "post", "put"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string _ref = 1; - v1 := compiler.MapValueForKey(m, "$ref") - if v1 != nil { - x.XRef, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Operation get = 2; - v2 := compiler.MapValueForKey(m, "get") - if v2 != nil { - var err error - x.Get, err = NewOperation(v2, compiler.NewContext("get", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation put = 3; - v3 := compiler.MapValueForKey(m, "put") - if v3 != nil { - var err error - x.Put, err = NewOperation(v3, compiler.NewContext("put", v3, context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation post = 4; - v4 := compiler.MapValueForKey(m, "post") - if v4 != nil { - var err error - x.Post, err = NewOperation(v4, compiler.NewContext("post", v4, context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation delete = 5; - v5 := compiler.MapValueForKey(m, "delete") - if v5 != nil { - var err error - x.Delete, err = NewOperation(v5, compiler.NewContext("delete", v5, context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation options = 6; - v6 := compiler.MapValueForKey(m, "options") - if v6 != nil { - var err error - x.Options, err = NewOperation(v6, 
compiler.NewContext("options", v6, context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation head = 7; - v7 := compiler.MapValueForKey(m, "head") - if v7 != nil { - var err error - x.Head, err = NewOperation(v7, compiler.NewContext("head", v7, context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation patch = 8; - v8 := compiler.MapValueForKey(m, "patch") - if v8 != nil { - var err error - x.Patch, err = NewOperation(v8, compiler.NewContext("patch", v8, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated ParametersItem parameters = 9; - v9 := compiler.MapValueForKey(m, "parameters") - if v9 != nil { - // repeated ParametersItem - x.Parameters = make([]*ParametersItem, 0) - a, ok := compiler.SequenceNodeForNode(v9) - if ok { - for _, item := range a.Content { - y, err := NewParametersItem(item, compiler.NewContext("parameters", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Parameters = append(x.Parameters, y) - } - } - } - // repeated NamedAny vendor_extension = 10; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPathParameterSubSchema creates an object of type PathParameterSubSchema if possible, returning an error if not. 
-func NewPathParameterSubSchema(in *yaml.Node, context *compiler.Context) (*PathParameterSubSchema, error) { - errors := make([]error, 0) - x := &PathParameterSubSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"required"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // bool required = 1; - v1 := compiler.MapValueForKey(m, "required") - if v1 != nil { - x.Required, ok = compiler.BoolForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 2; - v2 := compiler.MapValueForKey(m, "in") - if v2 != nil { - x.In, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [path] - if ok && !compiler.StringArrayContainsValue([]string{"path"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 4; - v4 := compiler.MapValueForKey(m, "name") - if v4 != nil { - x.Name, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 5; - v5 := compiler.MapValueForKey(m, "type") - if v5 != nil { - x.Type, ok = compiler.StringForScalarNode(v5) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number boolean integer array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 6; - v6 := compiler.MapValueForKey(m, "format") - if v6 != nil { - x.Format, ok = 
compiler.StringForScalarNode(v6) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 7; - v7 := compiler.MapValueForKey(m, "items") - if v7 != nil { - var err error - x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", v7, context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 8; - v8 := compiler.MapValueForKey(m, "collectionFormat") - if v8 != nil { - x.CollectionFormat, ok = compiler.StringForScalarNode(v8) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 9; - v9 := compiler.MapValueForKey(m, "default") - if v9 != nil { - var err error - x.Default, err = NewAny(v9, compiler.NewContext("default", v9, context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 10; - v10 := compiler.MapValueForKey(m, "maximum") - if v10 != nil { - v, ok := compiler.FloatForScalarNode(v10) - if ok { - x.Maximum = v - } else { - message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v10)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 11; - v11 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v11 != nil { - x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v11) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v11)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 12; - v12 := compiler.MapValueForKey(m, "minimum") - if v12 != nil { - v, ok := compiler.FloatForScalarNode(v12) - if ok { - x.Minimum = v - } else { - message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v12)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 13; - v13 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v13 != nil { - x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v13) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v13)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 14; - v14 := compiler.MapValueForKey(m, "maxLength") - if v14 != nil { - t, ok := compiler.IntForScalarNode(v14) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v14)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 15; - v15 := compiler.MapValueForKey(m, "minLength") - if v15 != nil { - t, ok := compiler.IntForScalarNode(v15) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v15)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 16; - v16 := compiler.MapValueForKey(m, "pattern") - if v16 != nil { - x.Pattern, ok = compiler.StringForScalarNode(v16) - if !ok { - 
message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v16)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 17; - v17 := compiler.MapValueForKey(m, "maxItems") - if v17 != nil { - t, ok := compiler.IntForScalarNode(v17) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v17)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 18; - v18 := compiler.MapValueForKey(m, "minItems") - if v18 != nil { - t, ok := compiler.IntForScalarNode(v18) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v18)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 19; - v19 := compiler.MapValueForKey(m, "uniqueItems") - if v19 != nil { - x.UniqueItems, ok = compiler.BoolForScalarNode(v19) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v19)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 20; - v20 := compiler.MapValueForKey(m, "enum") - if v20 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := compiler.SequenceNodeForNode(v20) - if ok { - for _, item := range a.Content { - y, err := NewAny(item, compiler.NewContext("enum", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 21; - v21 := compiler.MapValueForKey(m, "multipleOf") - if v21 != nil { - v, ok := compiler.FloatForScalarNode(v21) - if ok { - x.MultipleOf = v - } else { - message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v21)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 22; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPaths creates an object of type Paths if possible, returning an error if not. 
-func NewPaths(in *yaml.Node, context *compiler.Context) (*Paths, error) { - errors := make([]error, 0) - x := &Paths{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{} - allowedPatterns := []*regexp.Regexp{pattern0, pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // repeated NamedAny vendor_extension = 1; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - // repeated NamedPathItem path = 2; - // MAP: PathItem ^/ - x.Path = make([]*NamedPathItem, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "/") { - pair := &NamedPathItem{} - pair.Name = k - var err error - pair.Value, err = NewPathItem(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.Path = append(x.Path, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPrimitivesItems creates an object of type PrimitivesItems if possible, returning an error if not. 
-func NewPrimitivesItems(in *yaml.Node, context *compiler.Context) (*PrimitivesItems, error) { - errors := make([]error, 0) - x := &PrimitivesItems{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"collectionFormat", "default", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number integer boolean array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 2; - v2 := compiler.MapValueForKey(m, "format") - if v2 != nil { - x.Format, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 3; - v3 := compiler.MapValueForKey(m, "items") - if v3 != nil { - var err error - x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", v3, context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 4; - v4 := compiler.MapValueForKey(m, "collectionFormat") - if v4 != nil { - x.CollectionFormat, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 5; - v5 := compiler.MapValueForKey(m, "default") - if v5 != nil { - var err error - x.Default, err = NewAny(v5, compiler.NewContext("default", v5, context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 6; - v6 := compiler.MapValueForKey(m, "maximum") - if v6 != nil { - v, ok := compiler.FloatForScalarNode(v6) - if ok { - x.Maximum = v - } else { - message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 7; - v7 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v7 != nil { - x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v7) - if !ok { - message := fmt.Sprintf("has 
unexpected value for exclusiveMaximum: %s", compiler.Display(v7)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 8; - v8 := compiler.MapValueForKey(m, "minimum") - if v8 != nil { - v, ok := compiler.FloatForScalarNode(v8) - if ok { - x.Minimum = v - } else { - message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v8)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 9; - v9 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v9 != nil { - x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v9) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v9)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 10; - v10 := compiler.MapValueForKey(m, "maxLength") - if v10 != nil { - t, ok := compiler.IntForScalarNode(v10) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v10)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 11; - v11 := compiler.MapValueForKey(m, "minLength") - if v11 != nil { - t, ok := compiler.IntForScalarNode(v11) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v11)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 12; - v12 := compiler.MapValueForKey(m, "pattern") - if v12 != nil { - x.Pattern, ok = compiler.StringForScalarNode(v12) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v12)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 13; - v13 := compiler.MapValueForKey(m, "maxItems") - if v13 != nil { - t, ok := compiler.IntForScalarNode(v13) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v13)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 14; - v14 := compiler.MapValueForKey(m, "minItems") - if v14 != nil { - t, ok := compiler.IntForScalarNode(v14) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v14)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 15; - v15 := compiler.MapValueForKey(m, "uniqueItems") - if v15 != nil { - x.UniqueItems, ok = compiler.BoolForScalarNode(v15) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v15)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 16; - v16 := compiler.MapValueForKey(m, "enum") - if v16 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := compiler.SequenceNodeForNode(v16) - if ok { - for _, item := range a.Content { - y, err := NewAny(item, compiler.NewContext("enum", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 17; - v17 := compiler.MapValueForKey(m, "multipleOf") - if v17 != nil { - v, ok := compiler.FloatForScalarNode(v17) - if ok { - x.MultipleOf = v - } else { - message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v17)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // 
repeated NamedAny vendor_extension = 18; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewProperties creates an object of type Properties if possible, returning an error if not. -func NewProperties(in *yaml.Node, context *compiler.Context) (*Properties, error) { - errors := make([]error, 0) - x := &Properties{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedSchema additional_properties = 1; - // MAP: Schema - x.AdditionalProperties = make([]*NamedSchema, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedSchema{} - pair.Name = k - var err error - pair.Value, err = NewSchema(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewQueryParameterSubSchema creates an object of type QueryParameterSubSchema if possible, returning an error if not. 
-func NewQueryParameterSubSchema(in *yaml.Node, context *compiler.Context) (*QueryParameterSubSchema, error) { - errors := make([]error, 0) - x := &QueryParameterSubSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // bool required = 1; - v1 := compiler.MapValueForKey(m, "required") - if v1 != nil { - x.Required, ok = compiler.BoolForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 2; - v2 := compiler.MapValueForKey(m, "in") - if v2 != nil { - x.In, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [query] - if ok && !compiler.StringArrayContainsValue([]string{"query"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 4; - v4 := compiler.MapValueForKey(m, "name") - if v4 != nil { - x.Name, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool allow_empty_value = 5; - v5 := compiler.MapValueForKey(m, "allowEmptyValue") - if v5 != nil { - x.AllowEmptyValue, ok = compiler.BoolForScalarNode(v5) - if !ok { - message := fmt.Sprintf("has unexpected value for allowEmptyValue: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 6; - v6 := compiler.MapValueForKey(m, "type") - if v6 != nil { - x.Type, ok = compiler.StringForScalarNode(v6) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number boolean integer array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 7; - v7 := compiler.MapValueForKey(m, "format") - if v7 != nil 
{ - x.Format, ok = compiler.StringForScalarNode(v7) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v7)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 8; - v8 := compiler.MapValueForKey(m, "items") - if v8 != nil { - var err error - x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", v8, context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 9; - v9 := compiler.MapValueForKey(m, "collectionFormat") - if v9 != nil { - x.CollectionFormat, ok = compiler.StringForScalarNode(v9) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes multi] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 10; - v10 := compiler.MapValueForKey(m, "default") - if v10 != nil { - var err error - x.Default, err = NewAny(v10, compiler.NewContext("default", v10, context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 11; - v11 := compiler.MapValueForKey(m, "maximum") - if v11 != nil { - v, ok := compiler.FloatForScalarNode(v11) - if ok { - x.Maximum = v - } else { - message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v11)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 12; - v12 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v12 != nil { - x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v12) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v12)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 13; - v13 := compiler.MapValueForKey(m, "minimum") - if v13 != nil { - v, ok := compiler.FloatForScalarNode(v13) - if ok { - x.Minimum = v - } else { - message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v13)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 14; - v14 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v14 != nil { - x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v14) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v14)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 15; - v15 := compiler.MapValueForKey(m, "maxLength") - if v15 != nil { - t, ok := compiler.IntForScalarNode(v15) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v15)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 16; - v16 := compiler.MapValueForKey(m, "minLength") - if v16 != nil { - t, ok := compiler.IntForScalarNode(v16) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v16)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 17; - v17 := compiler.MapValueForKey(m, "pattern") - if v17 != nil { - x.Pattern, ok = 
compiler.StringForScalarNode(v17) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v17)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 18; - v18 := compiler.MapValueForKey(m, "maxItems") - if v18 != nil { - t, ok := compiler.IntForScalarNode(v18) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v18)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 19; - v19 := compiler.MapValueForKey(m, "minItems") - if v19 != nil { - t, ok := compiler.IntForScalarNode(v19) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v19)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 20; - v20 := compiler.MapValueForKey(m, "uniqueItems") - if v20 != nil { - x.UniqueItems, ok = compiler.BoolForScalarNode(v20) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v20)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 21; - v21 := compiler.MapValueForKey(m, "enum") - if v21 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := compiler.SequenceNodeForNode(v21) - if ok { - for _, item := range a.Content { - y, err := NewAny(item, compiler.NewContext("enum", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 22; - v22 := compiler.MapValueForKey(m, "multipleOf") - if v22 != nil { - v, ok := compiler.FloatForScalarNode(v22) - if ok { - x.MultipleOf = v - } else { - message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v22)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 23; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponse creates an object of type Response if possible, returning an error if not. 
-func NewResponse(in *yaml.Node, context *compiler.Context) (*Response, error) { - errors := make([]error, 0) - x := &Response{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"description"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "examples", "headers", "schema"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // SchemaItem schema = 2; - v2 := compiler.MapValueForKey(m, "schema") - if v2 != nil { - var err error - x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - // Headers headers = 3; - v3 := compiler.MapValueForKey(m, "headers") - if v3 != nil { - var err error - x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", v3, context)) - if err != nil { - errors = append(errors, err) - } - } - // Examples examples = 4; - v4 := compiler.MapValueForKey(m, "examples") - if v4 != nil { - var err error - x.Examples, err = NewExamples(v4, compiler.NewContext("examples", v4, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 5; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponseDefinitions creates an object of type ResponseDefinitions if possible, returning an error if not. 
-func NewResponseDefinitions(in *yaml.Node, context *compiler.Context) (*ResponseDefinitions, error) {
-	errors := make([]error, 0)
-	x := &ResponseDefinitions{}
-	m, ok := compiler.UnpackMap(in)
-	if !ok {
-		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
-		errors = append(errors, compiler.NewError(context, message))
-	} else {
-		// repeated NamedResponse additional_properties = 1;
-		// MAP: Response
-		x.AdditionalProperties = make([]*NamedResponse, 0)
-		for i := 0; i < len(m.Content); i += 2 {
-			k, ok := compiler.StringForScalarNode(m.Content[i])
-			if ok {
-				v := m.Content[i+1]
-				pair := &NamedResponse{}
-				pair.Name = k
-				var err error
-				pair.Value, err = NewResponse(v, compiler.NewContext(k, v, context))
-				if err != nil {
-					errors = append(errors, err)
-				}
-				x.AdditionalProperties = append(x.AdditionalProperties, pair)
-			}
-		}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewResponseValue creates an object of type ResponseValue if possible, returning an error if not.
-func NewResponseValue(in *yaml.Node, context *compiler.Context) (*ResponseValue, error) {
-	errors := make([]error, 0)
-	x := &ResponseValue{}
-	matched := false
-	// Response response = 1;
-	{
-		m, ok := compiler.UnpackMap(in)
-		if ok {
-			// errors might be ok here, they mean we just don't have the right subtype
-			t, matchingError := NewResponse(m, compiler.NewContext("response", m, context))
-			if matchingError == nil {
-				x.Oneof = &ResponseValue_Response{Response: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-	}
-	// JsonReference json_reference = 2;
-	{
-		m, ok := compiler.UnpackMap(in)
-		if ok {
-			// errors might be ok here, they mean we just don't have the right subtype
-			t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", m, context))
-			if matchingError == nil {
-				x.Oneof = &ResponseValue_JsonReference{JsonReference: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-	}
-	if matched {
-		// since the oneof matched one of its possibilities, discard any matching errors
-		errors = make([]error, 0)
-	} else {
-		message := fmt.Sprintf("contains an invalid ResponseValue")
-		err := compiler.NewError(context, message)
-		errors = []error{err}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewResponses creates an object of type Responses if possible, returning an error if not.
-func NewResponses(in *yaml.Node, context *compiler.Context) (*Responses, error) { - errors := make([]error, 0) - x := &Responses{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{} - allowedPatterns := []*regexp.Regexp{pattern2, pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // repeated NamedResponseValue response_code = 1; - // MAP: ResponseValue ^([0-9]{3})$|^(default)$ - x.ResponseCode = make([]*NamedResponseValue, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if pattern2.MatchString(k) { - pair := &NamedResponseValue{} - pair.Name = k - var err error - pair.Value, err = NewResponseValue(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.ResponseCode = append(x.ResponseCode, pair) - } - } - } - // repeated NamedAny vendor_extension = 2; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSchema creates an object of type Schema if possible, returning an error if not. 
-func NewSchema(in *yaml.Node, context *compiler.Context) (*Schema, error) { - errors := make([]error, 0) - x := &Schema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"$ref", "additionalProperties", "allOf", "default", "description", "discriminator", "enum", "example", "exclusiveMaximum", "exclusiveMinimum", "externalDocs", "format", "items", "maxItems", "maxLength", "maxProperties", "maximum", "minItems", "minLength", "minProperties", "minimum", "multipleOf", "pattern", "properties", "readOnly", "required", "title", "type", "uniqueItems", "xml"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string _ref = 1; - v1 := compiler.MapValueForKey(m, "$ref") - if v1 != nil { - x.XRef, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 2; - v2 := compiler.MapValueForKey(m, "format") - if v2 != nil { - x.Format, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string title = 3; - v3 := compiler.MapValueForKey(m, "title") - if v3 != nil { - x.Title, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 4; - v4 := compiler.MapValueForKey(m, "description") - if v4 != nil { - x.Description, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 5; - v5 := compiler.MapValueForKey(m, "default") - if v5 != nil { - var err error - x.Default, err = NewAny(v5, compiler.NewContext("default", v5, context)) - if err != nil { - errors = append(errors, err) - } - } - // float multiple_of = 6; - v6 := compiler.MapValueForKey(m, "multipleOf") - if v6 != nil { - v, ok := compiler.FloatForScalarNode(v6) - if ok { - x.MultipleOf = v - } else { - message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float maximum = 7; - v7 := compiler.MapValueForKey(m, "maximum") - if v7 != nil { - v, ok := compiler.FloatForScalarNode(v7) - if ok { - x.Maximum = v - } else { - message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v7)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 8; - v8 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v8 != nil { - x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v8) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v8)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 9; - 
v9 := compiler.MapValueForKey(m, "minimum") - if v9 != nil { - v, ok := compiler.FloatForScalarNode(v9) - if ok { - x.Minimum = v - } else { - message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v9)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 10; - v10 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v10 != nil { - x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v10) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v10)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 11; - v11 := compiler.MapValueForKey(m, "maxLength") - if v11 != nil { - t, ok := compiler.IntForScalarNode(v11) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v11)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 12; - v12 := compiler.MapValueForKey(m, "minLength") - if v12 != nil { - t, ok := compiler.IntForScalarNode(v12) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v12)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 13; - v13 := compiler.MapValueForKey(m, "pattern") - if v13 != nil { - x.Pattern, ok = compiler.StringForScalarNode(v13) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v13)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 14; - v14 := compiler.MapValueForKey(m, "maxItems") - if v14 != nil { - t, ok := compiler.IntForScalarNode(v14) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v14)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 15; - v15 := compiler.MapValueForKey(m, "minItems") - if v15 != nil { - t, ok := compiler.IntForScalarNode(v15) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v15)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 16; - v16 := compiler.MapValueForKey(m, "uniqueItems") - if v16 != nil { - x.UniqueItems, ok = compiler.BoolForScalarNode(v16) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v16)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_properties = 17; - v17 := compiler.MapValueForKey(m, "maxProperties") - if v17 != nil { - t, ok := compiler.IntForScalarNode(v17) - if ok { - x.MaxProperties = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxProperties: %s", compiler.Display(v17)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_properties = 18; - v18 := compiler.MapValueForKey(m, "minProperties") - if v18 != nil { - t, ok := compiler.IntForScalarNode(v18) - if ok { - x.MinProperties = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minProperties: %s", compiler.Display(v18)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string required = 19; - v19 := compiler.MapValueForKey(m, "required") - if v19 != nil { - v, ok := compiler.SequenceNodeForNode(v19) - if ok { - x.Required = 
compiler.StringArrayForSequenceNode(v) - } else { - message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v19)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 20; - v20 := compiler.MapValueForKey(m, "enum") - if v20 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := compiler.SequenceNodeForNode(v20) - if ok { - for _, item := range a.Content { - y, err := NewAny(item, compiler.NewContext("enum", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // AdditionalPropertiesItem additional_properties = 21; - v21 := compiler.MapValueForKey(m, "additionalProperties") - if v21 != nil { - var err error - x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", v21, context)) - if err != nil { - errors = append(errors, err) - } - } - // TypeItem type = 22; - v22 := compiler.MapValueForKey(m, "type") - if v22 != nil { - var err error - x.Type, err = NewTypeItem(v22, compiler.NewContext("type", v22, context)) - if err != nil { - errors = append(errors, err) - } - } - // ItemsItem items = 23; - v23 := compiler.MapValueForKey(m, "items") - if v23 != nil { - var err error - x.Items, err = NewItemsItem(v23, compiler.NewContext("items", v23, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated Schema all_of = 24; - v24 := compiler.MapValueForKey(m, "allOf") - if v24 != nil { - // repeated Schema - x.AllOf = make([]*Schema, 0) - a, ok := compiler.SequenceNodeForNode(v24) - if ok { - for _, item := range a.Content { - y, err := NewSchema(item, compiler.NewContext("allOf", item, context)) - if err != nil { - errors = append(errors, err) - } - x.AllOf = append(x.AllOf, y) - } - } - } - // Properties properties = 25; - v25 := compiler.MapValueForKey(m, "properties") - if v25 != nil { - var err error - x.Properties, err = NewProperties(v25, compiler.NewContext("properties", v25, context)) - if err != nil { - errors = append(errors, err) - } - } - // string discriminator = 26; - v26 := compiler.MapValueForKey(m, "discriminator") - if v26 != nil { - x.Discriminator, ok = compiler.StringForScalarNode(v26) - if !ok { - message := fmt.Sprintf("has unexpected value for discriminator: %s", compiler.Display(v26)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool read_only = 27; - v27 := compiler.MapValueForKey(m, "readOnly") - if v27 != nil { - x.ReadOnly, ok = compiler.BoolForScalarNode(v27) - if !ok { - message := fmt.Sprintf("has unexpected value for readOnly: %s", compiler.Display(v27)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Xml xml = 28; - v28 := compiler.MapValueForKey(m, "xml") - if v28 != nil { - var err error - x.Xml, err = NewXml(v28, compiler.NewContext("xml", v28, context)) - if err != nil { - errors = append(errors, err) - } - } - // ExternalDocs external_docs = 29; - v29 := compiler.MapValueForKey(m, "externalDocs") - if v29 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", v29, context)) - if err != nil { - errors = append(errors, err) - } - } - // Any example = 30; - v30 := compiler.MapValueForKey(m, "example") - if v30 != nil { - var err error - x.Example, err = NewAny(v30, compiler.NewContext("example", v30, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 31; - // MAP: Any ^x- - x.VendorExtension 
= make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSchemaItem creates an object of type SchemaItem if possible, returning an error if not. -func NewSchemaItem(in *yaml.Node, context *compiler.Context) (*SchemaItem, error) { - errors := make([]error, 0) - x := &SchemaItem{} - matched := false - // Schema schema = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewSchema(m, compiler.NewContext("schema", m, context)) - if matchingError == nil { - x.Oneof = &SchemaItem_Schema{Schema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // FileSchema file_schema = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewFileSchema(m, compiler.NewContext("fileSchema", m, context)) - if matchingError == nil { - x.Oneof = &SchemaItem_FileSchema{FileSchema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } else { - message := fmt.Sprintf("contains an invalid SchemaItem") - err := compiler.NewError(context, message) - errors = []error{err} - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSecurityDefinitions creates an object of type SecurityDefinitions if possible, returning an error if not. -func NewSecurityDefinitions(in *yaml.Node, context *compiler.Context) (*SecurityDefinitions, error) { - errors := make([]error, 0) - x := &SecurityDefinitions{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedSecurityDefinitionsItem additional_properties = 1; - // MAP: SecurityDefinitionsItem - x.AdditionalProperties = make([]*NamedSecurityDefinitionsItem, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedSecurityDefinitionsItem{} - pair.Name = k - var err error - pair.Value, err = NewSecurityDefinitionsItem(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSecurityDefinitionsItem creates an object of type SecurityDefinitionsItem if possible, returning an error if not. 
-func NewSecurityDefinitionsItem(in *yaml.Node, context *compiler.Context) (*SecurityDefinitionsItem, error) { - errors := make([]error, 0) - x := &SecurityDefinitionsItem{} - matched := false - // BasicAuthenticationSecurity basic_authentication_security = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewBasicAuthenticationSecurity(m, compiler.NewContext("basicAuthenticationSecurity", m, context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{BasicAuthenticationSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // ApiKeySecurity api_key_security = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewApiKeySecurity(m, compiler.NewContext("apiKeySecurity", m, context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{ApiKeySecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Oauth2ImplicitSecurity oauth2_implicit_security = 3; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2ImplicitSecurity(m, compiler.NewContext("oauth2ImplicitSecurity", m, context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{Oauth2ImplicitSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Oauth2PasswordSecurity oauth2_password_security = 4; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2PasswordSecurity(m, compiler.NewContext("oauth2PasswordSecurity", m, context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{Oauth2PasswordSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Oauth2ApplicationSecurity oauth2_application_security = 5; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2ApplicationSecurity(m, compiler.NewContext("oauth2ApplicationSecurity", m, context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{Oauth2ApplicationSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Oauth2AccessCodeSecurity oauth2_access_code_security = 6; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2AccessCodeSecurity(m, compiler.NewContext("oauth2AccessCodeSecurity", m, context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{Oauth2AccessCodeSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } else { - message := fmt.Sprintf("contains an invalid SecurityDefinitionsItem") - err := compiler.NewError(context, message) - errors = []error{err} - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSecurityRequirement creates an object of type SecurityRequirement if 
possible, returning an error if not. -func NewSecurityRequirement(in *yaml.Node, context *compiler.Context) (*SecurityRequirement, error) { - errors := make([]error, 0) - x := &SecurityRequirement{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedStringArray additional_properties = 1; - // MAP: StringArray - x.AdditionalProperties = make([]*NamedStringArray, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedStringArray{} - pair.Name = k - var err error - pair.Value, err = NewStringArray(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewStringArray creates an object of type StringArray if possible, returning an error if not. -func NewStringArray(in *yaml.Node, context *compiler.Context) (*StringArray, error) { - errors := make([]error, 0) - x := &StringArray{} - x.Value = make([]string, 0) - for _, node := range in.Content { - s, _ := compiler.StringForScalarNode(node) - x.Value = append(x.Value, s) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewTag creates an object of type Tag if possible, returning an error if not. -func NewTag(in *yaml.Node, context *compiler.Context) (*Tag, error) { - errors := make([]error, 0) - x := &Tag{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"name"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "externalDocs", "name"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 2; - v2 := compiler.MapValueForKey(m, "description") - if v2 != nil { - x.Description, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ExternalDocs external_docs = 3; - v3 := compiler.MapValueForKey(m, "externalDocs") - if v3 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", v3, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 4; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 
2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewTypeItem creates an object of type TypeItem if possible, returning an error if not. -func NewTypeItem(in *yaml.Node, context *compiler.Context) (*TypeItem, error) { - errors := make([]error, 0) - x := &TypeItem{} - v1 := in - switch v1.Kind { - case yaml.ScalarNode: - x.Value = make([]string, 0) - x.Value = append(x.Value, v1.Value) - case yaml.SequenceNode: - x.Value = make([]string, 0) - for _, v := range v1.Content { - value := v.Value - ok := v.Kind == yaml.ScalarNode - if ok { - x.Value = append(x.Value, value) - } else { - message := fmt.Sprintf("has unexpected value for string array element: %+v (%T)", value, value) - errors = append(errors, compiler.NewError(context, message)) - } - } - default: - message := fmt.Sprintf("has unexpected value for string array: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewVendorExtension creates an object of type VendorExtension if possible, returning an error if not. -func NewVendorExtension(in *yaml.Node, context *compiler.Context) (*VendorExtension, error) { - errors := make([]error, 0) - x := &VendorExtension{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedAny additional_properties = 1; - // MAP: Any - x.AdditionalProperties = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewXml creates an object of type Xml if possible, returning an error if not. 
-func NewXml(in *yaml.Node, context *compiler.Context) (*Xml, error) { - errors := make([]error, 0) - x := &Xml{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"attribute", "name", "namespace", "prefix", "wrapped"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string namespace = 2; - v2 := compiler.MapValueForKey(m, "namespace") - if v2 != nil { - x.Namespace, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for namespace: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string prefix = 3; - v3 := compiler.MapValueForKey(m, "prefix") - if v3 != nil { - x.Prefix, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for prefix: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool attribute = 4; - v4 := compiler.MapValueForKey(m, "attribute") - if v4 != nil { - x.Attribute, ok = compiler.BoolForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for attribute: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool wrapped = 5; - v5 := compiler.MapValueForKey(m, "wrapped") - if v5 != nil { - x.Wrapped, ok = compiler.BoolForScalarNode(v5) - if !ok { - message := fmt.Sprintf("has unexpected value for wrapped: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside AdditionalPropertiesItem objects. 
-func (m *AdditionalPropertiesItem) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*AdditionalPropertiesItem_Schema) - if ok { - _, err := p.Schema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Any objects. -func (m *Any) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ApiKeySecurity objects. -func (m *ApiKeySecurity) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside BasicAuthenticationSecurity objects. -func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside BodyParameter objects. -func (m *BodyParameter) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Schema != nil { - _, err := m.Schema.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Contact objects. -func (m *Contact) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Default objects. -func (m *Default) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Definitions objects. -func (m *Definitions) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Document objects. 
-func (m *Document) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Info != nil { - _, err := m.Info.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Paths != nil { - _, err := m.Paths.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Definitions != nil { - _, err := m.Definitions.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Parameters != nil { - _, err := m.Parameters.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Responses != nil { - _, err := m.Responses.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Security { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.SecurityDefinitions != nil { - _, err := m.SecurityDefinitions.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Tags { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Examples objects. -func (m *Examples) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ExternalDocs objects. -func (m *ExternalDocs) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside FileSchema objects. -func (m *FileSchema) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Example != nil { - _, err := m.Example.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside FormDataParameterSubSchema objects. 
-func (m *FormDataParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Header objects. -func (m *Header) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside HeaderParameterSubSchema objects. -func (m *HeaderParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Headers objects. -func (m *Headers) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Info objects. -func (m *Info) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Contact != nil { - _, err := m.Contact.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.License != nil { - _, err := m.License.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ItemsItem objects. 
-func (m *ItemsItem) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.Schema { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside JsonReference objects. -func (m *JsonReference) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.XRef != "" { - info, err := compiler.ReadInfoForRef(root, m.XRef) - if err != nil { - return nil, err - } - if info != nil { - replacement, err := NewJsonReference(info, nil) - if err == nil { - *m = *replacement - return m.ResolveReferences(root) - } - } - return info, nil - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside License objects. -func (m *License) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedAny objects. -func (m *NamedAny) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedHeader objects. -func (m *NamedHeader) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedParameter objects. -func (m *NamedParameter) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedPathItem objects. -func (m *NamedPathItem) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedResponse objects. -func (m *NamedResponse) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedResponseValue objects. -func (m *NamedResponseValue) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedSchema objects. 
-func (m *NamedSchema) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedSecurityDefinitionsItem objects. -func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedString objects. -func (m *NamedString) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedStringArray objects. -func (m *NamedStringArray) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NonBodyParameter objects. -func (m *NonBodyParameter) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*NonBodyParameter_HeaderParameterSubSchema) - if ok { - _, err := p.HeaderParameterSubSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*NonBodyParameter_FormDataParameterSubSchema) - if ok { - _, err := p.FormDataParameterSubSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*NonBodyParameter_QueryParameterSubSchema) - if ok { - _, err := p.QueryParameterSubSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*NonBodyParameter_PathParameterSubSchema) - if ok { - _, err := p.PathParameterSubSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2AccessCodeSecurity objects. -func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2ApplicationSecurity objects. -func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2ImplicitSecurity objects. 
-func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2PasswordSecurity objects. -func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2Scopes objects. -func (m *Oauth2Scopes) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Operation objects. -func (m *Operation) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Parameters { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.Responses != nil { - _, err := m.Responses.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Security { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Parameter objects. -func (m *Parameter) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*Parameter_BodyParameter) - if ok { - _, err := p.BodyParameter.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*Parameter_NonBodyParameter) - if ok { - _, err := p.NonBodyParameter.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ParameterDefinitions objects. -func (m *ParameterDefinitions) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ParametersItem objects. 
-func (m *ParametersItem) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*ParametersItem_Parameter) - if ok { - _, err := p.Parameter.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*ParametersItem_JsonReference) - if ok { - info, err := p.JsonReference.ResolveReferences(root) - if err != nil { - return nil, err - } else if info != nil { - n, err := NewParametersItem(info, nil) - if err != nil { - return nil, err - } else if n != nil { - *m = *n - return nil, nil - } - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside PathItem objects. -func (m *PathItem) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.XRef != "" { - info, err := compiler.ReadInfoForRef(root, m.XRef) - if err != nil { - return nil, err - } - if info != nil { - replacement, err := NewPathItem(info, nil) - if err == nil { - *m = *replacement - return m.ResolveReferences(root) - } - } - return info, nil - } - if m.Get != nil { - _, err := m.Get.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Put != nil { - _, err := m.Put.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Post != nil { - _, err := m.Post.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Delete != nil { - _, err := m.Delete.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Options != nil { - _, err := m.Options.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Head != nil { - _, err := m.Head.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Patch != nil { - _, err := m.Patch.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Parameters { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside PathParameterSubSchema objects. -func (m *PathParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Paths objects. 
-func (m *Paths) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.Path { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside PrimitivesItems objects. -func (m *PrimitivesItems) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Properties objects. -func (m *Properties) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside QueryParameterSubSchema objects. -func (m *QueryParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Response objects. -func (m *Response) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Schema != nil { - _, err := m.Schema.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Headers != nil { - _, err := m.Headers.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Examples != nil { - _, err := m.Examples.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ResponseDefinitions objects. 
-func (m *ResponseDefinitions) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ResponseValue objects. -func (m *ResponseValue) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*ResponseValue_Response) - if ok { - _, err := p.Response.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*ResponseValue_JsonReference) - if ok { - info, err := p.JsonReference.ResolveReferences(root) - if err != nil { - return nil, err - } else if info != nil { - n, err := NewResponseValue(info, nil) - if err != nil { - return nil, err - } else if n != nil { - *m = *n - return nil, nil - } - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Responses objects. -func (m *Responses) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.ResponseCode { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Schema objects. -func (m *Schema) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.XRef != "" { - info, err := compiler.ReadInfoForRef(root, m.XRef) - if err != nil { - return nil, err - } - if info != nil { - replacement, err := NewSchema(info, nil) - if err == nil { - *m = *replacement - return m.ResolveReferences(root) - } - } - return info, nil - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.AdditionalProperties != nil { - _, err := m.AdditionalProperties.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Type != nil { - _, err := m.Type.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.AllOf { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.Properties != nil { - _, err := m.Properties.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Xml != nil { - _, err := m.Xml.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Example != nil { - _, err := m.Example.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = 
append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside SchemaItem objects. -func (m *SchemaItem) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*SchemaItem_Schema) - if ok { - _, err := p.Schema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*SchemaItem_FileSchema) - if ok { - _, err := p.FileSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside SecurityDefinitions objects. -func (m *SecurityDefinitions) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside SecurityDefinitionsItem objects. -func (m *SecurityDefinitionsItem) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*SecurityDefinitionsItem_BasicAuthenticationSecurity) - if ok { - _, err := p.BasicAuthenticationSecurity.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*SecurityDefinitionsItem_ApiKeySecurity) - if ok { - _, err := p.ApiKeySecurity.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ImplicitSecurity) - if ok { - _, err := p.Oauth2ImplicitSecurity.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2PasswordSecurity) - if ok { - _, err := p.Oauth2PasswordSecurity.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ApplicationSecurity) - if ok { - _, err := p.Oauth2ApplicationSecurity.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) - if ok { - _, err := p.Oauth2AccessCodeSecurity.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside SecurityRequirement objects. -func (m *SecurityRequirement) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside StringArray objects. -func (m *StringArray) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Tag objects. 
-func (m *Tag) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside TypeItem objects. -func (m *TypeItem) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside VendorExtension objects. -func (m *VendorExtension) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Xml objects. -func (m *Xml) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ToRawInfo returns a description of AdditionalPropertiesItem suitable for JSON or YAML export. -func (m *AdditionalPropertiesItem) ToRawInfo() *yaml.Node { - // ONE OF WRAPPER - // AdditionalPropertiesItem - // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetSchema() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if v1, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { - return compiler.NewScalarNodeForBool(v1.Boolean) - } - return compiler.NewNullNode() -} - -// ToRawInfo returns a description of Any suitable for JSON or YAML export. -func (m *Any) ToRawInfo() *yaml.Node { - var err error - var node yaml.Node - err = yaml.Unmarshal([]byte(m.Yaml), &node) - if err == nil { - if node.Kind == yaml.DocumentNode { - return node.Content[0] - } - return &node - } - return compiler.NewNullNode() -} - -// ToRawInfo returns a description of ApiKeySecurity suitable for JSON or YAML export. -func (m *ApiKeySecurity) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - // always include this required field. 
- info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of BasicAuthenticationSecurity suitable for JSON or YAML export. -func (m *BasicAuthenticationSecurity) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of BodyParameter suitable for JSON or YAML export. -func (m *BodyParameter) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) - if m.Required != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("schema")) - info.Content = append(info.Content, m.Schema.ToRawInfo()) - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Contact suitable for JSON or YAML export. 
-func (m *Contact) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - if m.Url != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("url")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url)) - } - if m.Email != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("email")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Email)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Default suitable for JSON or YAML export. -func (m *Default) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Definitions suitable for JSON or YAML export. -func (m *Definitions) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Document suitable for JSON or YAML export. -func (m *Document) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("swagger")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Swagger)) - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("info")) - info.Content = append(info.Content, m.Info.ToRawInfo()) - if m.Host != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("host")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Host)) - } - if m.BasePath != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("basePath")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.BasePath)) - } - if len(m.Schemes) != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("schemes")) - info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Schemes)) - } - if len(m.Consumes) != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("consumes")) - info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Consumes)) - } - if len(m.Produces) != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("produces")) - info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Produces)) - } - // always include this required field. 
- info.Content = append(info.Content, compiler.NewScalarNodeForString("paths")) - info.Content = append(info.Content, m.Paths.ToRawInfo()) - if m.Definitions != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("definitions")) - info.Content = append(info.Content, m.Definitions.ToRawInfo()) - } - if m.Parameters != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) - info.Content = append(info.Content, m.Parameters.ToRawInfo()) - } - if m.Responses != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("responses")) - info.Content = append(info.Content, m.Responses.ToRawInfo()) - } - if len(m.Security) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Security { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("security")) - info.Content = append(info.Content, items) - } - if m.SecurityDefinitions != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("securityDefinitions")) - info.Content = append(info.Content, m.SecurityDefinitions.ToRawInfo()) - } - if len(m.Tags) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Tags { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("tags")) - info.Content = append(info.Content, items) - } - if m.ExternalDocs != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) - info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Examples suitable for JSON or YAML export. -func (m *Examples) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export. -func (m *ExternalDocs) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("url")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url)) - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of FileSchema suitable for JSON or YAML export. 
-func (m *FileSchema) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Format != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) - } - if m.Title != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("title")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title)) - } - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.Default != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) - info.Content = append(info.Content, m.Default.ToRawInfo()) - } - if len(m.Required) != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) - info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Required)) - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - if m.ReadOnly != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly)) - } - if m.ExternalDocs != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) - info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) - } - if m.Example != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("example")) - info.Content = append(info.Content, m.Example.ToRawInfo()) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of FormDataParameterSubSchema suitable for JSON or YAML export. 
-func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Required != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) - } - if m.In != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) - } - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - if m.AllowEmptyValue != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue)) - } - if m.Type != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - } - if m.Format != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) - } - if m.Items != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) - info.Content = append(info.Content, m.Items.ToRawInfo()) - } - if m.CollectionFormat != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) - } - if m.Default != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) - info.Content = append(info.Content, m.Default.ToRawInfo()) - } - if m.Maximum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) - } - if m.ExclusiveMaximum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) - } - if m.Minimum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) - } - if m.ExclusiveMinimum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) - } - if m.MaxLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) - } - if m.MinLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) - } - if m.Pattern != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) - } - if m.MaxItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) - info.Content = 
append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) - } - if m.MinItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) - } - if m.UniqueItems != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) - } - if len(m.Enum) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Enum { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) - info.Content = append(info.Content, items) - } - if m.MultipleOf != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Header suitable for JSON or YAML export. -func (m *Header) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - if m.Format != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) - } - if m.Items != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) - info.Content = append(info.Content, m.Items.ToRawInfo()) - } - if m.CollectionFormat != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) - } - if m.Default != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) - info.Content = append(info.Content, m.Default.ToRawInfo()) - } - if m.Maximum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) - } - if m.ExclusiveMaximum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) - } - if m.Minimum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) - } - if m.ExclusiveMinimum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) - } - if m.MaxLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) - } - if m.MinLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) - } - if m.Pattern != "" { 
- info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) - } - if m.MaxItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) - } - if m.MinItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) - } - if m.UniqueItems != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) - } - if len(m.Enum) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Enum { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) - info.Content = append(info.Content, items) - } - if m.MultipleOf != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) - } - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export. 
-func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Required != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) - } - if m.In != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) - } - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - if m.Type != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - } - if m.Format != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) - } - if m.Items != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) - info.Content = append(info.Content, m.Items.ToRawInfo()) - } - if m.CollectionFormat != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) - } - if m.Default != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) - info.Content = append(info.Content, m.Default.ToRawInfo()) - } - if m.Maximum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) - } - if m.ExclusiveMaximum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) - } - if m.Minimum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) - } - if m.ExclusiveMinimum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) - } - if m.MaxLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) - } - if m.MinLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) - } - if m.Pattern != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) - } - if m.MaxItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) - } - if m.MinItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) - info.Content = append(info.Content, 
compiler.NewScalarNodeForInt(m.MinItems)) - } - if m.UniqueItems != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) - } - if len(m.Enum) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Enum { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) - info.Content = append(info.Content, items) - } - if m.MultipleOf != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Headers suitable for JSON or YAML export. -func (m *Headers) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Info suitable for JSON or YAML export. -func (m *Info) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("title")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title)) - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("version")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Version)) - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.TermsOfService != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("termsOfService")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TermsOfService)) - } - if m.Contact != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("contact")) - info.Content = append(info.Content, m.Contact.ToRawInfo()) - } - if m.License != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("license")) - info.Content = append(info.Content, m.License.ToRawInfo()) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export. 
-func (m *ItemsItem) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if len(m.Schema) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Schema { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("schema")) - info.Content = append(info.Content, items) - } - return info -} - -// ToRawInfo returns a description of JsonReference suitable for JSON or YAML export. -func (m *JsonReference) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef)) - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - return info -} - -// ToRawInfo returns a description of License suitable for JSON or YAML export. -func (m *License) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - if m.Url != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("url")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of NamedAny suitable for JSON or YAML export. -func (m *NamedAny) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - if m.Value != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("value")) - info.Content = append(info.Content, m.Value.ToRawInfo()) - } - return info -} - -// ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export. -func (m *NamedHeader) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - // &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export. 
-func (m *NamedParameter) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - // &{Name:value Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export. -func (m *NamedPathItem) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - // &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export. -func (m *NamedResponse) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - // &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export. -func (m *NamedResponseValue) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - // &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export. -func (m *NamedSchema) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - // &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export. -func (m *NamedSecurityDefinitionsItem) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - // &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedString suitable for JSON or YAML export. 
-func (m *NamedString) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - if m.Value != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("value")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Value)) - } - return info -} - -// ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export. -func (m *NamedStringArray) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - // &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NonBodyParameter suitable for JSON or YAML export. -func (m *NonBodyParameter) ToRawInfo() *yaml.Node { - // ONE OF WRAPPER - // NonBodyParameter - // {Name:headerParameterSubSchema Type:HeaderParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetHeaderParameterSubSchema() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:formDataParameterSubSchema Type:FormDataParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetFormDataParameterSubSchema() - if v1 != nil { - return v1.ToRawInfo() - } - // {Name:queryParameterSubSchema Type:QueryParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v2 := m.GetQueryParameterSubSchema() - if v2 != nil { - return v2.ToRawInfo() - } - // {Name:pathParameterSubSchema Type:PathParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v3 := m.GetPathParameterSubSchema() - if v3 != nil { - return v3.ToRawInfo() - } - return compiler.NewNullNode() -} - -// ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export. -func (m *Oauth2AccessCodeSecurity) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("flow")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow)) - if m.Scopes != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) - info.Content = append(info.Content, m.Scopes.ToRawInfo()) - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("authorizationUrl")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.AuthorizationUrl)) - // always include this required field. 
- info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl)) - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export. -func (m *Oauth2ApplicationSecurity) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("flow")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow)) - if m.Scopes != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) - info.Content = append(info.Content, m.Scopes.ToRawInfo()) - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl)) - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export. -func (m *Oauth2ImplicitSecurity) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("flow")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow)) - if m.Scopes != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) - info.Content = append(info.Content, m.Scopes.ToRawInfo()) - } - // always include this required field. 
- info.Content = append(info.Content, compiler.NewScalarNodeForString("authorizationUrl")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.AuthorizationUrl)) - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export. -func (m *Oauth2PasswordSecurity) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("flow")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow)) - if m.Scopes != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) - info.Content = append(info.Content, m.Scopes.ToRawInfo()) - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl)) - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export. -func (m *Oauth2Scopes) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Operation suitable for JSON or YAML export. 
-func (m *Operation) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if len(m.Tags) != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("tags")) - info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Tags)) - } - if m.Summary != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("summary")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Summary)) - } - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.ExternalDocs != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) - info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) - } - if m.OperationId != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("operationId")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.OperationId)) - } - if len(m.Produces) != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("produces")) - info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Produces)) - } - if len(m.Consumes) != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("consumes")) - info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Consumes)) - } - if len(m.Parameters) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Parameters { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) - info.Content = append(info.Content, items) - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("responses")) - info.Content = append(info.Content, m.Responses.ToRawInfo()) - if len(m.Schemes) != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("schemes")) - info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Schemes)) - } - if m.Deprecated != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("deprecated")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Deprecated)) - } - if len(m.Security) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Security { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("security")) - info.Content = append(info.Content, items) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Parameter suitable for JSON or YAML export. 
-func (m *Parameter) ToRawInfo() *yaml.Node { - // ONE OF WRAPPER - // Parameter - // {Name:bodyParameter Type:BodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetBodyParameter() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:nonBodyParameter Type:NonBodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetNonBodyParameter() - if v1 != nil { - return v1.ToRawInfo() - } - return compiler.NewNullNode() -} - -// ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export. -func (m *ParameterDefinitions) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of ParametersItem suitable for JSON or YAML export. -func (m *ParametersItem) ToRawInfo() *yaml.Node { - // ONE OF WRAPPER - // ParametersItem - // {Name:parameter Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetParameter() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetJsonReference() - if v1 != nil { - return v1.ToRawInfo() - } - return compiler.NewNullNode() -} - -// ToRawInfo returns a description of PathItem suitable for JSON or YAML export. -func (m *PathItem) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.XRef != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef)) - } - if m.Get != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("get")) - info.Content = append(info.Content, m.Get.ToRawInfo()) - } - if m.Put != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("put")) - info.Content = append(info.Content, m.Put.ToRawInfo()) - } - if m.Post != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("post")) - info.Content = append(info.Content, m.Post.ToRawInfo()) - } - if m.Delete != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("delete")) - info.Content = append(info.Content, m.Delete.ToRawInfo()) - } - if m.Options != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("options")) - info.Content = append(info.Content, m.Options.ToRawInfo()) - } - if m.Head != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("head")) - info.Content = append(info.Content, m.Head.ToRawInfo()) - } - if m.Patch != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("patch")) - info.Content = append(info.Content, m.Patch.ToRawInfo()) - } - if len(m.Parameters) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Parameters { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) - info.Content = append(info.Content, items) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, 
compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of PathParameterSubSchema suitable for JSON or YAML export. -func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) - if m.In != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) - } - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - if m.Type != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - } - if m.Format != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) - } - if m.Items != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) - info.Content = append(info.Content, m.Items.ToRawInfo()) - } - if m.CollectionFormat != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) - } - if m.Default != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) - info.Content = append(info.Content, m.Default.ToRawInfo()) - } - if m.Maximum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) - } - if m.ExclusiveMaximum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) - } - if m.Minimum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) - } - if m.ExclusiveMinimum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) - } - if m.MaxLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) - } - if m.MinLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) - } - if m.Pattern != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) - } - if m.MaxItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) - 
info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) - } - if m.MinItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) - } - if m.UniqueItems != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) - } - if len(m.Enum) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Enum { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) - info.Content = append(info.Content, items) - } - if m.MultipleOf != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Paths suitable for JSON or YAML export. -func (m *Paths) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - if m.Path != nil { - for _, item := range m.Path { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export. 
-func (m *PrimitivesItems) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Type != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - } - if m.Format != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) - } - if m.Items != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) - info.Content = append(info.Content, m.Items.ToRawInfo()) - } - if m.CollectionFormat != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) - } - if m.Default != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) - info.Content = append(info.Content, m.Default.ToRawInfo()) - } - if m.Maximum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) - } - if m.ExclusiveMaximum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) - } - if m.Minimum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) - } - if m.ExclusiveMinimum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) - } - if m.MaxLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) - } - if m.MinLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) - } - if m.Pattern != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) - } - if m.MaxItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) - } - if m.MinItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) - } - if m.UniqueItems != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) - } - if len(m.Enum) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Enum { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) - info.Content = append(info.Content, items) - } - if m.MultipleOf != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) - } - if m.VendorExtension != nil { - for _, 
item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Properties suitable for JSON or YAML export. -func (m *Properties) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export. -func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Required != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) - } - if m.In != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) - } - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - if m.AllowEmptyValue != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue)) - } - if m.Type != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - } - if m.Format != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) - } - if m.Items != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) - info.Content = append(info.Content, m.Items.ToRawInfo()) - } - if m.CollectionFormat != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) - } - if m.Default != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) - info.Content = append(info.Content, m.Default.ToRawInfo()) - } - if m.Maximum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) - } - if m.ExclusiveMaximum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) - } - if m.Minimum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) - } - if m.ExclusiveMinimum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) - info.Content = append(info.Content, 
compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) - } - if m.MaxLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) - } - if m.MinLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) - } - if m.Pattern != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) - } - if m.MaxItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) - } - if m.MinItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) - } - if m.UniqueItems != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) - } - if len(m.Enum) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Enum { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) - info.Content = append(info.Content, items) - } - if m.MultipleOf != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Response suitable for JSON or YAML export. -func (m *Response) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - if m.Schema != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("schema")) - info.Content = append(info.Content, m.Schema.ToRawInfo()) - } - if m.Headers != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("headers")) - info.Content = append(info.Content, m.Headers.ToRawInfo()) - } - if m.Examples != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("examples")) - info.Content = append(info.Content, m.Examples.ToRawInfo()) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export. 
-func (m *ResponseDefinitions) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of ResponseValue suitable for JSON or YAML export. -func (m *ResponseValue) ToRawInfo() *yaml.Node { - // ONE OF WRAPPER - // ResponseValue - // {Name:response Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetResponse() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetJsonReference() - if v1 != nil { - return v1.ToRawInfo() - } - return compiler.NewNullNode() -} - -// ToRawInfo returns a description of Responses suitable for JSON or YAML export. -func (m *Responses) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.ResponseCode != nil { - for _, item := range m.ResponseCode { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Schema suitable for JSON or YAML export. -func (m *Schema) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.XRef != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef)) - } - if m.Format != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) - } - if m.Title != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("title")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title)) - } - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.Default != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) - info.Content = append(info.Content, m.Default.ToRawInfo()) - } - if m.MultipleOf != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) - } - if m.Maximum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) - } - if m.ExclusiveMaximum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) - } - if m.Minimum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) - info.Content = append(info.Content, 
compiler.NewScalarNodeForFloat(m.Minimum)) - } - if m.ExclusiveMinimum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) - } - if m.MaxLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) - } - if m.MinLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) - } - if m.Pattern != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) - } - if m.MaxItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) - } - if m.MinItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) - } - if m.UniqueItems != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) - } - if m.MaxProperties != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxProperties")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxProperties)) - } - if m.MinProperties != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minProperties")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinProperties)) - } - if len(m.Required) != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) - info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Required)) - } - if len(m.Enum) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Enum { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) - info.Content = append(info.Content, items) - } - if m.AdditionalProperties != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("additionalProperties")) - info.Content = append(info.Content, m.AdditionalProperties.ToRawInfo()) - } - if m.Type != nil { - if len(m.Type.Value) == 1 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type.Value[0])) - } else { - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Type.Value)) - } - } - if m.Items != nil { - items := compiler.NewSequenceNode() - for _, item := range m.Items.Schema { - items.Content = append(items.Content, item.ToRawInfo()) - } - if len(items.Content) == 1 { - items = items.Content[0] - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) - info.Content = append(info.Content, items) - } - if len(m.AllOf) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.AllOf { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("allOf")) - info.Content = 
append(info.Content, items) - } - if m.Properties != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("properties")) - info.Content = append(info.Content, m.Properties.ToRawInfo()) - } - if m.Discriminator != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("discriminator")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Discriminator)) - } - if m.ReadOnly != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly)) - } - if m.Xml != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("xml")) - info.Content = append(info.Content, m.Xml.ToRawInfo()) - } - if m.ExternalDocs != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) - info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) - } - if m.Example != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("example")) - info.Content = append(info.Content, m.Example.ToRawInfo()) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of SchemaItem suitable for JSON or YAML export. -func (m *SchemaItem) ToRawInfo() *yaml.Node { - // ONE OF WRAPPER - // SchemaItem - // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetSchema() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:fileSchema Type:FileSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetFileSchema() - if v1 != nil { - return v1.ToRawInfo() - } - return compiler.NewNullNode() -} - -// ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export. -func (m *SecurityDefinitions) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of SecurityDefinitionsItem suitable for JSON or YAML export. 
-func (m *SecurityDefinitionsItem) ToRawInfo() *yaml.Node { - // ONE OF WRAPPER - // SecurityDefinitionsItem - // {Name:basicAuthenticationSecurity Type:BasicAuthenticationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetBasicAuthenticationSecurity() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:apiKeySecurity Type:ApiKeySecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetApiKeySecurity() - if v1 != nil { - return v1.ToRawInfo() - } - // {Name:oauth2ImplicitSecurity Type:Oauth2ImplicitSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v2 := m.GetOauth2ImplicitSecurity() - if v2 != nil { - return v2.ToRawInfo() - } - // {Name:oauth2PasswordSecurity Type:Oauth2PasswordSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v3 := m.GetOauth2PasswordSecurity() - if v3 != nil { - return v3.ToRawInfo() - } - // {Name:oauth2ApplicationSecurity Type:Oauth2ApplicationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v4 := m.GetOauth2ApplicationSecurity() - if v4 != nil { - return v4.ToRawInfo() - } - // {Name:oauth2AccessCodeSecurity Type:Oauth2AccessCodeSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v5 := m.GetOauth2AccessCodeSecurity() - if v5 != nil { - return v5.ToRawInfo() - } - return compiler.NewNullNode() -} - -// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export. -func (m *SecurityRequirement) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of StringArray suitable for JSON or YAML export. -func (m *StringArray) ToRawInfo() *yaml.Node { - return compiler.NewSequenceNodeForStringArray(m.Value) -} - -// ToRawInfo returns a description of Tag suitable for JSON or YAML export. -func (m *Tag) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.ExternalDocs != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) - info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of TypeItem suitable for JSON or YAML export. 
-func (m *TypeItem) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if len(m.Value) != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("value")) - info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Value)) - } - return info -} - -// ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export. -func (m *VendorExtension) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Xml suitable for JSON or YAML export. -func (m *Xml) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - if m.Namespace != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("namespace")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Namespace)) - } - if m.Prefix != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("prefix")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Prefix)) - } - if m.Attribute != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("attribute")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Attribute)) - } - if m.Wrapped != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("wrapped")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Wrapped)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -var ( - pattern0 = regexp.MustCompile("^x-") - pattern1 = regexp.MustCompile("^/") - pattern2 = regexp.MustCompile("^([0-9]{3})$|^(default)$") -) diff --git a/src/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.pb.go b/src/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.pb.go deleted file mode 100644 index 06b60157c..000000000 --- a/src/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.pb.go +++ /dev/null @@ -1,7342 +0,0 @@ -// Copyright 2020 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// THIS FILE IS AUTOMATICALLY GENERATED. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.26.0 -// protoc v3.18.1 -// source: openapiv2/OpenAPIv2.proto - -package openapi_v2 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type AdditionalPropertiesItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *AdditionalPropertiesItem_Schema - // *AdditionalPropertiesItem_Boolean - Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` -} - -func (x *AdditionalPropertiesItem) Reset() { - *x = AdditionalPropertiesItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AdditionalPropertiesItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AdditionalPropertiesItem) ProtoMessage() {} - -func (x *AdditionalPropertiesItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AdditionalPropertiesItem.ProtoReflect.Descriptor instead. -func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{0} -} - -func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *AdditionalPropertiesItem) GetSchema() *Schema { - if x, ok := x.GetOneof().(*AdditionalPropertiesItem_Schema); ok { - return x.Schema - } - return nil -} - -func (x *AdditionalPropertiesItem) GetBoolean() bool { - if x, ok := x.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { - return x.Boolean - } - return false -} - -type isAdditionalPropertiesItem_Oneof interface { - isAdditionalPropertiesItem_Oneof() -} - -type AdditionalPropertiesItem_Schema struct { - Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` -} - -type AdditionalPropertiesItem_Boolean struct { - Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"` -} - -func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {} - -func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} - -type Any struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Value *anypb.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - Yaml string `protobuf:"bytes,2,opt,name=yaml,proto3" json:"yaml,omitempty"` -} - -func (x *Any) Reset() { - *x = Any{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Any) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Any) ProtoMessage() {} - -func (x *Any) ProtoReflect() protoreflect.Message { - mi := 
&file_openapiv2_OpenAPIv2_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Any.ProtoReflect.Descriptor instead. -func (*Any) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{1} -} - -func (x *Any) GetValue() *anypb.Any { - if x != nil { - return x.Value - } - return nil -} - -func (x *Any) GetYaml() string { - if x != nil { - return x.Yaml - } - return "" -} - -type ApiKeySecurity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"` - Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *ApiKeySecurity) Reset() { - *x = ApiKeySecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ApiKeySecurity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ApiKeySecurity) ProtoMessage() {} - -func (x *ApiKeySecurity) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ApiKeySecurity.ProtoReflect.Descriptor instead. 
-func (*ApiKeySecurity) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{2} -} - -func (x *ApiKeySecurity) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *ApiKeySecurity) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *ApiKeySecurity) GetIn() string { - if x != nil { - return x.In - } - return "" -} - -func (x *ApiKeySecurity) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *ApiKeySecurity) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type BasicAuthenticationSecurity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *BasicAuthenticationSecurity) Reset() { - *x = BasicAuthenticationSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BasicAuthenticationSecurity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BasicAuthenticationSecurity) ProtoMessage() {} - -func (x *BasicAuthenticationSecurity) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BasicAuthenticationSecurity.ProtoReflect.Descriptor instead. -func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{3} -} - -func (x *BasicAuthenticationSecurity) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *BasicAuthenticationSecurity) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type BodyParameter struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - // The name of the parameter. - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"` - // Determines whether or not this parameter is required or optional. 
- Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` - Schema *Schema `protobuf:"bytes,5,opt,name=schema,proto3" json:"schema,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *BodyParameter) Reset() { - *x = BodyParameter{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BodyParameter) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BodyParameter) ProtoMessage() {} - -func (x *BodyParameter) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BodyParameter.ProtoReflect.Descriptor instead. -func (*BodyParameter) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{4} -} - -func (x *BodyParameter) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *BodyParameter) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *BodyParameter) GetIn() string { - if x != nil { - return x.In - } - return "" -} - -func (x *BodyParameter) GetRequired() bool { - if x != nil { - return x.Required - } - return false -} - -func (x *BodyParameter) GetSchema() *Schema { - if x != nil { - return x.Schema - } - return nil -} - -func (x *BodyParameter) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -// Contact information for the owners of the API. -type Contact struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The identifying name of the contact person/organization. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // The URL pointing to the contact information. - Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` - // The email address of the contact person/organization. - Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Contact) Reset() { - *x = Contact{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Contact) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Contact) ProtoMessage() {} - -func (x *Contact) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Contact.ProtoReflect.Descriptor instead. 
-func (*Contact) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{5} -} - -func (x *Contact) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Contact) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *Contact) GetEmail() string { - if x != nil { - return x.Email - } - return "" -} - -func (x *Contact) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Default struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *Default) Reset() { - *x = Default{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Default) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Default) ProtoMessage() {} - -func (x *Default) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Default.ProtoReflect.Descriptor instead. -func (*Default) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{6} -} - -func (x *Default) GetAdditionalProperties() []*NamedAny { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -// One or more JSON objects describing the schemas being consumed and produced by the API. -type Definitions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *Definitions) Reset() { - *x = Definitions{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Definitions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Definitions) ProtoMessage() {} - -func (x *Definitions) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Definitions.ProtoReflect.Descriptor instead. -func (*Definitions) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{7} -} - -func (x *Definitions) GetAdditionalProperties() []*NamedSchema { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -type Document struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The Swagger version of this document. - Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"` - Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` - // The host (name or ip) of the API. 
Example: 'swagger.io' - Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"` - // The base path to the API. Example: '/api'. - BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"` - // The transfer protocol of the API. - Schemes []string `protobuf:"bytes,5,rep,name=schemes,proto3" json:"schemes,omitempty"` - // A list of MIME types accepted by the API. - Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"` - // A list of MIME types the API can produce. - Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"` - Paths *Paths `protobuf:"bytes,8,opt,name=paths,proto3" json:"paths,omitempty"` - Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions,proto3" json:"definitions,omitempty"` - Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters,proto3" json:"parameters,omitempty"` - Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses,proto3" json:"responses,omitempty"` - Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` - SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"` - Tags []*Tag `protobuf:"bytes,14,rep,name=tags,proto3" json:"tags,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Document) Reset() { - *x = Document{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Document) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Document) ProtoMessage() {} - -func (x *Document) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Document.ProtoReflect.Descriptor instead. 
-func (*Document) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{8} -} - -func (x *Document) GetSwagger() string { - if x != nil { - return x.Swagger - } - return "" -} - -func (x *Document) GetInfo() *Info { - if x != nil { - return x.Info - } - return nil -} - -func (x *Document) GetHost() string { - if x != nil { - return x.Host - } - return "" -} - -func (x *Document) GetBasePath() string { - if x != nil { - return x.BasePath - } - return "" -} - -func (x *Document) GetSchemes() []string { - if x != nil { - return x.Schemes - } - return nil -} - -func (x *Document) GetConsumes() []string { - if x != nil { - return x.Consumes - } - return nil -} - -func (x *Document) GetProduces() []string { - if x != nil { - return x.Produces - } - return nil -} - -func (x *Document) GetPaths() *Paths { - if x != nil { - return x.Paths - } - return nil -} - -func (x *Document) GetDefinitions() *Definitions { - if x != nil { - return x.Definitions - } - return nil -} - -func (x *Document) GetParameters() *ParameterDefinitions { - if x != nil { - return x.Parameters - } - return nil -} - -func (x *Document) GetResponses() *ResponseDefinitions { - if x != nil { - return x.Responses - } - return nil -} - -func (x *Document) GetSecurity() []*SecurityRequirement { - if x != nil { - return x.Security - } - return nil -} - -func (x *Document) GetSecurityDefinitions() *SecurityDefinitions { - if x != nil { - return x.SecurityDefinitions - } - return nil -} - -func (x *Document) GetTags() []*Tag { - if x != nil { - return x.Tags - } - return nil -} - -func (x *Document) GetExternalDocs() *ExternalDocs { - if x != nil { - return x.ExternalDocs - } - return nil -} - -func (x *Document) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Examples struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *Examples) Reset() { - *x = Examples{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Examples) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Examples) ProtoMessage() {} - -func (x *Examples) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Examples.ProtoReflect.Descriptor instead. 
-func (*Examples) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{9} -} - -func (x *Examples) GetAdditionalProperties() []*NamedAny { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -// information about external documentation -type ExternalDocs struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *ExternalDocs) Reset() { - *x = ExternalDocs{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExternalDocs) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExternalDocs) ProtoMessage() {} - -func (x *ExternalDocs) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExternalDocs.ProtoReflect.Descriptor instead. -func (*ExternalDocs) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{10} -} - -func (x *ExternalDocs) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *ExternalDocs) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *ExternalDocs) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -// A deterministic version of a JSON Schema object. 
-type FileSchema struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Format string `protobuf:"bytes,1,opt,name=format,proto3" json:"format,omitempty"` - Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - Default *Any `protobuf:"bytes,4,opt,name=default,proto3" json:"default,omitempty"` - Required []string `protobuf:"bytes,5,rep,name=required,proto3" json:"required,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` - ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - Example *Any `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *FileSchema) Reset() { - *x = FileSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FileSchema) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FileSchema) ProtoMessage() {} - -func (x *FileSchema) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FileSchema.ProtoReflect.Descriptor instead. -func (*FileSchema) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{11} -} - -func (x *FileSchema) GetFormat() string { - if x != nil { - return x.Format - } - return "" -} - -func (x *FileSchema) GetTitle() string { - if x != nil { - return x.Title - } - return "" -} - -func (x *FileSchema) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *FileSchema) GetDefault() *Any { - if x != nil { - return x.Default - } - return nil -} - -func (x *FileSchema) GetRequired() []string { - if x != nil { - return x.Required - } - return nil -} - -func (x *FileSchema) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *FileSchema) GetReadOnly() bool { - if x != nil { - return x.ReadOnly - } - return false -} - -func (x *FileSchema) GetExternalDocs() *ExternalDocs { - if x != nil { - return x.ExternalDocs - } - return nil -} - -func (x *FileSchema) GetExample() *Any { - if x != nil { - return x.Example - } - return nil -} - -func (x *FileSchema) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type FormDataParameterSubSchema struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. 
GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - // The name of the parameter. - Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - // allows sending a parameter by name only or with an empty value. - AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *FormDataParameterSubSchema) Reset() { - *x = FormDataParameterSubSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FormDataParameterSubSchema) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FormDataParameterSubSchema) ProtoMessage() {} - -func (x *FormDataParameterSubSchema) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FormDataParameterSubSchema.ProtoReflect.Descriptor instead. 
-func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{12} -} - -func (x *FormDataParameterSubSchema) GetRequired() bool { - if x != nil { - return x.Required - } - return false -} - -func (x *FormDataParameterSubSchema) GetIn() string { - if x != nil { - return x.In - } - return "" -} - -func (x *FormDataParameterSubSchema) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *FormDataParameterSubSchema) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *FormDataParameterSubSchema) GetAllowEmptyValue() bool { - if x != nil { - return x.AllowEmptyValue - } - return false -} - -func (x *FormDataParameterSubSchema) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *FormDataParameterSubSchema) GetFormat() string { - if x != nil { - return x.Format - } - return "" -} - -func (x *FormDataParameterSubSchema) GetItems() *PrimitivesItems { - if x != nil { - return x.Items - } - return nil -} - -func (x *FormDataParameterSubSchema) GetCollectionFormat() string { - if x != nil { - return x.CollectionFormat - } - return "" -} - -func (x *FormDataParameterSubSchema) GetDefault() *Any { - if x != nil { - return x.Default - } - return nil -} - -func (x *FormDataParameterSubSchema) GetMaximum() float64 { - if x != nil { - return x.Maximum - } - return 0 -} - -func (x *FormDataParameterSubSchema) GetExclusiveMaximum() bool { - if x != nil { - return x.ExclusiveMaximum - } - return false -} - -func (x *FormDataParameterSubSchema) GetMinimum() float64 { - if x != nil { - return x.Minimum - } - return 0 -} - -func (x *FormDataParameterSubSchema) GetExclusiveMinimum() bool { - if x != nil { - return x.ExclusiveMinimum - } - return false -} - -func (x *FormDataParameterSubSchema) GetMaxLength() int64 { - if x != nil { - return x.MaxLength - } - return 0 -} - -func (x *FormDataParameterSubSchema) GetMinLength() int64 { - if x != nil { - return x.MinLength - } - return 0 -} - -func (x *FormDataParameterSubSchema) GetPattern() string { - if x != nil { - return x.Pattern - } - return "" -} - -func (x *FormDataParameterSubSchema) GetMaxItems() int64 { - if x != nil { - return x.MaxItems - } - return 0 -} - -func (x *FormDataParameterSubSchema) GetMinItems() int64 { - if x != nil { - return x.MinItems - } - return 0 -} - -func (x *FormDataParameterSubSchema) GetUniqueItems() bool { - if x != nil { - return x.UniqueItems - } - return false -} - -func (x *FormDataParameterSubSchema) GetEnum() []*Any { - if x != nil { - return x.Enum - } - return nil -} - -func (x *FormDataParameterSubSchema) GetMultipleOf() float64 { - if x != nil { - return x.MultipleOf - } - return 0 -} - -func (x *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Header struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 
`protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - Description string `protobuf:"bytes,18,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Header) Reset() { - *x = Header{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Header) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Header) ProtoMessage() {} - -func (x *Header) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Header.ProtoReflect.Descriptor instead. 
-func (*Header) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{13} -} - -func (x *Header) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *Header) GetFormat() string { - if x != nil { - return x.Format - } - return "" -} - -func (x *Header) GetItems() *PrimitivesItems { - if x != nil { - return x.Items - } - return nil -} - -func (x *Header) GetCollectionFormat() string { - if x != nil { - return x.CollectionFormat - } - return "" -} - -func (x *Header) GetDefault() *Any { - if x != nil { - return x.Default - } - return nil -} - -func (x *Header) GetMaximum() float64 { - if x != nil { - return x.Maximum - } - return 0 -} - -func (x *Header) GetExclusiveMaximum() bool { - if x != nil { - return x.ExclusiveMaximum - } - return false -} - -func (x *Header) GetMinimum() float64 { - if x != nil { - return x.Minimum - } - return 0 -} - -func (x *Header) GetExclusiveMinimum() bool { - if x != nil { - return x.ExclusiveMinimum - } - return false -} - -func (x *Header) GetMaxLength() int64 { - if x != nil { - return x.MaxLength - } - return 0 -} - -func (x *Header) GetMinLength() int64 { - if x != nil { - return x.MinLength - } - return 0 -} - -func (x *Header) GetPattern() string { - if x != nil { - return x.Pattern - } - return "" -} - -func (x *Header) GetMaxItems() int64 { - if x != nil { - return x.MaxItems - } - return 0 -} - -func (x *Header) GetMinItems() int64 { - if x != nil { - return x.MinItems - } - return 0 -} - -func (x *Header) GetUniqueItems() bool { - if x != nil { - return x.UniqueItems - } - return false -} - -func (x *Header) GetEnum() []*Any { - if x != nil { - return x.Enum - } - return nil -} - -func (x *Header) GetMultipleOf() float64 { - if x != nil { - return x.MultipleOf - } - return 0 -} - -func (x *Header) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Header) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type HeaderParameterSubSchema struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - // The name of the parameter. 
- Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *HeaderParameterSubSchema) Reset() { - *x = HeaderParameterSubSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HeaderParameterSubSchema) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HeaderParameterSubSchema) ProtoMessage() {} - -func (x *HeaderParameterSubSchema) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HeaderParameterSubSchema.ProtoReflect.Descriptor instead. 
-func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{14} -} - -func (x *HeaderParameterSubSchema) GetRequired() bool { - if x != nil { - return x.Required - } - return false -} - -func (x *HeaderParameterSubSchema) GetIn() string { - if x != nil { - return x.In - } - return "" -} - -func (x *HeaderParameterSubSchema) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *HeaderParameterSubSchema) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *HeaderParameterSubSchema) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *HeaderParameterSubSchema) GetFormat() string { - if x != nil { - return x.Format - } - return "" -} - -func (x *HeaderParameterSubSchema) GetItems() *PrimitivesItems { - if x != nil { - return x.Items - } - return nil -} - -func (x *HeaderParameterSubSchema) GetCollectionFormat() string { - if x != nil { - return x.CollectionFormat - } - return "" -} - -func (x *HeaderParameterSubSchema) GetDefault() *Any { - if x != nil { - return x.Default - } - return nil -} - -func (x *HeaderParameterSubSchema) GetMaximum() float64 { - if x != nil { - return x.Maximum - } - return 0 -} - -func (x *HeaderParameterSubSchema) GetExclusiveMaximum() bool { - if x != nil { - return x.ExclusiveMaximum - } - return false -} - -func (x *HeaderParameterSubSchema) GetMinimum() float64 { - if x != nil { - return x.Minimum - } - return 0 -} - -func (x *HeaderParameterSubSchema) GetExclusiveMinimum() bool { - if x != nil { - return x.ExclusiveMinimum - } - return false -} - -func (x *HeaderParameterSubSchema) GetMaxLength() int64 { - if x != nil { - return x.MaxLength - } - return 0 -} - -func (x *HeaderParameterSubSchema) GetMinLength() int64 { - if x != nil { - return x.MinLength - } - return 0 -} - -func (x *HeaderParameterSubSchema) GetPattern() string { - if x != nil { - return x.Pattern - } - return "" -} - -func (x *HeaderParameterSubSchema) GetMaxItems() int64 { - if x != nil { - return x.MaxItems - } - return 0 -} - -func (x *HeaderParameterSubSchema) GetMinItems() int64 { - if x != nil { - return x.MinItems - } - return 0 -} - -func (x *HeaderParameterSubSchema) GetUniqueItems() bool { - if x != nil { - return x.UniqueItems - } - return false -} - -func (x *HeaderParameterSubSchema) GetEnum() []*Any { - if x != nil { - return x.Enum - } - return nil -} - -func (x *HeaderParameterSubSchema) GetMultipleOf() float64 { - if x != nil { - return x.MultipleOf - } - return 0 -} - -func (x *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Headers struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *Headers) Reset() { - *x = Headers{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Headers) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Headers) ProtoMessage() {} - -func (x *Headers) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Headers.ProtoReflect.Descriptor instead. -func (*Headers) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{15} -} - -func (x *Headers) GetAdditionalProperties() []*NamedHeader { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -// General information about the API. -type Info struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A unique and precise title of the API. - Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` - // A semantic version number of the API. - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - // The terms of service for the API. - TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"` - Contact *Contact `protobuf:"bytes,5,opt,name=contact,proto3" json:"contact,omitempty"` - License *License `protobuf:"bytes,6,opt,name=license,proto3" json:"license,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Info) Reset() { - *x = Info{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Info) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Info) ProtoMessage() {} - -func (x *Info) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Info.ProtoReflect.Descriptor instead. 
-func (*Info) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{16} -} - -func (x *Info) GetTitle() string { - if x != nil { - return x.Title - } - return "" -} - -func (x *Info) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -func (x *Info) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Info) GetTermsOfService() string { - if x != nil { - return x.TermsOfService - } - return "" -} - -func (x *Info) GetContact() *Contact { - if x != nil { - return x.Contact - } - return nil -} - -func (x *Info) GetLicense() *License { - if x != nil { - return x.License - } - return nil -} - -func (x *Info) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type ItemsItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Schema []*Schema `protobuf:"bytes,1,rep,name=schema,proto3" json:"schema,omitempty"` -} - -func (x *ItemsItem) Reset() { - *x = ItemsItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ItemsItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ItemsItem) ProtoMessage() {} - -func (x *ItemsItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ItemsItem.ProtoReflect.Descriptor instead. -func (*ItemsItem) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{17} -} - -func (x *ItemsItem) GetSchema() []*Schema { - if x != nil { - return x.Schema - } - return nil -} - -type JsonReference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` -} - -func (x *JsonReference) Reset() { - *x = JsonReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *JsonReference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*JsonReference) ProtoMessage() {} - -func (x *JsonReference) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use JsonReference.ProtoReflect.Descriptor instead. 
-func (*JsonReference) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{18} -} - -func (x *JsonReference) GetXRef() string { - if x != nil { - return x.XRef - } - return "" -} - -func (x *JsonReference) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -type License struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The name of the license type. It's encouraged to use an OSI compatible license. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // The URL pointing to the license. - Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *License) Reset() { - *x = License{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *License) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*License) ProtoMessage() {} - -func (x *License) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use License.ProtoReflect.Descriptor instead. -func (*License) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{19} -} - -func (x *License) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *License) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *License) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. -type NamedAny struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedAny) Reset() { - *x = NamedAny{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedAny) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedAny) ProtoMessage() {} - -func (x *NamedAny) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedAny.ProtoReflect.Descriptor instead. 
-func (*NamedAny) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{20} -} - -func (x *NamedAny) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedAny) GetValue() *Any { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. -type NamedHeader struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *Header `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedHeader) Reset() { - *x = NamedHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedHeader) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedHeader) ProtoMessage() {} - -func (x *NamedHeader) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedHeader.ProtoReflect.Descriptor instead. -func (*NamedHeader) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{21} -} - -func (x *NamedHeader) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedHeader) GetValue() *Header { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. -type NamedParameter struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *Parameter `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedParameter) Reset() { - *x = NamedParameter{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedParameter) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedParameter) ProtoMessage() {} - -func (x *NamedParameter) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedParameter.ProtoReflect.Descriptor instead. -func (*NamedParameter) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{22} -} - -func (x *NamedParameter) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedParameter) GetValue() *Parameter { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
-type NamedPathItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *PathItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedPathItem) Reset() { - *x = NamedPathItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedPathItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedPathItem) ProtoMessage() {} - -func (x *NamedPathItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedPathItem.ProtoReflect.Descriptor instead. -func (*NamedPathItem) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{23} -} - -func (x *NamedPathItem) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedPathItem) GetValue() *PathItem { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. -type NamedResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *Response `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedResponse) Reset() { - *x = NamedResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedResponse) ProtoMessage() {} - -func (x *NamedResponse) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedResponse.ProtoReflect.Descriptor instead. -func (*NamedResponse) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{24} -} - -func (x *NamedResponse) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedResponse) GetValue() *Response { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. 
-type NamedResponseValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *ResponseValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedResponseValue) Reset() { - *x = NamedResponseValue{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedResponseValue) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedResponseValue) ProtoMessage() {} - -func (x *NamedResponseValue) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedResponseValue.ProtoReflect.Descriptor instead. -func (*NamedResponseValue) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{25} -} - -func (x *NamedResponseValue) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedResponseValue) GetValue() *ResponseValue { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. -type NamedSchema struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *Schema `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedSchema) Reset() { - *x = NamedSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedSchema) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedSchema) ProtoMessage() {} - -func (x *NamedSchema) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedSchema.ProtoReflect.Descriptor instead. -func (*NamedSchema) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{26} -} - -func (x *NamedSchema) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedSchema) GetValue() *Schema { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. 
-type NamedSecurityDefinitionsItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedSecurityDefinitionsItem) Reset() { - *x = NamedSecurityDefinitionsItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedSecurityDefinitionsItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedSecurityDefinitionsItem) ProtoMessage() {} - -func (x *NamedSecurityDefinitionsItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedSecurityDefinitionsItem.ProtoReflect.Descriptor instead. -func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{27} -} - -func (x *NamedSecurityDefinitionsItem) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. -type NamedString struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedString) Reset() { - *x = NamedString{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedString) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedString) ProtoMessage() {} - -func (x *NamedString) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedString.ProtoReflect.Descriptor instead. -func (*NamedString) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{28} -} - -func (x *NamedString) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedString) GetValue() string { - if x != nil { - return x.Value - } - return "" -} - -// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. 
-type NamedStringArray struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *StringArray `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedStringArray) Reset() { - *x = NamedStringArray{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedStringArray) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedStringArray) ProtoMessage() {} - -func (x *NamedStringArray) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedStringArray.ProtoReflect.Descriptor instead. -func (*NamedStringArray) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{29} -} - -func (x *NamedStringArray) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedStringArray) GetValue() *StringArray { - if x != nil { - return x.Value - } - return nil -} - -type NonBodyParameter struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *NonBodyParameter_HeaderParameterSubSchema - // *NonBodyParameter_FormDataParameterSubSchema - // *NonBodyParameter_QueryParameterSubSchema - // *NonBodyParameter_PathParameterSubSchema - Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"` -} - -func (x *NonBodyParameter) Reset() { - *x = NonBodyParameter{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NonBodyParameter) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NonBodyParameter) ProtoMessage() {} - -func (x *NonBodyParameter) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NonBodyParameter.ProtoReflect.Descriptor instead. 
-func (*NonBodyParameter) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{30} -} - -func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema { - if x, ok := x.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok { - return x.HeaderParameterSubSchema - } - return nil -} - -func (x *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema { - if x, ok := x.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok { - return x.FormDataParameterSubSchema - } - return nil -} - -func (x *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema { - if x, ok := x.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok { - return x.QueryParameterSubSchema - } - return nil -} - -func (x *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema { - if x, ok := x.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok { - return x.PathParameterSubSchema - } - return nil -} - -type isNonBodyParameter_Oneof interface { - isNonBodyParameter_Oneof() -} - -type NonBodyParameter_HeaderParameterSubSchema struct { - HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,proto3,oneof"` -} - -type NonBodyParameter_FormDataParameterSubSchema struct { - FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,proto3,oneof"` -} - -type NonBodyParameter_QueryParameterSubSchema struct { - QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,proto3,oneof"` -} - -type NonBodyParameter_PathParameterSubSchema struct { - PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,proto3,oneof"` -} - -func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {} - -func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {} - -func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {} - -func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {} - -type Oauth2AccessCodeSecurity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` - AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` - TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` - Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Oauth2AccessCodeSecurity) Reset() { - *x = Oauth2AccessCodeSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x 
*Oauth2AccessCodeSecurity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Oauth2AccessCodeSecurity) ProtoMessage() {} - -func (x *Oauth2AccessCodeSecurity) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Oauth2AccessCodeSecurity.ProtoReflect.Descriptor instead. -func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{31} -} - -func (x *Oauth2AccessCodeSecurity) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *Oauth2AccessCodeSecurity) GetFlow() string { - if x != nil { - return x.Flow - } - return "" -} - -func (x *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes { - if x != nil { - return x.Scopes - } - return nil -} - -func (x *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string { - if x != nil { - return x.AuthorizationUrl - } - return "" -} - -func (x *Oauth2AccessCodeSecurity) GetTokenUrl() string { - if x != nil { - return x.TokenUrl - } - return "" -} - -func (x *Oauth2AccessCodeSecurity) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Oauth2ApplicationSecurity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` - TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Oauth2ApplicationSecurity) Reset() { - *x = Oauth2ApplicationSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Oauth2ApplicationSecurity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Oauth2ApplicationSecurity) ProtoMessage() {} - -func (x *Oauth2ApplicationSecurity) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Oauth2ApplicationSecurity.ProtoReflect.Descriptor instead. 
-func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{32} -} - -func (x *Oauth2ApplicationSecurity) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *Oauth2ApplicationSecurity) GetFlow() string { - if x != nil { - return x.Flow - } - return "" -} - -func (x *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes { - if x != nil { - return x.Scopes - } - return nil -} - -func (x *Oauth2ApplicationSecurity) GetTokenUrl() string { - if x != nil { - return x.TokenUrl - } - return "" -} - -func (x *Oauth2ApplicationSecurity) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Oauth2ImplicitSecurity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` - AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Oauth2ImplicitSecurity) Reset() { - *x = Oauth2ImplicitSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Oauth2ImplicitSecurity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Oauth2ImplicitSecurity) ProtoMessage() {} - -func (x *Oauth2ImplicitSecurity) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Oauth2ImplicitSecurity.ProtoReflect.Descriptor instead. 
-func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{33} -} - -func (x *Oauth2ImplicitSecurity) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *Oauth2ImplicitSecurity) GetFlow() string { - if x != nil { - return x.Flow - } - return "" -} - -func (x *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes { - if x != nil { - return x.Scopes - } - return nil -} - -func (x *Oauth2ImplicitSecurity) GetAuthorizationUrl() string { - if x != nil { - return x.AuthorizationUrl - } - return "" -} - -func (x *Oauth2ImplicitSecurity) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Oauth2PasswordSecurity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` - TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Oauth2PasswordSecurity) Reset() { - *x = Oauth2PasswordSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Oauth2PasswordSecurity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Oauth2PasswordSecurity) ProtoMessage() {} - -func (x *Oauth2PasswordSecurity) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Oauth2PasswordSecurity.ProtoReflect.Descriptor instead. 
-func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{34} -} - -func (x *Oauth2PasswordSecurity) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *Oauth2PasswordSecurity) GetFlow() string { - if x != nil { - return x.Flow - } - return "" -} - -func (x *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes { - if x != nil { - return x.Scopes - } - return nil -} - -func (x *Oauth2PasswordSecurity) GetTokenUrl() string { - if x != nil { - return x.TokenUrl - } - return "" -} - -func (x *Oauth2PasswordSecurity) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Oauth2Scopes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *Oauth2Scopes) Reset() { - *x = Oauth2Scopes{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Oauth2Scopes) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Oauth2Scopes) ProtoMessage() {} - -func (x *Oauth2Scopes) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Oauth2Scopes.ProtoReflect.Descriptor instead. -func (*Oauth2Scopes) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{35} -} - -func (x *Oauth2Scopes) GetAdditionalProperties() []*NamedString { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -type Operation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"` - // A brief summary of the operation. - Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` - // A longer description of the operation, GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - // A unique identifier of the operation. - OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` - // A list of MIME types the API can produce. - Produces []string `protobuf:"bytes,6,rep,name=produces,proto3" json:"produces,omitempty"` - // A list of MIME types the API can consume. - Consumes []string `protobuf:"bytes,7,rep,name=consumes,proto3" json:"consumes,omitempty"` - // The parameters needed to send a valid API call. - Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters,proto3" json:"parameters,omitempty"` - Responses *Responses `protobuf:"bytes,9,opt,name=responses,proto3" json:"responses,omitempty"` - // The transfer protocol of the API. 
- Schemes []string `protobuf:"bytes,10,rep,name=schemes,proto3" json:"schemes,omitempty"` - Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"` - Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Operation) Reset() { - *x = Operation{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Operation) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Operation) ProtoMessage() {} - -func (x *Operation) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Operation.ProtoReflect.Descriptor instead. -func (*Operation) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{36} -} - -func (x *Operation) GetTags() []string { - if x != nil { - return x.Tags - } - return nil -} - -func (x *Operation) GetSummary() string { - if x != nil { - return x.Summary - } - return "" -} - -func (x *Operation) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Operation) GetExternalDocs() *ExternalDocs { - if x != nil { - return x.ExternalDocs - } - return nil -} - -func (x *Operation) GetOperationId() string { - if x != nil { - return x.OperationId - } - return "" -} - -func (x *Operation) GetProduces() []string { - if x != nil { - return x.Produces - } - return nil -} - -func (x *Operation) GetConsumes() []string { - if x != nil { - return x.Consumes - } - return nil -} - -func (x *Operation) GetParameters() []*ParametersItem { - if x != nil { - return x.Parameters - } - return nil -} - -func (x *Operation) GetResponses() *Responses { - if x != nil { - return x.Responses - } - return nil -} - -func (x *Operation) GetSchemes() []string { - if x != nil { - return x.Schemes - } - return nil -} - -func (x *Operation) GetDeprecated() bool { - if x != nil { - return x.Deprecated - } - return false -} - -func (x *Operation) GetSecurity() []*SecurityRequirement { - if x != nil { - return x.Security - } - return nil -} - -func (x *Operation) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Parameter struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *Parameter_BodyParameter - // *Parameter_NonBodyParameter - Oneof isParameter_Oneof `protobuf_oneof:"oneof"` -} - -func (x *Parameter) Reset() { - *x = Parameter{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Parameter) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Parameter) ProtoMessage() {} - -func (x *Parameter) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if 
ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Parameter.ProtoReflect.Descriptor instead. -func (*Parameter) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{37} -} - -func (m *Parameter) GetOneof() isParameter_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *Parameter) GetBodyParameter() *BodyParameter { - if x, ok := x.GetOneof().(*Parameter_BodyParameter); ok { - return x.BodyParameter - } - return nil -} - -func (x *Parameter) GetNonBodyParameter() *NonBodyParameter { - if x, ok := x.GetOneof().(*Parameter_NonBodyParameter); ok { - return x.NonBodyParameter - } - return nil -} - -type isParameter_Oneof interface { - isParameter_Oneof() -} - -type Parameter_BodyParameter struct { - BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,proto3,oneof"` -} - -type Parameter_NonBodyParameter struct { - NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,proto3,oneof"` -} - -func (*Parameter_BodyParameter) isParameter_Oneof() {} - -func (*Parameter_NonBodyParameter) isParameter_Oneof() {} - -// One or more JSON representations for parameters -type ParameterDefinitions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *ParameterDefinitions) Reset() { - *x = ParameterDefinitions{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ParameterDefinitions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ParameterDefinitions) ProtoMessage() {} - -func (x *ParameterDefinitions) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ParameterDefinitions.ProtoReflect.Descriptor instead. 
-func (*ParameterDefinitions) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{38} -} - -func (x *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -type ParametersItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *ParametersItem_Parameter - // *ParametersItem_JsonReference - Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` -} - -func (x *ParametersItem) Reset() { - *x = ParametersItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ParametersItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ParametersItem) ProtoMessage() {} - -func (x *ParametersItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ParametersItem.ProtoReflect.Descriptor instead. -func (*ParametersItem) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{39} -} - -func (m *ParametersItem) GetOneof() isParametersItem_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *ParametersItem) GetParameter() *Parameter { - if x, ok := x.GetOneof().(*ParametersItem_Parameter); ok { - return x.Parameter - } - return nil -} - -func (x *ParametersItem) GetJsonReference() *JsonReference { - if x, ok := x.GetOneof().(*ParametersItem_JsonReference); ok { - return x.JsonReference - } - return nil -} - -type isParametersItem_Oneof interface { - isParametersItem_Oneof() -} - -type ParametersItem_Parameter struct { - Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,proto3,oneof"` -} - -type ParametersItem_JsonReference struct { - JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"` -} - -func (*ParametersItem_Parameter) isParametersItem_Oneof() {} - -func (*ParametersItem_JsonReference) isParametersItem_Oneof() {} - -type PathItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` - Get *Operation `protobuf:"bytes,2,opt,name=get,proto3" json:"get,omitempty"` - Put *Operation `protobuf:"bytes,3,opt,name=put,proto3" json:"put,omitempty"` - Post *Operation `protobuf:"bytes,4,opt,name=post,proto3" json:"post,omitempty"` - Delete *Operation `protobuf:"bytes,5,opt,name=delete,proto3" json:"delete,omitempty"` - Options *Operation `protobuf:"bytes,6,opt,name=options,proto3" json:"options,omitempty"` - Head *Operation `protobuf:"bytes,7,opt,name=head,proto3" json:"head,omitempty"` - Patch *Operation `protobuf:"bytes,8,opt,name=patch,proto3" json:"patch,omitempty"` - // The parameters needed to send a valid API call. 
- Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters,proto3" json:"parameters,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *PathItem) Reset() { - *x = PathItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PathItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PathItem) ProtoMessage() {} - -func (x *PathItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PathItem.ProtoReflect.Descriptor instead. -func (*PathItem) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{40} -} - -func (x *PathItem) GetXRef() string { - if x != nil { - return x.XRef - } - return "" -} - -func (x *PathItem) GetGet() *Operation { - if x != nil { - return x.Get - } - return nil -} - -func (x *PathItem) GetPut() *Operation { - if x != nil { - return x.Put - } - return nil -} - -func (x *PathItem) GetPost() *Operation { - if x != nil { - return x.Post - } - return nil -} - -func (x *PathItem) GetDelete() *Operation { - if x != nil { - return x.Delete - } - return nil -} - -func (x *PathItem) GetOptions() *Operation { - if x != nil { - return x.Options - } - return nil -} - -func (x *PathItem) GetHead() *Operation { - if x != nil { - return x.Head - } - return nil -} - -func (x *PathItem) GetPatch() *Operation { - if x != nil { - return x.Patch - } - return nil -} - -func (x *PathItem) GetParameters() []*ParametersItem { - if x != nil { - return x.Parameters - } - return nil -} - -func (x *PathItem) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type PathParameterSubSchema struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - // The name of the parameter. 
- Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *PathParameterSubSchema) Reset() { - *x = PathParameterSubSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PathParameterSubSchema) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PathParameterSubSchema) ProtoMessage() {} - -func (x *PathParameterSubSchema) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PathParameterSubSchema.ProtoReflect.Descriptor instead. 
-func (*PathParameterSubSchema) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{41} -} - -func (x *PathParameterSubSchema) GetRequired() bool { - if x != nil { - return x.Required - } - return false -} - -func (x *PathParameterSubSchema) GetIn() string { - if x != nil { - return x.In - } - return "" -} - -func (x *PathParameterSubSchema) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *PathParameterSubSchema) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *PathParameterSubSchema) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *PathParameterSubSchema) GetFormat() string { - if x != nil { - return x.Format - } - return "" -} - -func (x *PathParameterSubSchema) GetItems() *PrimitivesItems { - if x != nil { - return x.Items - } - return nil -} - -func (x *PathParameterSubSchema) GetCollectionFormat() string { - if x != nil { - return x.CollectionFormat - } - return "" -} - -func (x *PathParameterSubSchema) GetDefault() *Any { - if x != nil { - return x.Default - } - return nil -} - -func (x *PathParameterSubSchema) GetMaximum() float64 { - if x != nil { - return x.Maximum - } - return 0 -} - -func (x *PathParameterSubSchema) GetExclusiveMaximum() bool { - if x != nil { - return x.ExclusiveMaximum - } - return false -} - -func (x *PathParameterSubSchema) GetMinimum() float64 { - if x != nil { - return x.Minimum - } - return 0 -} - -func (x *PathParameterSubSchema) GetExclusiveMinimum() bool { - if x != nil { - return x.ExclusiveMinimum - } - return false -} - -func (x *PathParameterSubSchema) GetMaxLength() int64 { - if x != nil { - return x.MaxLength - } - return 0 -} - -func (x *PathParameterSubSchema) GetMinLength() int64 { - if x != nil { - return x.MinLength - } - return 0 -} - -func (x *PathParameterSubSchema) GetPattern() string { - if x != nil { - return x.Pattern - } - return "" -} - -func (x *PathParameterSubSchema) GetMaxItems() int64 { - if x != nil { - return x.MaxItems - } - return 0 -} - -func (x *PathParameterSubSchema) GetMinItems() int64 { - if x != nil { - return x.MinItems - } - return 0 -} - -func (x *PathParameterSubSchema) GetUniqueItems() bool { - if x != nil { - return x.UniqueItems - } - return false -} - -func (x *PathParameterSubSchema) GetEnum() []*Any { - if x != nil { - return x.Enum - } - return nil -} - -func (x *PathParameterSubSchema) GetMultipleOf() float64 { - if x != nil { - return x.MultipleOf - } - return 0 -} - -func (x *PathParameterSubSchema) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -// Relative paths to the individual endpoints. They must be relative to the 'basePath'. 
-type Paths struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"` -} - -func (x *Paths) Reset() { - *x = Paths{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Paths) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Paths) ProtoMessage() {} - -func (x *Paths) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Paths.ProtoReflect.Descriptor instead. -func (*Paths) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{42} -} - -func (x *Paths) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -func (x *Paths) GetPath() []*NamedPathItem { - if x != nil { - return x.Path - } - return nil -} - -type PrimitivesItems struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *PrimitivesItems) Reset() { - *x = PrimitivesItems{} - if protoimpl.UnsafeEnabled { - mi := 
&file_openapiv2_OpenAPIv2_proto_msgTypes[43] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PrimitivesItems) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PrimitivesItems) ProtoMessage() {} - -func (x *PrimitivesItems) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[43] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PrimitivesItems.ProtoReflect.Descriptor instead. -func (*PrimitivesItems) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{43} -} - -func (x *PrimitivesItems) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *PrimitivesItems) GetFormat() string { - if x != nil { - return x.Format - } - return "" -} - -func (x *PrimitivesItems) GetItems() *PrimitivesItems { - if x != nil { - return x.Items - } - return nil -} - -func (x *PrimitivesItems) GetCollectionFormat() string { - if x != nil { - return x.CollectionFormat - } - return "" -} - -func (x *PrimitivesItems) GetDefault() *Any { - if x != nil { - return x.Default - } - return nil -} - -func (x *PrimitivesItems) GetMaximum() float64 { - if x != nil { - return x.Maximum - } - return 0 -} - -func (x *PrimitivesItems) GetExclusiveMaximum() bool { - if x != nil { - return x.ExclusiveMaximum - } - return false -} - -func (x *PrimitivesItems) GetMinimum() float64 { - if x != nil { - return x.Minimum - } - return 0 -} - -func (x *PrimitivesItems) GetExclusiveMinimum() bool { - if x != nil { - return x.ExclusiveMinimum - } - return false -} - -func (x *PrimitivesItems) GetMaxLength() int64 { - if x != nil { - return x.MaxLength - } - return 0 -} - -func (x *PrimitivesItems) GetMinLength() int64 { - if x != nil { - return x.MinLength - } - return 0 -} - -func (x *PrimitivesItems) GetPattern() string { - if x != nil { - return x.Pattern - } - return "" -} - -func (x *PrimitivesItems) GetMaxItems() int64 { - if x != nil { - return x.MaxItems - } - return 0 -} - -func (x *PrimitivesItems) GetMinItems() int64 { - if x != nil { - return x.MinItems - } - return 0 -} - -func (x *PrimitivesItems) GetUniqueItems() bool { - if x != nil { - return x.UniqueItems - } - return false -} - -func (x *PrimitivesItems) GetEnum() []*Any { - if x != nil { - return x.Enum - } - return nil -} - -func (x *PrimitivesItems) GetMultipleOf() float64 { - if x != nil { - return x.MultipleOf - } - return 0 -} - -func (x *PrimitivesItems) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Properties struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *Properties) Reset() { - *x = Properties{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Properties) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Properties) ProtoMessage() {} - -func (x *Properties) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44] - if 
protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Properties.ProtoReflect.Descriptor instead. -func (*Properties) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{44} -} - -func (x *Properties) GetAdditionalProperties() []*NamedSchema { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -type QueryParameterSubSchema struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - // The name of the parameter. - Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - // allows sending a parameter by name only or with an empty value. - AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *QueryParameterSubSchema) Reset() { - *x = QueryParameterSubSchema{} - if protoimpl.UnsafeEnabled { - mi := 
&file_openapiv2_OpenAPIv2_proto_msgTypes[45] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *QueryParameterSubSchema) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*QueryParameterSubSchema) ProtoMessage() {} - -func (x *QueryParameterSubSchema) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[45] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use QueryParameterSubSchema.ProtoReflect.Descriptor instead. -func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{45} -} - -func (x *QueryParameterSubSchema) GetRequired() bool { - if x != nil { - return x.Required - } - return false -} - -func (x *QueryParameterSubSchema) GetIn() string { - if x != nil { - return x.In - } - return "" -} - -func (x *QueryParameterSubSchema) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *QueryParameterSubSchema) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *QueryParameterSubSchema) GetAllowEmptyValue() bool { - if x != nil { - return x.AllowEmptyValue - } - return false -} - -func (x *QueryParameterSubSchema) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *QueryParameterSubSchema) GetFormat() string { - if x != nil { - return x.Format - } - return "" -} - -func (x *QueryParameterSubSchema) GetItems() *PrimitivesItems { - if x != nil { - return x.Items - } - return nil -} - -func (x *QueryParameterSubSchema) GetCollectionFormat() string { - if x != nil { - return x.CollectionFormat - } - return "" -} - -func (x *QueryParameterSubSchema) GetDefault() *Any { - if x != nil { - return x.Default - } - return nil -} - -func (x *QueryParameterSubSchema) GetMaximum() float64 { - if x != nil { - return x.Maximum - } - return 0 -} - -func (x *QueryParameterSubSchema) GetExclusiveMaximum() bool { - if x != nil { - return x.ExclusiveMaximum - } - return false -} - -func (x *QueryParameterSubSchema) GetMinimum() float64 { - if x != nil { - return x.Minimum - } - return 0 -} - -func (x *QueryParameterSubSchema) GetExclusiveMinimum() bool { - if x != nil { - return x.ExclusiveMinimum - } - return false -} - -func (x *QueryParameterSubSchema) GetMaxLength() int64 { - if x != nil { - return x.MaxLength - } - return 0 -} - -func (x *QueryParameterSubSchema) GetMinLength() int64 { - if x != nil { - return x.MinLength - } - return 0 -} - -func (x *QueryParameterSubSchema) GetPattern() string { - if x != nil { - return x.Pattern - } - return "" -} - -func (x *QueryParameterSubSchema) GetMaxItems() int64 { - if x != nil { - return x.MaxItems - } - return 0 -} - -func (x *QueryParameterSubSchema) GetMinItems() int64 { - if x != nil { - return x.MinItems - } - return 0 -} - -func (x *QueryParameterSubSchema) GetUniqueItems() bool { - if x != nil { - return x.UniqueItems - } - return false -} - -func (x *QueryParameterSubSchema) GetEnum() []*Any { - if x != nil { - return x.Enum - } - return nil -} - -func (x *QueryParameterSubSchema) GetMultipleOf() float64 { - if x != nil { - return x.MultipleOf - } - return 0 -} - -func (x *QueryParameterSubSchema) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Response 
struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"` - Headers *Headers `protobuf:"bytes,3,opt,name=headers,proto3" json:"headers,omitempty"` - Examples *Examples `protobuf:"bytes,4,opt,name=examples,proto3" json:"examples,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Response) Reset() { - *x = Response{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Response) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Response) ProtoMessage() {} - -func (x *Response) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Response.ProtoReflect.Descriptor instead. -func (*Response) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{46} -} - -func (x *Response) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Response) GetSchema() *SchemaItem { - if x != nil { - return x.Schema - } - return nil -} - -func (x *Response) GetHeaders() *Headers { - if x != nil { - return x.Headers - } - return nil -} - -func (x *Response) GetExamples() *Examples { - if x != nil { - return x.Examples - } - return nil -} - -func (x *Response) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -// One or more JSON representations for responses -type ResponseDefinitions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *ResponseDefinitions) Reset() { - *x = ResponseDefinitions{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResponseDefinitions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResponseDefinitions) ProtoMessage() {} - -func (x *ResponseDefinitions) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResponseDefinitions.ProtoReflect.Descriptor instead. 
-func (*ResponseDefinitions) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{47} -} - -func (x *ResponseDefinitions) GetAdditionalProperties() []*NamedResponse { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -type ResponseValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *ResponseValue_Response - // *ResponseValue_JsonReference - Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` -} - -func (x *ResponseValue) Reset() { - *x = ResponseValue{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResponseValue) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResponseValue) ProtoMessage() {} - -func (x *ResponseValue) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResponseValue.ProtoReflect.Descriptor instead. -func (*ResponseValue) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{48} -} - -func (m *ResponseValue) GetOneof() isResponseValue_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *ResponseValue) GetResponse() *Response { - if x, ok := x.GetOneof().(*ResponseValue_Response); ok { - return x.Response - } - return nil -} - -func (x *ResponseValue) GetJsonReference() *JsonReference { - if x, ok := x.GetOneof().(*ResponseValue_JsonReference); ok { - return x.JsonReference - } - return nil -} - -type isResponseValue_Oneof interface { - isResponseValue_Oneof() -} - -type ResponseValue_Response struct { - Response *Response `protobuf:"bytes,1,opt,name=response,proto3,oneof"` -} - -type ResponseValue_JsonReference struct { - JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"` -} - -func (*ResponseValue_Response) isResponseValue_Oneof() {} - -func (*ResponseValue_JsonReference) isResponseValue_Oneof() {} - -// Response objects names can either be any valid HTTP status code or 'default'. 
-type Responses struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode,proto3" json:"response_code,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Responses) Reset() { - *x = Responses{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Responses) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Responses) ProtoMessage() {} - -func (x *Responses) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Responses.ProtoReflect.Descriptor instead. -func (*Responses) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{49} -} - -func (x *Responses) GetResponseCode() []*NamedResponseValue { - if x != nil { - return x.ResponseCode - } - return nil -} - -func (x *Responses) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -// A deterministic version of a JSON Schema object. -type Schema struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` - Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"` - Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` - MultipleOf float64 `protobuf:"fixed64,6,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - Maximum float64 `protobuf:"fixed64,7,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,8,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,9,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,10,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,11,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,12,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,14,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,15,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,16,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - MaxProperties int64 `protobuf:"varint,17,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"` - MinProperties int64 
`protobuf:"varint,18,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"` - Required []string `protobuf:"bytes,19,rep,name=required,proto3" json:"required,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` - AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,21,opt,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - Type *TypeItem `protobuf:"bytes,22,opt,name=type,proto3" json:"type,omitempty"` - Items *ItemsItem `protobuf:"bytes,23,opt,name=items,proto3" json:"items,omitempty"` - AllOf []*Schema `protobuf:"bytes,24,rep,name=all_of,json=allOf,proto3" json:"all_of,omitempty"` - Properties *Properties `protobuf:"bytes,25,opt,name=properties,proto3" json:"properties,omitempty"` - Discriminator string `protobuf:"bytes,26,opt,name=discriminator,proto3" json:"discriminator,omitempty"` - ReadOnly bool `protobuf:"varint,27,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` - Xml *Xml `protobuf:"bytes,28,opt,name=xml,proto3" json:"xml,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - Example *Any `protobuf:"bytes,30,opt,name=example,proto3" json:"example,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Schema) Reset() { - *x = Schema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Schema) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Schema) ProtoMessage() {} - -func (x *Schema) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Schema.ProtoReflect.Descriptor instead. 
-func (*Schema) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{50} -} - -func (x *Schema) GetXRef() string { - if x != nil { - return x.XRef - } - return "" -} - -func (x *Schema) GetFormat() string { - if x != nil { - return x.Format - } - return "" -} - -func (x *Schema) GetTitle() string { - if x != nil { - return x.Title - } - return "" -} - -func (x *Schema) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Schema) GetDefault() *Any { - if x != nil { - return x.Default - } - return nil -} - -func (x *Schema) GetMultipleOf() float64 { - if x != nil { - return x.MultipleOf - } - return 0 -} - -func (x *Schema) GetMaximum() float64 { - if x != nil { - return x.Maximum - } - return 0 -} - -func (x *Schema) GetExclusiveMaximum() bool { - if x != nil { - return x.ExclusiveMaximum - } - return false -} - -func (x *Schema) GetMinimum() float64 { - if x != nil { - return x.Minimum - } - return 0 -} - -func (x *Schema) GetExclusiveMinimum() bool { - if x != nil { - return x.ExclusiveMinimum - } - return false -} - -func (x *Schema) GetMaxLength() int64 { - if x != nil { - return x.MaxLength - } - return 0 -} - -func (x *Schema) GetMinLength() int64 { - if x != nil { - return x.MinLength - } - return 0 -} - -func (x *Schema) GetPattern() string { - if x != nil { - return x.Pattern - } - return "" -} - -func (x *Schema) GetMaxItems() int64 { - if x != nil { - return x.MaxItems - } - return 0 -} - -func (x *Schema) GetMinItems() int64 { - if x != nil { - return x.MinItems - } - return 0 -} - -func (x *Schema) GetUniqueItems() bool { - if x != nil { - return x.UniqueItems - } - return false -} - -func (x *Schema) GetMaxProperties() int64 { - if x != nil { - return x.MaxProperties - } - return 0 -} - -func (x *Schema) GetMinProperties() int64 { - if x != nil { - return x.MinProperties - } - return 0 -} - -func (x *Schema) GetRequired() []string { - if x != nil { - return x.Required - } - return nil -} - -func (x *Schema) GetEnum() []*Any { - if x != nil { - return x.Enum - } - return nil -} - -func (x *Schema) GetAdditionalProperties() *AdditionalPropertiesItem { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -func (x *Schema) GetType() *TypeItem { - if x != nil { - return x.Type - } - return nil -} - -func (x *Schema) GetItems() *ItemsItem { - if x != nil { - return x.Items - } - return nil -} - -func (x *Schema) GetAllOf() []*Schema { - if x != nil { - return x.AllOf - } - return nil -} - -func (x *Schema) GetProperties() *Properties { - if x != nil { - return x.Properties - } - return nil -} - -func (x *Schema) GetDiscriminator() string { - if x != nil { - return x.Discriminator - } - return "" -} - -func (x *Schema) GetReadOnly() bool { - if x != nil { - return x.ReadOnly - } - return false -} - -func (x *Schema) GetXml() *Xml { - if x != nil { - return x.Xml - } - return nil -} - -func (x *Schema) GetExternalDocs() *ExternalDocs { - if x != nil { - return x.ExternalDocs - } - return nil -} - -func (x *Schema) GetExample() *Any { - if x != nil { - return x.Example - } - return nil -} - -func (x *Schema) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type SchemaItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *SchemaItem_Schema - // *SchemaItem_FileSchema - Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` -} - -func 
(x *SchemaItem) Reset() { - *x = SchemaItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SchemaItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SchemaItem) ProtoMessage() {} - -func (x *SchemaItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SchemaItem.ProtoReflect.Descriptor instead. -func (*SchemaItem) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{51} -} - -func (m *SchemaItem) GetOneof() isSchemaItem_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *SchemaItem) GetSchema() *Schema { - if x, ok := x.GetOneof().(*SchemaItem_Schema); ok { - return x.Schema - } - return nil -} - -func (x *SchemaItem) GetFileSchema() *FileSchema { - if x, ok := x.GetOneof().(*SchemaItem_FileSchema); ok { - return x.FileSchema - } - return nil -} - -type isSchemaItem_Oneof interface { - isSchemaItem_Oneof() -} - -type SchemaItem_Schema struct { - Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` -} - -type SchemaItem_FileSchema struct { - FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,proto3,oneof"` -} - -func (*SchemaItem_Schema) isSchemaItem_Oneof() {} - -func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {} - -type SecurityDefinitions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *SecurityDefinitions) Reset() { - *x = SecurityDefinitions{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SecurityDefinitions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SecurityDefinitions) ProtoMessage() {} - -func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SecurityDefinitions.ProtoReflect.Descriptor instead. 
-func (*SecurityDefinitions) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{52} -} - -func (x *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -type SecurityDefinitionsItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *SecurityDefinitionsItem_BasicAuthenticationSecurity - // *SecurityDefinitionsItem_ApiKeySecurity - // *SecurityDefinitionsItem_Oauth2ImplicitSecurity - // *SecurityDefinitionsItem_Oauth2PasswordSecurity - // *SecurityDefinitionsItem_Oauth2ApplicationSecurity - // *SecurityDefinitionsItem_Oauth2AccessCodeSecurity - Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"` -} - -func (x *SecurityDefinitionsItem) Reset() { - *x = SecurityDefinitionsItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SecurityDefinitionsItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SecurityDefinitionsItem) ProtoMessage() {} - -func (x *SecurityDefinitionsItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SecurityDefinitionsItem.ProtoReflect.Descriptor instead. -func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{53} -} - -func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity { - if x, ok := x.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok { - return x.BasicAuthenticationSecurity - } - return nil -} - -func (x *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity { - if x, ok := x.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok { - return x.ApiKeySecurity - } - return nil -} - -func (x *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity { - if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok { - return x.Oauth2ImplicitSecurity - } - return nil -} - -func (x *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity { - if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok { - return x.Oauth2PasswordSecurity - } - return nil -} - -func (x *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity { - if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok { - return x.Oauth2ApplicationSecurity - } - return nil -} - -func (x *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity { - if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); ok { - return x.Oauth2AccessCodeSecurity - } - return nil -} - -type isSecurityDefinitionsItem_Oneof interface { - isSecurityDefinitionsItem_Oneof() -} - -type SecurityDefinitionsItem_BasicAuthenticationSecurity struct { - BasicAuthenticationSecurity *BasicAuthenticationSecurity 
`protobuf:"bytes,1,opt,name=basic_authentication_security,json=basicAuthenticationSecurity,proto3,oneof"` -} - -type SecurityDefinitionsItem_ApiKeySecurity struct { - ApiKeySecurity *ApiKeySecurity `protobuf:"bytes,2,opt,name=api_key_security,json=apiKeySecurity,proto3,oneof"` -} - -type SecurityDefinitionsItem_Oauth2ImplicitSecurity struct { - Oauth2ImplicitSecurity *Oauth2ImplicitSecurity `protobuf:"bytes,3,opt,name=oauth2_implicit_security,json=oauth2ImplicitSecurity,proto3,oneof"` -} - -type SecurityDefinitionsItem_Oauth2PasswordSecurity struct { - Oauth2PasswordSecurity *Oauth2PasswordSecurity `protobuf:"bytes,4,opt,name=oauth2_password_security,json=oauth2PasswordSecurity,proto3,oneof"` -} - -type SecurityDefinitionsItem_Oauth2ApplicationSecurity struct { - Oauth2ApplicationSecurity *Oauth2ApplicationSecurity `protobuf:"bytes,5,opt,name=oauth2_application_security,json=oauth2ApplicationSecurity,proto3,oneof"` -} - -type SecurityDefinitionsItem_Oauth2AccessCodeSecurity struct { - Oauth2AccessCodeSecurity *Oauth2AccessCodeSecurity `protobuf:"bytes,6,opt,name=oauth2_access_code_security,json=oauth2AccessCodeSecurity,proto3,oneof"` -} - -func (*SecurityDefinitionsItem_BasicAuthenticationSecurity) isSecurityDefinitionsItem_Oneof() {} - -func (*SecurityDefinitionsItem_ApiKeySecurity) isSecurityDefinitionsItem_Oneof() {} - -func (*SecurityDefinitionsItem_Oauth2ImplicitSecurity) isSecurityDefinitionsItem_Oneof() {} - -func (*SecurityDefinitionsItem_Oauth2PasswordSecurity) isSecurityDefinitionsItem_Oneof() {} - -func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsItem_Oneof() {} - -func (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof() {} - -type SecurityRequirement struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *SecurityRequirement) Reset() { - *x = SecurityRequirement{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SecurityRequirement) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SecurityRequirement) ProtoMessage() {} - -func (x *SecurityRequirement) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SecurityRequirement.ProtoReflect.Descriptor instead. 
-func (*SecurityRequirement) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{54} -} - -func (x *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -type StringArray struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` -} - -func (x *StringArray) Reset() { - *x = StringArray{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StringArray) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StringArray) ProtoMessage() {} - -func (x *StringArray) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StringArray.ProtoReflect.Descriptor instead. -func (*StringArray) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{55} -} - -func (x *StringArray) GetValue() []string { - if x != nil { - return x.Value - } - return nil -} - -type Tag struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Tag) Reset() { - *x = Tag{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Tag) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Tag) ProtoMessage() {} - -func (x *Tag) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Tag.ProtoReflect.Descriptor instead. 
-func (*Tag) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{56} -} - -func (x *Tag) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Tag) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Tag) GetExternalDocs() *ExternalDocs { - if x != nil { - return x.ExternalDocs - } - return nil -} - -func (x *Tag) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type TypeItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` -} - -func (x *TypeItem) Reset() { - *x = TypeItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TypeItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TypeItem) ProtoMessage() {} - -func (x *TypeItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TypeItem.ProtoReflect.Descriptor instead. -func (*TypeItem) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{57} -} - -func (x *TypeItem) GetValue() []string { - if x != nil { - return x.Value - } - return nil -} - -// Any property starting with x- is valid. -type VendorExtension struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *VendorExtension) Reset() { - *x = VendorExtension{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *VendorExtension) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*VendorExtension) ProtoMessage() {} - -func (x *VendorExtension) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use VendorExtension.ProtoReflect.Descriptor instead. 
-func (*VendorExtension) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{58} -} - -func (x *VendorExtension) GetAdditionalProperties() []*NamedAny { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -type Xml struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` - Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` - Attribute bool `protobuf:"varint,4,opt,name=attribute,proto3" json:"attribute,omitempty"` - Wrapped bool `protobuf:"varint,5,opt,name=wrapped,proto3" json:"wrapped,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Xml) Reset() { - *x = Xml{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Xml) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Xml) ProtoMessage() {} - -func (x *Xml) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Xml.ProtoReflect.Descriptor instead. -func (*Xml) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{59} -} - -func (x *Xml) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Xml) GetNamespace() string { - if x != nil { - return x.Namespace - } - return "" -} - -func (x *Xml) GetPrefix() string { - if x != nil { - return x.Prefix - } - return "" -} - -func (x *Xml) GetAttribute() bool { - if x != nil { - return x.Attribute - } - return false -} - -func (x *Xml) GetWrapped() bool { - if x != nil { - return x.Wrapped - } - return false -} - -func (x *Xml) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -var File_openapiv2_OpenAPIv2_proto protoreflect.FileDescriptor - -var file_openapiv2_OpenAPIv2_proto_rawDesc = []byte{ - 0x0a, 0x19, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x4f, 0x70, 0x65, 0x6e, - 0x41, 0x50, 0x49, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0x6d, 0x0a, 0x18, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, - 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x2c, - 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x48, 0x00, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x07, - 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, - 0x07, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 
0x6f, - 0x66, 0x22, 0x45, 0x0a, 0x03, 0x41, 0x6e, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x22, 0xab, 0x01, 0x0a, 0x0e, 0x41, 0x70, 0x69, - 0x4b, 0x65, 0x79, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, - 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x94, 0x01, 0x0a, 0x1b, 0x42, 0x61, 0x73, 0x69, 0x63, - 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, - 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, - 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, - 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xde, 0x01, - 0x0a, 0x0d, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, - 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, - 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, - 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x3f, 0x0a, - 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x06, 
0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, - 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x86, - 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, - 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, - 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, - 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, - 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x54, 0x0a, 0x07, 0x44, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x12, 0x49, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, - 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, - 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x5b, 0x0a, - 0x0b, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x15, - 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, - 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, - 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xe8, 0x05, 0x0a, 0x08, 0x44, - 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, - 0x72, 0x12, 0x24, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x10, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x62, - 0x61, 0x73, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x62, 0x61, 0x73, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, - 0x6d, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, - 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x06, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x1a, - 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x05, 0x70, 0x61, - 0x74, 0x68, 0x73, 0x18, 0x08, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x05, 0x70, 0x61, - 0x74, 0x68, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x40, - 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, - 0x12, 0x3d, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, - 0x3b, 0x0a, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, - 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x52, 0x0a, 0x14, - 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, - 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x73, 0x65, 0x63, - 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x23, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x67, 0x52, - 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x44, 0x6f, 0x63, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, - 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x55, 0x0a, 0x08, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, - 0x73, 0x12, 0x49, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, - 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 
0x76, 0x32, 0x2e, 0x4e, 0x61, - 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x83, 0x01, 0x0a, - 0x0c, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x20, 0x0a, - 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, - 0x6c, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, - 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x22, 0xff, 0x02, 0x0a, 0x0a, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, - 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, - 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x1a, 0x0a, 0x08, - 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, - 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, - 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x29, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, - 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, - 0x70, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, - 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xab, 0x06, 0x0a, 0x1a, 0x46, 0x6f, 0x72, 0x6d, 0x44, 0x61, 0x74, - 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 
0x68, - 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, - 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, - 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, - 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, - 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, - 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, - 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, - 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, - 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, - 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, - 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, - 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, - 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0e, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, - 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, - 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, - 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, - 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, - 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, - 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, - 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x08, 
0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, - 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, - 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, - 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, - 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, - 0x6e, 0x75, 0x6d, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, - 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, - 0x16, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, - 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x17, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, - 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x22, 0xab, 0x05, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x12, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x69, 0x74, 0x65, - 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, - 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2b, 0x0a, 0x11, - 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, - 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, - 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, - 0x6d, 0x75, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, - 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, - 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, - 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, - 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, - 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, - 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 
0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, - 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, - 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, - 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, - 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, - 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, - 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, - 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, - 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, - 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x11, 0x20, 0x01, 0x28, - 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x20, 0x0a, - 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x12, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, - 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x22, 0xfd, 0x05, 0x0a, 0x18, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, - 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x69, - 0x74, 0x65, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, - 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2b, - 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 
0x12, 0x29, 0x0a, 0x07, 0x64, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, - 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, - 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, - 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, - 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, - 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, - 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, - 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, - 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, - 0x74, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, - 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, - 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, - 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x10, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, - 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, - 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, - 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, - 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, - 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, - 0x6d, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1f, - 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x15, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, - 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, - 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x22, 0x57, 0x0a, 0x07, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x4c, 0x0a, 0x15, 0x61, - 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, - 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 
0x50, - 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xa1, 0x02, 0x0a, 0x04, 0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x5f, 0x6f, 0x66, - 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, - 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2d, - 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, - 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x2d, 0x0a, - 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x63, 0x65, - 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x10, - 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, - 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x37, 0x0a, - 0x09, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x44, 0x0a, 0x0d, 0x4a, 0x73, 0x6f, 0x6e, 0x52, 0x65, - 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x11, 0x0a, 0x04, 0x5f, 0x72, 0x65, 0x66, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x70, 0x0a, 0x07, - 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, - 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x3f, 0x0a, - 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, - 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x45, - 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, - 0x0a, 0x05, 0x76, 
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, - 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4b, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x22, 0x51, 0x0a, 0x0e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x65, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4f, 0x0a, 0x0d, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, - 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4f, 0x0a, 0x0d, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x59, 0x0a, 0x12, 0x4e, 0x61, 0x6d, 0x65, 0x64, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x2f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x22, 0x4b, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, - 0x6d, 0x0a, 0x1c, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, - 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x05, 
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x37, - 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x55, 0x0a, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x64, - 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xb5, - 0x03, 0x0a, 0x10, 0x4e, 0x6f, 0x6e, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x12, 0x65, 0x0a, 0x1b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x70, 0x61, - 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, - 0x52, 0x18, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, - 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x6c, 0x0a, 0x1e, 0x66, 0x6f, - 0x72, 0x6d, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, - 0x72, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x46, 0x6f, 0x72, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, - 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x1a, 0x66, 0x6f, - 0x72, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, - 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x62, 0x0a, 0x1a, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x5f, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x48, 0x00, 0x52, 0x17, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x5f, 0x0a, 0x19, - 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x73, - 0x75, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 
0x32, 0x2e, 0x50, 0x61, 0x74, - 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x16, 0x70, 0x61, 0x74, 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x42, 0x07, 0x0a, - 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xa1, 0x02, 0x0a, 0x18, 0x4f, 0x61, 0x75, 0x74, 0x68, - 0x32, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, - 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x06, 0x73, - 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, - 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x2b, 0x0a, - 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, - 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, - 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, - 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xf5, 0x01, 0x0a, 0x19, 0x4f, - 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, - 0x12, 0x30, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, - 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, - 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, - 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 
0x6e, - 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x22, 0x82, 0x02, 0x0a, 0x16, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x49, 0x6d, 0x70, - 0x6c, 0x69, 0x63, 0x69, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, - 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, - 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, - 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, - 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xf2, 0x01, 0x0a, 0x16, 0x4f, 0x61, 0x75, 0x74, - 0x68, 0x32, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, - 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x06, 0x73, 0x63, - 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, - 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, - 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, - 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x5c, 0x0a, 0x0c, - 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x4c, 0x0a, 0x15, - 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, - 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 
0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, - 0x72, 0x69, 0x6e, 0x67, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, - 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x9e, 0x04, 0x0a, 0x09, 0x4f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, - 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, - 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, - 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, - 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, - 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, - 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, - 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x74, - 0x65, 0x6d, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x33, - 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x0a, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x1e, 0x0a, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x3b, 0x0a, - 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x63, - 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, - 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0d, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, - 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 
0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa6, 0x01, 0x0a, 0x09, - 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0e, 0x62, 0x6f, 0x64, - 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x42, - 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, - 0x62, 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x4c, 0x0a, - 0x12, 0x6e, 0x6f, 0x6e, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x6e, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x6e, 0x6f, 0x6e, 0x42, 0x6f, - 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x42, 0x07, 0x0a, 0x05, 0x6f, - 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x67, 0x0a, 0x14, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, - 0x72, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4f, 0x0a, 0x15, - 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, - 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x94, 0x01, - 0x0a, 0x0e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x74, 0x65, 0x6d, - 0x12, 0x35, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x70, 0x61, - 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0e, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, - 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4a, 0x73, 0x6f, - 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x6a, 0x73, - 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, - 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xcf, 0x03, 0x0a, 0x08, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, - 0x6d, 0x12, 0x11, 0x0a, 0x04, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x52, 0x65, 0x66, 0x12, 0x27, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, - 0x03, 0x70, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12, 0x29, 0x0a, 0x04, 0x70, 0x6f, 0x73, 0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x52, 0x04, 0x70, 0x6f, 0x73, - 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x29, 0x0a, 0x04, 0x68, 0x65, 0x61, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x68, 0x65, 0x61, 0x64, 0x12, 0x2b, 0x0a, 0x05, - 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, - 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfb, 0x05, 0x0a, 0x16, 0x50, 0x61, 0x74, 0x68, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a, - 0x02, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, - 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, - 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, - 0x31, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, - 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, - 0x6d, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, - 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 
0x12, - 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, - 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, - 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, - 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, - 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, - 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, - 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, - 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, - 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, - 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, - 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, - 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, - 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, - 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, - 0x6e, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, - 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x11, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, - 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, - 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, - 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, - 0x75, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, - 0x66, 0x18, 0x15, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, - 0x65, 0x4f, 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, - 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x77, 0x0a, 0x05, 0x50, 0x61, 0x74, 0x68, 0x73, 0x12, 0x3f, 0x0a, - 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, - 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2d, - 0x0a, 0x04, 0x70, 
0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, - 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x92, 0x05, - 0x0a, 0x0f, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, - 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, - 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, - 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, - 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, - 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, - 0x6d, 0x75, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, - 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, - 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, - 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, - 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, - 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, - 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, - 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, - 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, - 0x67, 0x74, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, - 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, - 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, - 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, - 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, - 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, - 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, - 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, - 0x6e, 0x75, 0x6d, 0x18, 0x10, 0x20, 0x03, 
0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, - 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, - 0x11, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, - 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x12, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, - 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x22, 0x5a, 0x0a, 0x0a, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, - 0x12, 0x4c, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, - 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, - 0x65, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xa8, - 0x06, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, - 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, - 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, - 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11, - 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, - 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, - 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 
0x0b, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, - 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, - 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, - 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, - 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, - 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, - 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, - 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, - 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, - 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, - 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, - 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, - 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, - 0x13, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, - 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, - 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, - 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, - 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, - 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x16, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, - 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, - 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x17, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfe, 0x01, 0x0a, 0x08, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x49, 0x74, 0x65, 0x6d, - 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x2d, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x52, 
0x07, - 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x30, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, - 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x52, - 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, - 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, - 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x65, 0x0a, 0x13, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x4e, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, - 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, - 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x14, 0x61, 0x64, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, - 0x73, 0x22, 0x90, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x08, 0x72, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x0e, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, - 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4a, 0x73, 0x6f, - 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x6a, 0x73, - 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, - 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x91, 0x01, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x73, 0x12, 0x43, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, - 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, - 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, - 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xaf, 0x09, 0x0a, 0x06, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x12, 0x11, 0x0a, 0x04, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x14, - 0x0a, 0x05, 0x74, 
0x69, 0x74, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, - 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, - 0x4f, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, - 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, - 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, - 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, - 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x09, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, - 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, - 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, - 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, - 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, - 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, - 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, - 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, - 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, - 0x6d, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, - 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, - 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, - 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, - 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x6d, - 0x61, 0x78, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, - 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x12, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, - 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, - 0x13, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, - 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, - 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 
0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, - 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x59, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x15, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, - 0x72, 0x74, 0x69, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, - 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x49, - 0x74, 0x65, 0x6d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x69, 0x74, 0x65, - 0x6d, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, - 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x29, 0x0a, 0x06, 0x61, 0x6c, 0x6c, 0x5f, 0x6f, 0x66, - 0x18, 0x18, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x61, 0x6c, 0x6c, 0x4f, - 0x66, 0x12, 0x36, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, - 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x70, - 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x69, 0x73, - 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x12, - 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x1b, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x21, 0x0a, 0x03, - 0x78, 0x6d, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x58, 0x6d, 0x6c, 0x52, 0x03, 0x78, 0x6d, 0x6c, 0x12, - 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, - 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, - 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x29, - 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, - 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, - 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x1f, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, - 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x7e, 0x0a, 0x0a, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x2c, 0x0a, 
0x06, 0x73, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x06, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x39, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x74, 0x0a, 0x13, 0x53, 0x65, - 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x5d, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, - 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, - 0x6d, 0x65, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, - 0x22, 0xe9, 0x04, 0x0a, 0x17, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, - 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x6d, 0x0a, 0x1d, - 0x62, 0x61, 0x73, 0x69, 0x63, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x1b, - 0x62, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x46, 0x0a, 0x10, 0x61, - 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, - 0x79, 0x48, 0x00, 0x52, 0x0e, 0x61, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x53, 0x65, 0x63, 0x75, 0x72, - 0x69, 0x74, 0x79, 0x12, 0x5e, 0x0a, 0x18, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x69, 0x6d, - 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, - 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x16, 0x6f, 0x61, 0x75, - 0x74, 0x68, 0x32, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, - 0x69, 0x74, 0x79, 0x12, 0x5e, 0x0a, 0x18, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x70, 0x61, - 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 
0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, - 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x16, 0x6f, 0x61, 0x75, - 0x74, 0x68, 0x32, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, - 0x69, 0x74, 0x79, 0x12, 0x67, 0x0a, 0x1b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x61, 0x70, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, - 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x70, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, - 0x00, 0x52, 0x19, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x65, 0x0a, 0x1b, - 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x63, 0x6f, - 0x64, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, - 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x53, - 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x18, 0x6f, 0x61, 0x75, 0x74, 0x68, - 0x32, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, - 0x69, 0x74, 0x79, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x68, 0x0a, 0x13, - 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x12, 0x51, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, - 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, - 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, - 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xbb, 0x01, 0x0a, 0x03, - 0x54, 0x61, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, - 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x4e, 0x61, 0x6d, 
0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x08, 0x54, 0x79, 0x70, - 0x65, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5c, 0x0a, 0x0f, 0x56, - 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x49, - 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, - 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, - 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, - 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xc8, 0x01, 0x0a, 0x03, 0x58, 0x6d, - 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1c, 0x0a, 0x09, 0x61, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, - 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x77, 0x72, 0x61, - 0x70, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, - 0x70, 0x65, 0x64, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, - 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x3e, 0x0a, 0x0e, 0x6f, 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x42, 0x0c, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x76, 0x32, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0xa2, 0x02, - 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_openapiv2_OpenAPIv2_proto_rawDescOnce sync.Once - file_openapiv2_OpenAPIv2_proto_rawDescData = file_openapiv2_OpenAPIv2_proto_rawDesc -) - -func file_openapiv2_OpenAPIv2_proto_rawDescGZIP() []byte { - file_openapiv2_OpenAPIv2_proto_rawDescOnce.Do(func() { - file_openapiv2_OpenAPIv2_proto_rawDescData = protoimpl.X.CompressGZIP(file_openapiv2_OpenAPIv2_proto_rawDescData) - }) - return file_openapiv2_OpenAPIv2_proto_rawDescData -} - -var file_openapiv2_OpenAPIv2_proto_msgTypes = make([]protoimpl.MessageInfo, 60) -var file_openapiv2_OpenAPIv2_proto_goTypes = []interface{}{ - (*AdditionalPropertiesItem)(nil), // 0: openapi.v2.AdditionalPropertiesItem - (*Any)(nil), // 1: openapi.v2.Any - (*ApiKeySecurity)(nil), // 2: openapi.v2.ApiKeySecurity - (*BasicAuthenticationSecurity)(nil), // 3: openapi.v2.BasicAuthenticationSecurity - (*BodyParameter)(nil), // 4: openapi.v2.BodyParameter - (*Contact)(nil), // 5: openapi.v2.Contact - 
(*Default)(nil), // 6: openapi.v2.Default - (*Definitions)(nil), // 7: openapi.v2.Definitions - (*Document)(nil), // 8: openapi.v2.Document - (*Examples)(nil), // 9: openapi.v2.Examples - (*ExternalDocs)(nil), // 10: openapi.v2.ExternalDocs - (*FileSchema)(nil), // 11: openapi.v2.FileSchema - (*FormDataParameterSubSchema)(nil), // 12: openapi.v2.FormDataParameterSubSchema - (*Header)(nil), // 13: openapi.v2.Header - (*HeaderParameterSubSchema)(nil), // 14: openapi.v2.HeaderParameterSubSchema - (*Headers)(nil), // 15: openapi.v2.Headers - (*Info)(nil), // 16: openapi.v2.Info - (*ItemsItem)(nil), // 17: openapi.v2.ItemsItem - (*JsonReference)(nil), // 18: openapi.v2.JsonReference - (*License)(nil), // 19: openapi.v2.License - (*NamedAny)(nil), // 20: openapi.v2.NamedAny - (*NamedHeader)(nil), // 21: openapi.v2.NamedHeader - (*NamedParameter)(nil), // 22: openapi.v2.NamedParameter - (*NamedPathItem)(nil), // 23: openapi.v2.NamedPathItem - (*NamedResponse)(nil), // 24: openapi.v2.NamedResponse - (*NamedResponseValue)(nil), // 25: openapi.v2.NamedResponseValue - (*NamedSchema)(nil), // 26: openapi.v2.NamedSchema - (*NamedSecurityDefinitionsItem)(nil), // 27: openapi.v2.NamedSecurityDefinitionsItem - (*NamedString)(nil), // 28: openapi.v2.NamedString - (*NamedStringArray)(nil), // 29: openapi.v2.NamedStringArray - (*NonBodyParameter)(nil), // 30: openapi.v2.NonBodyParameter - (*Oauth2AccessCodeSecurity)(nil), // 31: openapi.v2.Oauth2AccessCodeSecurity - (*Oauth2ApplicationSecurity)(nil), // 32: openapi.v2.Oauth2ApplicationSecurity - (*Oauth2ImplicitSecurity)(nil), // 33: openapi.v2.Oauth2ImplicitSecurity - (*Oauth2PasswordSecurity)(nil), // 34: openapi.v2.Oauth2PasswordSecurity - (*Oauth2Scopes)(nil), // 35: openapi.v2.Oauth2Scopes - (*Operation)(nil), // 36: openapi.v2.Operation - (*Parameter)(nil), // 37: openapi.v2.Parameter - (*ParameterDefinitions)(nil), // 38: openapi.v2.ParameterDefinitions - (*ParametersItem)(nil), // 39: openapi.v2.ParametersItem - (*PathItem)(nil), // 40: openapi.v2.PathItem - (*PathParameterSubSchema)(nil), // 41: openapi.v2.PathParameterSubSchema - (*Paths)(nil), // 42: openapi.v2.Paths - (*PrimitivesItems)(nil), // 43: openapi.v2.PrimitivesItems - (*Properties)(nil), // 44: openapi.v2.Properties - (*QueryParameterSubSchema)(nil), // 45: openapi.v2.QueryParameterSubSchema - (*Response)(nil), // 46: openapi.v2.Response - (*ResponseDefinitions)(nil), // 47: openapi.v2.ResponseDefinitions - (*ResponseValue)(nil), // 48: openapi.v2.ResponseValue - (*Responses)(nil), // 49: openapi.v2.Responses - (*Schema)(nil), // 50: openapi.v2.Schema - (*SchemaItem)(nil), // 51: openapi.v2.SchemaItem - (*SecurityDefinitions)(nil), // 52: openapi.v2.SecurityDefinitions - (*SecurityDefinitionsItem)(nil), // 53: openapi.v2.SecurityDefinitionsItem - (*SecurityRequirement)(nil), // 54: openapi.v2.SecurityRequirement - (*StringArray)(nil), // 55: openapi.v2.StringArray - (*Tag)(nil), // 56: openapi.v2.Tag - (*TypeItem)(nil), // 57: openapi.v2.TypeItem - (*VendorExtension)(nil), // 58: openapi.v2.VendorExtension - (*Xml)(nil), // 59: openapi.v2.Xml - (*anypb.Any)(nil), // 60: google.protobuf.Any -} -var file_openapiv2_OpenAPIv2_proto_depIdxs = []int32{ - 50, // 0: openapi.v2.AdditionalPropertiesItem.schema:type_name -> openapi.v2.Schema - 60, // 1: openapi.v2.Any.value:type_name -> google.protobuf.Any - 20, // 2: openapi.v2.ApiKeySecurity.vendor_extension:type_name -> openapi.v2.NamedAny - 20, // 3: openapi.v2.BasicAuthenticationSecurity.vendor_extension:type_name -> openapi.v2.NamedAny - 50, 
// 4: openapi.v2.BodyParameter.schema:type_name -> openapi.v2.Schema - 20, // 5: openapi.v2.BodyParameter.vendor_extension:type_name -> openapi.v2.NamedAny - 20, // 6: openapi.v2.Contact.vendor_extension:type_name -> openapi.v2.NamedAny - 20, // 7: openapi.v2.Default.additional_properties:type_name -> openapi.v2.NamedAny - 26, // 8: openapi.v2.Definitions.additional_properties:type_name -> openapi.v2.NamedSchema - 16, // 9: openapi.v2.Document.info:type_name -> openapi.v2.Info - 42, // 10: openapi.v2.Document.paths:type_name -> openapi.v2.Paths - 7, // 11: openapi.v2.Document.definitions:type_name -> openapi.v2.Definitions - 38, // 12: openapi.v2.Document.parameters:type_name -> openapi.v2.ParameterDefinitions - 47, // 13: openapi.v2.Document.responses:type_name -> openapi.v2.ResponseDefinitions - 54, // 14: openapi.v2.Document.security:type_name -> openapi.v2.SecurityRequirement - 52, // 15: openapi.v2.Document.security_definitions:type_name -> openapi.v2.SecurityDefinitions - 56, // 16: openapi.v2.Document.tags:type_name -> openapi.v2.Tag - 10, // 17: openapi.v2.Document.external_docs:type_name -> openapi.v2.ExternalDocs - 20, // 18: openapi.v2.Document.vendor_extension:type_name -> openapi.v2.NamedAny - 20, // 19: openapi.v2.Examples.additional_properties:type_name -> openapi.v2.NamedAny - 20, // 20: openapi.v2.ExternalDocs.vendor_extension:type_name -> openapi.v2.NamedAny - 1, // 21: openapi.v2.FileSchema.default:type_name -> openapi.v2.Any - 10, // 22: openapi.v2.FileSchema.external_docs:type_name -> openapi.v2.ExternalDocs - 1, // 23: openapi.v2.FileSchema.example:type_name -> openapi.v2.Any - 20, // 24: openapi.v2.FileSchema.vendor_extension:type_name -> openapi.v2.NamedAny - 43, // 25: openapi.v2.FormDataParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems - 1, // 26: openapi.v2.FormDataParameterSubSchema.default:type_name -> openapi.v2.Any - 1, // 27: openapi.v2.FormDataParameterSubSchema.enum:type_name -> openapi.v2.Any - 20, // 28: openapi.v2.FormDataParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny - 43, // 29: openapi.v2.Header.items:type_name -> openapi.v2.PrimitivesItems - 1, // 30: openapi.v2.Header.default:type_name -> openapi.v2.Any - 1, // 31: openapi.v2.Header.enum:type_name -> openapi.v2.Any - 20, // 32: openapi.v2.Header.vendor_extension:type_name -> openapi.v2.NamedAny - 43, // 33: openapi.v2.HeaderParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems - 1, // 34: openapi.v2.HeaderParameterSubSchema.default:type_name -> openapi.v2.Any - 1, // 35: openapi.v2.HeaderParameterSubSchema.enum:type_name -> openapi.v2.Any - 20, // 36: openapi.v2.HeaderParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny - 21, // 37: openapi.v2.Headers.additional_properties:type_name -> openapi.v2.NamedHeader - 5, // 38: openapi.v2.Info.contact:type_name -> openapi.v2.Contact - 19, // 39: openapi.v2.Info.license:type_name -> openapi.v2.License - 20, // 40: openapi.v2.Info.vendor_extension:type_name -> openapi.v2.NamedAny - 50, // 41: openapi.v2.ItemsItem.schema:type_name -> openapi.v2.Schema - 20, // 42: openapi.v2.License.vendor_extension:type_name -> openapi.v2.NamedAny - 1, // 43: openapi.v2.NamedAny.value:type_name -> openapi.v2.Any - 13, // 44: openapi.v2.NamedHeader.value:type_name -> openapi.v2.Header - 37, // 45: openapi.v2.NamedParameter.value:type_name -> openapi.v2.Parameter - 40, // 46: openapi.v2.NamedPathItem.value:type_name -> openapi.v2.PathItem - 46, // 47: openapi.v2.NamedResponse.value:type_name -> openapi.v2.Response 
- 48, // 48: openapi.v2.NamedResponseValue.value:type_name -> openapi.v2.ResponseValue - 50, // 49: openapi.v2.NamedSchema.value:type_name -> openapi.v2.Schema - 53, // 50: openapi.v2.NamedSecurityDefinitionsItem.value:type_name -> openapi.v2.SecurityDefinitionsItem - 55, // 51: openapi.v2.NamedStringArray.value:type_name -> openapi.v2.StringArray - 14, // 52: openapi.v2.NonBodyParameter.header_parameter_sub_schema:type_name -> openapi.v2.HeaderParameterSubSchema - 12, // 53: openapi.v2.NonBodyParameter.form_data_parameter_sub_schema:type_name -> openapi.v2.FormDataParameterSubSchema - 45, // 54: openapi.v2.NonBodyParameter.query_parameter_sub_schema:type_name -> openapi.v2.QueryParameterSubSchema - 41, // 55: openapi.v2.NonBodyParameter.path_parameter_sub_schema:type_name -> openapi.v2.PathParameterSubSchema - 35, // 56: openapi.v2.Oauth2AccessCodeSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes - 20, // 57: openapi.v2.Oauth2AccessCodeSecurity.vendor_extension:type_name -> openapi.v2.NamedAny - 35, // 58: openapi.v2.Oauth2ApplicationSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes - 20, // 59: openapi.v2.Oauth2ApplicationSecurity.vendor_extension:type_name -> openapi.v2.NamedAny - 35, // 60: openapi.v2.Oauth2ImplicitSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes - 20, // 61: openapi.v2.Oauth2ImplicitSecurity.vendor_extension:type_name -> openapi.v2.NamedAny - 35, // 62: openapi.v2.Oauth2PasswordSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes - 20, // 63: openapi.v2.Oauth2PasswordSecurity.vendor_extension:type_name -> openapi.v2.NamedAny - 28, // 64: openapi.v2.Oauth2Scopes.additional_properties:type_name -> openapi.v2.NamedString - 10, // 65: openapi.v2.Operation.external_docs:type_name -> openapi.v2.ExternalDocs - 39, // 66: openapi.v2.Operation.parameters:type_name -> openapi.v2.ParametersItem - 49, // 67: openapi.v2.Operation.responses:type_name -> openapi.v2.Responses - 54, // 68: openapi.v2.Operation.security:type_name -> openapi.v2.SecurityRequirement - 20, // 69: openapi.v2.Operation.vendor_extension:type_name -> openapi.v2.NamedAny - 4, // 70: openapi.v2.Parameter.body_parameter:type_name -> openapi.v2.BodyParameter - 30, // 71: openapi.v2.Parameter.non_body_parameter:type_name -> openapi.v2.NonBodyParameter - 22, // 72: openapi.v2.ParameterDefinitions.additional_properties:type_name -> openapi.v2.NamedParameter - 37, // 73: openapi.v2.ParametersItem.parameter:type_name -> openapi.v2.Parameter - 18, // 74: openapi.v2.ParametersItem.json_reference:type_name -> openapi.v2.JsonReference - 36, // 75: openapi.v2.PathItem.get:type_name -> openapi.v2.Operation - 36, // 76: openapi.v2.PathItem.put:type_name -> openapi.v2.Operation - 36, // 77: openapi.v2.PathItem.post:type_name -> openapi.v2.Operation - 36, // 78: openapi.v2.PathItem.delete:type_name -> openapi.v2.Operation - 36, // 79: openapi.v2.PathItem.options:type_name -> openapi.v2.Operation - 36, // 80: openapi.v2.PathItem.head:type_name -> openapi.v2.Operation - 36, // 81: openapi.v2.PathItem.patch:type_name -> openapi.v2.Operation - 39, // 82: openapi.v2.PathItem.parameters:type_name -> openapi.v2.ParametersItem - 20, // 83: openapi.v2.PathItem.vendor_extension:type_name -> openapi.v2.NamedAny - 43, // 84: openapi.v2.PathParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems - 1, // 85: openapi.v2.PathParameterSubSchema.default:type_name -> openapi.v2.Any - 1, // 86: openapi.v2.PathParameterSubSchema.enum:type_name -> openapi.v2.Any - 20, // 87: 
openapi.v2.PathParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny - 20, // 88: openapi.v2.Paths.vendor_extension:type_name -> openapi.v2.NamedAny - 23, // 89: openapi.v2.Paths.path:type_name -> openapi.v2.NamedPathItem - 43, // 90: openapi.v2.PrimitivesItems.items:type_name -> openapi.v2.PrimitivesItems - 1, // 91: openapi.v2.PrimitivesItems.default:type_name -> openapi.v2.Any - 1, // 92: openapi.v2.PrimitivesItems.enum:type_name -> openapi.v2.Any - 20, // 93: openapi.v2.PrimitivesItems.vendor_extension:type_name -> openapi.v2.NamedAny - 26, // 94: openapi.v2.Properties.additional_properties:type_name -> openapi.v2.NamedSchema - 43, // 95: openapi.v2.QueryParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems - 1, // 96: openapi.v2.QueryParameterSubSchema.default:type_name -> openapi.v2.Any - 1, // 97: openapi.v2.QueryParameterSubSchema.enum:type_name -> openapi.v2.Any - 20, // 98: openapi.v2.QueryParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny - 51, // 99: openapi.v2.Response.schema:type_name -> openapi.v2.SchemaItem - 15, // 100: openapi.v2.Response.headers:type_name -> openapi.v2.Headers - 9, // 101: openapi.v2.Response.examples:type_name -> openapi.v2.Examples - 20, // 102: openapi.v2.Response.vendor_extension:type_name -> openapi.v2.NamedAny - 24, // 103: openapi.v2.ResponseDefinitions.additional_properties:type_name -> openapi.v2.NamedResponse - 46, // 104: openapi.v2.ResponseValue.response:type_name -> openapi.v2.Response - 18, // 105: openapi.v2.ResponseValue.json_reference:type_name -> openapi.v2.JsonReference - 25, // 106: openapi.v2.Responses.response_code:type_name -> openapi.v2.NamedResponseValue - 20, // 107: openapi.v2.Responses.vendor_extension:type_name -> openapi.v2.NamedAny - 1, // 108: openapi.v2.Schema.default:type_name -> openapi.v2.Any - 1, // 109: openapi.v2.Schema.enum:type_name -> openapi.v2.Any - 0, // 110: openapi.v2.Schema.additional_properties:type_name -> openapi.v2.AdditionalPropertiesItem - 57, // 111: openapi.v2.Schema.type:type_name -> openapi.v2.TypeItem - 17, // 112: openapi.v2.Schema.items:type_name -> openapi.v2.ItemsItem - 50, // 113: openapi.v2.Schema.all_of:type_name -> openapi.v2.Schema - 44, // 114: openapi.v2.Schema.properties:type_name -> openapi.v2.Properties - 59, // 115: openapi.v2.Schema.xml:type_name -> openapi.v2.Xml - 10, // 116: openapi.v2.Schema.external_docs:type_name -> openapi.v2.ExternalDocs - 1, // 117: openapi.v2.Schema.example:type_name -> openapi.v2.Any - 20, // 118: openapi.v2.Schema.vendor_extension:type_name -> openapi.v2.NamedAny - 50, // 119: openapi.v2.SchemaItem.schema:type_name -> openapi.v2.Schema - 11, // 120: openapi.v2.SchemaItem.file_schema:type_name -> openapi.v2.FileSchema - 27, // 121: openapi.v2.SecurityDefinitions.additional_properties:type_name -> openapi.v2.NamedSecurityDefinitionsItem - 3, // 122: openapi.v2.SecurityDefinitionsItem.basic_authentication_security:type_name -> openapi.v2.BasicAuthenticationSecurity - 2, // 123: openapi.v2.SecurityDefinitionsItem.api_key_security:type_name -> openapi.v2.ApiKeySecurity - 33, // 124: openapi.v2.SecurityDefinitionsItem.oauth2_implicit_security:type_name -> openapi.v2.Oauth2ImplicitSecurity - 34, // 125: openapi.v2.SecurityDefinitionsItem.oauth2_password_security:type_name -> openapi.v2.Oauth2PasswordSecurity - 32, // 126: openapi.v2.SecurityDefinitionsItem.oauth2_application_security:type_name -> openapi.v2.Oauth2ApplicationSecurity - 31, // 127: 
openapi.v2.SecurityDefinitionsItem.oauth2_access_code_security:type_name -> openapi.v2.Oauth2AccessCodeSecurity - 29, // 128: openapi.v2.SecurityRequirement.additional_properties:type_name -> openapi.v2.NamedStringArray - 10, // 129: openapi.v2.Tag.external_docs:type_name -> openapi.v2.ExternalDocs - 20, // 130: openapi.v2.Tag.vendor_extension:type_name -> openapi.v2.NamedAny - 20, // 131: openapi.v2.VendorExtension.additional_properties:type_name -> openapi.v2.NamedAny - 20, // 132: openapi.v2.Xml.vendor_extension:type_name -> openapi.v2.NamedAny - 133, // [133:133] is the sub-list for method output_type - 133, // [133:133] is the sub-list for method input_type - 133, // [133:133] is the sub-list for extension type_name - 133, // [133:133] is the sub-list for extension extendee - 0, // [0:133] is the sub-list for field type_name -} - -func init() { file_openapiv2_OpenAPIv2_proto_init() } -func file_openapiv2_OpenAPIv2_proto_init() { - if File_openapiv2_OpenAPIv2_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_openapiv2_OpenAPIv2_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AdditionalPropertiesItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Any); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApiKeySecurity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BasicAuthenticationSecurity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BodyParameter); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Contact); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Default); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Definitions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Document); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Examples); i { - case 0: - return 
&v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExternalDocs); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FileSchema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FormDataParameterSubSchema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Header); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HeaderParameterSubSchema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Headers); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Info); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ItemsItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*JsonReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*License); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedAny); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedHeader); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedParameter); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_openapiv2_OpenAPIv2_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedPathItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedResponseValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedSchema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedSecurityDefinitionsItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedString); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedStringArray); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NonBodyParameter); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Oauth2AccessCodeSecurity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Oauth2ApplicationSecurity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Oauth2ImplicitSecurity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Oauth2PasswordSecurity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Oauth2Scopes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[36].Exporter = 
func(v interface{}, i int) interface{} { - switch v := v.(*Operation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Parameter); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ParameterDefinitions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ParametersItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PathItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PathParameterSubSchema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Paths); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PrimitivesItems); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Properties); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryParameterSubSchema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Response); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResponseDefinitions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResponseValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Responses); i { - case 0: - return &v.state - case 1: - 
return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Schema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SchemaItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SecurityDefinitions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SecurityDefinitionsItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SecurityRequirement); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StringArray); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Tag); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TypeItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VendorExtension); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Xml); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*AdditionalPropertiesItem_Schema)(nil), - (*AdditionalPropertiesItem_Boolean)(nil), - } - file_openapiv2_OpenAPIv2_proto_msgTypes[30].OneofWrappers = []interface{}{ - (*NonBodyParameter_HeaderParameterSubSchema)(nil), - (*NonBodyParameter_FormDataParameterSubSchema)(nil), - (*NonBodyParameter_QueryParameterSubSchema)(nil), - (*NonBodyParameter_PathParameterSubSchema)(nil), - } - file_openapiv2_OpenAPIv2_proto_msgTypes[37].OneofWrappers = []interface{}{ - (*Parameter_BodyParameter)(nil), - (*Parameter_NonBodyParameter)(nil), - } - file_openapiv2_OpenAPIv2_proto_msgTypes[39].OneofWrappers = []interface{}{ - (*ParametersItem_Parameter)(nil), - (*ParametersItem_JsonReference)(nil), - } - file_openapiv2_OpenAPIv2_proto_msgTypes[48].OneofWrappers = 
[]interface{}{ - (*ResponseValue_Response)(nil), - (*ResponseValue_JsonReference)(nil), - } - file_openapiv2_OpenAPIv2_proto_msgTypes[51].OneofWrappers = []interface{}{ - (*SchemaItem_Schema)(nil), - (*SchemaItem_FileSchema)(nil), - } - file_openapiv2_OpenAPIv2_proto_msgTypes[53].OneofWrappers = []interface{}{ - (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), - (*SecurityDefinitionsItem_ApiKeySecurity)(nil), - (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_openapiv2_OpenAPIv2_proto_rawDesc, - NumEnums: 0, - NumMessages: 60, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_openapiv2_OpenAPIv2_proto_goTypes, - DependencyIndexes: file_openapiv2_OpenAPIv2_proto_depIdxs, - MessageInfos: file_openapiv2_OpenAPIv2_proto_msgTypes, - }.Build() - File_openapiv2_OpenAPIv2_proto = out.File - file_openapiv2_OpenAPIv2_proto_rawDesc = nil - file_openapiv2_OpenAPIv2_proto_goTypes = nil - file_openapiv2_OpenAPIv2_proto_depIdxs = nil -} diff --git a/src/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.proto b/src/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.proto deleted file mode 100644 index 1c59b2f4a..000000000 --- a/src/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.proto +++ /dev/null @@ -1,666 +0,0 @@ -// Copyright 2020 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// THIS FILE IS AUTOMATICALLY GENERATED. - -syntax = "proto3"; - -package openapi.v2; - -import "google/protobuf/any.proto"; - -// This option lets the proto compiler generate Java code inside the package -// name (see below) instead of inside an outer class. It creates a simpler -// developer experience by reducing one-level of name nesting and be -// consistent with most programming languages that don't support outer classes. -option java_multiple_files = true; - -// The Java outer classname should be the filename in UpperCamelCase. This -// class is only used to hold proto descriptor, so developers don't need to -// work with it directly. -option java_outer_classname = "OpenAPIProto"; - -// The Java package name must be proto package name with proper prefix. -option java_package = "org.openapi_v2"; - -// A reasonable prefix for the Objective-C symbols generated from the package. -// It should at a minimum be 3 characters long, all uppercase, and convention -// is to use an abbreviation of the package name. Something short, but -// hopefully unique enough to not conflict with things that may come along in -// the future. 'GPB' is reserved for the protocol buffer implementation itself. -option objc_class_prefix = "OAS"; - -// The Go package name. 
-option go_package = "./openapiv2;openapi_v2"; - -message AdditionalPropertiesItem { - oneof oneof { - Schema schema = 1; - bool boolean = 2; - } -} - -message Any { - google.protobuf.Any value = 1; - string yaml = 2; -} - -message ApiKeySecurity { - string type = 1; - string name = 2; - string in = 3; - string description = 4; - repeated NamedAny vendor_extension = 5; -} - -message BasicAuthenticationSecurity { - string type = 1; - string description = 2; - repeated NamedAny vendor_extension = 3; -} - -message BodyParameter { - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 1; - // The name of the parameter. - string name = 2; - // Determines the location of the parameter. - string in = 3; - // Determines whether or not this parameter is required or optional. - bool required = 4; - Schema schema = 5; - repeated NamedAny vendor_extension = 6; -} - -// Contact information for the owners of the API. -message Contact { - // The identifying name of the contact person/organization. - string name = 1; - // The URL pointing to the contact information. - string url = 2; - // The email address of the contact person/organization. - string email = 3; - repeated NamedAny vendor_extension = 4; -} - -message Default { - repeated NamedAny additional_properties = 1; -} - -// One or more JSON objects describing the schemas being consumed and produced by the API. -message Definitions { - repeated NamedSchema additional_properties = 1; -} - -message Document { - // The Swagger version of this document. - string swagger = 1; - Info info = 2; - // The host (name or ip) of the API. Example: 'swagger.io' - string host = 3; - // The base path to the API. Example: '/api'. - string base_path = 4; - // The transfer protocol of the API. - repeated string schemes = 5; - // A list of MIME types accepted by the API. - repeated string consumes = 6; - // A list of MIME types the API can produce. - repeated string produces = 7; - Paths paths = 8; - Definitions definitions = 9; - ParameterDefinitions parameters = 10; - ResponseDefinitions responses = 11; - repeated SecurityRequirement security = 12; - SecurityDefinitions security_definitions = 13; - repeated Tag tags = 14; - ExternalDocs external_docs = 15; - repeated NamedAny vendor_extension = 16; -} - -message Examples { - repeated NamedAny additional_properties = 1; -} - -// information about external documentation -message ExternalDocs { - string description = 1; - string url = 2; - repeated NamedAny vendor_extension = 3; -} - -// A deterministic version of a JSON Schema object. -message FileSchema { - string format = 1; - string title = 2; - string description = 3; - Any default = 4; - repeated string required = 5; - string type = 6; - bool read_only = 7; - ExternalDocs external_docs = 8; - Any example = 9; - repeated NamedAny vendor_extension = 10; -} - -message FormDataParameterSubSchema { - // Determines whether or not this parameter is required or optional. - bool required = 1; - // Determines the location of the parameter. - string in = 2; - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 3; - // The name of the parameter. - string name = 4; - // allows sending a parameter by name only or with an empty value. 
- bool allow_empty_value = 5; - string type = 6; - string format = 7; - PrimitivesItems items = 8; - string collection_format = 9; - Any default = 10; - double maximum = 11; - bool exclusive_maximum = 12; - double minimum = 13; - bool exclusive_minimum = 14; - int64 max_length = 15; - int64 min_length = 16; - string pattern = 17; - int64 max_items = 18; - int64 min_items = 19; - bool unique_items = 20; - repeated Any enum = 21; - double multiple_of = 22; - repeated NamedAny vendor_extension = 23; -} - -message Header { - string type = 1; - string format = 2; - PrimitivesItems items = 3; - string collection_format = 4; - Any default = 5; - double maximum = 6; - bool exclusive_maximum = 7; - double minimum = 8; - bool exclusive_minimum = 9; - int64 max_length = 10; - int64 min_length = 11; - string pattern = 12; - int64 max_items = 13; - int64 min_items = 14; - bool unique_items = 15; - repeated Any enum = 16; - double multiple_of = 17; - string description = 18; - repeated NamedAny vendor_extension = 19; -} - -message HeaderParameterSubSchema { - // Determines whether or not this parameter is required or optional. - bool required = 1; - // Determines the location of the parameter. - string in = 2; - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 3; - // The name of the parameter. - string name = 4; - string type = 5; - string format = 6; - PrimitivesItems items = 7; - string collection_format = 8; - Any default = 9; - double maximum = 10; - bool exclusive_maximum = 11; - double minimum = 12; - bool exclusive_minimum = 13; - int64 max_length = 14; - int64 min_length = 15; - string pattern = 16; - int64 max_items = 17; - int64 min_items = 18; - bool unique_items = 19; - repeated Any enum = 20; - double multiple_of = 21; - repeated NamedAny vendor_extension = 22; -} - -message Headers { - repeated NamedHeader additional_properties = 1; -} - -// General information about the API. -message Info { - // A unique and precise title of the API. - string title = 1; - // A semantic version number of the API. - string version = 2; - // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. - string description = 3; - // The terms of service for the API. - string terms_of_service = 4; - Contact contact = 5; - License license = 6; - repeated NamedAny vendor_extension = 7; -} - -message ItemsItem { - repeated Schema schema = 1; -} - -message JsonReference { - string _ref = 1; - string description = 2; -} - -message License { - // The name of the license type. It's encouraged to use an OSI compatible license. - string name = 1; - // The URL pointing to the license. - string url = 2; - repeated NamedAny vendor_extension = 3; -} - -// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. -message NamedAny { - // Map key - string name = 1; - // Mapped value - Any value = 2; -} - -// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. -message NamedHeader { - // Map key - string name = 1; - // Mapped value - Header value = 2; -} - -// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. -message NamedParameter { - // Map key - string name = 1; - // Mapped value - Parameter value = 2; -} - -// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
-message NamedPathItem { - // Map key - string name = 1; - // Mapped value - PathItem value = 2; -} - -// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. -message NamedResponse { - // Map key - string name = 1; - // Mapped value - Response value = 2; -} - -// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. -message NamedResponseValue { - // Map key - string name = 1; - // Mapped value - ResponseValue value = 2; -} - -// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. -message NamedSchema { - // Map key - string name = 1; - // Mapped value - Schema value = 2; -} - -// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. -message NamedSecurityDefinitionsItem { - // Map key - string name = 1; - // Mapped value - SecurityDefinitionsItem value = 2; -} - -// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. -message NamedString { - // Map key - string name = 1; - // Mapped value - string value = 2; -} - -// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. -message NamedStringArray { - // Map key - string name = 1; - // Mapped value - StringArray value = 2; -} - -message NonBodyParameter { - oneof oneof { - HeaderParameterSubSchema header_parameter_sub_schema = 1; - FormDataParameterSubSchema form_data_parameter_sub_schema = 2; - QueryParameterSubSchema query_parameter_sub_schema = 3; - PathParameterSubSchema path_parameter_sub_schema = 4; - } -} - -message Oauth2AccessCodeSecurity { - string type = 1; - string flow = 2; - Oauth2Scopes scopes = 3; - string authorization_url = 4; - string token_url = 5; - string description = 6; - repeated NamedAny vendor_extension = 7; -} - -message Oauth2ApplicationSecurity { - string type = 1; - string flow = 2; - Oauth2Scopes scopes = 3; - string token_url = 4; - string description = 5; - repeated NamedAny vendor_extension = 6; -} - -message Oauth2ImplicitSecurity { - string type = 1; - string flow = 2; - Oauth2Scopes scopes = 3; - string authorization_url = 4; - string description = 5; - repeated NamedAny vendor_extension = 6; -} - -message Oauth2PasswordSecurity { - string type = 1; - string flow = 2; - Oauth2Scopes scopes = 3; - string token_url = 4; - string description = 5; - repeated NamedAny vendor_extension = 6; -} - -message Oauth2Scopes { - repeated NamedString additional_properties = 1; -} - -message Operation { - repeated string tags = 1; - // A brief summary of the operation. - string summary = 2; - // A longer description of the operation, GitHub Flavored Markdown is allowed. - string description = 3; - ExternalDocs external_docs = 4; - // A unique identifier of the operation. - string operation_id = 5; - // A list of MIME types the API can produce. - repeated string produces = 6; - // A list of MIME types the API can consume. - repeated string consumes = 7; - // The parameters needed to send a valid API call. - repeated ParametersItem parameters = 8; - Responses responses = 9; - // The transfer protocol of the API. 
- repeated string schemes = 10; - bool deprecated = 11; - repeated SecurityRequirement security = 12; - repeated NamedAny vendor_extension = 13; -} - -message Parameter { - oneof oneof { - BodyParameter body_parameter = 1; - NonBodyParameter non_body_parameter = 2; - } -} - -// One or more JSON representations for parameters -message ParameterDefinitions { - repeated NamedParameter additional_properties = 1; -} - -message ParametersItem { - oneof oneof { - Parameter parameter = 1; - JsonReference json_reference = 2; - } -} - -message PathItem { - string _ref = 1; - Operation get = 2; - Operation put = 3; - Operation post = 4; - Operation delete = 5; - Operation options = 6; - Operation head = 7; - Operation patch = 8; - // The parameters needed to send a valid API call. - repeated ParametersItem parameters = 9; - repeated NamedAny vendor_extension = 10; -} - -message PathParameterSubSchema { - // Determines whether or not this parameter is required or optional. - bool required = 1; - // Determines the location of the parameter. - string in = 2; - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 3; - // The name of the parameter. - string name = 4; - string type = 5; - string format = 6; - PrimitivesItems items = 7; - string collection_format = 8; - Any default = 9; - double maximum = 10; - bool exclusive_maximum = 11; - double minimum = 12; - bool exclusive_minimum = 13; - int64 max_length = 14; - int64 min_length = 15; - string pattern = 16; - int64 max_items = 17; - int64 min_items = 18; - bool unique_items = 19; - repeated Any enum = 20; - double multiple_of = 21; - repeated NamedAny vendor_extension = 22; -} - -// Relative paths to the individual endpoints. They must be relative to the 'basePath'. -message Paths { - repeated NamedAny vendor_extension = 1; - repeated NamedPathItem path = 2; -} - -message PrimitivesItems { - string type = 1; - string format = 2; - PrimitivesItems items = 3; - string collection_format = 4; - Any default = 5; - double maximum = 6; - bool exclusive_maximum = 7; - double minimum = 8; - bool exclusive_minimum = 9; - int64 max_length = 10; - int64 min_length = 11; - string pattern = 12; - int64 max_items = 13; - int64 min_items = 14; - bool unique_items = 15; - repeated Any enum = 16; - double multiple_of = 17; - repeated NamedAny vendor_extension = 18; -} - -message Properties { - repeated NamedSchema additional_properties = 1; -} - -message QueryParameterSubSchema { - // Determines whether or not this parameter is required or optional. - bool required = 1; - // Determines the location of the parameter. - string in = 2; - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 3; - // The name of the parameter. - string name = 4; - // allows sending a parameter by name only or with an empty value. 
- bool allow_empty_value = 5; - string type = 6; - string format = 7; - PrimitivesItems items = 8; - string collection_format = 9; - Any default = 10; - double maximum = 11; - bool exclusive_maximum = 12; - double minimum = 13; - bool exclusive_minimum = 14; - int64 max_length = 15; - int64 min_length = 16; - string pattern = 17; - int64 max_items = 18; - int64 min_items = 19; - bool unique_items = 20; - repeated Any enum = 21; - double multiple_of = 22; - repeated NamedAny vendor_extension = 23; -} - -message Response { - string description = 1; - SchemaItem schema = 2; - Headers headers = 3; - Examples examples = 4; - repeated NamedAny vendor_extension = 5; -} - -// One or more JSON representations for responses -message ResponseDefinitions { - repeated NamedResponse additional_properties = 1; -} - -message ResponseValue { - oneof oneof { - Response response = 1; - JsonReference json_reference = 2; - } -} - -// Response objects names can either be any valid HTTP status code or 'default'. -message Responses { - repeated NamedResponseValue response_code = 1; - repeated NamedAny vendor_extension = 2; -} - -// A deterministic version of a JSON Schema object. -message Schema { - string _ref = 1; - string format = 2; - string title = 3; - string description = 4; - Any default = 5; - double multiple_of = 6; - double maximum = 7; - bool exclusive_maximum = 8; - double minimum = 9; - bool exclusive_minimum = 10; - int64 max_length = 11; - int64 min_length = 12; - string pattern = 13; - int64 max_items = 14; - int64 min_items = 15; - bool unique_items = 16; - int64 max_properties = 17; - int64 min_properties = 18; - repeated string required = 19; - repeated Any enum = 20; - AdditionalPropertiesItem additional_properties = 21; - TypeItem type = 22; - ItemsItem items = 23; - repeated Schema all_of = 24; - Properties properties = 25; - string discriminator = 26; - bool read_only = 27; - Xml xml = 28; - ExternalDocs external_docs = 29; - Any example = 30; - repeated NamedAny vendor_extension = 31; -} - -message SchemaItem { - oneof oneof { - Schema schema = 1; - FileSchema file_schema = 2; - } -} - -message SecurityDefinitions { - repeated NamedSecurityDefinitionsItem additional_properties = 1; -} - -message SecurityDefinitionsItem { - oneof oneof { - BasicAuthenticationSecurity basic_authentication_security = 1; - ApiKeySecurity api_key_security = 2; - Oauth2ImplicitSecurity oauth2_implicit_security = 3; - Oauth2PasswordSecurity oauth2_password_security = 4; - Oauth2ApplicationSecurity oauth2_application_security = 5; - Oauth2AccessCodeSecurity oauth2_access_code_security = 6; - } -} - -message SecurityRequirement { - repeated NamedStringArray additional_properties = 1; -} - -message StringArray { - repeated string value = 1; -} - -message Tag { - string name = 1; - string description = 2; - ExternalDocs external_docs = 3; - repeated NamedAny vendor_extension = 4; -} - -message TypeItem { - repeated string value = 1; -} - -// Any property starting with x- is valid. 
-message VendorExtension {
-  repeated NamedAny additional_properties = 1;
-}
-
-message Xml {
-  string name = 1;
-  string namespace = 2;
-  string prefix = 3;
-  bool attribute = 4;
-  bool wrapped = 5;
-  repeated NamedAny vendor_extension = 6;
-}
-
diff --git a/src/vendor/github.com/google/gnostic/openapiv2/README.md b/src/vendor/github.com/google/gnostic/openapiv2/README.md
deleted file mode 100644
index 5276128d3..000000000
--- a/src/vendor/github.com/google/gnostic/openapiv2/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# OpenAPI v2 Protocol Buffer Models
-
-This directory contains a Protocol Buffer-language model and related code for
-supporting OpenAPI v2.
-
-Gnostic applications and plugins can use OpenAPIv2.proto to generate Protocol
-Buffer support code for their preferred languages.
-
-OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI descriptions into
-the Protocol Buffer-based datastructures generated from OpenAPIv2.proto.
-
-OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic compiler
-generator, and OpenAPIv2.pb.go is generated by protoc, the Protocol Buffer
-compiler, and protoc-gen-go, the Protocol Buffer Go code generation plugin.
diff --git a/src/vendor/github.com/google/gnostic/openapiv2/document.go b/src/vendor/github.com/google/gnostic/openapiv2/document.go
deleted file mode 100644
index 0021ae871..000000000
--- a/src/vendor/github.com/google/gnostic/openapiv2/document.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2020 Google LLC. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openapi_v2
-
-import (
-	"gopkg.in/yaml.v3"
-
-	"github.com/google/gnostic/compiler"
-)
-
-// ParseDocument reads an OpenAPI v2 description from a YAML/JSON representation.
-func ParseDocument(b []byte) (*Document, error) {
-	info, err := compiler.ReadInfoFromBytes("", b)
-	if err != nil {
-		return nil, err
-	}
-	root := info.Content[0]
-	return NewDocument(root, compiler.NewContextWithExtensions("$root", root, nil, nil))
-}
-
-// YAMLValue produces a serialized YAML representation of the document.
-func (d *Document) YAMLValue(comment string) ([]byte, error) { - rawInfo := d.ToRawInfo() - rawInfo = &yaml.Node{ - Kind: yaml.DocumentNode, - Content: []*yaml.Node{rawInfo}, - HeadComment: comment, - } - return yaml.Marshal(rawInfo) -} diff --git a/src/vendor/github.com/google/gnostic/openapiv2/openapi-2.0.json b/src/vendor/github.com/google/gnostic/openapiv2/openapi-2.0.json deleted file mode 100644 index afa12b79b..000000000 --- a/src/vendor/github.com/google/gnostic/openapiv2/openapi-2.0.json +++ /dev/null @@ -1,1610 +0,0 @@ -{ - "title": "A JSON Schema for Swagger 2.0 API.", - "id": "http://swagger.io/v2/schema.json#", - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "required": [ - "swagger", - "info", - "paths" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "swagger": { - "type": "string", - "enum": [ - "2.0" - ], - "description": "The Swagger version of this document." - }, - "info": { - "$ref": "#/definitions/info" - }, - "host": { - "type": "string", - "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", - "description": "The host (name or ip) of the API. Example: 'swagger.io'" - }, - "basePath": { - "type": "string", - "pattern": "^/", - "description": "The base path to the API. Example: '/api'." - }, - "schemes": { - "$ref": "#/definitions/schemesList" - }, - "consumes": { - "description": "A list of MIME types accepted by the API.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "produces": { - "description": "A list of MIME types the API can produce.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "paths": { - "$ref": "#/definitions/paths" - }, - "definitions": { - "$ref": "#/definitions/definitions" - }, - "parameters": { - "$ref": "#/definitions/parameterDefinitions" - }, - "responses": { - "$ref": "#/definitions/responseDefinitions" - }, - "security": { - "$ref": "#/definitions/security" - }, - "securityDefinitions": { - "$ref": "#/definitions/securityDefinitions" - }, - "tags": { - "type": "array", - "items": { - "$ref": "#/definitions/tag" - }, - "uniqueItems": true - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "definitions": { - "info": { - "type": "object", - "description": "General information about the API.", - "required": [ - "version", - "title" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "title": { - "type": "string", - "description": "A unique and precise title of the API." - }, - "version": { - "type": "string", - "description": "A semantic version number of the API." - }, - "description": { - "type": "string", - "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." - }, - "termsOfService": { - "type": "string", - "description": "The terms of service for the API." - }, - "contact": { - "$ref": "#/definitions/contact" - }, - "license": { - "$ref": "#/definitions/license" - } - } - }, - "contact": { - "type": "object", - "description": "Contact information for the owners of the API.", - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "description": "The identifying name of the contact person/organization." 
- }, - "url": { - "type": "string", - "description": "The URL pointing to the contact information.", - "format": "uri" - }, - "email": { - "type": "string", - "description": "The email address of the contact person/organization.", - "format": "email" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "license": { - "type": "object", - "required": [ - "name" - ], - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "description": "The name of the license type. It's encouraged to use an OSI compatible license." - }, - "url": { - "type": "string", - "description": "The URL pointing to the license.", - "format": "uri" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "paths": { - "type": "object", - "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - }, - "^/": { - "$ref": "#/definitions/pathItem" - } - }, - "additionalProperties": false - }, - "definitions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schema" - }, - "description": "One or more JSON objects describing the schemas being consumed and produced by the API." - }, - "parameterDefinitions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/parameter" - }, - "description": "One or more JSON representations for parameters" - }, - "responseDefinitions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/response" - }, - "description": "One or more JSON representations for responses" - }, - "externalDocs": { - "type": "object", - "additionalProperties": false, - "description": "information about external documentation", - "required": [ - "url" - ], - "properties": { - "description": { - "type": "string" - }, - "url": { - "type": "string", - "format": "uri" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "examples": { - "type": "object", - "additionalProperties": true - }, - "mimeType": { - "type": "string", - "description": "The MIME type of the HTTP message." - }, - "operation": { - "type": "object", - "required": [ - "responses" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "summary": { - "type": "string", - "description": "A brief summary of the operation." - }, - "description": { - "type": "string", - "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "operationId": { - "type": "string", - "description": "A unique identifier of the operation." 
- }, - "produces": { - "description": "A list of MIME types the API can produce.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "consumes": { - "description": "A list of MIME types the API can consume.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "parameters": { - "$ref": "#/definitions/parametersList" - }, - "responses": { - "$ref": "#/definitions/responses" - }, - "schemes": { - "$ref": "#/definitions/schemesList" - }, - "deprecated": { - "type": "boolean", - "default": false - }, - "security": { - "$ref": "#/definitions/security" - } - } - }, - "pathItem": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "get": { - "$ref": "#/definitions/operation" - }, - "put": { - "$ref": "#/definitions/operation" - }, - "post": { - "$ref": "#/definitions/operation" - }, - "delete": { - "$ref": "#/definitions/operation" - }, - "options": { - "$ref": "#/definitions/operation" - }, - "head": { - "$ref": "#/definitions/operation" - }, - "patch": { - "$ref": "#/definitions/operation" - }, - "parameters": { - "$ref": "#/definitions/parametersList" - } - } - }, - "responses": { - "type": "object", - "description": "Response objects names can either be any valid HTTP status code or 'default'.", - "minProperties": 1, - "additionalProperties": false, - "patternProperties": { - "^([0-9]{3})$|^(default)$": { - "$ref": "#/definitions/responseValue" - }, - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "not": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - } - }, - "responseValue": { - "oneOf": [ - { - "$ref": "#/definitions/response" - }, - { - "$ref": "#/definitions/jsonReference" - } - ] - }, - "response": { - "type": "object", - "required": [ - "description" - ], - "properties": { - "description": { - "type": "string" - }, - "schema": { - "oneOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "$ref": "#/definitions/fileSchema" - } - ] - }, - "headers": { - "$ref": "#/definitions/headers" - }, - "examples": { - "$ref": "#/definitions/examples" - } - }, - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "headers": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/header" - } - }, - "header": { - "type": "object", - "additionalProperties": false, - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "string", - "number", - "integer", - "boolean", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": 
"#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "vendorExtension": { - "description": "Any property starting with x- is valid.", - "additionalProperties": true, - "additionalItems": true - }, - "bodyParameter": { - "type": "object", - "required": [ - "name", - "in", - "schema" - ], - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "body" - ] - }, - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "schema": { - "$ref": "#/definitions/schema" - } - }, - "additionalProperties": false - }, - "headerParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "header" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "queryParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "query" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. 
This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "allowEmptyValue": { - "type": "boolean", - "default": false, - "description": "allows sending a parameter by name only or with an empty value." - }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormatWithMulti" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "formDataParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "formData" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "allowEmptyValue": { - "type": "boolean", - "default": false, - "description": "allows sending a parameter by name only or with an empty value." 
- }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array", - "file" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormatWithMulti" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "pathParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "required": [ - "required" - ], - "properties": { - "required": { - "type": "boolean", - "enum": [ - true - ], - "description": "Determines whether or not this parameter is required or optional." - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "path" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." 
- }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "nonBodyParameter": { - "type": "object", - "required": [ - "name", - "in", - "type" - ], - "oneOf": [ - { - "$ref": "#/definitions/headerParameterSubSchema" - }, - { - "$ref": "#/definitions/formDataParameterSubSchema" - }, - { - "$ref": "#/definitions/queryParameterSubSchema" - }, - { - "$ref": "#/definitions/pathParameterSubSchema" - } - ] - }, - "parameter": { - "oneOf": [ - { - "$ref": "#/definitions/bodyParameter" - }, - { - "$ref": "#/definitions/nonBodyParameter" - } - ] - }, - "schema": { - "type": "object", - "description": "A deterministic version of a JSON Schema object.", - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "format": { - "type": "string" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "description": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/description" - }, - "default": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/default" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "maxProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" - }, - "enum": { - "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" - }, - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "type": "boolean" - } - ], - "default": {} - }, - "type": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/type" - }, - "items": { - "anyOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "type": "array", - "minItems": 1, - "items": { - "$ref": "#/definitions/schema" - } - } - ], - "default": {} - }, - "allOf": { - "type": "array", - "minItems": 1, - "items": { - "$ref": "#/definitions/schema" - } - }, - "properties": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schema" - }, - "default": {} - }, - "discriminator": { - "type": "string" - }, - "readOnly": { - "type": "boolean", - "default": false - }, - "xml": { - "$ref": "#/definitions/xml" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": {} - }, - "additionalProperties": false - }, - "fileSchema": { - "type": "object", - "description": "A deterministic version of a JSON Schema object.", - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "required": [ - "type" - ], - "properties": { - "format": { - "type": "string" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "description": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/description" - }, - "default": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/default" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" - }, - "type": { - "type": "string", - "enum": [ - "file" - ] - }, - "readOnly": { - "type": "boolean", - "default": false - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": {} - }, - "additionalProperties": false - }, - "primitivesItems": { - "type": "object", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "enum": [ - "string", - "number", - "integer", - "boolean", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "securityRequirement": { - "type": "object", - "additionalProperties": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - } - }, - "xml": { - "type": "object", - "additionalProperties": false, - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - 
"prefix": { - "type": "string" - }, - "attribute": { - "type": "boolean", - "default": false - }, - "wrapped": { - "type": "boolean", - "default": false - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "tag": { - "type": "object", - "additionalProperties": false, - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "securityDefinitions": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/definitions/basicAuthenticationSecurity" - }, - { - "$ref": "#/definitions/apiKeySecurity" - }, - { - "$ref": "#/definitions/oauth2ImplicitSecurity" - }, - { - "$ref": "#/definitions/oauth2PasswordSecurity" - }, - { - "$ref": "#/definitions/oauth2ApplicationSecurity" - }, - { - "$ref": "#/definitions/oauth2AccessCodeSecurity" - } - ] - } - }, - "basicAuthenticationSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "basic" - ] - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "apiKeySecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "name", - "in" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "apiKey" - ] - }, - "name": { - "type": "string" - }, - "in": { - "type": "string", - "enum": [ - "header", - "query" - ] - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2ImplicitSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "authorizationUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "implicit" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "authorizationUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2PasswordSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "tokenUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "password" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "tokenUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2ApplicationSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "tokenUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "application" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "tokenUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2AccessCodeSecurity": { - "type": "object", - 
"additionalProperties": false, - "required": [ - "type", - "flow", - "authorizationUrl", - "tokenUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "accessCode" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "authorizationUrl": { - "type": "string", - "format": "uri" - }, - "tokenUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2Scopes": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "mediaTypeList": { - "type": "array", - "items": { - "$ref": "#/definitions/mimeType" - }, - "uniqueItems": true - }, - "parametersList": { - "type": "array", - "description": "The parameters needed to send a valid API call.", - "additionalItems": false, - "items": { - "oneOf": [ - { - "$ref": "#/definitions/parameter" - }, - { - "$ref": "#/definitions/jsonReference" - } - ] - }, - "uniqueItems": true - }, - "schemesList": { - "type": "array", - "description": "The transfer protocol of the API.", - "items": { - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss" - ] - }, - "uniqueItems": true - }, - "collectionFormat": { - "type": "string", - "enum": [ - "csv", - "ssv", - "tsv", - "pipes" - ], - "default": "csv" - }, - "collectionFormatWithMulti": { - "type": "string", - "enum": [ - "csv", - "ssv", - "tsv", - "pipes", - "multi" - ], - "default": "csv" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "description": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/description" - }, - "default": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/default" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "enum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" - }, - "jsonReference": { - "type": "object", - "required": [ - "$ref" - ], - "additionalProperties": false, - "properties": { - "$ref": { - "type": "string" - }, - "description": { - "type": "string" - } - } - } - } -} \ No newline at end of file diff --git a/src/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go b/src/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go deleted file mode 100644 index 5f4a7025e..000000000 --- a/src/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go +++ /dev/null @@ -1,8633 
+0,0 @@
-// Copyright 2020 Google LLC. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// THIS FILE IS AUTOMATICALLY GENERATED.
-
-package openapi_v3
-
-import (
-	"fmt"
-	"regexp"
-	"strings"
-
-	"gopkg.in/yaml.v3"
-
-	"github.com/google/gnostic/compiler"
-)
-
-// Version returns the package name (and OpenAPI version).
-func Version() string {
-	return "openapi_v3"
-}
-
-// NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not.
-func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*AdditionalPropertiesItem, error) {
-	errors := make([]error, 0)
-	x := &AdditionalPropertiesItem{}
-	matched := false
-	// SchemaOrReference schema_or_reference = 1;
-	{
-		m, ok := compiler.UnpackMap(in)
-		if ok {
-			// errors might be ok here, they mean we just don't have the right subtype
-			t, matchingError := NewSchemaOrReference(m, compiler.NewContext("schemaOrReference", m, context))
-			if matchingError == nil {
-				x.Oneof = &AdditionalPropertiesItem_SchemaOrReference{SchemaOrReference: t}
-				matched = true
-			} else {
-				errors = append(errors, matchingError)
-			}
-		}
-	}
-	// bool boolean = 2;
-	boolValue, ok := compiler.BoolForScalarNode(in)
-	if ok {
-		x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue}
-		matched = true
-	}
-	if matched {
-		// since the oneof matched one of its possibilities, discard any matching errors
-		errors = make([]error, 0)
-	} else {
-		message := fmt.Sprintf("contains an invalid AdditionalPropertiesItem")
-		err := compiler.NewError(context, message)
-		errors = []error{err}
-	}
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewAny creates an object of type Any if possible, returning an error if not.
-func NewAny(in *yaml.Node, context *compiler.Context) (*Any, error) {
-	errors := make([]error, 0)
-	x := &Any{}
-	bytes := compiler.Marshal(in)
-	x.Yaml = string(bytes)
-	return x, compiler.NewErrorGroupOrNil(errors)
-}
-
-// NewAnyOrExpression creates an object of type AnyOrExpression if possible, returning an error if not.
-func NewAnyOrExpression(in *yaml.Node, context *compiler.Context) (*AnyOrExpression, error) { - errors := make([]error, 0) - x := &AnyOrExpression{} - matched := false - // Any any = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewAny(m, compiler.NewContext("any", m, context)) - if matchingError == nil { - x.Oneof = &AnyOrExpression_Any{Any: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Expression expression = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewExpression(m, compiler.NewContext("expression", m, context)) - if matchingError == nil { - x.Oneof = &AnyOrExpression_Expression{Expression: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } else { - message := fmt.Sprintf("contains an invalid AnyOrExpression") - err := compiler.NewError(context, message) - errors = []error{err} - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewCallback creates an object of type Callback if possible, returning an error if not. -func NewCallback(in *yaml.Node, context *compiler.Context) (*Callback, error) { - errors := make([]error, 0) - x := &Callback{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{} - allowedPatterns := []*regexp.Regexp{pattern0, pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // repeated NamedPathItem path = 1; - // MAP: PathItem ^ - x.Path = make([]*NamedPathItem, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if true { - pair := &NamedPathItem{} - pair.Name = k - var err error - pair.Value, err = NewPathItem(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.Path = append(x.Path, pair) - } - } - } - // repeated NamedAny specification_extension = 2; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewCallbackOrReference creates an object of type CallbackOrReference if possible, returning an error if not. 
-func NewCallbackOrReference(in *yaml.Node, context *compiler.Context) (*CallbackOrReference, error) { - errors := make([]error, 0) - x := &CallbackOrReference{} - matched := false - // Callback callback = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewCallback(m, compiler.NewContext("callback", m, context)) - if matchingError == nil { - x.Oneof = &CallbackOrReference_Callback{Callback: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Reference reference = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) - if matchingError == nil { - x.Oneof = &CallbackOrReference_Reference{Reference: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } else { - message := fmt.Sprintf("contains an invalid CallbackOrReference") - err := compiler.NewError(context, message) - errors = []error{err} - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewCallbacksOrReferences creates an object of type CallbacksOrReferences if possible, returning an error if not. -func NewCallbacksOrReferences(in *yaml.Node, context *compiler.Context) (*CallbacksOrReferences, error) { - errors := make([]error, 0) - x := &CallbacksOrReferences{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedCallbackOrReference additional_properties = 1; - // MAP: CallbackOrReference - x.AdditionalProperties = make([]*NamedCallbackOrReference, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedCallbackOrReference{} - pair.Name = k - var err error - pair.Value, err = NewCallbackOrReference(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewComponents creates an object of type Components if possible, returning an error if not. 
-func NewComponents(in *yaml.Node, context *compiler.Context) (*Components, error) { - errors := make([]error, 0) - x := &Components{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"callbacks", "examples", "headers", "links", "parameters", "requestBodies", "responses", "schemas", "securitySchemes"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // SchemasOrReferences schemas = 1; - v1 := compiler.MapValueForKey(m, "schemas") - if v1 != nil { - var err error - x.Schemas, err = NewSchemasOrReferences(v1, compiler.NewContext("schemas", v1, context)) - if err != nil { - errors = append(errors, err) - } - } - // ResponsesOrReferences responses = 2; - v2 := compiler.MapValueForKey(m, "responses") - if v2 != nil { - var err error - x.Responses, err = NewResponsesOrReferences(v2, compiler.NewContext("responses", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - // ParametersOrReferences parameters = 3; - v3 := compiler.MapValueForKey(m, "parameters") - if v3 != nil { - var err error - x.Parameters, err = NewParametersOrReferences(v3, compiler.NewContext("parameters", v3, context)) - if err != nil { - errors = append(errors, err) - } - } - // ExamplesOrReferences examples = 4; - v4 := compiler.MapValueForKey(m, "examples") - if v4 != nil { - var err error - x.Examples, err = NewExamplesOrReferences(v4, compiler.NewContext("examples", v4, context)) - if err != nil { - errors = append(errors, err) - } - } - // RequestBodiesOrReferences request_bodies = 5; - v5 := compiler.MapValueForKey(m, "requestBodies") - if v5 != nil { - var err error - x.RequestBodies, err = NewRequestBodiesOrReferences(v5, compiler.NewContext("requestBodies", v5, context)) - if err != nil { - errors = append(errors, err) - } - } - // HeadersOrReferences headers = 6; - v6 := compiler.MapValueForKey(m, "headers") - if v6 != nil { - var err error - x.Headers, err = NewHeadersOrReferences(v6, compiler.NewContext("headers", v6, context)) - if err != nil { - errors = append(errors, err) - } - } - // SecuritySchemesOrReferences security_schemes = 7; - v7 := compiler.MapValueForKey(m, "securitySchemes") - if v7 != nil { - var err error - x.SecuritySchemes, err = NewSecuritySchemesOrReferences(v7, compiler.NewContext("securitySchemes", v7, context)) - if err != nil { - errors = append(errors, err) - } - } - // LinksOrReferences links = 8; - v8 := compiler.MapValueForKey(m, "links") - if v8 != nil { - var err error - x.Links, err = NewLinksOrReferences(v8, compiler.NewContext("links", v8, context)) - if err != nil { - errors = append(errors, err) - } - } - // CallbacksOrReferences callbacks = 9; - v9 := compiler.MapValueForKey(m, "callbacks") - if v9 != nil { - var err error - x.Callbacks, err = NewCallbacksOrReferences(v9, compiler.NewContext("callbacks", v9, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny specification_extension = 10; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := 
m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewContact creates an object of type Contact if possible, returning an error if not. -func NewContact(in *yaml.Node, context *compiler.Context) (*Contact, error) { - errors := make([]error, 0) - x := &Contact{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"email", "name", "url"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string url = 2; - v2 := compiler.MapValueForKey(m, "url") - if v2 != nil { - x.Url, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string email = 3; - v3 := compiler.MapValueForKey(m, "email") - if v3 != nil { - x.Email, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for email: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny specification_extension = 4; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewDefaultType creates an object of type DefaultType if possible, returning an error if not. 
-func NewDefaultType(in *yaml.Node, context *compiler.Context) (*DefaultType, error) { - errors := make([]error, 0) - x := &DefaultType{} - matched := false - switch in.Tag { - case "!!bool": - var v bool - v, matched = compiler.BoolForScalarNode(in) - x.Oneof = &DefaultType_Boolean{Boolean: v} - case "!!str": - var v string - v, matched = compiler.StringForScalarNode(in) - x.Oneof = &DefaultType_String_{String_: v} - case "!!float": - var v float64 - v, matched = compiler.FloatForScalarNode(in) - x.Oneof = &DefaultType_Number{Number: v} - case "!!int": - var v int64 - v, matched = compiler.IntForScalarNode(in) - x.Oneof = &DefaultType_Number{Number: float64(v)} - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewDiscriminator creates an object of type Discriminator if possible, returning an error if not. -func NewDiscriminator(in *yaml.Node, context *compiler.Context) (*Discriminator, error) { - errors := make([]error, 0) - x := &Discriminator{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"propertyName"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"mapping", "propertyName"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string property_name = 1; - v1 := compiler.MapValueForKey(m, "propertyName") - if v1 != nil { - x.PropertyName, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for propertyName: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Strings mapping = 2; - v2 := compiler.MapValueForKey(m, "mapping") - if v2 != nil { - var err error - x.Mapping, err = NewStrings(v2, compiler.NewContext("mapping", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny specification_extension = 3; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewDocument creates an object of type Document if possible, returning an error if not. 
-func NewDocument(in *yaml.Node, context *compiler.Context) (*Document, error) { - errors := make([]error, 0) - x := &Document{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"info", "openapi", "paths"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"components", "externalDocs", "info", "openapi", "paths", "security", "servers", "tags"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string openapi = 1; - v1 := compiler.MapValueForKey(m, "openapi") - if v1 != nil { - x.Openapi, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for openapi: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Info info = 2; - v2 := compiler.MapValueForKey(m, "info") - if v2 != nil { - var err error - x.Info, err = NewInfo(v2, compiler.NewContext("info", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated Server servers = 3; - v3 := compiler.MapValueForKey(m, "servers") - if v3 != nil { - // repeated Server - x.Servers = make([]*Server, 0) - a, ok := compiler.SequenceNodeForNode(v3) - if ok { - for _, item := range a.Content { - y, err := NewServer(item, compiler.NewContext("servers", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Servers = append(x.Servers, y) - } - } - } - // Paths paths = 4; - v4 := compiler.MapValueForKey(m, "paths") - if v4 != nil { - var err error - x.Paths, err = NewPaths(v4, compiler.NewContext("paths", v4, context)) - if err != nil { - errors = append(errors, err) - } - } - // Components components = 5; - v5 := compiler.MapValueForKey(m, "components") - if v5 != nil { - var err error - x.Components, err = NewComponents(v5, compiler.NewContext("components", v5, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated SecurityRequirement security = 6; - v6 := compiler.MapValueForKey(m, "security") - if v6 != nil { - // repeated SecurityRequirement - x.Security = make([]*SecurityRequirement, 0) - a, ok := compiler.SequenceNodeForNode(v6) - if ok { - for _, item := range a.Content { - y, err := NewSecurityRequirement(item, compiler.NewContext("security", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Security = append(x.Security, y) - } - } - } - // repeated Tag tags = 7; - v7 := compiler.MapValueForKey(m, "tags") - if v7 != nil { - // repeated Tag - x.Tags = make([]*Tag, 0) - a, ok := compiler.SequenceNodeForNode(v7) - if ok { - for _, item := range a.Content { - y, err := NewTag(item, compiler.NewContext("tags", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Tags = append(x.Tags, y) - } - } - } - // ExternalDocs external_docs = 8; - v8 := compiler.MapValueForKey(m, "externalDocs") - if v8 != nil { - var err error - x.ExternalDocs, err 
= NewExternalDocs(v8, compiler.NewContext("externalDocs", v8, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny specification_extension = 9; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewEncoding creates an object of type Encoding if possible, returning an error if not. -func NewEncoding(in *yaml.Node, context *compiler.Context) (*Encoding, error) { - errors := make([]error, 0) - x := &Encoding{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"allowReserved", "contentType", "explode", "headers", "style"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string content_type = 1; - v1 := compiler.MapValueForKey(m, "contentType") - if v1 != nil { - x.ContentType, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for contentType: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // HeadersOrReferences headers = 2; - v2 := compiler.MapValueForKey(m, "headers") - if v2 != nil { - var err error - x.Headers, err = NewHeadersOrReferences(v2, compiler.NewContext("headers", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - // string style = 3; - v3 := compiler.MapValueForKey(m, "style") - if v3 != nil { - x.Style, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for style: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool explode = 4; - v4 := compiler.MapValueForKey(m, "explode") - if v4 != nil { - x.Explode, ok = compiler.BoolForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for explode: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool allow_reserved = 5; - v5 := compiler.MapValueForKey(m, "allowReserved") - if v5 != nil { - x.AllowReserved, ok = compiler.BoolForScalarNode(v5) - if !ok { - message := fmt.Sprintf("has unexpected value for allowReserved: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny specification_extension = 6; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := 
compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewEncodings creates an object of type Encodings if possible, returning an error if not. -func NewEncodings(in *yaml.Node, context *compiler.Context) (*Encodings, error) { - errors := make([]error, 0) - x := &Encodings{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedEncoding additional_properties = 1; - // MAP: Encoding - x.AdditionalProperties = make([]*NamedEncoding, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedEncoding{} - pair.Name = k - var err error - pair.Value, err = NewEncoding(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewExample creates an object of type Example if possible, returning an error if not. -func NewExample(in *yaml.Node, context *compiler.Context) (*Example, error) { - errors := make([]error, 0) - x := &Example{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"description", "externalValue", "summary", "value"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string summary = 1; - v1 := compiler.MapValueForKey(m, "summary") - if v1 != nil { - x.Summary, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for summary: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 2; - v2 := compiler.MapValueForKey(m, "description") - if v2 != nil { - x.Description, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any value = 3; - v3 := compiler.MapValueForKey(m, "value") - if v3 != nil { - var err error - x.Value, err = NewAny(v3, compiler.NewContext("value", v3, context)) - if err != nil { - errors = append(errors, err) - } - } - // string external_value = 4; - v4 := compiler.MapValueForKey(m, "externalValue") - if v4 != nil { - x.ExternalValue, ok = compiler.StringForScalarNode(v4) - if !ok { - message := 
fmt.Sprintf("has unexpected value for externalValue: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny specification_extension = 5; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewExampleOrReference creates an object of type ExampleOrReference if possible, returning an error if not. -func NewExampleOrReference(in *yaml.Node, context *compiler.Context) (*ExampleOrReference, error) { - errors := make([]error, 0) - x := &ExampleOrReference{} - matched := false - // Example example = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewExample(m, compiler.NewContext("example", m, context)) - if matchingError == nil { - x.Oneof = &ExampleOrReference_Example{Example: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Reference reference = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) - if matchingError == nil { - x.Oneof = &ExampleOrReference_Reference{Reference: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } else { - message := fmt.Sprintf("contains an invalid ExampleOrReference") - err := compiler.NewError(context, message) - errors = []error{err} - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewExamplesOrReferences creates an object of type ExamplesOrReferences if possible, returning an error if not. 
-func NewExamplesOrReferences(in *yaml.Node, context *compiler.Context) (*ExamplesOrReferences, error) { - errors := make([]error, 0) - x := &ExamplesOrReferences{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedExampleOrReference additional_properties = 1; - // MAP: ExampleOrReference - x.AdditionalProperties = make([]*NamedExampleOrReference, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedExampleOrReference{} - pair.Name = k - var err error - pair.Value, err = NewExampleOrReference(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewExpression creates an object of type Expression if possible, returning an error if not. -func NewExpression(in *yaml.Node, context *compiler.Context) (*Expression, error) { - errors := make([]error, 0) - x := &Expression{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedAny additional_properties = 1; - // MAP: Any - x.AdditionalProperties = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not. 
-func NewExternalDocs(in *yaml.Node, context *compiler.Context) (*ExternalDocs, error) { - errors := make([]error, 0) - x := &ExternalDocs{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"url"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "url"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string url = 2; - v2 := compiler.MapValueForKey(m, "url") - if v2 != nil { - x.Url, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny specification_extension = 3; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewHeader creates an object of type Header if possible, returning an error if not. 
-func NewHeader(in *yaml.Node, context *compiler.Context) (*Header, error) { - errors := make([]error, 0) - x := &Header{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"allowEmptyValue", "allowReserved", "content", "deprecated", "description", "example", "examples", "explode", "required", "schema", "style"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool required = 2; - v2 := compiler.MapValueForKey(m, "required") - if v2 != nil { - x.Required, ok = compiler.BoolForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool deprecated = 3; - v3 := compiler.MapValueForKey(m, "deprecated") - if v3 != nil { - x.Deprecated, ok = compiler.BoolForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for deprecated: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool allow_empty_value = 4; - v4 := compiler.MapValueForKey(m, "allowEmptyValue") - if v4 != nil { - x.AllowEmptyValue, ok = compiler.BoolForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for allowEmptyValue: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string style = 5; - v5 := compiler.MapValueForKey(m, "style") - if v5 != nil { - x.Style, ok = compiler.StringForScalarNode(v5) - if !ok { - message := fmt.Sprintf("has unexpected value for style: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool explode = 6; - v6 := compiler.MapValueForKey(m, "explode") - if v6 != nil { - x.Explode, ok = compiler.BoolForScalarNode(v6) - if !ok { - message := fmt.Sprintf("has unexpected value for explode: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool allow_reserved = 7; - v7 := compiler.MapValueForKey(m, "allowReserved") - if v7 != nil { - x.AllowReserved, ok = compiler.BoolForScalarNode(v7) - if !ok { - message := fmt.Sprintf("has unexpected value for allowReserved: %s", compiler.Display(v7)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // SchemaOrReference schema = 8; - v8 := compiler.MapValueForKey(m, "schema") - if v8 != nil { - var err error - x.Schema, err = NewSchemaOrReference(v8, compiler.NewContext("schema", v8, context)) - if err != nil { - errors = append(errors, err) - } - } - // Any example = 9; - v9 := compiler.MapValueForKey(m, "example") - if v9 != nil { - var err error - x.Example, err = NewAny(v9, compiler.NewContext("example", v9, context)) - if err != nil { - errors = append(errors, err) - } - } - // 
ExamplesOrReferences examples = 10; - v10 := compiler.MapValueForKey(m, "examples") - if v10 != nil { - var err error - x.Examples, err = NewExamplesOrReferences(v10, compiler.NewContext("examples", v10, context)) - if err != nil { - errors = append(errors, err) - } - } - // MediaTypes content = 11; - v11 := compiler.MapValueForKey(m, "content") - if v11 != nil { - var err error - x.Content, err = NewMediaTypes(v11, compiler.NewContext("content", v11, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny specification_extension = 12; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewHeaderOrReference creates an object of type HeaderOrReference if possible, returning an error if not. -func NewHeaderOrReference(in *yaml.Node, context *compiler.Context) (*HeaderOrReference, error) { - errors := make([]error, 0) - x := &HeaderOrReference{} - matched := false - // Header header = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewHeader(m, compiler.NewContext("header", m, context)) - if matchingError == nil { - x.Oneof = &HeaderOrReference_Header{Header: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Reference reference = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) - if matchingError == nil { - x.Oneof = &HeaderOrReference_Reference{Reference: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } else { - message := fmt.Sprintf("contains an invalid HeaderOrReference") - err := compiler.NewError(context, message) - errors = []error{err} - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewHeadersOrReferences creates an object of type HeadersOrReferences if possible, returning an error if not. 
-func NewHeadersOrReferences(in *yaml.Node, context *compiler.Context) (*HeadersOrReferences, error) { - errors := make([]error, 0) - x := &HeadersOrReferences{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedHeaderOrReference additional_properties = 1; - // MAP: HeaderOrReference - x.AdditionalProperties = make([]*NamedHeaderOrReference, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedHeaderOrReference{} - pair.Name = k - var err error - pair.Value, err = NewHeaderOrReference(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewInfo creates an object of type Info if possible, returning an error if not. -func NewInfo(in *yaml.Node, context *compiler.Context) (*Info, error) { - errors := make([]error, 0) - x := &Info{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"title", "version"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"contact", "description", "license", "summary", "termsOfService", "title", "version"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string title = 1; - v1 := compiler.MapValueForKey(m, "title") - if v1 != nil { - x.Title, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 2; - v2 := compiler.MapValueForKey(m, "description") - if v2 != nil { - x.Description, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string terms_of_service = 3; - v3 := compiler.MapValueForKey(m, "termsOfService") - if v3 != nil { - x.TermsOfService, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for termsOfService: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Contact contact = 4; - v4 := compiler.MapValueForKey(m, "contact") - if v4 != nil { - var err error - x.Contact, err = NewContact(v4, compiler.NewContext("contact", v4, context)) - if err != nil { - errors = append(errors, err) - } - } - // License license = 5; - v5 := compiler.MapValueForKey(m, "license") - if v5 != nil { - var err error - x.License, err = NewLicense(v5, compiler.NewContext("license", v5, context)) - if err != nil { - errors 
= append(errors, err) - } - } - // string version = 6; - v6 := compiler.MapValueForKey(m, "version") - if v6 != nil { - x.Version, ok = compiler.StringForScalarNode(v6) - if !ok { - message := fmt.Sprintf("has unexpected value for version: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string summary = 7; - v7 := compiler.MapValueForKey(m, "summary") - if v7 != nil { - x.Summary, ok = compiler.StringForScalarNode(v7) - if !ok { - message := fmt.Sprintf("has unexpected value for summary: %s", compiler.Display(v7)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny specification_extension = 8; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewItemsItem creates an object of type ItemsItem if possible, returning an error if not. -func NewItemsItem(in *yaml.Node, context *compiler.Context) (*ItemsItem, error) { - errors := make([]error, 0) - x := &ItemsItem{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value for item array: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - x.SchemaOrReference = make([]*SchemaOrReference, 0) - y, err := NewSchemaOrReference(m, compiler.NewContext("", m, context)) - if err != nil { - return nil, err - } - x.SchemaOrReference = append(x.SchemaOrReference, y) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewLicense creates an object of type License if possible, returning an error if not. 
-func NewLicense(in *yaml.Node, context *compiler.Context) (*License, error) { - errors := make([]error, 0) - x := &License{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"name"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"name", "url"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string url = 2; - v2 := compiler.MapValueForKey(m, "url") - if v2 != nil { - x.Url, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny specification_extension = 3; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewLink creates an object of type Link if possible, returning an error if not. 
-func NewLink(in *yaml.Node, context *compiler.Context) (*Link, error) { - errors := make([]error, 0) - x := &Link{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"description", "operationId", "operationRef", "parameters", "requestBody", "server"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string operation_ref = 1; - v1 := compiler.MapValueForKey(m, "operationRef") - if v1 != nil { - x.OperationRef, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for operationRef: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string operation_id = 2; - v2 := compiler.MapValueForKey(m, "operationId") - if v2 != nil { - x.OperationId, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for operationId: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // AnyOrExpression parameters = 3; - v3 := compiler.MapValueForKey(m, "parameters") - if v3 != nil { - var err error - x.Parameters, err = NewAnyOrExpression(v3, compiler.NewContext("parameters", v3, context)) - if err != nil { - errors = append(errors, err) - } - } - // AnyOrExpression request_body = 4; - v4 := compiler.MapValueForKey(m, "requestBody") - if v4 != nil { - var err error - x.RequestBody, err = NewAnyOrExpression(v4, compiler.NewContext("requestBody", v4, context)) - if err != nil { - errors = append(errors, err) - } - } - // string description = 5; - v5 := compiler.MapValueForKey(m, "description") - if v5 != nil { - x.Description, ok = compiler.StringForScalarNode(v5) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Server server = 6; - v6 := compiler.MapValueForKey(m, "server") - if v6 != nil { - var err error - x.Server, err = NewServer(v6, compiler.NewContext("server", v6, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny specification_extension = 7; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewLinkOrReference creates an object of type LinkOrReference if possible, returning an error if not. 
-func NewLinkOrReference(in *yaml.Node, context *compiler.Context) (*LinkOrReference, error) { - errors := make([]error, 0) - x := &LinkOrReference{} - matched := false - // Link link = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewLink(m, compiler.NewContext("link", m, context)) - if matchingError == nil { - x.Oneof = &LinkOrReference_Link{Link: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Reference reference = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) - if matchingError == nil { - x.Oneof = &LinkOrReference_Reference{Reference: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } else { - message := fmt.Sprintf("contains an invalid LinkOrReference") - err := compiler.NewError(context, message) - errors = []error{err} - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewLinksOrReferences creates an object of type LinksOrReferences if possible, returning an error if not. -func NewLinksOrReferences(in *yaml.Node, context *compiler.Context) (*LinksOrReferences, error) { - errors := make([]error, 0) - x := &LinksOrReferences{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedLinkOrReference additional_properties = 1; - // MAP: LinkOrReference - x.AdditionalProperties = make([]*NamedLinkOrReference, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedLinkOrReference{} - pair.Name = k - var err error - pair.Value, err = NewLinkOrReference(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewMediaType creates an object of type MediaType if possible, returning an error if not. 
-func NewMediaType(in *yaml.Node, context *compiler.Context) (*MediaType, error) { - errors := make([]error, 0) - x := &MediaType{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"encoding", "example", "examples", "schema"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // SchemaOrReference schema = 1; - v1 := compiler.MapValueForKey(m, "schema") - if v1 != nil { - var err error - x.Schema, err = NewSchemaOrReference(v1, compiler.NewContext("schema", v1, context)) - if err != nil { - errors = append(errors, err) - } - } - // Any example = 2; - v2 := compiler.MapValueForKey(m, "example") - if v2 != nil { - var err error - x.Example, err = NewAny(v2, compiler.NewContext("example", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - // ExamplesOrReferences examples = 3; - v3 := compiler.MapValueForKey(m, "examples") - if v3 != nil { - var err error - x.Examples, err = NewExamplesOrReferences(v3, compiler.NewContext("examples", v3, context)) - if err != nil { - errors = append(errors, err) - } - } - // Encodings encoding = 4; - v4 := compiler.MapValueForKey(m, "encoding") - if v4 != nil { - var err error - x.Encoding, err = NewEncodings(v4, compiler.NewContext("encoding", v4, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny specification_extension = 5; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewMediaTypes creates an object of type MediaTypes if possible, returning an error if not. 
-func NewMediaTypes(in *yaml.Node, context *compiler.Context) (*MediaTypes, error) { - errors := make([]error, 0) - x := &MediaTypes{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedMediaType additional_properties = 1; - // MAP: MediaType - x.AdditionalProperties = make([]*NamedMediaType, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedMediaType{} - pair.Name = k - var err error - pair.Value, err = NewMediaType(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedAny creates an object of type NamedAny if possible, returning an error if not. -func NewNamedAny(in *yaml.Node, context *compiler.Context) (*NamedAny, error) { - errors := make([]error, 0) - x := &NamedAny{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewAny(v2, compiler.NewContext("value", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedCallbackOrReference creates an object of type NamedCallbackOrReference if possible, returning an error if not. 
-func NewNamedCallbackOrReference(in *yaml.Node, context *compiler.Context) (*NamedCallbackOrReference, error) { - errors := make([]error, 0) - x := &NamedCallbackOrReference{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // CallbackOrReference value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewCallbackOrReference(v2, compiler.NewContext("value", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedEncoding creates an object of type NamedEncoding if possible, returning an error if not. -func NewNamedEncoding(in *yaml.Node, context *compiler.Context) (*NamedEncoding, error) { - errors := make([]error, 0) - x := &NamedEncoding{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Encoding value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewEncoding(v2, compiler.NewContext("value", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedExampleOrReference creates an object of type NamedExampleOrReference if possible, returning an error if not. 
-func NewNamedExampleOrReference(in *yaml.Node, context *compiler.Context) (*NamedExampleOrReference, error) { - errors := make([]error, 0) - x := &NamedExampleOrReference{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ExampleOrReference value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewExampleOrReference(v2, compiler.NewContext("value", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedHeaderOrReference creates an object of type NamedHeaderOrReference if possible, returning an error if not. -func NewNamedHeaderOrReference(in *yaml.Node, context *compiler.Context) (*NamedHeaderOrReference, error) { - errors := make([]error, 0) - x := &NamedHeaderOrReference{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // HeaderOrReference value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewHeaderOrReference(v2, compiler.NewContext("value", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedLinkOrReference creates an object of type NamedLinkOrReference if possible, returning an error if not. 
-func NewNamedLinkOrReference(in *yaml.Node, context *compiler.Context) (*NamedLinkOrReference, error) { - errors := make([]error, 0) - x := &NamedLinkOrReference{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // LinkOrReference value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewLinkOrReference(v2, compiler.NewContext("value", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedMediaType creates an object of type NamedMediaType if possible, returning an error if not. -func NewNamedMediaType(in *yaml.Node, context *compiler.Context) (*NamedMediaType, error) { - errors := make([]error, 0) - x := &NamedMediaType{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // MediaType value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewMediaType(v2, compiler.NewContext("value", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedParameterOrReference creates an object of type NamedParameterOrReference if possible, returning an error if not. 
-func NewNamedParameterOrReference(in *yaml.Node, context *compiler.Context) (*NamedParameterOrReference, error) { - errors := make([]error, 0) - x := &NamedParameterOrReference{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ParameterOrReference value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewParameterOrReference(v2, compiler.NewContext("value", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedPathItem creates an object of type NamedPathItem if possible, returning an error if not. -func NewNamedPathItem(in *yaml.Node, context *compiler.Context) (*NamedPathItem, error) { - errors := make([]error, 0) - x := &NamedPathItem{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PathItem value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewPathItem(v2, compiler.NewContext("value", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedRequestBodyOrReference creates an object of type NamedRequestBodyOrReference if possible, returning an error if not. 
-func NewNamedRequestBodyOrReference(in *yaml.Node, context *compiler.Context) (*NamedRequestBodyOrReference, error) { - errors := make([]error, 0) - x := &NamedRequestBodyOrReference{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // RequestBodyOrReference value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewRequestBodyOrReference(v2, compiler.NewContext("value", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedResponseOrReference creates an object of type NamedResponseOrReference if possible, returning an error if not. -func NewNamedResponseOrReference(in *yaml.Node, context *compiler.Context) (*NamedResponseOrReference, error) { - errors := make([]error, 0) - x := &NamedResponseOrReference{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ResponseOrReference value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewResponseOrReference(v2, compiler.NewContext("value", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedSchemaOrReference creates an object of type NamedSchemaOrReference if possible, returning an error if not. 
-func NewNamedSchemaOrReference(in *yaml.Node, context *compiler.Context) (*NamedSchemaOrReference, error) { - errors := make([]error, 0) - x := &NamedSchemaOrReference{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // SchemaOrReference value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewSchemaOrReference(v2, compiler.NewContext("value", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedSecuritySchemeOrReference creates an object of type NamedSecuritySchemeOrReference if possible, returning an error if not. -func NewNamedSecuritySchemeOrReference(in *yaml.Node, context *compiler.Context) (*NamedSecuritySchemeOrReference, error) { - errors := make([]error, 0) - x := &NamedSecuritySchemeOrReference{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // SecuritySchemeOrReference value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewSecuritySchemeOrReference(v2, compiler.NewContext("value", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedServerVariable creates an object of type NamedServerVariable if possible, returning an error if not. 
-func NewNamedServerVariable(in *yaml.Node, context *compiler.Context) (*NamedServerVariable, error) { - errors := make([]error, 0) - x := &NamedServerVariable{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ServerVariable value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewServerVariable(v2, compiler.NewContext("value", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedString creates an object of type NamedString if possible, returning an error if not. -func NewNamedString(in *yaml.Node, context *compiler.Context) (*NamedString, error) { - errors := make([]error, 0) - x := &NamedString{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - x.Value, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for value: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedStringArray creates an object of type NamedStringArray if possible, returning an error if not. 
-func NewNamedStringArray(in *yaml.Node, context *compiler.Context) (*NamedStringArray, error) { - errors := make([]error, 0) - x := &NamedStringArray{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // StringArray value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewStringArray(v2, compiler.NewContext("value", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOauthFlow creates an object of type OauthFlow if possible, returning an error if not. -func NewOauthFlow(in *yaml.Node, context *compiler.Context) (*OauthFlow, error) { - errors := make([]error, 0) - x := &OauthFlow{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"authorizationUrl", "refreshUrl", "scopes", "tokenUrl"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string authorization_url = 1; - v1 := compiler.MapValueForKey(m, "authorizationUrl") - if v1 != nil { - x.AuthorizationUrl, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for authorizationUrl: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string token_url = 2; - v2 := compiler.MapValueForKey(m, "tokenUrl") - if v2 != nil { - x.TokenUrl, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for tokenUrl: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string refresh_url = 3; - v3 := compiler.MapValueForKey(m, "refreshUrl") - if v3 != nil { - x.RefreshUrl, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for refreshUrl: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Strings scopes = 4; - v4 := compiler.MapValueForKey(m, "scopes") - if v4 != nil { - var err error - x.Scopes, err = NewStrings(v4, compiler.NewContext("scopes", v4, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny specification_extension = 5; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := 
compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOauthFlows creates an object of type OauthFlows if possible, returning an error if not. -func NewOauthFlows(in *yaml.Node, context *compiler.Context) (*OauthFlows, error) { - errors := make([]error, 0) - x := &OauthFlows{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"authorizationCode", "clientCredentials", "implicit", "password"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // OauthFlow implicit = 1; - v1 := compiler.MapValueForKey(m, "implicit") - if v1 != nil { - var err error - x.Implicit, err = NewOauthFlow(v1, compiler.NewContext("implicit", v1, context)) - if err != nil { - errors = append(errors, err) - } - } - // OauthFlow password = 2; - v2 := compiler.MapValueForKey(m, "password") - if v2 != nil { - var err error - x.Password, err = NewOauthFlow(v2, compiler.NewContext("password", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - // OauthFlow client_credentials = 3; - v3 := compiler.MapValueForKey(m, "clientCredentials") - if v3 != nil { - var err error - x.ClientCredentials, err = NewOauthFlow(v3, compiler.NewContext("clientCredentials", v3, context)) - if err != nil { - errors = append(errors, err) - } - } - // OauthFlow authorization_code = 4; - v4 := compiler.MapValueForKey(m, "authorizationCode") - if v4 != nil { - var err error - x.AuthorizationCode, err = NewOauthFlow(v4, compiler.NewContext("authorizationCode", v4, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny specification_extension = 5; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewObject creates an object of type Object if 
possible, returning an error if not. -func NewObject(in *yaml.Node, context *compiler.Context) (*Object, error) { - errors := make([]error, 0) - x := &Object{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedAny additional_properties = 1; - // MAP: Any - x.AdditionalProperties = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOperation creates an object of type Operation if possible, returning an error if not. -func NewOperation(in *yaml.Node, context *compiler.Context) (*Operation, error) { - errors := make([]error, 0) - x := &Operation{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"responses"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"callbacks", "deprecated", "description", "externalDocs", "operationId", "parameters", "requestBody", "responses", "security", "servers", "summary", "tags"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // repeated string tags = 1; - v1 := compiler.MapValueForKey(m, "tags") - if v1 != nil { - v, ok := compiler.SequenceNodeForNode(v1) - if ok { - x.Tags = compiler.StringArrayForSequenceNode(v) - } else { - message := fmt.Sprintf("has unexpected value for tags: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string summary = 2; - v2 := compiler.MapValueForKey(m, "summary") - if v2 != nil { - x.Summary, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for summary: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ExternalDocs external_docs = 4; - v4 := compiler.MapValueForKey(m, "externalDocs") - if v4 != nil { - var err error - 
x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", v4, context)) - if err != nil { - errors = append(errors, err) - } - } - // string operation_id = 5; - v5 := compiler.MapValueForKey(m, "operationId") - if v5 != nil { - x.OperationId, ok = compiler.StringForScalarNode(v5) - if !ok { - message := fmt.Sprintf("has unexpected value for operationId: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated ParameterOrReference parameters = 6; - v6 := compiler.MapValueForKey(m, "parameters") - if v6 != nil { - // repeated ParameterOrReference - x.Parameters = make([]*ParameterOrReference, 0) - a, ok := compiler.SequenceNodeForNode(v6) - if ok { - for _, item := range a.Content { - y, err := NewParameterOrReference(item, compiler.NewContext("parameters", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Parameters = append(x.Parameters, y) - } - } - } - // RequestBodyOrReference request_body = 7; - v7 := compiler.MapValueForKey(m, "requestBody") - if v7 != nil { - var err error - x.RequestBody, err = NewRequestBodyOrReference(v7, compiler.NewContext("requestBody", v7, context)) - if err != nil { - errors = append(errors, err) - } - } - // Responses responses = 8; - v8 := compiler.MapValueForKey(m, "responses") - if v8 != nil { - var err error - x.Responses, err = NewResponses(v8, compiler.NewContext("responses", v8, context)) - if err != nil { - errors = append(errors, err) - } - } - // CallbacksOrReferences callbacks = 9; - v9 := compiler.MapValueForKey(m, "callbacks") - if v9 != nil { - var err error - x.Callbacks, err = NewCallbacksOrReferences(v9, compiler.NewContext("callbacks", v9, context)) - if err != nil { - errors = append(errors, err) - } - } - // bool deprecated = 10; - v10 := compiler.MapValueForKey(m, "deprecated") - if v10 != nil { - x.Deprecated, ok = compiler.BoolForScalarNode(v10) - if !ok { - message := fmt.Sprintf("has unexpected value for deprecated: %s", compiler.Display(v10)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated SecurityRequirement security = 11; - v11 := compiler.MapValueForKey(m, "security") - if v11 != nil { - // repeated SecurityRequirement - x.Security = make([]*SecurityRequirement, 0) - a, ok := compiler.SequenceNodeForNode(v11) - if ok { - for _, item := range a.Content { - y, err := NewSecurityRequirement(item, compiler.NewContext("security", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Security = append(x.Security, y) - } - } - } - // repeated Server servers = 12; - v12 := compiler.MapValueForKey(m, "servers") - if v12 != nil { - // repeated Server - x.Servers = make([]*Server, 0) - a, ok := compiler.SequenceNodeForNode(v12) - if ok { - for _, item := range a.Content { - y, err := NewServer(item, compiler.NewContext("servers", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Servers = append(x.Servers, y) - } - } - } - // repeated NamedAny specification_extension = 13; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - 
result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewParameter creates an object of type Parameter if possible, returning an error if not. -func NewParameter(in *yaml.Node, context *compiler.Context) (*Parameter, error) { - errors := make([]error, 0) - x := &Parameter{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"in", "name"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"allowEmptyValue", "allowReserved", "content", "deprecated", "description", "example", "examples", "explode", "in", "name", "required", "schema", "style"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 2; - v2 := compiler.MapValueForKey(m, "in") - if v2 != nil { - x.In, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool required = 4; - v4 := compiler.MapValueForKey(m, "required") - if v4 != nil { - x.Required, ok = compiler.BoolForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool deprecated = 5; - v5 := compiler.MapValueForKey(m, "deprecated") - if v5 != nil { - x.Deprecated, ok = compiler.BoolForScalarNode(v5) - if !ok { - message := fmt.Sprintf("has unexpected value for deprecated: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool allow_empty_value = 6; - v6 := compiler.MapValueForKey(m, "allowEmptyValue") - if v6 != nil { - x.AllowEmptyValue, ok = compiler.BoolForScalarNode(v6) - if !ok { - message := fmt.Sprintf("has unexpected value for allowEmptyValue: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string style = 7; - v7 := compiler.MapValueForKey(m, "style") - if v7 != nil { - 
x.Style, ok = compiler.StringForScalarNode(v7) - if !ok { - message := fmt.Sprintf("has unexpected value for style: %s", compiler.Display(v7)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool explode = 8; - v8 := compiler.MapValueForKey(m, "explode") - if v8 != nil { - x.Explode, ok = compiler.BoolForScalarNode(v8) - if !ok { - message := fmt.Sprintf("has unexpected value for explode: %s", compiler.Display(v8)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool allow_reserved = 9; - v9 := compiler.MapValueForKey(m, "allowReserved") - if v9 != nil { - x.AllowReserved, ok = compiler.BoolForScalarNode(v9) - if !ok { - message := fmt.Sprintf("has unexpected value for allowReserved: %s", compiler.Display(v9)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // SchemaOrReference schema = 10; - v10 := compiler.MapValueForKey(m, "schema") - if v10 != nil { - var err error - x.Schema, err = NewSchemaOrReference(v10, compiler.NewContext("schema", v10, context)) - if err != nil { - errors = append(errors, err) - } - } - // Any example = 11; - v11 := compiler.MapValueForKey(m, "example") - if v11 != nil { - var err error - x.Example, err = NewAny(v11, compiler.NewContext("example", v11, context)) - if err != nil { - errors = append(errors, err) - } - } - // ExamplesOrReferences examples = 12; - v12 := compiler.MapValueForKey(m, "examples") - if v12 != nil { - var err error - x.Examples, err = NewExamplesOrReferences(v12, compiler.NewContext("examples", v12, context)) - if err != nil { - errors = append(errors, err) - } - } - // MediaTypes content = 13; - v13 := compiler.MapValueForKey(m, "content") - if v13 != nil { - var err error - x.Content, err = NewMediaTypes(v13, compiler.NewContext("content", v13, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny specification_extension = 14; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewParameterOrReference creates an object of type ParameterOrReference if possible, returning an error if not. 
-func NewParameterOrReference(in *yaml.Node, context *compiler.Context) (*ParameterOrReference, error) { - errors := make([]error, 0) - x := &ParameterOrReference{} - matched := false - // Parameter parameter = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewParameter(m, compiler.NewContext("parameter", m, context)) - if matchingError == nil { - x.Oneof = &ParameterOrReference_Parameter{Parameter: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Reference reference = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) - if matchingError == nil { - x.Oneof = &ParameterOrReference_Reference{Reference: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } else { - message := fmt.Sprintf("contains an invalid ParameterOrReference") - err := compiler.NewError(context, message) - errors = []error{err} - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewParametersOrReferences creates an object of type ParametersOrReferences if possible, returning an error if not. -func NewParametersOrReferences(in *yaml.Node, context *compiler.Context) (*ParametersOrReferences, error) { - errors := make([]error, 0) - x := &ParametersOrReferences{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedParameterOrReference additional_properties = 1; - // MAP: ParameterOrReference - x.AdditionalProperties = make([]*NamedParameterOrReference, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedParameterOrReference{} - pair.Name = k - var err error - pair.Value, err = NewParameterOrReference(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPathItem creates an object of type PathItem if possible, returning an error if not. 
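[editor's note] The deleted *OrReference constructors (NewParameterOrReference above, and the later NewRequestBodyOrReference, NewResponseOrReference, NewSchemaOrReference, NewSecuritySchemeOrReference) all share one shape: try each candidate subtype in turn, keep the first that parses, and discard the accumulated mismatch errors once something matched. A minimal standalone sketch of that pattern, using only gopkg.in/yaml.v3 and hypothetical Parameter/Reference stand-ins rather than the gnostic-generated types:

```go
package main

import (
	"errors"
	"fmt"

	"gopkg.in/yaml.v3"
)

// Hypothetical stand-ins for the generated Parameter and Reference messages.
type Parameter struct {
	Name string `yaml:"name"`
	In   string `yaml:"in"`
}

type Reference struct {
	Ref string `yaml:"$ref"`
}

// parameterOrReference mirrors the oneof pattern: try each subtype in order
// and keep the first that decodes into something plausible.
func parameterOrReference(node *yaml.Node) (interface{}, error) {
	var ref Reference
	if err := node.Decode(&ref); err == nil && ref.Ref != "" {
		return &ref, nil
	}
	var p Parameter
	if err := node.Decode(&p); err == nil && p.Name != "" && p.In != "" {
		return &p, nil
	}
	return nil, errors.New("contains an invalid ParameterOrReference")
}

func main() {
	var doc yaml.Node
	if err := yaml.Unmarshal([]byte("name: id\nin: path\nrequired: true\n"), &doc); err != nil {
		panic(err)
	}
	// yaml.Unmarshal wraps the mapping in a document node; Content[0] is the map.
	v, err := parameterOrReference(doc.Content[0])
	fmt.Printf("%#v %v\n", v, err)
}
```

In the generated code the discrimination is stricter: NewParameter itself rejects maps that are missing the required "in"/"name" keys, so a bare $ref map falls through to NewReference; the field checks here are a simplification.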
-func NewPathItem(in *yaml.Node, context *compiler.Context) (*PathItem, error) { - errors := make([]error, 0) - x := &PathItem{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"$ref", "delete", "description", "get", "head", "options", "parameters", "patch", "post", "put", "servers", "summary", "trace"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string _ref = 1; - v1 := compiler.MapValueForKey(m, "$ref") - if v1 != nil { - x.XRef, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string summary = 2; - v2 := compiler.MapValueForKey(m, "summary") - if v2 != nil { - x.Summary, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for summary: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Operation get = 4; - v4 := compiler.MapValueForKey(m, "get") - if v4 != nil { - var err error - x.Get, err = NewOperation(v4, compiler.NewContext("get", v4, context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation put = 5; - v5 := compiler.MapValueForKey(m, "put") - if v5 != nil { - var err error - x.Put, err = NewOperation(v5, compiler.NewContext("put", v5, context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation post = 6; - v6 := compiler.MapValueForKey(m, "post") - if v6 != nil { - var err error - x.Post, err = NewOperation(v6, compiler.NewContext("post", v6, context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation delete = 7; - v7 := compiler.MapValueForKey(m, "delete") - if v7 != nil { - var err error - x.Delete, err = NewOperation(v7, compiler.NewContext("delete", v7, context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation options = 8; - v8 := compiler.MapValueForKey(m, "options") - if v8 != nil { - var err error - x.Options, err = NewOperation(v8, compiler.NewContext("options", v8, context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation head = 9; - v9 := compiler.MapValueForKey(m, "head") - if v9 != nil { - var err error - x.Head, err = NewOperation(v9, compiler.NewContext("head", v9, context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation patch = 10; - v10 := compiler.MapValueForKey(m, "patch") - if v10 != nil { - var err error - x.Patch, err = NewOperation(v10, compiler.NewContext("patch", v10, context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation trace = 11; - v11 := compiler.MapValueForKey(m, "trace") - if v11 != nil { - var err error - x.Trace, err = NewOperation(v11, 
compiler.NewContext("trace", v11, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated Server servers = 12; - v12 := compiler.MapValueForKey(m, "servers") - if v12 != nil { - // repeated Server - x.Servers = make([]*Server, 0) - a, ok := compiler.SequenceNodeForNode(v12) - if ok { - for _, item := range a.Content { - y, err := NewServer(item, compiler.NewContext("servers", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Servers = append(x.Servers, y) - } - } - } - // repeated ParameterOrReference parameters = 13; - v13 := compiler.MapValueForKey(m, "parameters") - if v13 != nil { - // repeated ParameterOrReference - x.Parameters = make([]*ParameterOrReference, 0) - a, ok := compiler.SequenceNodeForNode(v13) - if ok { - for _, item := range a.Content { - y, err := NewParameterOrReference(item, compiler.NewContext("parameters", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Parameters = append(x.Parameters, y) - } - } - } - // repeated NamedAny specification_extension = 14; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPaths creates an object of type Paths if possible, returning an error if not. 
-func NewPaths(in *yaml.Node, context *compiler.Context) (*Paths, error) { - errors := make([]error, 0) - x := &Paths{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{} - allowedPatterns := []*regexp.Regexp{pattern2, pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // repeated NamedPathItem path = 1; - // MAP: PathItem ^/ - x.Path = make([]*NamedPathItem, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "/") { - pair := &NamedPathItem{} - pair.Name = k - var err error - pair.Value, err = NewPathItem(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.Path = append(x.Path, pair) - } - } - } - // repeated NamedAny specification_extension = 2; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewProperties creates an object of type Properties if possible, returning an error if not. -func NewProperties(in *yaml.Node, context *compiler.Context) (*Properties, error) { - errors := make([]error, 0) - x := &Properties{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedSchemaOrReference additional_properties = 1; - // MAP: SchemaOrReference - x.AdditionalProperties = make([]*NamedSchemaOrReference, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedSchemaOrReference{} - pair.Name = k - var err error - pair.Value, err = NewSchemaOrReference(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewReference creates an object of type Reference if possible, returning an error if not. 
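[editor's note] NewPaths and the recurring specification_extension blocks above all walk the mapping node's Content slice as alternating key/value pairs and select entries by key prefix ("/" for path items, "x-" for extensions). A standalone sketch of that walk with gopkg.in/yaml.v3 (collectExtensions is a hypothetical helper, not part of gnostic):

```go
package main

import (
	"fmt"
	"strings"

	"gopkg.in/yaml.v3"
)

// collectExtensions walks a YAML mapping node (Content holds alternating
// key/value nodes, exactly as the generated builders do) and keeps the raw
// YAML of every key that starts with "x-", roughly what the
// specification_extension loops above produce via compiler.Marshal.
func collectExtensions(m *yaml.Node) (map[string]string, error) {
	out := map[string]string{}
	for i := 0; i+1 < len(m.Content); i += 2 {
		k, v := m.Content[i], m.Content[i+1]
		if k.Kind != yaml.ScalarNode || !strings.HasPrefix(k.Value, "x-") {
			continue
		}
		raw, err := yaml.Marshal(v)
		if err != nil {
			return nil, err
		}
		out[k.Value] = string(raw)
	}
	return out, nil
}

func main() {
	src := "/healthz:\n  get: {}\nx-owner: harbor\nx-internal: true\n"
	var doc yaml.Node
	if err := yaml.Unmarshal([]byte(src), &doc); err != nil {
		panic(err)
	}
	ext, err := collectExtensions(doc.Content[0])
	fmt.Println(ext, err)
}
```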
-func NewReference(in *yaml.Node, context *compiler.Context) (*Reference, error) { - errors := make([]error, 0) - x := &Reference{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"$ref"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string _ref = 1; - v1 := compiler.MapValueForKey(m, "$ref") - if v1 != nil { - x.XRef, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string summary = 2; - v2 := compiler.MapValueForKey(m, "summary") - if v2 != nil { - x.Summary, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for summary: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewRequestBodiesOrReferences creates an object of type RequestBodiesOrReferences if possible, returning an error if not. -func NewRequestBodiesOrReferences(in *yaml.Node, context *compiler.Context) (*RequestBodiesOrReferences, error) { - errors := make([]error, 0) - x := &RequestBodiesOrReferences{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedRequestBodyOrReference additional_properties = 1; - // MAP: RequestBodyOrReference - x.AdditionalProperties = make([]*NamedRequestBodyOrReference, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedRequestBodyOrReference{} - pair.Name = k - var err error - pair.Value, err = NewRequestBodyOrReference(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewRequestBody creates an object of type RequestBody if possible, returning an error if not. 
-func NewRequestBody(in *yaml.Node, context *compiler.Context) (*RequestBody, error) { - errors := make([]error, 0) - x := &RequestBody{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"content"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"content", "description", "required"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // MediaTypes content = 2; - v2 := compiler.MapValueForKey(m, "content") - if v2 != nil { - var err error - x.Content, err = NewMediaTypes(v2, compiler.NewContext("content", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - // bool required = 3; - v3 := compiler.MapValueForKey(m, "required") - if v3 != nil { - x.Required, ok = compiler.BoolForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny specification_extension = 4; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewRequestBodyOrReference creates an object of type RequestBodyOrReference if possible, returning an error if not. 
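[editor's note] NewRequestBody above, like most of the deleted builders, first checks the map against a required-key list and an allowed-key list plus allowed patterns before reading any field. A rough standalone equivalent of that compiler.MissingKeysInMap / compiler.InvalidKeysInMap step, written against gopkg.in/yaml.v3 with a hypothetical validateKeys helper:

```go
package main

import (
	"fmt"
	"regexp"

	"gopkg.in/yaml.v3"
)

// validateKeys is a hypothetical helper reproducing the shape of
// compiler.MissingKeysInMap / compiler.InvalidKeysInMap: every required key
// must be present, and every present key must be allowed or match one of
// the allowed patterns (e.g. ^x- for extensions).
func validateKeys(m *yaml.Node, required, allowed []string, patterns []*regexp.Regexp) []error {
	present := map[string]bool{}
	for i := 0; i+1 < len(m.Content); i += 2 {
		if m.Content[i].Kind == yaml.ScalarNode {
			present[m.Content[i].Value] = true
		}
	}
	var errs []error
	for _, k := range required {
		if !present[k] {
			errs = append(errs, fmt.Errorf("is missing required property: %s", k))
		}
	}
	allowedSet := map[string]bool{}
	for _, k := range allowed {
		allowedSet[k] = true
	}
outer:
	for k := range present {
		if allowedSet[k] {
			continue
		}
		for _, p := range patterns {
			if p.MatchString(k) {
				continue outer
			}
		}
		errs = append(errs, fmt.Errorf("has invalid property: %s", k))
	}
	return errs
}

func main() {
	src := "description: body\nx-custom: 1\nbogus: 2\n"
	var doc yaml.Node
	if err := yaml.Unmarshal([]byte(src), &doc); err != nil {
		panic(err)
	}
	// Same key sets NewRequestBody uses: "content" is required,
	// "content"/"description"/"required" are allowed, plus ^x- extensions.
	errs := validateKeys(doc.Content[0],
		[]string{"content"},
		[]string{"content", "description", "required"},
		[]*regexp.Regexp{regexp.MustCompile(`^x-`)})
	for _, e := range errs {
		fmt.Println(e)
	}
}
```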
-func NewRequestBodyOrReference(in *yaml.Node, context *compiler.Context) (*RequestBodyOrReference, error) { - errors := make([]error, 0) - x := &RequestBodyOrReference{} - matched := false - // RequestBody request_body = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewRequestBody(m, compiler.NewContext("requestBody", m, context)) - if matchingError == nil { - x.Oneof = &RequestBodyOrReference_RequestBody{RequestBody: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Reference reference = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) - if matchingError == nil { - x.Oneof = &RequestBodyOrReference_Reference{Reference: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } else { - message := fmt.Sprintf("contains an invalid RequestBodyOrReference") - err := compiler.NewError(context, message) - errors = []error{err} - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponse creates an object of type Response if possible, returning an error if not. -func NewResponse(in *yaml.Node, context *compiler.Context) (*Response, error) { - errors := make([]error, 0) - x := &Response{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"description"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"content", "description", "headers", "links"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // HeadersOrReferences headers = 2; - v2 := compiler.MapValueForKey(m, "headers") - if v2 != nil { - var err error - x.Headers, err = NewHeadersOrReferences(v2, compiler.NewContext("headers", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - // MediaTypes content = 3; - v3 := compiler.MapValueForKey(m, "content") - if v3 != nil { - var err error - x.Content, err = NewMediaTypes(v3, compiler.NewContext("content", v3, context)) - if err != nil { - errors = append(errors, err) - } - } - // LinksOrReferences links = 4; - v4 := compiler.MapValueForKey(m, "links") - if v4 != nil { - var err error - x.Links, err = NewLinksOrReferences(v4, compiler.NewContext("links", v4, context)) - if err != nil 
{ - errors = append(errors, err) - } - } - // repeated NamedAny specification_extension = 5; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponseOrReference creates an object of type ResponseOrReference if possible, returning an error if not. -func NewResponseOrReference(in *yaml.Node, context *compiler.Context) (*ResponseOrReference, error) { - errors := make([]error, 0) - x := &ResponseOrReference{} - matched := false - // Response response = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewResponse(m, compiler.NewContext("response", m, context)) - if matchingError == nil { - x.Oneof = &ResponseOrReference_Response{Response: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Reference reference = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) - if matchingError == nil { - x.Oneof = &ResponseOrReference_Reference{Reference: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } else { - message := fmt.Sprintf("contains an invalid ResponseOrReference") - err := compiler.NewError(context, message) - errors = []error{err} - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponses creates an object of type Responses if possible, returning an error if not. 
-func NewResponses(in *yaml.Node, context *compiler.Context) (*Responses, error) { - errors := make([]error, 0) - x := &Responses{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"default"} - allowedPatterns := []*regexp.Regexp{pattern3, pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // ResponseOrReference default = 1; - v1 := compiler.MapValueForKey(m, "default") - if v1 != nil { - var err error - x.Default, err = NewResponseOrReference(v1, compiler.NewContext("default", v1, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedResponseOrReference response_or_reference = 2; - // MAP: ResponseOrReference ^([0-9X]{3})$ - x.ResponseOrReference = make([]*NamedResponseOrReference, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if pattern3.MatchString(k) { - pair := &NamedResponseOrReference{} - pair.Name = k - var err error - pair.Value, err = NewResponseOrReference(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.ResponseOrReference = append(x.ResponseOrReference, pair) - } - } - } - // repeated NamedAny specification_extension = 3; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponsesOrReferences creates an object of type ResponsesOrReferences if possible, returning an error if not. 
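[editor's note] NewResponses above keys its response map on the ^([0-9X]{3})$ pattern (pattern3 in the deleted file), which is what separates concrete or wildcard status codes from the "default" entry and "x-" extensions. A small illustration of that filter:

```go
package main

import (
	"fmt"
	"regexp"
)

// statusKey mirrors ^([0-9X]{3})$ (pattern3 in the deleted file), which
// NewResponses uses to tell response codes such as "200" or "4XX" apart
// from "default" and "x-" extension keys.
var statusKey = regexp.MustCompile(`^([0-9X]{3})$`)

func main() {
	for _, k := range []string{"200", "4XX", "default", "x-internal", "20"} {
		fmt.Printf("%-10s -> %v\n", k, statusKey.MatchString(k))
	}
}
```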
-func NewResponsesOrReferences(in *yaml.Node, context *compiler.Context) (*ResponsesOrReferences, error) { - errors := make([]error, 0) - x := &ResponsesOrReferences{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedResponseOrReference additional_properties = 1; - // MAP: ResponseOrReference - x.AdditionalProperties = make([]*NamedResponseOrReference, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedResponseOrReference{} - pair.Name = k - var err error - pair.Value, err = NewResponseOrReference(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSchema creates an object of type Schema if possible, returning an error if not. -func NewSchema(in *yaml.Node, context *compiler.Context) (*Schema, error) { - errors := make([]error, 0) - x := &Schema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"additionalProperties", "allOf", "anyOf", "default", "deprecated", "description", "discriminator", "enum", "example", "exclusiveMaximum", "exclusiveMinimum", "externalDocs", "format", "items", "maxItems", "maxLength", "maxProperties", "maximum", "minItems", "minLength", "minProperties", "minimum", "multipleOf", "not", "nullable", "oneOf", "pattern", "properties", "readOnly", "required", "title", "type", "uniqueItems", "writeOnly", "xml"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // bool nullable = 1; - v1 := compiler.MapValueForKey(m, "nullable") - if v1 != nil { - x.Nullable, ok = compiler.BoolForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for nullable: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Discriminator discriminator = 2; - v2 := compiler.MapValueForKey(m, "discriminator") - if v2 != nil { - var err error - x.Discriminator, err = NewDiscriminator(v2, compiler.NewContext("discriminator", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - // bool read_only = 3; - v3 := compiler.MapValueForKey(m, "readOnly") - if v3 != nil { - x.ReadOnly, ok = compiler.BoolForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for readOnly: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool write_only = 4; - v4 := compiler.MapValueForKey(m, "writeOnly") - if v4 != nil { - x.WriteOnly, ok = compiler.BoolForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for writeOnly: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Xml xml = 5; - v5 := compiler.MapValueForKey(m, "xml") - if v5 != nil { - var err error - x.Xml, err = NewXml(v5, compiler.NewContext("xml", v5, context)) - 
if err != nil { - errors = append(errors, err) - } - } - // ExternalDocs external_docs = 6; - v6 := compiler.MapValueForKey(m, "externalDocs") - if v6 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v6, compiler.NewContext("externalDocs", v6, context)) - if err != nil { - errors = append(errors, err) - } - } - // Any example = 7; - v7 := compiler.MapValueForKey(m, "example") - if v7 != nil { - var err error - x.Example, err = NewAny(v7, compiler.NewContext("example", v7, context)) - if err != nil { - errors = append(errors, err) - } - } - // bool deprecated = 8; - v8 := compiler.MapValueForKey(m, "deprecated") - if v8 != nil { - x.Deprecated, ok = compiler.BoolForScalarNode(v8) - if !ok { - message := fmt.Sprintf("has unexpected value for deprecated: %s", compiler.Display(v8)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string title = 9; - v9 := compiler.MapValueForKey(m, "title") - if v9 != nil { - x.Title, ok = compiler.StringForScalarNode(v9) - if !ok { - message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v9)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float multiple_of = 10; - v10 := compiler.MapValueForKey(m, "multipleOf") - if v10 != nil { - v, ok := compiler.FloatForScalarNode(v10) - if ok { - x.MultipleOf = v - } else { - message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v10)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float maximum = 11; - v11 := compiler.MapValueForKey(m, "maximum") - if v11 != nil { - v, ok := compiler.FloatForScalarNode(v11) - if ok { - x.Maximum = v - } else { - message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v11)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 12; - v12 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v12 != nil { - x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v12) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v12)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 13; - v13 := compiler.MapValueForKey(m, "minimum") - if v13 != nil { - v, ok := compiler.FloatForScalarNode(v13) - if ok { - x.Minimum = v - } else { - message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v13)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 14; - v14 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v14 != nil { - x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v14) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v14)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 15; - v15 := compiler.MapValueForKey(m, "maxLength") - if v15 != nil { - t, ok := compiler.IntForScalarNode(v15) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v15)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 16; - v16 := compiler.MapValueForKey(m, "minLength") - if v16 != nil { - t, ok := compiler.IntForScalarNode(v16) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v16)) - errors = append(errors, compiler.NewError(context, message)) - } 
- } - // string pattern = 17; - v17 := compiler.MapValueForKey(m, "pattern") - if v17 != nil { - x.Pattern, ok = compiler.StringForScalarNode(v17) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v17)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 18; - v18 := compiler.MapValueForKey(m, "maxItems") - if v18 != nil { - t, ok := compiler.IntForScalarNode(v18) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v18)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 19; - v19 := compiler.MapValueForKey(m, "minItems") - if v19 != nil { - t, ok := compiler.IntForScalarNode(v19) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v19)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 20; - v20 := compiler.MapValueForKey(m, "uniqueItems") - if v20 != nil { - x.UniqueItems, ok = compiler.BoolForScalarNode(v20) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v20)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_properties = 21; - v21 := compiler.MapValueForKey(m, "maxProperties") - if v21 != nil { - t, ok := compiler.IntForScalarNode(v21) - if ok { - x.MaxProperties = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxProperties: %s", compiler.Display(v21)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_properties = 22; - v22 := compiler.MapValueForKey(m, "minProperties") - if v22 != nil { - t, ok := compiler.IntForScalarNode(v22) - if ok { - x.MinProperties = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minProperties: %s", compiler.Display(v22)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string required = 23; - v23 := compiler.MapValueForKey(m, "required") - if v23 != nil { - v, ok := compiler.SequenceNodeForNode(v23) - if ok { - x.Required = compiler.StringArrayForSequenceNode(v) - } else { - message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v23)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 24; - v24 := compiler.MapValueForKey(m, "enum") - if v24 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := compiler.SequenceNodeForNode(v24) - if ok { - for _, item := range a.Content { - y, err := NewAny(item, compiler.NewContext("enum", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // string type = 25; - v25 := compiler.MapValueForKey(m, "type") - if v25 != nil { - x.Type, ok = compiler.StringForScalarNode(v25) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v25)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated SchemaOrReference all_of = 26; - v26 := compiler.MapValueForKey(m, "allOf") - if v26 != nil { - // repeated SchemaOrReference - x.AllOf = make([]*SchemaOrReference, 0) - a, ok := compiler.SequenceNodeForNode(v26) - if ok { - for _, item := range a.Content { - y, err := NewSchemaOrReference(item, compiler.NewContext("allOf", item, context)) - if err != nil { - errors = append(errors, err) - } - x.AllOf = append(x.AllOf, y) 
- } - } - } - // repeated SchemaOrReference one_of = 27; - v27 := compiler.MapValueForKey(m, "oneOf") - if v27 != nil { - // repeated SchemaOrReference - x.OneOf = make([]*SchemaOrReference, 0) - a, ok := compiler.SequenceNodeForNode(v27) - if ok { - for _, item := range a.Content { - y, err := NewSchemaOrReference(item, compiler.NewContext("oneOf", item, context)) - if err != nil { - errors = append(errors, err) - } - x.OneOf = append(x.OneOf, y) - } - } - } - // repeated SchemaOrReference any_of = 28; - v28 := compiler.MapValueForKey(m, "anyOf") - if v28 != nil { - // repeated SchemaOrReference - x.AnyOf = make([]*SchemaOrReference, 0) - a, ok := compiler.SequenceNodeForNode(v28) - if ok { - for _, item := range a.Content { - y, err := NewSchemaOrReference(item, compiler.NewContext("anyOf", item, context)) - if err != nil { - errors = append(errors, err) - } - x.AnyOf = append(x.AnyOf, y) - } - } - } - // Schema not = 29; - v29 := compiler.MapValueForKey(m, "not") - if v29 != nil { - var err error - x.Not, err = NewSchema(v29, compiler.NewContext("not", v29, context)) - if err != nil { - errors = append(errors, err) - } - } - // ItemsItem items = 30; - v30 := compiler.MapValueForKey(m, "items") - if v30 != nil { - var err error - x.Items, err = NewItemsItem(v30, compiler.NewContext("items", v30, context)) - if err != nil { - errors = append(errors, err) - } - } - // Properties properties = 31; - v31 := compiler.MapValueForKey(m, "properties") - if v31 != nil { - var err error - x.Properties, err = NewProperties(v31, compiler.NewContext("properties", v31, context)) - if err != nil { - errors = append(errors, err) - } - } - // AdditionalPropertiesItem additional_properties = 32; - v32 := compiler.MapValueForKey(m, "additionalProperties") - if v32 != nil { - var err error - x.AdditionalProperties, err = NewAdditionalPropertiesItem(v32, compiler.NewContext("additionalProperties", v32, context)) - if err != nil { - errors = append(errors, err) - } - } - // DefaultType default = 33; - v33 := compiler.MapValueForKey(m, "default") - if v33 != nil { - var err error - x.Default, err = NewDefaultType(v33, compiler.NewContext("default", v33, context)) - if err != nil { - errors = append(errors, err) - } - } - // string description = 34; - v34 := compiler.MapValueForKey(m, "description") - if v34 != nil { - x.Description, ok = compiler.StringForScalarNode(v34) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v34)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 35; - v35 := compiler.MapValueForKey(m, "format") - if v35 != nil { - x.Format, ok = compiler.StringForScalarNode(v35) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v35)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny specification_extension = 36; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, 
context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSchemaOrReference creates an object of type SchemaOrReference if possible, returning an error if not. -func NewSchemaOrReference(in *yaml.Node, context *compiler.Context) (*SchemaOrReference, error) { - errors := make([]error, 0) - x := &SchemaOrReference{} - matched := false - // Schema schema = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewSchema(m, compiler.NewContext("schema", m, context)) - if matchingError == nil { - x.Oneof = &SchemaOrReference_Schema{Schema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Reference reference = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) - if matchingError == nil { - x.Oneof = &SchemaOrReference_Reference{Reference: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } else { - message := fmt.Sprintf("contains an invalid SchemaOrReference") - err := compiler.NewError(context, message) - errors = []error{err} - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSchemasOrReferences creates an object of type SchemasOrReferences if possible, returning an error if not. -func NewSchemasOrReferences(in *yaml.Node, context *compiler.Context) (*SchemasOrReferences, error) { - errors := make([]error, 0) - x := &SchemasOrReferences{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedSchemaOrReference additional_properties = 1; - // MAP: SchemaOrReference - x.AdditionalProperties = make([]*NamedSchemaOrReference, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedSchemaOrReference{} - pair.Name = k - var err error - pair.Value, err = NewSchemaOrReference(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSecurityRequirement creates an object of type SecurityRequirement if possible, returning an error if not. 
-func NewSecurityRequirement(in *yaml.Node, context *compiler.Context) (*SecurityRequirement, error) { - errors := make([]error, 0) - x := &SecurityRequirement{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedStringArray additional_properties = 1; - // MAP: StringArray - x.AdditionalProperties = make([]*NamedStringArray, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedStringArray{} - pair.Name = k - var err error - pair.Value, err = NewStringArray(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSecurityScheme creates an object of type SecurityScheme if possible, returning an error if not. -func NewSecurityScheme(in *yaml.Node, context *compiler.Context) (*SecurityScheme, error) { - errors := make([]error, 0) - x := &SecurityScheme{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"bearerFormat", "description", "flows", "in", "name", "openIdConnectUrl", "scheme", "type"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 2; - v2 := compiler.MapValueForKey(m, "description") - if v2 != nil { - x.Description, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 3; - v3 := compiler.MapValueForKey(m, "name") - if v3 != nil { - x.Name, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 4; - v4 := compiler.MapValueForKey(m, "in") - if v4 != nil { - x.In, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string scheme = 5; - v5 := compiler.MapValueForKey(m, "scheme") - if v5 != nil { - x.Scheme, ok = compiler.StringForScalarNode(v5) - if !ok { - message := fmt.Sprintf("has unexpected value 
for scheme: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string bearer_format = 6; - v6 := compiler.MapValueForKey(m, "bearerFormat") - if v6 != nil { - x.BearerFormat, ok = compiler.StringForScalarNode(v6) - if !ok { - message := fmt.Sprintf("has unexpected value for bearerFormat: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // OauthFlows flows = 7; - v7 := compiler.MapValueForKey(m, "flows") - if v7 != nil { - var err error - x.Flows, err = NewOauthFlows(v7, compiler.NewContext("flows", v7, context)) - if err != nil { - errors = append(errors, err) - } - } - // string open_id_connect_url = 8; - v8 := compiler.MapValueForKey(m, "openIdConnectUrl") - if v8 != nil { - x.OpenIdConnectUrl, ok = compiler.StringForScalarNode(v8) - if !ok { - message := fmt.Sprintf("has unexpected value for openIdConnectUrl: %s", compiler.Display(v8)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny specification_extension = 9; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSecuritySchemeOrReference creates an object of type SecuritySchemeOrReference if possible, returning an error if not. -func NewSecuritySchemeOrReference(in *yaml.Node, context *compiler.Context) (*SecuritySchemeOrReference, error) { - errors := make([]error, 0) - x := &SecuritySchemeOrReference{} - matched := false - // SecurityScheme security_scheme = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewSecurityScheme(m, compiler.NewContext("securityScheme", m, context)) - if matchingError == nil { - x.Oneof = &SecuritySchemeOrReference_SecurityScheme{SecurityScheme: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Reference reference = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) - if matchingError == nil { - x.Oneof = &SecuritySchemeOrReference_Reference{Reference: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } else { - message := fmt.Sprintf("contains an invalid SecuritySchemeOrReference") - err := compiler.NewError(context, message) - errors = []error{err} - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSecuritySchemesOrReferences creates an object of type SecuritySchemesOrReferences if possible, returning an error if not. 
-func NewSecuritySchemesOrReferences(in *yaml.Node, context *compiler.Context) (*SecuritySchemesOrReferences, error) { - errors := make([]error, 0) - x := &SecuritySchemesOrReferences{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedSecuritySchemeOrReference additional_properties = 1; - // MAP: SecuritySchemeOrReference - x.AdditionalProperties = make([]*NamedSecuritySchemeOrReference, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedSecuritySchemeOrReference{} - pair.Name = k - var err error - pair.Value, err = NewSecuritySchemeOrReference(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewServer creates an object of type Server if possible, returning an error if not. -func NewServer(in *yaml.Node, context *compiler.Context) (*Server, error) { - errors := make([]error, 0) - x := &Server{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"url"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "url", "variables"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string url = 1; - v1 := compiler.MapValueForKey(m, "url") - if v1 != nil { - x.Url, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 2; - v2 := compiler.MapValueForKey(m, "description") - if v2 != nil { - x.Description, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ServerVariables variables = 3; - v3 := compiler.MapValueForKey(m, "variables") - if v3 != nil { - var err error - x.Variables, err = NewServerVariables(v3, compiler.NewContext("variables", v3, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny specification_extension = 4; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - 
result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewServerVariable creates an object of type ServerVariable if possible, returning an error if not. -func NewServerVariable(in *yaml.Node, context *compiler.Context) (*ServerVariable, error) { - errors := make([]error, 0) - x := &ServerVariable{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"default"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"default", "description", "enum"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // repeated string enum = 1; - v1 := compiler.MapValueForKey(m, "enum") - if v1 != nil { - v, ok := compiler.SequenceNodeForNode(v1) - if ok { - x.Enum = compiler.StringArrayForSequenceNode(v) - } else { - message := fmt.Sprintf("has unexpected value for enum: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string default = 2; - v2 := compiler.MapValueForKey(m, "default") - if v2 != nil { - x.Default, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for default: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny specification_extension = 4; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewServerVariables creates an object of type ServerVariables if possible, returning an error if not. 
-func NewServerVariables(in *yaml.Node, context *compiler.Context) (*ServerVariables, error) { - errors := make([]error, 0) - x := &ServerVariables{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedServerVariable additional_properties = 1; - // MAP: ServerVariable - x.AdditionalProperties = make([]*NamedServerVariable, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedServerVariable{} - pair.Name = k - var err error - pair.Value, err = NewServerVariable(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSpecificationExtension creates an object of type SpecificationExtension if possible, returning an error if not. -func NewSpecificationExtension(in *yaml.Node, context *compiler.Context) (*SpecificationExtension, error) { - errors := make([]error, 0) - x := &SpecificationExtension{} - matched := false - switch in.Tag { - case "!!bool": - var v bool - v, matched = compiler.BoolForScalarNode(in) - x.Oneof = &SpecificationExtension_Boolean{Boolean: v} - case "!!str": - var v string - v, matched = compiler.StringForScalarNode(in) - x.Oneof = &SpecificationExtension_String_{String_: v} - case "!!float": - var v float64 - v, matched = compiler.FloatForScalarNode(in) - x.Oneof = &SpecificationExtension_Number{Number: v} - case "!!int": - var v int64 - v, matched = compiler.IntForScalarNode(in) - x.Oneof = &SpecificationExtension_Number{Number: float64(v)} - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewStringArray creates an object of type StringArray if possible, returning an error if not. -func NewStringArray(in *yaml.Node, context *compiler.Context) (*StringArray, error) { - errors := make([]error, 0) - x := &StringArray{} - x.Value = make([]string, 0) - for _, node := range in.Content { - s, _ := compiler.StringForScalarNode(node) - x.Value = append(x.Value, s) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewStrings creates an object of type Strings if possible, returning an error if not. -func NewStrings(in *yaml.Node, context *compiler.Context) (*Strings, error) { - errors := make([]error, 0) - x := &Strings{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedString additional_properties = 1; - // MAP: string - x.AdditionalProperties = make([]*NamedString, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedString{} - pair.Name = k - pair.Value, _ = compiler.StringForScalarNode(v) - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewTag creates an object of type Tag if possible, returning an error if not. 
-func NewTag(in *yaml.Node, context *compiler.Context) (*Tag, error) { - errors := make([]error, 0) - x := &Tag{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"name"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "externalDocs", "name"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 2; - v2 := compiler.MapValueForKey(m, "description") - if v2 != nil { - x.Description, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ExternalDocs external_docs = 3; - v3 := compiler.MapValueForKey(m, "externalDocs") - if v3 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", v3, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny specification_extension = 4; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewXml creates an object of type Xml if possible, returning an error if not. 
-func NewXml(in *yaml.Node, context *compiler.Context) (*Xml, error) { - errors := make([]error, 0) - x := &Xml{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"attribute", "name", "namespace", "prefix", "wrapped"} - allowedPatterns := []*regexp.Regexp{pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string namespace = 2; - v2 := compiler.MapValueForKey(m, "namespace") - if v2 != nil { - x.Namespace, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for namespace: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string prefix = 3; - v3 := compiler.MapValueForKey(m, "prefix") - if v3 != nil { - x.Prefix, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for prefix: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool attribute = 4; - v4 := compiler.MapValueForKey(m, "attribute") - if v4 != nil { - x.Attribute, ok = compiler.BoolForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for attribute: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool wrapped = 5; - v5 := compiler.MapValueForKey(m, "wrapped") - if v5 != nil { - x.Wrapped, ok = compiler.BoolForScalarNode(v5) - if !ok { - message := fmt.Sprintf("has unexpected value for wrapped: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny specification_extension = 6; - // MAP: Any ^x- - x.SpecificationExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.SpecificationExtension = append(x.SpecificationExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside AdditionalPropertiesItem objects. 
-func (m *AdditionalPropertiesItem) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*AdditionalPropertiesItem_SchemaOrReference) - if ok { - _, err := p.SchemaOrReference.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Any objects. -func (m *Any) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside AnyOrExpression objects. -func (m *AnyOrExpression) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*AnyOrExpression_Any) - if ok { - _, err := p.Any.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*AnyOrExpression_Expression) - if ok { - _, err := p.Expression.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Callback objects. -func (m *Callback) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.Path { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.SpecificationExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside CallbackOrReference objects. -func (m *CallbackOrReference) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*CallbackOrReference_Callback) - if ok { - _, err := p.Callback.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*CallbackOrReference_Reference) - if ok { - _, err := p.Reference.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside CallbacksOrReferences objects. -func (m *CallbacksOrReferences) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Components objects. 
-func (m *Components) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Schemas != nil { - _, err := m.Schemas.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Responses != nil { - _, err := m.Responses.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Parameters != nil { - _, err := m.Parameters.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Examples != nil { - _, err := m.Examples.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.RequestBodies != nil { - _, err := m.RequestBodies.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Headers != nil { - _, err := m.Headers.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.SecuritySchemes != nil { - _, err := m.SecuritySchemes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Links != nil { - _, err := m.Links.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Callbacks != nil { - _, err := m.Callbacks.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.SpecificationExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Contact objects. -func (m *Contact) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.SpecificationExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside DefaultType objects. -func (m *DefaultType) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Discriminator objects. -func (m *Discriminator) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Mapping != nil { - _, err := m.Mapping.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.SpecificationExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Document objects. 
-func (m *Document) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Info != nil { - _, err := m.Info.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Servers { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.Paths != nil { - _, err := m.Paths.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Components != nil { - _, err := m.Components.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Security { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.Tags { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.SpecificationExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Encoding objects. -func (m *Encoding) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Headers != nil { - _, err := m.Headers.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.SpecificationExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Encodings objects. -func (m *Encodings) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Example objects. -func (m *Example) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.SpecificationExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ExampleOrReference objects. -func (m *ExampleOrReference) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*ExampleOrReference_Example) - if ok { - _, err := p.Example.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*ExampleOrReference_Reference) - if ok { - _, err := p.Reference.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ExamplesOrReferences objects. 
-func (m *ExamplesOrReferences) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Expression objects. -func (m *Expression) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ExternalDocs objects. -func (m *ExternalDocs) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.SpecificationExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Header objects. -func (m *Header) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Schema != nil { - _, err := m.Schema.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Example != nil { - _, err := m.Example.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Examples != nil { - _, err := m.Examples.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Content != nil { - _, err := m.Content.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.SpecificationExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside HeaderOrReference objects. -func (m *HeaderOrReference) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*HeaderOrReference_Header) - if ok { - _, err := p.Header.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*HeaderOrReference_Reference) - if ok { - _, err := p.Reference.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside HeadersOrReferences objects. -func (m *HeadersOrReferences) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Info objects. 
-func (m *Info) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Contact != nil { - _, err := m.Contact.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.License != nil { - _, err := m.License.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.SpecificationExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ItemsItem objects. -func (m *ItemsItem) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.SchemaOrReference { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside License objects. -func (m *License) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.SpecificationExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Link objects. -func (m *Link) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Parameters != nil { - _, err := m.Parameters.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.RequestBody != nil { - _, err := m.RequestBody.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Server != nil { - _, err := m.Server.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.SpecificationExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside LinkOrReference objects. -func (m *LinkOrReference) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*LinkOrReference_Link) - if ok { - _, err := p.Link.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*LinkOrReference_Reference) - if ok { - _, err := p.Reference.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside LinksOrReferences objects. -func (m *LinksOrReferences) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside MediaType objects. 
-func (m *MediaType) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Schema != nil { - _, err := m.Schema.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Example != nil { - _, err := m.Example.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Examples != nil { - _, err := m.Examples.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Encoding != nil { - _, err := m.Encoding.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.SpecificationExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside MediaTypes objects. -func (m *MediaTypes) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedAny objects. -func (m *NamedAny) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedCallbackOrReference objects. -func (m *NamedCallbackOrReference) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedEncoding objects. -func (m *NamedEncoding) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedExampleOrReference objects. -func (m *NamedExampleOrReference) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedHeaderOrReference objects. -func (m *NamedHeaderOrReference) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedLinkOrReference objects. 
-func (m *NamedLinkOrReference) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedMediaType objects. -func (m *NamedMediaType) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedParameterOrReference objects. -func (m *NamedParameterOrReference) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedPathItem objects. -func (m *NamedPathItem) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedRequestBodyOrReference objects. -func (m *NamedRequestBodyOrReference) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedResponseOrReference objects. -func (m *NamedResponseOrReference) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedSchemaOrReference objects. -func (m *NamedSchemaOrReference) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedSecuritySchemeOrReference objects. -func (m *NamedSecuritySchemeOrReference) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedServerVariable objects. -func (m *NamedServerVariable) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedString objects. 
-func (m *NamedString) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedStringArray objects. -func (m *NamedStringArray) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside OauthFlow objects. -func (m *OauthFlow) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.SpecificationExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside OauthFlows objects. -func (m *OauthFlows) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Implicit != nil { - _, err := m.Implicit.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Password != nil { - _, err := m.Password.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.ClientCredentials != nil { - _, err := m.ClientCredentials.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.AuthorizationCode != nil { - _, err := m.AuthorizationCode.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.SpecificationExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Object objects. -func (m *Object) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Operation objects. 
-func (m *Operation) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Parameters { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.RequestBody != nil { - _, err := m.RequestBody.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Responses != nil { - _, err := m.Responses.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Callbacks != nil { - _, err := m.Callbacks.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Security { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.Servers { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.SpecificationExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Parameter objects. -func (m *Parameter) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Schema != nil { - _, err := m.Schema.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Example != nil { - _, err := m.Example.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Examples != nil { - _, err := m.Examples.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Content != nil { - _, err := m.Content.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.SpecificationExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ParameterOrReference objects. -func (m *ParameterOrReference) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*ParameterOrReference_Parameter) - if ok { - _, err := p.Parameter.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*ParameterOrReference_Reference) - if ok { - _, err := p.Reference.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ParametersOrReferences objects. -func (m *ParametersOrReferences) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside PathItem objects. 
-func (m *PathItem) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.XRef != "" { - info, err := compiler.ReadInfoForRef(root, m.XRef) - if err != nil { - return nil, err - } - if info != nil { - replacement, err := NewPathItem(info, nil) - if err == nil { - *m = *replacement - return m.ResolveReferences(root) - } - } - return info, nil - } - if m.Get != nil { - _, err := m.Get.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Put != nil { - _, err := m.Put.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Post != nil { - _, err := m.Post.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Delete != nil { - _, err := m.Delete.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Options != nil { - _, err := m.Options.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Head != nil { - _, err := m.Head.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Patch != nil { - _, err := m.Patch.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Trace != nil { - _, err := m.Trace.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Servers { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.Parameters { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.SpecificationExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Paths objects. -func (m *Paths) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.Path { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.SpecificationExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Properties objects. -func (m *Properties) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Reference objects. -func (m *Reference) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.XRef != "" { - info, err := compiler.ReadInfoForRef(root, m.XRef) - if err != nil { - return nil, err - } - if info != nil { - replacement, err := NewReference(info, nil) - if err == nil { - *m = *replacement - return m.ResolveReferences(root) - } - } - return info, nil - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside RequestBodiesOrReferences objects. 
-func (m *RequestBodiesOrReferences) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside RequestBody objects. -func (m *RequestBody) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Content != nil { - _, err := m.Content.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.SpecificationExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside RequestBodyOrReference objects. -func (m *RequestBodyOrReference) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*RequestBodyOrReference_RequestBody) - if ok { - _, err := p.RequestBody.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*RequestBodyOrReference_Reference) - if ok { - _, err := p.Reference.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Response objects. -func (m *Response) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Headers != nil { - _, err := m.Headers.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Content != nil { - _, err := m.Content.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Links != nil { - _, err := m.Links.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.SpecificationExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ResponseOrReference objects. -func (m *ResponseOrReference) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*ResponseOrReference_Response) - if ok { - _, err := p.Response.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*ResponseOrReference_Reference) - if ok { - _, err := p.Reference.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Responses objects. 
-func (m *Responses) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.ResponseOrReference { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.SpecificationExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ResponsesOrReferences objects. -func (m *ResponsesOrReferences) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Schema objects. -func (m *Schema) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Discriminator != nil { - _, err := m.Discriminator.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Xml != nil { - _, err := m.Xml.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Example != nil { - _, err := m.Example.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.AllOf { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.OneOf { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.AnyOf { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.Not != nil { - _, err := m.Not.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Properties != nil { - _, err := m.Properties.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.AdditionalProperties != nil { - _, err := m.AdditionalProperties.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.SpecificationExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside SchemaOrReference objects. 
-[deleted vendored generated code: the ResolveReferences(root string) methods for the remaining OpenAPI v3 message types (SchemaOrReference, SchemasOrReferences, SecurityRequirement, SecurityScheme, SecuritySchemeOrReference, SecuritySchemesOrReferences, Server, ServerVariable, ServerVariables, SpecificationExtension, StringArray, Strings, Tag, Xml) and the ToRawInfo() *yaml.Node export methods for the message types from AdditionalPropertiesItem through SecurityScheme (Any, Callback, Components, Contact, DefaultType, Discriminator, Document, Encoding, Example, ExternalDocs, Header, Info, License, Link, MediaType, the Named* wrappers, OauthFlow(s), Operation, Parameter, PathItem, Paths, Reference, RequestBody, Response(s), Schema, SecurityRequirement, and related Or-Reference/plural map types)]
- info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - if m.In != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) - } - if m.Scheme != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("scheme")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Scheme)) - } - if m.BearerFormat != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("bearerFormat")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.BearerFormat)) - } - if m.Flows != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("flows")) - info.Content = append(info.Content, m.Flows.ToRawInfo()) - } - if m.OpenIdConnectUrl != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("openIdConnectUrl")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.OpenIdConnectUrl)) - } - if m.SpecificationExtension != nil { - for _, item := range m.SpecificationExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of SecuritySchemeOrReference suitable for JSON or YAML export. -func (m *SecuritySchemeOrReference) ToRawInfo() *yaml.Node { - // ONE OF WRAPPER - // SecuritySchemeOrReference - // {Name:securityScheme Type:SecurityScheme StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetSecurityScheme() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:reference Type:Reference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetReference() - if v1 != nil { - return v1.ToRawInfo() - } - return compiler.NewNullNode() -} - -// ToRawInfo returns a description of SecuritySchemesOrReferences suitable for JSON or YAML export. -func (m *SecuritySchemesOrReferences) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Server suitable for JSON or YAML export. -func (m *Server) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. 
- info.Content = append(info.Content, compiler.NewScalarNodeForString("url")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url)) - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.Variables != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("variables")) - info.Content = append(info.Content, m.Variables.ToRawInfo()) - } - if m.SpecificationExtension != nil { - for _, item := range m.SpecificationExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of ServerVariable suitable for JSON or YAML export. -func (m *ServerVariable) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if len(m.Enum) != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) - info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Enum)) - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Default)) - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.SpecificationExtension != nil { - for _, item := range m.SpecificationExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of ServerVariables suitable for JSON or YAML export. -func (m *ServerVariables) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of SpecificationExtension suitable for JSON or YAML export. -func (m *SpecificationExtension) ToRawInfo() *yaml.Node { - // ONE OF WRAPPER - // SpecificationExtension - // {Name:number Type:float StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if v0, ok := m.GetOneof().(*SpecificationExtension_Number); ok { - return compiler.NewScalarNodeForFloat(v0.Number) - } - // {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if v1, ok := m.GetOneof().(*SpecificationExtension_Boolean); ok { - return compiler.NewScalarNodeForBool(v1.Boolean) - } - // {Name:string Type:string StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if v2, ok := m.GetOneof().(*SpecificationExtension_String_); ok { - return compiler.NewScalarNodeForString(v2.String_) - } - return compiler.NewNullNode() -} - -// ToRawInfo returns a description of StringArray suitable for JSON or YAML export. -func (m *StringArray) ToRawInfo() *yaml.Node { - return compiler.NewSequenceNodeForStringArray(m.Value) -} - -// ToRawInfo returns a description of Strings suitable for JSON or YAML export. 
-func (m *Strings) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Tag suitable for JSON or YAML export. -func (m *Tag) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.ExternalDocs != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) - info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) - } - if m.SpecificationExtension != nil { - for _, item := range m.SpecificationExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Xml suitable for JSON or YAML export. -func (m *Xml) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - if m.Namespace != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("namespace")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Namespace)) - } - if m.Prefix != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("prefix")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Prefix)) - } - if m.Attribute != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("attribute")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Attribute)) - } - if m.Wrapped != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("wrapped")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Wrapped)) - } - if m.SpecificationExtension != nil { - for _, item := range m.SpecificationExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -var ( - pattern0 = regexp.MustCompile("^") - pattern1 = regexp.MustCompile("^x-") - pattern2 = regexp.MustCompile("^/") - pattern3 = regexp.MustCompile("^([0-9X]{3})$") -) diff --git a/src/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go b/src/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go deleted file mode 100644 index 499e7f932..000000000 --- a/src/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go +++ /dev/null @@ -1,8053 +0,0 @@ -// Copyright 2020 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// THIS FILE IS AUTOMATICALLY GENERATED. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.18.1 -// source: openapiv3/OpenAPIv3.proto - -package openapi_v3 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type AdditionalPropertiesItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *AdditionalPropertiesItem_SchemaOrReference - // *AdditionalPropertiesItem_Boolean - Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` -} - -func (x *AdditionalPropertiesItem) Reset() { - *x = AdditionalPropertiesItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AdditionalPropertiesItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AdditionalPropertiesItem) ProtoMessage() {} - -func (x *AdditionalPropertiesItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AdditionalPropertiesItem.ProtoReflect.Descriptor instead. 
-func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{0} -} - -func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *AdditionalPropertiesItem) GetSchemaOrReference() *SchemaOrReference { - if x, ok := x.GetOneof().(*AdditionalPropertiesItem_SchemaOrReference); ok { - return x.SchemaOrReference - } - return nil -} - -func (x *AdditionalPropertiesItem) GetBoolean() bool { - if x, ok := x.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { - return x.Boolean - } - return false -} - -type isAdditionalPropertiesItem_Oneof interface { - isAdditionalPropertiesItem_Oneof() -} - -type AdditionalPropertiesItem_SchemaOrReference struct { - SchemaOrReference *SchemaOrReference `protobuf:"bytes,1,opt,name=schema_or_reference,json=schemaOrReference,proto3,oneof"` -} - -type AdditionalPropertiesItem_Boolean struct { - Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"` -} - -func (*AdditionalPropertiesItem_SchemaOrReference) isAdditionalPropertiesItem_Oneof() {} - -func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} - -type Any struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Value *anypb.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - Yaml string `protobuf:"bytes,2,opt,name=yaml,proto3" json:"yaml,omitempty"` -} - -func (x *Any) Reset() { - *x = Any{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Any) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Any) ProtoMessage() {} - -func (x *Any) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Any.ProtoReflect.Descriptor instead. 
-func (*Any) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{1} -} - -func (x *Any) GetValue() *anypb.Any { - if x != nil { - return x.Value - } - return nil -} - -func (x *Any) GetYaml() string { - if x != nil { - return x.Yaml - } - return "" -} - -type AnyOrExpression struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *AnyOrExpression_Any - // *AnyOrExpression_Expression - Oneof isAnyOrExpression_Oneof `protobuf_oneof:"oneof"` -} - -func (x *AnyOrExpression) Reset() { - *x = AnyOrExpression{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AnyOrExpression) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AnyOrExpression) ProtoMessage() {} - -func (x *AnyOrExpression) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AnyOrExpression.ProtoReflect.Descriptor instead. -func (*AnyOrExpression) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{2} -} - -func (m *AnyOrExpression) GetOneof() isAnyOrExpression_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *AnyOrExpression) GetAny() *Any { - if x, ok := x.GetOneof().(*AnyOrExpression_Any); ok { - return x.Any - } - return nil -} - -func (x *AnyOrExpression) GetExpression() *Expression { - if x, ok := x.GetOneof().(*AnyOrExpression_Expression); ok { - return x.Expression - } - return nil -} - -type isAnyOrExpression_Oneof interface { - isAnyOrExpression_Oneof() -} - -type AnyOrExpression_Any struct { - Any *Any `protobuf:"bytes,1,opt,name=any,proto3,oneof"` -} - -type AnyOrExpression_Expression struct { - Expression *Expression `protobuf:"bytes,2,opt,name=expression,proto3,oneof"` -} - -func (*AnyOrExpression_Any) isAnyOrExpression_Oneof() {} - -func (*AnyOrExpression_Expression) isAnyOrExpression_Oneof() {} - -// A map of possible out-of band callbacks related to the parent operation. Each value in the map is a Path Item Object that describes a set of requests that may be initiated by the API provider and the expected responses. The key value used to identify the callback object is an expression, evaluated at runtime, that identifies a URL to use for the callback operation. 
-type Callback struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Path []*NamedPathItem `protobuf:"bytes,1,rep,name=path,proto3" json:"path,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,2,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *Callback) Reset() { - *x = Callback{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Callback) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Callback) ProtoMessage() {} - -func (x *Callback) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Callback.ProtoReflect.Descriptor instead. -func (*Callback) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{3} -} - -func (x *Callback) GetPath() []*NamedPathItem { - if x != nil { - return x.Path - } - return nil -} - -func (x *Callback) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -type CallbackOrReference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *CallbackOrReference_Callback - // *CallbackOrReference_Reference - Oneof isCallbackOrReference_Oneof `protobuf_oneof:"oneof"` -} - -func (x *CallbackOrReference) Reset() { - *x = CallbackOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CallbackOrReference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CallbackOrReference) ProtoMessage() {} - -func (x *CallbackOrReference) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CallbackOrReference.ProtoReflect.Descriptor instead. 
-func (*CallbackOrReference) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{4} -} - -func (m *CallbackOrReference) GetOneof() isCallbackOrReference_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *CallbackOrReference) GetCallback() *Callback { - if x, ok := x.GetOneof().(*CallbackOrReference_Callback); ok { - return x.Callback - } - return nil -} - -func (x *CallbackOrReference) GetReference() *Reference { - if x, ok := x.GetOneof().(*CallbackOrReference_Reference); ok { - return x.Reference - } - return nil -} - -type isCallbackOrReference_Oneof interface { - isCallbackOrReference_Oneof() -} - -type CallbackOrReference_Callback struct { - Callback *Callback `protobuf:"bytes,1,opt,name=callback,proto3,oneof"` -} - -type CallbackOrReference_Reference struct { - Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` -} - -func (*CallbackOrReference_Callback) isCallbackOrReference_Oneof() {} - -func (*CallbackOrReference_Reference) isCallbackOrReference_Oneof() {} - -type CallbacksOrReferences struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedCallbackOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *CallbacksOrReferences) Reset() { - *x = CallbacksOrReferences{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CallbacksOrReferences) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CallbacksOrReferences) ProtoMessage() {} - -func (x *CallbacksOrReferences) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CallbacksOrReferences.ProtoReflect.Descriptor instead. -func (*CallbacksOrReferences) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{5} -} - -func (x *CallbacksOrReferences) GetAdditionalProperties() []*NamedCallbackOrReference { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -// Holds a set of reusable objects for different aspects of the OAS. All objects defined within the components object will have no effect on the API unless they are explicitly referenced from properties outside the components object. 
-type Components struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Schemas *SchemasOrReferences `protobuf:"bytes,1,opt,name=schemas,proto3" json:"schemas,omitempty"` - Responses *ResponsesOrReferences `protobuf:"bytes,2,opt,name=responses,proto3" json:"responses,omitempty"` - Parameters *ParametersOrReferences `protobuf:"bytes,3,opt,name=parameters,proto3" json:"parameters,omitempty"` - Examples *ExamplesOrReferences `protobuf:"bytes,4,opt,name=examples,proto3" json:"examples,omitempty"` - RequestBodies *RequestBodiesOrReferences `protobuf:"bytes,5,opt,name=request_bodies,json=requestBodies,proto3" json:"request_bodies,omitempty"` - Headers *HeadersOrReferences `protobuf:"bytes,6,opt,name=headers,proto3" json:"headers,omitempty"` - SecuritySchemes *SecuritySchemesOrReferences `protobuf:"bytes,7,opt,name=security_schemes,json=securitySchemes,proto3" json:"security_schemes,omitempty"` - Links *LinksOrReferences `protobuf:"bytes,8,opt,name=links,proto3" json:"links,omitempty"` - Callbacks *CallbacksOrReferences `protobuf:"bytes,9,opt,name=callbacks,proto3" json:"callbacks,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,10,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *Components) Reset() { - *x = Components{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Components) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Components) ProtoMessage() {} - -func (x *Components) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Components.ProtoReflect.Descriptor instead. -func (*Components) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{6} -} - -func (x *Components) GetSchemas() *SchemasOrReferences { - if x != nil { - return x.Schemas - } - return nil -} - -func (x *Components) GetResponses() *ResponsesOrReferences { - if x != nil { - return x.Responses - } - return nil -} - -func (x *Components) GetParameters() *ParametersOrReferences { - if x != nil { - return x.Parameters - } - return nil -} - -func (x *Components) GetExamples() *ExamplesOrReferences { - if x != nil { - return x.Examples - } - return nil -} - -func (x *Components) GetRequestBodies() *RequestBodiesOrReferences { - if x != nil { - return x.RequestBodies - } - return nil -} - -func (x *Components) GetHeaders() *HeadersOrReferences { - if x != nil { - return x.Headers - } - return nil -} - -func (x *Components) GetSecuritySchemes() *SecuritySchemesOrReferences { - if x != nil { - return x.SecuritySchemes - } - return nil -} - -func (x *Components) GetLinks() *LinksOrReferences { - if x != nil { - return x.Links - } - return nil -} - -func (x *Components) GetCallbacks() *CallbacksOrReferences { - if x != nil { - return x.Callbacks - } - return nil -} - -func (x *Components) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -// Contact information for the exposed API. 
-type Contact struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` - Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,4,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *Contact) Reset() { - *x = Contact{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Contact) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Contact) ProtoMessage() {} - -func (x *Contact) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Contact.ProtoReflect.Descriptor instead. -func (*Contact) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{7} -} - -func (x *Contact) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Contact) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *Contact) GetEmail() string { - if x != nil { - return x.Email - } - return "" -} - -func (x *Contact) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -type DefaultType struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *DefaultType_Number - // *DefaultType_Boolean - // *DefaultType_String_ - Oneof isDefaultType_Oneof `protobuf_oneof:"oneof"` -} - -func (x *DefaultType) Reset() { - *x = DefaultType{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DefaultType) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DefaultType) ProtoMessage() {} - -func (x *DefaultType) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DefaultType.ProtoReflect.Descriptor instead. 
-func (*DefaultType) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{8} -} - -func (m *DefaultType) GetOneof() isDefaultType_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *DefaultType) GetNumber() float64 { - if x, ok := x.GetOneof().(*DefaultType_Number); ok { - return x.Number - } - return 0 -} - -func (x *DefaultType) GetBoolean() bool { - if x, ok := x.GetOneof().(*DefaultType_Boolean); ok { - return x.Boolean - } - return false -} - -func (x *DefaultType) GetString_() string { - if x, ok := x.GetOneof().(*DefaultType_String_); ok { - return x.String_ - } - return "" -} - -type isDefaultType_Oneof interface { - isDefaultType_Oneof() -} - -type DefaultType_Number struct { - Number float64 `protobuf:"fixed64,1,opt,name=number,proto3,oneof"` -} - -type DefaultType_Boolean struct { - Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"` -} - -type DefaultType_String_ struct { - String_ string `protobuf:"bytes,3,opt,name=string,proto3,oneof"` -} - -func (*DefaultType_Number) isDefaultType_Oneof() {} - -func (*DefaultType_Boolean) isDefaultType_Oneof() {} - -func (*DefaultType_String_) isDefaultType_Oneof() {} - -// When request bodies or response payloads may be one of a number of different schemas, a `discriminator` object can be used to aid in serialization, deserialization, and validation. The discriminator is a specific object in a schema which is used to inform the consumer of the specification of an alternative schema based on the value associated with it. When using the discriminator, _inline_ schemas will not be considered. -type Discriminator struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PropertyName string `protobuf:"bytes,1,opt,name=property_name,json=propertyName,proto3" json:"property_name,omitempty"` - Mapping *Strings `protobuf:"bytes,2,opt,name=mapping,proto3" json:"mapping,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,3,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *Discriminator) Reset() { - *x = Discriminator{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Discriminator) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Discriminator) ProtoMessage() {} - -func (x *Discriminator) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Discriminator.ProtoReflect.Descriptor instead. 
-func (*Discriminator) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{9} -} - -func (x *Discriminator) GetPropertyName() string { - if x != nil { - return x.PropertyName - } - return "" -} - -func (x *Discriminator) GetMapping() *Strings { - if x != nil { - return x.Mapping - } - return nil -} - -func (x *Discriminator) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -type Document struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Openapi string `protobuf:"bytes,1,opt,name=openapi,proto3" json:"openapi,omitempty"` - Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` - Servers []*Server `protobuf:"bytes,3,rep,name=servers,proto3" json:"servers,omitempty"` - Paths *Paths `protobuf:"bytes,4,opt,name=paths,proto3" json:"paths,omitempty"` - Components *Components `protobuf:"bytes,5,opt,name=components,proto3" json:"components,omitempty"` - Security []*SecurityRequirement `protobuf:"bytes,6,rep,name=security,proto3" json:"security,omitempty"` - Tags []*Tag `protobuf:"bytes,7,rep,name=tags,proto3" json:"tags,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,9,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *Document) Reset() { - *x = Document{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Document) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Document) ProtoMessage() {} - -func (x *Document) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Document.ProtoReflect.Descriptor instead. -func (*Document) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{10} -} - -func (x *Document) GetOpenapi() string { - if x != nil { - return x.Openapi - } - return "" -} - -func (x *Document) GetInfo() *Info { - if x != nil { - return x.Info - } - return nil -} - -func (x *Document) GetServers() []*Server { - if x != nil { - return x.Servers - } - return nil -} - -func (x *Document) GetPaths() *Paths { - if x != nil { - return x.Paths - } - return nil -} - -func (x *Document) GetComponents() *Components { - if x != nil { - return x.Components - } - return nil -} - -func (x *Document) GetSecurity() []*SecurityRequirement { - if x != nil { - return x.Security - } - return nil -} - -func (x *Document) GetTags() []*Tag { - if x != nil { - return x.Tags - } - return nil -} - -func (x *Document) GetExternalDocs() *ExternalDocs { - if x != nil { - return x.ExternalDocs - } - return nil -} - -func (x *Document) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -// A single encoding definition applied to a single schema property. 
-type Encoding struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` - Headers *HeadersOrReferences `protobuf:"bytes,2,opt,name=headers,proto3" json:"headers,omitempty"` - Style string `protobuf:"bytes,3,opt,name=style,proto3" json:"style,omitempty"` - Explode bool `protobuf:"varint,4,opt,name=explode,proto3" json:"explode,omitempty"` - AllowReserved bool `protobuf:"varint,5,opt,name=allow_reserved,json=allowReserved,proto3" json:"allow_reserved,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,6,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *Encoding) Reset() { - *x = Encoding{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Encoding) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Encoding) ProtoMessage() {} - -func (x *Encoding) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Encoding.ProtoReflect.Descriptor instead. -func (*Encoding) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{11} -} - -func (x *Encoding) GetContentType() string { - if x != nil { - return x.ContentType - } - return "" -} - -func (x *Encoding) GetHeaders() *HeadersOrReferences { - if x != nil { - return x.Headers - } - return nil -} - -func (x *Encoding) GetStyle() string { - if x != nil { - return x.Style - } - return "" -} - -func (x *Encoding) GetExplode() bool { - if x != nil { - return x.Explode - } - return false -} - -func (x *Encoding) GetAllowReserved() bool { - if x != nil { - return x.AllowReserved - } - return false -} - -func (x *Encoding) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -type Encodings struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedEncoding `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *Encodings) Reset() { - *x = Encodings{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Encodings) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Encodings) ProtoMessage() {} - -func (x *Encodings) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Encodings.ProtoReflect.Descriptor instead. 
-func (*Encodings) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{12} -} - -func (x *Encodings) GetAdditionalProperties() []*NamedEncoding { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -type Example struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Summary string `protobuf:"bytes,1,opt,name=summary,proto3" json:"summary,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - Value *Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` - ExternalValue string `protobuf:"bytes,4,opt,name=external_value,json=externalValue,proto3" json:"external_value,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,5,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *Example) Reset() { - *x = Example{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Example) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Example) ProtoMessage() {} - -func (x *Example) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Example.ProtoReflect.Descriptor instead. -func (*Example) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{13} -} - -func (x *Example) GetSummary() string { - if x != nil { - return x.Summary - } - return "" -} - -func (x *Example) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Example) GetValue() *Any { - if x != nil { - return x.Value - } - return nil -} - -func (x *Example) GetExternalValue() string { - if x != nil { - return x.ExternalValue - } - return "" -} - -func (x *Example) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -type ExampleOrReference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *ExampleOrReference_Example - // *ExampleOrReference_Reference - Oneof isExampleOrReference_Oneof `protobuf_oneof:"oneof"` -} - -func (x *ExampleOrReference) Reset() { - *x = ExampleOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExampleOrReference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExampleOrReference) ProtoMessage() {} - -func (x *ExampleOrReference) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExampleOrReference.ProtoReflect.Descriptor instead. 
-func (*ExampleOrReference) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{14} -} - -func (m *ExampleOrReference) GetOneof() isExampleOrReference_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *ExampleOrReference) GetExample() *Example { - if x, ok := x.GetOneof().(*ExampleOrReference_Example); ok { - return x.Example - } - return nil -} - -func (x *ExampleOrReference) GetReference() *Reference { - if x, ok := x.GetOneof().(*ExampleOrReference_Reference); ok { - return x.Reference - } - return nil -} - -type isExampleOrReference_Oneof interface { - isExampleOrReference_Oneof() -} - -type ExampleOrReference_Example struct { - Example *Example `protobuf:"bytes,1,opt,name=example,proto3,oneof"` -} - -type ExampleOrReference_Reference struct { - Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` -} - -func (*ExampleOrReference_Example) isExampleOrReference_Oneof() {} - -func (*ExampleOrReference_Reference) isExampleOrReference_Oneof() {} - -type ExamplesOrReferences struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedExampleOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *ExamplesOrReferences) Reset() { - *x = ExamplesOrReferences{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExamplesOrReferences) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExamplesOrReferences) ProtoMessage() {} - -func (x *ExamplesOrReferences) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExamplesOrReferences.ProtoReflect.Descriptor instead. -func (*ExamplesOrReferences) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{15} -} - -func (x *ExamplesOrReferences) GetAdditionalProperties() []*NamedExampleOrReference { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -type Expression struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *Expression) Reset() { - *x = Expression{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Expression) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Expression) ProtoMessage() {} - -func (x *Expression) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Expression.ProtoReflect.Descriptor instead. 
-func (*Expression) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{16} -} - -func (x *Expression) GetAdditionalProperties() []*NamedAny { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -// Allows referencing an external resource for extended documentation. -type ExternalDocs struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,3,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *ExternalDocs) Reset() { - *x = ExternalDocs{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExternalDocs) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExternalDocs) ProtoMessage() {} - -func (x *ExternalDocs) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExternalDocs.ProtoReflect.Descriptor instead. -func (*ExternalDocs) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{17} -} - -func (x *ExternalDocs) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *ExternalDocs) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *ExternalDocs) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -// The Header Object follows the structure of the Parameter Object with the following changes: 1. `name` MUST NOT be specified, it is given in the corresponding `headers` map. 1. `in` MUST NOT be specified, it is implicitly in `header`. 1. All traits that are affected by the location MUST be applicable to a location of `header` (for example, `style`). 
-type Header struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - Required bool `protobuf:"varint,2,opt,name=required,proto3" json:"required,omitempty"` - Deprecated bool `protobuf:"varint,3,opt,name=deprecated,proto3" json:"deprecated,omitempty"` - AllowEmptyValue bool `protobuf:"varint,4,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` - Style string `protobuf:"bytes,5,opt,name=style,proto3" json:"style,omitempty"` - Explode bool `protobuf:"varint,6,opt,name=explode,proto3" json:"explode,omitempty"` - AllowReserved bool `protobuf:"varint,7,opt,name=allow_reserved,json=allowReserved,proto3" json:"allow_reserved,omitempty"` - Schema *SchemaOrReference `protobuf:"bytes,8,opt,name=schema,proto3" json:"schema,omitempty"` - Example *Any `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"` - Examples *ExamplesOrReferences `protobuf:"bytes,10,opt,name=examples,proto3" json:"examples,omitempty"` - Content *MediaTypes `protobuf:"bytes,11,opt,name=content,proto3" json:"content,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,12,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *Header) Reset() { - *x = Header{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Header) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Header) ProtoMessage() {} - -func (x *Header) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Header.ProtoReflect.Descriptor instead. 
-func (*Header) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{18} -} - -func (x *Header) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Header) GetRequired() bool { - if x != nil { - return x.Required - } - return false -} - -func (x *Header) GetDeprecated() bool { - if x != nil { - return x.Deprecated - } - return false -} - -func (x *Header) GetAllowEmptyValue() bool { - if x != nil { - return x.AllowEmptyValue - } - return false -} - -func (x *Header) GetStyle() string { - if x != nil { - return x.Style - } - return "" -} - -func (x *Header) GetExplode() bool { - if x != nil { - return x.Explode - } - return false -} - -func (x *Header) GetAllowReserved() bool { - if x != nil { - return x.AllowReserved - } - return false -} - -func (x *Header) GetSchema() *SchemaOrReference { - if x != nil { - return x.Schema - } - return nil -} - -func (x *Header) GetExample() *Any { - if x != nil { - return x.Example - } - return nil -} - -func (x *Header) GetExamples() *ExamplesOrReferences { - if x != nil { - return x.Examples - } - return nil -} - -func (x *Header) GetContent() *MediaTypes { - if x != nil { - return x.Content - } - return nil -} - -func (x *Header) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -type HeaderOrReference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *HeaderOrReference_Header - // *HeaderOrReference_Reference - Oneof isHeaderOrReference_Oneof `protobuf_oneof:"oneof"` -} - -func (x *HeaderOrReference) Reset() { - *x = HeaderOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HeaderOrReference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HeaderOrReference) ProtoMessage() {} - -func (x *HeaderOrReference) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HeaderOrReference.ProtoReflect.Descriptor instead. 
-func (*HeaderOrReference) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{19} -} - -func (m *HeaderOrReference) GetOneof() isHeaderOrReference_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *HeaderOrReference) GetHeader() *Header { - if x, ok := x.GetOneof().(*HeaderOrReference_Header); ok { - return x.Header - } - return nil -} - -func (x *HeaderOrReference) GetReference() *Reference { - if x, ok := x.GetOneof().(*HeaderOrReference_Reference); ok { - return x.Reference - } - return nil -} - -type isHeaderOrReference_Oneof interface { - isHeaderOrReference_Oneof() -} - -type HeaderOrReference_Header struct { - Header *Header `protobuf:"bytes,1,opt,name=header,proto3,oneof"` -} - -type HeaderOrReference_Reference struct { - Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` -} - -func (*HeaderOrReference_Header) isHeaderOrReference_Oneof() {} - -func (*HeaderOrReference_Reference) isHeaderOrReference_Oneof() {} - -type HeadersOrReferences struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedHeaderOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *HeadersOrReferences) Reset() { - *x = HeadersOrReferences{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HeadersOrReferences) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HeadersOrReferences) ProtoMessage() {} - -func (x *HeadersOrReferences) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HeadersOrReferences.ProtoReflect.Descriptor instead. -func (*HeadersOrReferences) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{20} -} - -func (x *HeadersOrReferences) GetAdditionalProperties() []*NamedHeaderOrReference { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -// The object provides metadata about the API. The metadata MAY be used by the clients if needed, and MAY be presented in editing or documentation generation tools for convenience. 
-type Info struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - TermsOfService string `protobuf:"bytes,3,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"` - Contact *Contact `protobuf:"bytes,4,opt,name=contact,proto3" json:"contact,omitempty"` - License *License `protobuf:"bytes,5,opt,name=license,proto3" json:"license,omitempty"` - Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,7,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` - Summary string `protobuf:"bytes,8,opt,name=summary,proto3" json:"summary,omitempty"` -} - -func (x *Info) Reset() { - *x = Info{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Info) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Info) ProtoMessage() {} - -func (x *Info) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Info.ProtoReflect.Descriptor instead. -func (*Info) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{21} -} - -func (x *Info) GetTitle() string { - if x != nil { - return x.Title - } - return "" -} - -func (x *Info) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Info) GetTermsOfService() string { - if x != nil { - return x.TermsOfService - } - return "" -} - -func (x *Info) GetContact() *Contact { - if x != nil { - return x.Contact - } - return nil -} - -func (x *Info) GetLicense() *License { - if x != nil { - return x.License - } - return nil -} - -func (x *Info) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -func (x *Info) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -func (x *Info) GetSummary() string { - if x != nil { - return x.Summary - } - return "" -} - -type ItemsItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - SchemaOrReference []*SchemaOrReference `protobuf:"bytes,1,rep,name=schema_or_reference,json=schemaOrReference,proto3" json:"schema_or_reference,omitempty"` -} - -func (x *ItemsItem) Reset() { - *x = ItemsItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ItemsItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ItemsItem) ProtoMessage() {} - -func (x *ItemsItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) 
-} - -// Deprecated: Use ItemsItem.ProtoReflect.Descriptor instead. -func (*ItemsItem) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{22} -} - -func (x *ItemsItem) GetSchemaOrReference() []*SchemaOrReference { - if x != nil { - return x.SchemaOrReference - } - return nil -} - -// License information for the exposed API. -type License struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,3,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *License) Reset() { - *x = License{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *License) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*License) ProtoMessage() {} - -func (x *License) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use License.ProtoReflect.Descriptor instead. -func (*License) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{23} -} - -func (x *License) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *License) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *License) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -// The `Link object` represents a possible design-time link for a response. The presence of a link does not guarantee the caller's ability to successfully invoke it, rather it provides a known relationship and traversal mechanism between responses and other operations. Unlike _dynamic_ links (i.e. links provided **in** the response payload), the OAS linking mechanism does not require link information in the runtime response. For computing links, and providing instructions to execute them, a runtime expression is used for accessing values in an operation and using them as parameters while invoking the linked operation. 
-type Link struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - OperationRef string `protobuf:"bytes,1,opt,name=operation_ref,json=operationRef,proto3" json:"operation_ref,omitempty"` - OperationId string `protobuf:"bytes,2,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` - Parameters *AnyOrExpression `protobuf:"bytes,3,opt,name=parameters,proto3" json:"parameters,omitempty"` - RequestBody *AnyOrExpression `protobuf:"bytes,4,opt,name=request_body,json=requestBody,proto3" json:"request_body,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` - Server *Server `protobuf:"bytes,6,opt,name=server,proto3" json:"server,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,7,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *Link) Reset() { - *x = Link{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Link) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Link) ProtoMessage() {} - -func (x *Link) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Link.ProtoReflect.Descriptor instead. -func (*Link) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{24} -} - -func (x *Link) GetOperationRef() string { - if x != nil { - return x.OperationRef - } - return "" -} - -func (x *Link) GetOperationId() string { - if x != nil { - return x.OperationId - } - return "" -} - -func (x *Link) GetParameters() *AnyOrExpression { - if x != nil { - return x.Parameters - } - return nil -} - -func (x *Link) GetRequestBody() *AnyOrExpression { - if x != nil { - return x.RequestBody - } - return nil -} - -func (x *Link) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Link) GetServer() *Server { - if x != nil { - return x.Server - } - return nil -} - -func (x *Link) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -type LinkOrReference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *LinkOrReference_Link - // *LinkOrReference_Reference - Oneof isLinkOrReference_Oneof `protobuf_oneof:"oneof"` -} - -func (x *LinkOrReference) Reset() { - *x = LinkOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LinkOrReference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LinkOrReference) ProtoMessage() {} - -func (x *LinkOrReference) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use 
LinkOrReference.ProtoReflect.Descriptor instead. -func (*LinkOrReference) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{25} -} - -func (m *LinkOrReference) GetOneof() isLinkOrReference_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *LinkOrReference) GetLink() *Link { - if x, ok := x.GetOneof().(*LinkOrReference_Link); ok { - return x.Link - } - return nil -} - -func (x *LinkOrReference) GetReference() *Reference { - if x, ok := x.GetOneof().(*LinkOrReference_Reference); ok { - return x.Reference - } - return nil -} - -type isLinkOrReference_Oneof interface { - isLinkOrReference_Oneof() -} - -type LinkOrReference_Link struct { - Link *Link `protobuf:"bytes,1,opt,name=link,proto3,oneof"` -} - -type LinkOrReference_Reference struct { - Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` -} - -func (*LinkOrReference_Link) isLinkOrReference_Oneof() {} - -func (*LinkOrReference_Reference) isLinkOrReference_Oneof() {} - -type LinksOrReferences struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedLinkOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *LinksOrReferences) Reset() { - *x = LinksOrReferences{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LinksOrReferences) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LinksOrReferences) ProtoMessage() {} - -func (x *LinksOrReferences) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LinksOrReferences.ProtoReflect.Descriptor instead. -func (*LinksOrReferences) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{26} -} - -func (x *LinksOrReferences) GetAdditionalProperties() []*NamedLinkOrReference { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -// Each Media Type Object provides schema and examples for the media type identified by its key. 
-type MediaType struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Schema *SchemaOrReference `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"` - Example *Any `protobuf:"bytes,2,opt,name=example,proto3" json:"example,omitempty"` - Examples *ExamplesOrReferences `protobuf:"bytes,3,opt,name=examples,proto3" json:"examples,omitempty"` - Encoding *Encodings `protobuf:"bytes,4,opt,name=encoding,proto3" json:"encoding,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,5,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *MediaType) Reset() { - *x = MediaType{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MediaType) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MediaType) ProtoMessage() {} - -func (x *MediaType) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MediaType.ProtoReflect.Descriptor instead. -func (*MediaType) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{27} -} - -func (x *MediaType) GetSchema() *SchemaOrReference { - if x != nil { - return x.Schema - } - return nil -} - -func (x *MediaType) GetExample() *Any { - if x != nil { - return x.Example - } - return nil -} - -func (x *MediaType) GetExamples() *ExamplesOrReferences { - if x != nil { - return x.Examples - } - return nil -} - -func (x *MediaType) GetEncoding() *Encodings { - if x != nil { - return x.Encoding - } - return nil -} - -func (x *MediaType) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -type MediaTypes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedMediaType `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *MediaTypes) Reset() { - *x = MediaTypes{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MediaTypes) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MediaTypes) ProtoMessage() {} - -func (x *MediaTypes) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MediaTypes.ProtoReflect.Descriptor instead. -func (*MediaTypes) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{28} -} - -func (x *MediaTypes) GetAdditionalProperties() []*NamedMediaType { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. 
-type NamedAny struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedAny) Reset() { - *x = NamedAny{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedAny) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedAny) ProtoMessage() {} - -func (x *NamedAny) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedAny.ProtoReflect.Descriptor instead. -func (*NamedAny) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{29} -} - -func (x *NamedAny) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedAny) GetValue() *Any { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of CallbackOrReference as ordered (name,value) pairs. -type NamedCallbackOrReference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *CallbackOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedCallbackOrReference) Reset() { - *x = NamedCallbackOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedCallbackOrReference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedCallbackOrReference) ProtoMessage() {} - -func (x *NamedCallbackOrReference) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedCallbackOrReference.ProtoReflect.Descriptor instead. -func (*NamedCallbackOrReference) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{30} -} - -func (x *NamedCallbackOrReference) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedCallbackOrReference) GetValue() *CallbackOrReference { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Encoding as ordered (name,value) pairs. 
-type NamedEncoding struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *Encoding `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedEncoding) Reset() { - *x = NamedEncoding{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedEncoding) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedEncoding) ProtoMessage() {} - -func (x *NamedEncoding) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedEncoding.ProtoReflect.Descriptor instead. -func (*NamedEncoding) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{31} -} - -func (x *NamedEncoding) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedEncoding) GetValue() *Encoding { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of ExampleOrReference as ordered (name,value) pairs. -type NamedExampleOrReference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *ExampleOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedExampleOrReference) Reset() { - *x = NamedExampleOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedExampleOrReference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedExampleOrReference) ProtoMessage() {} - -func (x *NamedExampleOrReference) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedExampleOrReference.ProtoReflect.Descriptor instead. -func (*NamedExampleOrReference) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{32} -} - -func (x *NamedExampleOrReference) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedExampleOrReference) GetValue() *ExampleOrReference { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of HeaderOrReference as ordered (name,value) pairs. 
-type NamedHeaderOrReference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *HeaderOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedHeaderOrReference) Reset() { - *x = NamedHeaderOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedHeaderOrReference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedHeaderOrReference) ProtoMessage() {} - -func (x *NamedHeaderOrReference) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedHeaderOrReference.ProtoReflect.Descriptor instead. -func (*NamedHeaderOrReference) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{33} -} - -func (x *NamedHeaderOrReference) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedHeaderOrReference) GetValue() *HeaderOrReference { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of LinkOrReference as ordered (name,value) pairs. -type NamedLinkOrReference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *LinkOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedLinkOrReference) Reset() { - *x = NamedLinkOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedLinkOrReference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedLinkOrReference) ProtoMessage() {} - -func (x *NamedLinkOrReference) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedLinkOrReference.ProtoReflect.Descriptor instead. -func (*NamedLinkOrReference) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{34} -} - -func (x *NamedLinkOrReference) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedLinkOrReference) GetValue() *LinkOrReference { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of MediaType as ordered (name,value) pairs. 
-type NamedMediaType struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *MediaType `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedMediaType) Reset() { - *x = NamedMediaType{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedMediaType) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedMediaType) ProtoMessage() {} - -func (x *NamedMediaType) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedMediaType.ProtoReflect.Descriptor instead. -func (*NamedMediaType) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{35} -} - -func (x *NamedMediaType) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedMediaType) GetValue() *MediaType { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of ParameterOrReference as ordered (name,value) pairs. -type NamedParameterOrReference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *ParameterOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedParameterOrReference) Reset() { - *x = NamedParameterOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedParameterOrReference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedParameterOrReference) ProtoMessage() {} - -func (x *NamedParameterOrReference) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[36] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedParameterOrReference.ProtoReflect.Descriptor instead. -func (*NamedParameterOrReference) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{36} -} - -func (x *NamedParameterOrReference) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedParameterOrReference) GetValue() *ParameterOrReference { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
-type NamedPathItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *PathItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedPathItem) Reset() { - *x = NamedPathItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedPathItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedPathItem) ProtoMessage() {} - -func (x *NamedPathItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedPathItem.ProtoReflect.Descriptor instead. -func (*NamedPathItem) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{37} -} - -func (x *NamedPathItem) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedPathItem) GetValue() *PathItem { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of RequestBodyOrReference as ordered (name,value) pairs. -type NamedRequestBodyOrReference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *RequestBodyOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedRequestBodyOrReference) Reset() { - *x = NamedRequestBodyOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedRequestBodyOrReference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedRequestBodyOrReference) ProtoMessage() {} - -func (x *NamedRequestBodyOrReference) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedRequestBodyOrReference.ProtoReflect.Descriptor instead. -func (*NamedRequestBodyOrReference) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{38} -} - -func (x *NamedRequestBodyOrReference) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedRequestBodyOrReference) GetValue() *RequestBodyOrReference { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of ResponseOrReference as ordered (name,value) pairs. 
-type NamedResponseOrReference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *ResponseOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedResponseOrReference) Reset() { - *x = NamedResponseOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedResponseOrReference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedResponseOrReference) ProtoMessage() {} - -func (x *NamedResponseOrReference) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[39] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedResponseOrReference.ProtoReflect.Descriptor instead. -func (*NamedResponseOrReference) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{39} -} - -func (x *NamedResponseOrReference) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedResponseOrReference) GetValue() *ResponseOrReference { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of SchemaOrReference as ordered (name,value) pairs. -type NamedSchemaOrReference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *SchemaOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedSchemaOrReference) Reset() { - *x = NamedSchemaOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedSchemaOrReference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedSchemaOrReference) ProtoMessage() {} - -func (x *NamedSchemaOrReference) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[40] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedSchemaOrReference.ProtoReflect.Descriptor instead. -func (*NamedSchemaOrReference) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{40} -} - -func (x *NamedSchemaOrReference) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedSchemaOrReference) GetValue() *SchemaOrReference { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of SecuritySchemeOrReference as ordered (name,value) pairs. 
-type NamedSecuritySchemeOrReference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *SecuritySchemeOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedSecuritySchemeOrReference) Reset() { - *x = NamedSecuritySchemeOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedSecuritySchemeOrReference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedSecuritySchemeOrReference) ProtoMessage() {} - -func (x *NamedSecuritySchemeOrReference) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[41] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedSecuritySchemeOrReference.ProtoReflect.Descriptor instead. -func (*NamedSecuritySchemeOrReference) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{41} -} - -func (x *NamedSecuritySchemeOrReference) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedSecuritySchemeOrReference) GetValue() *SecuritySchemeOrReference { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of ServerVariable as ordered (name,value) pairs. -type NamedServerVariable struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *ServerVariable `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedServerVariable) Reset() { - *x = NamedServerVariable{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedServerVariable) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedServerVariable) ProtoMessage() {} - -func (x *NamedServerVariable) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[42] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedServerVariable.ProtoReflect.Descriptor instead. -func (*NamedServerVariable) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{42} -} - -func (x *NamedServerVariable) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedServerVariable) GetValue() *ServerVariable { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. 
-type NamedString struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedString) Reset() { - *x = NamedString{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[43] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedString) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedString) ProtoMessage() {} - -func (x *NamedString) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[43] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedString.ProtoReflect.Descriptor instead. -func (*NamedString) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{43} -} - -func (x *NamedString) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedString) GetValue() string { - if x != nil { - return x.Value - } - return "" -} - -// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. -type NamedStringArray struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *StringArray `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedStringArray) Reset() { - *x = NamedStringArray{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[44] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedStringArray) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedStringArray) ProtoMessage() {} - -func (x *NamedStringArray) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[44] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedStringArray.ProtoReflect.Descriptor instead. 
-func (*NamedStringArray) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{44} -} - -func (x *NamedStringArray) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedStringArray) GetValue() *StringArray { - if x != nil { - return x.Value - } - return nil -} - -// Configuration details for a supported OAuth Flow -type OauthFlow struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AuthorizationUrl string `protobuf:"bytes,1,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` - TokenUrl string `protobuf:"bytes,2,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` - RefreshUrl string `protobuf:"bytes,3,opt,name=refresh_url,json=refreshUrl,proto3" json:"refresh_url,omitempty"` - Scopes *Strings `protobuf:"bytes,4,opt,name=scopes,proto3" json:"scopes,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,5,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *OauthFlow) Reset() { - *x = OauthFlow{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[45] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *OauthFlow) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OauthFlow) ProtoMessage() {} - -func (x *OauthFlow) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[45] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OauthFlow.ProtoReflect.Descriptor instead. -func (*OauthFlow) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{45} -} - -func (x *OauthFlow) GetAuthorizationUrl() string { - if x != nil { - return x.AuthorizationUrl - } - return "" -} - -func (x *OauthFlow) GetTokenUrl() string { - if x != nil { - return x.TokenUrl - } - return "" -} - -func (x *OauthFlow) GetRefreshUrl() string { - if x != nil { - return x.RefreshUrl - } - return "" -} - -func (x *OauthFlow) GetScopes() *Strings { - if x != nil { - return x.Scopes - } - return nil -} - -func (x *OauthFlow) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -// Allows configuration of the supported OAuth Flows. 
-type OauthFlows struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Implicit *OauthFlow `protobuf:"bytes,1,opt,name=implicit,proto3" json:"implicit,omitempty"` - Password *OauthFlow `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - ClientCredentials *OauthFlow `protobuf:"bytes,3,opt,name=client_credentials,json=clientCredentials,proto3" json:"client_credentials,omitempty"` - AuthorizationCode *OauthFlow `protobuf:"bytes,4,opt,name=authorization_code,json=authorizationCode,proto3" json:"authorization_code,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,5,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *OauthFlows) Reset() { - *x = OauthFlows{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[46] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *OauthFlows) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OauthFlows) ProtoMessage() {} - -func (x *OauthFlows) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[46] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OauthFlows.ProtoReflect.Descriptor instead. -func (*OauthFlows) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{46} -} - -func (x *OauthFlows) GetImplicit() *OauthFlow { - if x != nil { - return x.Implicit - } - return nil -} - -func (x *OauthFlows) GetPassword() *OauthFlow { - if x != nil { - return x.Password - } - return nil -} - -func (x *OauthFlows) GetClientCredentials() *OauthFlow { - if x != nil { - return x.ClientCredentials - } - return nil -} - -func (x *OauthFlows) GetAuthorizationCode() *OauthFlow { - if x != nil { - return x.AuthorizationCode - } - return nil -} - -func (x *OauthFlows) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -type Object struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *Object) Reset() { - *x = Object{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[47] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Object) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Object) ProtoMessage() {} - -func (x *Object) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[47] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Object.ProtoReflect.Descriptor instead. -func (*Object) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{47} -} - -func (x *Object) GetAdditionalProperties() []*NamedAny { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -// Describes a single API operation on a path. 
-type Operation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"` - Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` - Parameters []*ParameterOrReference `protobuf:"bytes,6,rep,name=parameters,proto3" json:"parameters,omitempty"` - RequestBody *RequestBodyOrReference `protobuf:"bytes,7,opt,name=request_body,json=requestBody,proto3" json:"request_body,omitempty"` - Responses *Responses `protobuf:"bytes,8,opt,name=responses,proto3" json:"responses,omitempty"` - Callbacks *CallbacksOrReferences `protobuf:"bytes,9,opt,name=callbacks,proto3" json:"callbacks,omitempty"` - Deprecated bool `protobuf:"varint,10,opt,name=deprecated,proto3" json:"deprecated,omitempty"` - Security []*SecurityRequirement `protobuf:"bytes,11,rep,name=security,proto3" json:"security,omitempty"` - Servers []*Server `protobuf:"bytes,12,rep,name=servers,proto3" json:"servers,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,13,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *Operation) Reset() { - *x = Operation{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[48] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Operation) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Operation) ProtoMessage() {} - -func (x *Operation) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[48] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Operation.ProtoReflect.Descriptor instead. 
-func (*Operation) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{48} -} - -func (x *Operation) GetTags() []string { - if x != nil { - return x.Tags - } - return nil -} - -func (x *Operation) GetSummary() string { - if x != nil { - return x.Summary - } - return "" -} - -func (x *Operation) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Operation) GetExternalDocs() *ExternalDocs { - if x != nil { - return x.ExternalDocs - } - return nil -} - -func (x *Operation) GetOperationId() string { - if x != nil { - return x.OperationId - } - return "" -} - -func (x *Operation) GetParameters() []*ParameterOrReference { - if x != nil { - return x.Parameters - } - return nil -} - -func (x *Operation) GetRequestBody() *RequestBodyOrReference { - if x != nil { - return x.RequestBody - } - return nil -} - -func (x *Operation) GetResponses() *Responses { - if x != nil { - return x.Responses - } - return nil -} - -func (x *Operation) GetCallbacks() *CallbacksOrReferences { - if x != nil { - return x.Callbacks - } - return nil -} - -func (x *Operation) GetDeprecated() bool { - if x != nil { - return x.Deprecated - } - return false -} - -func (x *Operation) GetSecurity() []*SecurityRequirement { - if x != nil { - return x.Security - } - return nil -} - -func (x *Operation) GetServers() []*Server { - if x != nil { - return x.Servers - } - return nil -} - -func (x *Operation) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -// Describes a single operation parameter. A unique parameter is defined by a combination of a name and location. -type Parameter struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` - Deprecated bool `protobuf:"varint,5,opt,name=deprecated,proto3" json:"deprecated,omitempty"` - AllowEmptyValue bool `protobuf:"varint,6,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` - Style string `protobuf:"bytes,7,opt,name=style,proto3" json:"style,omitempty"` - Explode bool `protobuf:"varint,8,opt,name=explode,proto3" json:"explode,omitempty"` - AllowReserved bool `protobuf:"varint,9,opt,name=allow_reserved,json=allowReserved,proto3" json:"allow_reserved,omitempty"` - Schema *SchemaOrReference `protobuf:"bytes,10,opt,name=schema,proto3" json:"schema,omitempty"` - Example *Any `protobuf:"bytes,11,opt,name=example,proto3" json:"example,omitempty"` - Examples *ExamplesOrReferences `protobuf:"bytes,12,opt,name=examples,proto3" json:"examples,omitempty"` - Content *MediaTypes `protobuf:"bytes,13,opt,name=content,proto3" json:"content,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,14,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *Parameter) Reset() { - *x = Parameter{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[49] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Parameter) String() string { - return protoimpl.X.MessageStringOf(x) -} - 
-func (*Parameter) ProtoMessage() {} - -func (x *Parameter) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[49] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Parameter.ProtoReflect.Descriptor instead. -func (*Parameter) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{49} -} - -func (x *Parameter) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Parameter) GetIn() string { - if x != nil { - return x.In - } - return "" -} - -func (x *Parameter) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Parameter) GetRequired() bool { - if x != nil { - return x.Required - } - return false -} - -func (x *Parameter) GetDeprecated() bool { - if x != nil { - return x.Deprecated - } - return false -} - -func (x *Parameter) GetAllowEmptyValue() bool { - if x != nil { - return x.AllowEmptyValue - } - return false -} - -func (x *Parameter) GetStyle() string { - if x != nil { - return x.Style - } - return "" -} - -func (x *Parameter) GetExplode() bool { - if x != nil { - return x.Explode - } - return false -} - -func (x *Parameter) GetAllowReserved() bool { - if x != nil { - return x.AllowReserved - } - return false -} - -func (x *Parameter) GetSchema() *SchemaOrReference { - if x != nil { - return x.Schema - } - return nil -} - -func (x *Parameter) GetExample() *Any { - if x != nil { - return x.Example - } - return nil -} - -func (x *Parameter) GetExamples() *ExamplesOrReferences { - if x != nil { - return x.Examples - } - return nil -} - -func (x *Parameter) GetContent() *MediaTypes { - if x != nil { - return x.Content - } - return nil -} - -func (x *Parameter) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -type ParameterOrReference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *ParameterOrReference_Parameter - // *ParameterOrReference_Reference - Oneof isParameterOrReference_Oneof `protobuf_oneof:"oneof"` -} - -func (x *ParameterOrReference) Reset() { - *x = ParameterOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[50] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ParameterOrReference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ParameterOrReference) ProtoMessage() {} - -func (x *ParameterOrReference) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[50] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ParameterOrReference.ProtoReflect.Descriptor instead. 
-func (*ParameterOrReference) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{50} -} - -func (m *ParameterOrReference) GetOneof() isParameterOrReference_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *ParameterOrReference) GetParameter() *Parameter { - if x, ok := x.GetOneof().(*ParameterOrReference_Parameter); ok { - return x.Parameter - } - return nil -} - -func (x *ParameterOrReference) GetReference() *Reference { - if x, ok := x.GetOneof().(*ParameterOrReference_Reference); ok { - return x.Reference - } - return nil -} - -type isParameterOrReference_Oneof interface { - isParameterOrReference_Oneof() -} - -type ParameterOrReference_Parameter struct { - Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,proto3,oneof"` -} - -type ParameterOrReference_Reference struct { - Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` -} - -func (*ParameterOrReference_Parameter) isParameterOrReference_Oneof() {} - -func (*ParameterOrReference_Reference) isParameterOrReference_Oneof() {} - -type ParametersOrReferences struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedParameterOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *ParametersOrReferences) Reset() { - *x = ParametersOrReferences{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[51] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ParametersOrReferences) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ParametersOrReferences) ProtoMessage() {} - -func (x *ParametersOrReferences) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[51] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ParametersOrReferences.ProtoReflect.Descriptor instead. -func (*ParametersOrReferences) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{51} -} - -func (x *ParametersOrReferences) GetAdditionalProperties() []*NamedParameterOrReference { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -// Describes the operations available on a single path. A Path Item MAY be empty, due to ACL constraints. The path itself is still exposed to the documentation viewer but they will not know which operations and parameters are available. 
-type PathItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` - Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - Get *Operation `protobuf:"bytes,4,opt,name=get,proto3" json:"get,omitempty"` - Put *Operation `protobuf:"bytes,5,opt,name=put,proto3" json:"put,omitempty"` - Post *Operation `protobuf:"bytes,6,opt,name=post,proto3" json:"post,omitempty"` - Delete *Operation `protobuf:"bytes,7,opt,name=delete,proto3" json:"delete,omitempty"` - Options *Operation `protobuf:"bytes,8,opt,name=options,proto3" json:"options,omitempty"` - Head *Operation `protobuf:"bytes,9,opt,name=head,proto3" json:"head,omitempty"` - Patch *Operation `protobuf:"bytes,10,opt,name=patch,proto3" json:"patch,omitempty"` - Trace *Operation `protobuf:"bytes,11,opt,name=trace,proto3" json:"trace,omitempty"` - Servers []*Server `protobuf:"bytes,12,rep,name=servers,proto3" json:"servers,omitempty"` - Parameters []*ParameterOrReference `protobuf:"bytes,13,rep,name=parameters,proto3" json:"parameters,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,14,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *PathItem) Reset() { - *x = PathItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[52] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PathItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PathItem) ProtoMessage() {} - -func (x *PathItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[52] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PathItem.ProtoReflect.Descriptor instead. 
-func (*PathItem) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{52} -} - -func (x *PathItem) GetXRef() string { - if x != nil { - return x.XRef - } - return "" -} - -func (x *PathItem) GetSummary() string { - if x != nil { - return x.Summary - } - return "" -} - -func (x *PathItem) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *PathItem) GetGet() *Operation { - if x != nil { - return x.Get - } - return nil -} - -func (x *PathItem) GetPut() *Operation { - if x != nil { - return x.Put - } - return nil -} - -func (x *PathItem) GetPost() *Operation { - if x != nil { - return x.Post - } - return nil -} - -func (x *PathItem) GetDelete() *Operation { - if x != nil { - return x.Delete - } - return nil -} - -func (x *PathItem) GetOptions() *Operation { - if x != nil { - return x.Options - } - return nil -} - -func (x *PathItem) GetHead() *Operation { - if x != nil { - return x.Head - } - return nil -} - -func (x *PathItem) GetPatch() *Operation { - if x != nil { - return x.Patch - } - return nil -} - -func (x *PathItem) GetTrace() *Operation { - if x != nil { - return x.Trace - } - return nil -} - -func (x *PathItem) GetServers() []*Server { - if x != nil { - return x.Servers - } - return nil -} - -func (x *PathItem) GetParameters() []*ParameterOrReference { - if x != nil { - return x.Parameters - } - return nil -} - -func (x *PathItem) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -// Holds the relative paths to the individual endpoints and their operations. The path is appended to the URL from the `Server Object` in order to construct the full URL. The Paths MAY be empty, due to ACL constraints. -type Paths struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Path []*NamedPathItem `protobuf:"bytes,1,rep,name=path,proto3" json:"path,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,2,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *Paths) Reset() { - *x = Paths{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[53] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Paths) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Paths) ProtoMessage() {} - -func (x *Paths) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[53] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Paths.ProtoReflect.Descriptor instead. 
-func (*Paths) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{53} -} - -func (x *Paths) GetPath() []*NamedPathItem { - if x != nil { - return x.Path - } - return nil -} - -func (x *Paths) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -type Properties struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedSchemaOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *Properties) Reset() { - *x = Properties{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[54] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Properties) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Properties) ProtoMessage() {} - -func (x *Properties) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[54] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Properties.ProtoReflect.Descriptor instead. -func (*Properties) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{54} -} - -func (x *Properties) GetAdditionalProperties() []*NamedSchemaOrReference { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -// A simple object to allow referencing other components in the specification, internally and externally. The Reference Object is defined by JSON Reference and follows the same structure, behavior and rules. For this specification, reference resolution is accomplished as defined by the JSON Reference specification and not by the JSON Schema specification. -type Reference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` - Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` -} - -func (x *Reference) Reset() { - *x = Reference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[55] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Reference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Reference) ProtoMessage() {} - -func (x *Reference) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[55] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Reference.ProtoReflect.Descriptor instead. 
-func (*Reference) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{55} -} - -func (x *Reference) GetXRef() string { - if x != nil { - return x.XRef - } - return "" -} - -func (x *Reference) GetSummary() string { - if x != nil { - return x.Summary - } - return "" -} - -func (x *Reference) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -type RequestBodiesOrReferences struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedRequestBodyOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *RequestBodiesOrReferences) Reset() { - *x = RequestBodiesOrReferences{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[56] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RequestBodiesOrReferences) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RequestBodiesOrReferences) ProtoMessage() {} - -func (x *RequestBodiesOrReferences) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[56] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RequestBodiesOrReferences.ProtoReflect.Descriptor instead. -func (*RequestBodiesOrReferences) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{56} -} - -func (x *RequestBodiesOrReferences) GetAdditionalProperties() []*NamedRequestBodyOrReference { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -// Describes a single request body. -type RequestBody struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - Content *MediaTypes `protobuf:"bytes,2,opt,name=content,proto3" json:"content,omitempty"` - Required bool `protobuf:"varint,3,opt,name=required,proto3" json:"required,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,4,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *RequestBody) Reset() { - *x = RequestBody{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[57] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RequestBody) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RequestBody) ProtoMessage() {} - -func (x *RequestBody) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[57] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RequestBody.ProtoReflect.Descriptor instead. 
-func (*RequestBody) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{57} -} - -func (x *RequestBody) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *RequestBody) GetContent() *MediaTypes { - if x != nil { - return x.Content - } - return nil -} - -func (x *RequestBody) GetRequired() bool { - if x != nil { - return x.Required - } - return false -} - -func (x *RequestBody) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -type RequestBodyOrReference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *RequestBodyOrReference_RequestBody - // *RequestBodyOrReference_Reference - Oneof isRequestBodyOrReference_Oneof `protobuf_oneof:"oneof"` -} - -func (x *RequestBodyOrReference) Reset() { - *x = RequestBodyOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[58] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RequestBodyOrReference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RequestBodyOrReference) ProtoMessage() {} - -func (x *RequestBodyOrReference) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[58] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RequestBodyOrReference.ProtoReflect.Descriptor instead. -func (*RequestBodyOrReference) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{58} -} - -func (m *RequestBodyOrReference) GetOneof() isRequestBodyOrReference_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *RequestBodyOrReference) GetRequestBody() *RequestBody { - if x, ok := x.GetOneof().(*RequestBodyOrReference_RequestBody); ok { - return x.RequestBody - } - return nil -} - -func (x *RequestBodyOrReference) GetReference() *Reference { - if x, ok := x.GetOneof().(*RequestBodyOrReference_Reference); ok { - return x.Reference - } - return nil -} - -type isRequestBodyOrReference_Oneof interface { - isRequestBodyOrReference_Oneof() -} - -type RequestBodyOrReference_RequestBody struct { - RequestBody *RequestBody `protobuf:"bytes,1,opt,name=request_body,json=requestBody,proto3,oneof"` -} - -type RequestBodyOrReference_Reference struct { - Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` -} - -func (*RequestBodyOrReference_RequestBody) isRequestBodyOrReference_Oneof() {} - -func (*RequestBodyOrReference_Reference) isRequestBodyOrReference_Oneof() {} - -// Describes a single response from an API Operation, including design-time, static `links` to operations based on the response. 
-type Response struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - Headers *HeadersOrReferences `protobuf:"bytes,2,opt,name=headers,proto3" json:"headers,omitempty"` - Content *MediaTypes `protobuf:"bytes,3,opt,name=content,proto3" json:"content,omitempty"` - Links *LinksOrReferences `protobuf:"bytes,4,opt,name=links,proto3" json:"links,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,5,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *Response) Reset() { - *x = Response{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[59] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Response) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Response) ProtoMessage() {} - -func (x *Response) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[59] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Response.ProtoReflect.Descriptor instead. -func (*Response) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{59} -} - -func (x *Response) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Response) GetHeaders() *HeadersOrReferences { - if x != nil { - return x.Headers - } - return nil -} - -func (x *Response) GetContent() *MediaTypes { - if x != nil { - return x.Content - } - return nil -} - -func (x *Response) GetLinks() *LinksOrReferences { - if x != nil { - return x.Links - } - return nil -} - -func (x *Response) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -type ResponseOrReference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *ResponseOrReference_Response - // *ResponseOrReference_Reference - Oneof isResponseOrReference_Oneof `protobuf_oneof:"oneof"` -} - -func (x *ResponseOrReference) Reset() { - *x = ResponseOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[60] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResponseOrReference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResponseOrReference) ProtoMessage() {} - -func (x *ResponseOrReference) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[60] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResponseOrReference.ProtoReflect.Descriptor instead. 
-func (*ResponseOrReference) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{60} -} - -func (m *ResponseOrReference) GetOneof() isResponseOrReference_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *ResponseOrReference) GetResponse() *Response { - if x, ok := x.GetOneof().(*ResponseOrReference_Response); ok { - return x.Response - } - return nil -} - -func (x *ResponseOrReference) GetReference() *Reference { - if x, ok := x.GetOneof().(*ResponseOrReference_Reference); ok { - return x.Reference - } - return nil -} - -type isResponseOrReference_Oneof interface { - isResponseOrReference_Oneof() -} - -type ResponseOrReference_Response struct { - Response *Response `protobuf:"bytes,1,opt,name=response,proto3,oneof"` -} - -type ResponseOrReference_Reference struct { - Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` -} - -func (*ResponseOrReference_Response) isResponseOrReference_Oneof() {} - -func (*ResponseOrReference_Reference) isResponseOrReference_Oneof() {} - -// A container for the expected responses of an operation. The container maps a HTTP response code to the expected response. The documentation is not necessarily expected to cover all possible HTTP response codes because they may not be known in advance. However, documentation is expected to cover a successful operation response and any known errors. The `default` MAY be used as a default response object for all HTTP codes that are not covered individually by the specification. The `Responses Object` MUST contain at least one response code, and it SHOULD be the response for a successful operation call. -type Responses struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Default *ResponseOrReference `protobuf:"bytes,1,opt,name=default,proto3" json:"default,omitempty"` - ResponseOrReference []*NamedResponseOrReference `protobuf:"bytes,2,rep,name=response_or_reference,json=responseOrReference,proto3" json:"response_or_reference,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,3,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *Responses) Reset() { - *x = Responses{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[61] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Responses) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Responses) ProtoMessage() {} - -func (x *Responses) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[61] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Responses.ProtoReflect.Descriptor instead. 
-func (*Responses) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{61} -} - -func (x *Responses) GetDefault() *ResponseOrReference { - if x != nil { - return x.Default - } - return nil -} - -func (x *Responses) GetResponseOrReference() []*NamedResponseOrReference { - if x != nil { - return x.ResponseOrReference - } - return nil -} - -func (x *Responses) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -type ResponsesOrReferences struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedResponseOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *ResponsesOrReferences) Reset() { - *x = ResponsesOrReferences{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[62] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResponsesOrReferences) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResponsesOrReferences) ProtoMessage() {} - -func (x *ResponsesOrReferences) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[62] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResponsesOrReferences.ProtoReflect.Descriptor instead. -func (*ResponsesOrReferences) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{62} -} - -func (x *ResponsesOrReferences) GetAdditionalProperties() []*NamedResponseOrReference { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -// The Schema Object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. This object is an extended subset of the JSON Schema Specification Wright Draft 00. For more information about the properties, see JSON Schema Core and JSON Schema Validation. Unless stated otherwise, the property definitions follow the JSON Schema. 
-type Schema struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Nullable bool `protobuf:"varint,1,opt,name=nullable,proto3" json:"nullable,omitempty"` - Discriminator *Discriminator `protobuf:"bytes,2,opt,name=discriminator,proto3" json:"discriminator,omitempty"` - ReadOnly bool `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` - WriteOnly bool `protobuf:"varint,4,opt,name=write_only,json=writeOnly,proto3" json:"write_only,omitempty"` - Xml *Xml `protobuf:"bytes,5,opt,name=xml,proto3" json:"xml,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,6,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - Example *Any `protobuf:"bytes,7,opt,name=example,proto3" json:"example,omitempty"` - Deprecated bool `protobuf:"varint,8,opt,name=deprecated,proto3" json:"deprecated,omitempty"` - Title string `protobuf:"bytes,9,opt,name=title,proto3" json:"title,omitempty"` - MultipleOf float64 `protobuf:"fixed64,10,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - MaxProperties int64 `protobuf:"varint,21,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"` - MinProperties int64 `protobuf:"varint,22,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"` - Required []string `protobuf:"bytes,23,rep,name=required,proto3" json:"required,omitempty"` - Enum []*Any `protobuf:"bytes,24,rep,name=enum,proto3" json:"enum,omitempty"` - Type string `protobuf:"bytes,25,opt,name=type,proto3" json:"type,omitempty"` - AllOf []*SchemaOrReference `protobuf:"bytes,26,rep,name=all_of,json=allOf,proto3" json:"all_of,omitempty"` - OneOf []*SchemaOrReference `protobuf:"bytes,27,rep,name=one_of,json=oneOf,proto3" json:"one_of,omitempty"` - AnyOf []*SchemaOrReference `protobuf:"bytes,28,rep,name=any_of,json=anyOf,proto3" json:"any_of,omitempty"` - Not *Schema `protobuf:"bytes,29,opt,name=not,proto3" json:"not,omitempty"` - Items *ItemsItem `protobuf:"bytes,30,opt,name=items,proto3" json:"items,omitempty"` - Properties *Properties `protobuf:"bytes,31,opt,name=properties,proto3" json:"properties,omitempty"` - AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,32,opt,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - Default *DefaultType 
`protobuf:"bytes,33,opt,name=default,proto3" json:"default,omitempty"` - Description string `protobuf:"bytes,34,opt,name=description,proto3" json:"description,omitempty"` - Format string `protobuf:"bytes,35,opt,name=format,proto3" json:"format,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,36,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *Schema) Reset() { - *x = Schema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[63] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Schema) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Schema) ProtoMessage() {} - -func (x *Schema) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[63] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Schema.ProtoReflect.Descriptor instead. -func (*Schema) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{63} -} - -func (x *Schema) GetNullable() bool { - if x != nil { - return x.Nullable - } - return false -} - -func (x *Schema) GetDiscriminator() *Discriminator { - if x != nil { - return x.Discriminator - } - return nil -} - -func (x *Schema) GetReadOnly() bool { - if x != nil { - return x.ReadOnly - } - return false -} - -func (x *Schema) GetWriteOnly() bool { - if x != nil { - return x.WriteOnly - } - return false -} - -func (x *Schema) GetXml() *Xml { - if x != nil { - return x.Xml - } - return nil -} - -func (x *Schema) GetExternalDocs() *ExternalDocs { - if x != nil { - return x.ExternalDocs - } - return nil -} - -func (x *Schema) GetExample() *Any { - if x != nil { - return x.Example - } - return nil -} - -func (x *Schema) GetDeprecated() bool { - if x != nil { - return x.Deprecated - } - return false -} - -func (x *Schema) GetTitle() string { - if x != nil { - return x.Title - } - return "" -} - -func (x *Schema) GetMultipleOf() float64 { - if x != nil { - return x.MultipleOf - } - return 0 -} - -func (x *Schema) GetMaximum() float64 { - if x != nil { - return x.Maximum - } - return 0 -} - -func (x *Schema) GetExclusiveMaximum() bool { - if x != nil { - return x.ExclusiveMaximum - } - return false -} - -func (x *Schema) GetMinimum() float64 { - if x != nil { - return x.Minimum - } - return 0 -} - -func (x *Schema) GetExclusiveMinimum() bool { - if x != nil { - return x.ExclusiveMinimum - } - return false -} - -func (x *Schema) GetMaxLength() int64 { - if x != nil { - return x.MaxLength - } - return 0 -} - -func (x *Schema) GetMinLength() int64 { - if x != nil { - return x.MinLength - } - return 0 -} - -func (x *Schema) GetPattern() string { - if x != nil { - return x.Pattern - } - return "" -} - -func (x *Schema) GetMaxItems() int64 { - if x != nil { - return x.MaxItems - } - return 0 -} - -func (x *Schema) GetMinItems() int64 { - if x != nil { - return x.MinItems - } - return 0 -} - -func (x *Schema) GetUniqueItems() bool { - if x != nil { - return x.UniqueItems - } - return false -} - -func (x *Schema) GetMaxProperties() int64 { - if x != nil { - return x.MaxProperties - } - return 0 -} - -func (x *Schema) GetMinProperties() int64 { - if x != nil { - return x.MinProperties - } - return 0 -} - -func (x *Schema) GetRequired() []string { - if x != nil { 
- return x.Required - } - return nil -} - -func (x *Schema) GetEnum() []*Any { - if x != nil { - return x.Enum - } - return nil -} - -func (x *Schema) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *Schema) GetAllOf() []*SchemaOrReference { - if x != nil { - return x.AllOf - } - return nil -} - -func (x *Schema) GetOneOf() []*SchemaOrReference { - if x != nil { - return x.OneOf - } - return nil -} - -func (x *Schema) GetAnyOf() []*SchemaOrReference { - if x != nil { - return x.AnyOf - } - return nil -} - -func (x *Schema) GetNot() *Schema { - if x != nil { - return x.Not - } - return nil -} - -func (x *Schema) GetItems() *ItemsItem { - if x != nil { - return x.Items - } - return nil -} - -func (x *Schema) GetProperties() *Properties { - if x != nil { - return x.Properties - } - return nil -} - -func (x *Schema) GetAdditionalProperties() *AdditionalPropertiesItem { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -func (x *Schema) GetDefault() *DefaultType { - if x != nil { - return x.Default - } - return nil -} - -func (x *Schema) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Schema) GetFormat() string { - if x != nil { - return x.Format - } - return "" -} - -func (x *Schema) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -type SchemaOrReference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *SchemaOrReference_Schema - // *SchemaOrReference_Reference - Oneof isSchemaOrReference_Oneof `protobuf_oneof:"oneof"` -} - -func (x *SchemaOrReference) Reset() { - *x = SchemaOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[64] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SchemaOrReference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SchemaOrReference) ProtoMessage() {} - -func (x *SchemaOrReference) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[64] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SchemaOrReference.ProtoReflect.Descriptor instead. 
-func (*SchemaOrReference) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{64} -} - -func (m *SchemaOrReference) GetOneof() isSchemaOrReference_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *SchemaOrReference) GetSchema() *Schema { - if x, ok := x.GetOneof().(*SchemaOrReference_Schema); ok { - return x.Schema - } - return nil -} - -func (x *SchemaOrReference) GetReference() *Reference { - if x, ok := x.GetOneof().(*SchemaOrReference_Reference); ok { - return x.Reference - } - return nil -} - -type isSchemaOrReference_Oneof interface { - isSchemaOrReference_Oneof() -} - -type SchemaOrReference_Schema struct { - Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` -} - -type SchemaOrReference_Reference struct { - Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` -} - -func (*SchemaOrReference_Schema) isSchemaOrReference_Oneof() {} - -func (*SchemaOrReference_Reference) isSchemaOrReference_Oneof() {} - -type SchemasOrReferences struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedSchemaOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *SchemasOrReferences) Reset() { - *x = SchemasOrReferences{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[65] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SchemasOrReferences) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SchemasOrReferences) ProtoMessage() {} - -func (x *SchemasOrReferences) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[65] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SchemasOrReferences.ProtoReflect.Descriptor instead. -func (*SchemasOrReferences) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{65} -} - -func (x *SchemasOrReferences) GetAdditionalProperties() []*NamedSchemaOrReference { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -// Lists the required security schemes to execute this operation. The name used for each property MUST correspond to a security scheme declared in the Security Schemes under the Components Object. Security Requirement Objects that contain multiple schemes require that all schemes MUST be satisfied for a request to be authorized. This enables support for scenarios where multiple query parameters or HTTP headers are required to convey security information. When a list of Security Requirement Objects is defined on the OpenAPI Object or Operation Object, only one of the Security Requirement Objects in the list needs to be satisfied to authorize the request. 
-type SecurityRequirement struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *SecurityRequirement) Reset() { - *x = SecurityRequirement{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[66] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SecurityRequirement) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SecurityRequirement) ProtoMessage() {} - -func (x *SecurityRequirement) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[66] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SecurityRequirement.ProtoReflect.Descriptor instead. -func (*SecurityRequirement) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{66} -} - -func (x *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -// Defines a security scheme that can be used by the operations. Supported schemes are HTTP authentication, an API key (either as a header, a cookie parameter or as a query parameter), mutual TLS (use of a client certificate), OAuth2's common flows (implicit, password, application and access code) as defined in RFC6749, and OpenID Connect. Please note that currently (2019) the implicit flow is about to be deprecated OAuth 2.0 Security Best Current Practice. Recommended for most use case is Authorization Code Grant flow with PKCE. 
-type SecurityScheme struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - In string `protobuf:"bytes,4,opt,name=in,proto3" json:"in,omitempty"` - Scheme string `protobuf:"bytes,5,opt,name=scheme,proto3" json:"scheme,omitempty"` - BearerFormat string `protobuf:"bytes,6,opt,name=bearer_format,json=bearerFormat,proto3" json:"bearer_format,omitempty"` - Flows *OauthFlows `protobuf:"bytes,7,opt,name=flows,proto3" json:"flows,omitempty"` - OpenIdConnectUrl string `protobuf:"bytes,8,opt,name=open_id_connect_url,json=openIdConnectUrl,proto3" json:"open_id_connect_url,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,9,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *SecurityScheme) Reset() { - *x = SecurityScheme{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[67] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SecurityScheme) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SecurityScheme) ProtoMessage() {} - -func (x *SecurityScheme) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[67] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SecurityScheme.ProtoReflect.Descriptor instead. 
-func (*SecurityScheme) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{67} -} - -func (x *SecurityScheme) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *SecurityScheme) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *SecurityScheme) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *SecurityScheme) GetIn() string { - if x != nil { - return x.In - } - return "" -} - -func (x *SecurityScheme) GetScheme() string { - if x != nil { - return x.Scheme - } - return "" -} - -func (x *SecurityScheme) GetBearerFormat() string { - if x != nil { - return x.BearerFormat - } - return "" -} - -func (x *SecurityScheme) GetFlows() *OauthFlows { - if x != nil { - return x.Flows - } - return nil -} - -func (x *SecurityScheme) GetOpenIdConnectUrl() string { - if x != nil { - return x.OpenIdConnectUrl - } - return "" -} - -func (x *SecurityScheme) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -type SecuritySchemeOrReference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *SecuritySchemeOrReference_SecurityScheme - // *SecuritySchemeOrReference_Reference - Oneof isSecuritySchemeOrReference_Oneof `protobuf_oneof:"oneof"` -} - -func (x *SecuritySchemeOrReference) Reset() { - *x = SecuritySchemeOrReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[68] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SecuritySchemeOrReference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SecuritySchemeOrReference) ProtoMessage() {} - -func (x *SecuritySchemeOrReference) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[68] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SecuritySchemeOrReference.ProtoReflect.Descriptor instead. 
-func (*SecuritySchemeOrReference) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{68} -} - -func (m *SecuritySchemeOrReference) GetOneof() isSecuritySchemeOrReference_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *SecuritySchemeOrReference) GetSecurityScheme() *SecurityScheme { - if x, ok := x.GetOneof().(*SecuritySchemeOrReference_SecurityScheme); ok { - return x.SecurityScheme - } - return nil -} - -func (x *SecuritySchemeOrReference) GetReference() *Reference { - if x, ok := x.GetOneof().(*SecuritySchemeOrReference_Reference); ok { - return x.Reference - } - return nil -} - -type isSecuritySchemeOrReference_Oneof interface { - isSecuritySchemeOrReference_Oneof() -} - -type SecuritySchemeOrReference_SecurityScheme struct { - SecurityScheme *SecurityScheme `protobuf:"bytes,1,opt,name=security_scheme,json=securityScheme,proto3,oneof"` -} - -type SecuritySchemeOrReference_Reference struct { - Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` -} - -func (*SecuritySchemeOrReference_SecurityScheme) isSecuritySchemeOrReference_Oneof() {} - -func (*SecuritySchemeOrReference_Reference) isSecuritySchemeOrReference_Oneof() {} - -type SecuritySchemesOrReferences struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedSecuritySchemeOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *SecuritySchemesOrReferences) Reset() { - *x = SecuritySchemesOrReferences{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[69] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SecuritySchemesOrReferences) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SecuritySchemesOrReferences) ProtoMessage() {} - -func (x *SecuritySchemesOrReferences) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[69] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SecuritySchemesOrReferences.ProtoReflect.Descriptor instead. -func (*SecuritySchemesOrReferences) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{69} -} - -func (x *SecuritySchemesOrReferences) GetAdditionalProperties() []*NamedSecuritySchemeOrReference { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -// An object representing a Server. 
-type Server struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - Variables *ServerVariables `protobuf:"bytes,3,opt,name=variables,proto3" json:"variables,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,4,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *Server) Reset() { - *x = Server{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[70] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Server) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Server) ProtoMessage() {} - -func (x *Server) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[70] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Server.ProtoReflect.Descriptor instead. -func (*Server) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{70} -} - -func (x *Server) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *Server) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Server) GetVariables() *ServerVariables { - if x != nil { - return x.Variables - } - return nil -} - -func (x *Server) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -// An object representing a Server Variable for server URL template substitution. -type ServerVariable struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Enum []string `protobuf:"bytes,1,rep,name=enum,proto3" json:"enum,omitempty"` - Default string `protobuf:"bytes,2,opt,name=default,proto3" json:"default,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,4,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *ServerVariable) Reset() { - *x = ServerVariable{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[71] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServerVariable) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServerVariable) ProtoMessage() {} - -func (x *ServerVariable) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[71] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServerVariable.ProtoReflect.Descriptor instead. 
-func (*ServerVariable) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{71} -} - -func (x *ServerVariable) GetEnum() []string { - if x != nil { - return x.Enum - } - return nil -} - -func (x *ServerVariable) GetDefault() string { - if x != nil { - return x.Default - } - return "" -} - -func (x *ServerVariable) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *ServerVariable) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -type ServerVariables struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedServerVariable `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *ServerVariables) Reset() { - *x = ServerVariables{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[72] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServerVariables) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServerVariables) ProtoMessage() {} - -func (x *ServerVariables) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[72] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServerVariables.ProtoReflect.Descriptor instead. -func (*ServerVariables) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{72} -} - -func (x *ServerVariables) GetAdditionalProperties() []*NamedServerVariable { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -// Any property starting with x- is valid. -type SpecificationExtension struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *SpecificationExtension_Number - // *SpecificationExtension_Boolean - // *SpecificationExtension_String_ - Oneof isSpecificationExtension_Oneof `protobuf_oneof:"oneof"` -} - -func (x *SpecificationExtension) Reset() { - *x = SpecificationExtension{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[73] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SpecificationExtension) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SpecificationExtension) ProtoMessage() {} - -func (x *SpecificationExtension) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[73] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SpecificationExtension.ProtoReflect.Descriptor instead. 
-func (*SpecificationExtension) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{73} -} - -func (m *SpecificationExtension) GetOneof() isSpecificationExtension_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *SpecificationExtension) GetNumber() float64 { - if x, ok := x.GetOneof().(*SpecificationExtension_Number); ok { - return x.Number - } - return 0 -} - -func (x *SpecificationExtension) GetBoolean() bool { - if x, ok := x.GetOneof().(*SpecificationExtension_Boolean); ok { - return x.Boolean - } - return false -} - -func (x *SpecificationExtension) GetString_() string { - if x, ok := x.GetOneof().(*SpecificationExtension_String_); ok { - return x.String_ - } - return "" -} - -type isSpecificationExtension_Oneof interface { - isSpecificationExtension_Oneof() -} - -type SpecificationExtension_Number struct { - Number float64 `protobuf:"fixed64,1,opt,name=number,proto3,oneof"` -} - -type SpecificationExtension_Boolean struct { - Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"` -} - -type SpecificationExtension_String_ struct { - String_ string `protobuf:"bytes,3,opt,name=string,proto3,oneof"` -} - -func (*SpecificationExtension_Number) isSpecificationExtension_Oneof() {} - -func (*SpecificationExtension_Boolean) isSpecificationExtension_Oneof() {} - -func (*SpecificationExtension_String_) isSpecificationExtension_Oneof() {} - -type StringArray struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` -} - -func (x *StringArray) Reset() { - *x = StringArray{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[74] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StringArray) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StringArray) ProtoMessage() {} - -func (x *StringArray) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[74] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StringArray.ProtoReflect.Descriptor instead. 
-func (*StringArray) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{74} -} - -func (x *StringArray) GetValue() []string { - if x != nil { - return x.Value - } - return nil -} - -type Strings struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *Strings) Reset() { - *x = Strings{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[75] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Strings) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Strings) ProtoMessage() {} - -func (x *Strings) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[75] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Strings.ProtoReflect.Descriptor instead. -func (*Strings) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{75} -} - -func (x *Strings) GetAdditionalProperties() []*NamedString { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -// Adds metadata to a single tag that is used by the Operation Object. It is not mandatory to have a Tag Object per tag defined in the Operation Object instances. -type Tag struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,4,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *Tag) Reset() { - *x = Tag{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[76] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Tag) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Tag) ProtoMessage() {} - -func (x *Tag) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[76] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Tag.ProtoReflect.Descriptor instead. 
-func (*Tag) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{76} -} - -func (x *Tag) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Tag) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Tag) GetExternalDocs() *ExternalDocs { - if x != nil { - return x.ExternalDocs - } - return nil -} - -func (x *Tag) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -// A metadata object that allows for more fine-tuned XML model definitions. When using arrays, XML element names are *not* inferred (for singular/plural forms) and the `name` property SHOULD be used to add that information. See examples for expected behavior. -type Xml struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` - Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` - Attribute bool `protobuf:"varint,4,opt,name=attribute,proto3" json:"attribute,omitempty"` - Wrapped bool `protobuf:"varint,5,opt,name=wrapped,proto3" json:"wrapped,omitempty"` - SpecificationExtension []*NamedAny `protobuf:"bytes,6,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` -} - -func (x *Xml) Reset() { - *x = Xml{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[77] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Xml) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Xml) ProtoMessage() {} - -func (x *Xml) ProtoReflect() protoreflect.Message { - mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[77] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Xml.ProtoReflect.Descriptor instead. 
-func (*Xml) Descriptor() ([]byte, []int) { - return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{77} -} - -func (x *Xml) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Xml) GetNamespace() string { - if x != nil { - return x.Namespace - } - return "" -} - -func (x *Xml) GetPrefix() string { - if x != nil { - return x.Prefix - } - return "" -} - -func (x *Xml) GetAttribute() bool { - if x != nil { - return x.Attribute - } - return false -} - -func (x *Xml) GetWrapped() bool { - if x != nil { - return x.Wrapped - } - return false -} - -func (x *Xml) GetSpecificationExtension() []*NamedAny { - if x != nil { - return x.SpecificationExtension - } - return nil -} - -var File_openapiv3_OpenAPIv3_proto protoreflect.FileDescriptor - -var file_openapiv3_OpenAPIv3_proto_rawDesc = []byte{ - 0x0a, 0x19, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x4f, 0x70, 0x65, 0x6e, - 0x41, 0x50, 0x49, 0x76, 0x33, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0x90, 0x01, 0x0a, 0x18, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, - 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, - 0x4f, 0x0a, 0x13, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x66, - 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x11, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, - 0x12, 0x1a, 0x0a, 0x07, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x08, 0x48, 0x00, 0x52, 0x07, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x42, 0x07, 0x0a, 0x05, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x45, 0x0a, 0x03, 0x41, 0x6e, 0x79, 0x12, 0x2a, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, - 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x22, 0x79, 0x0a, 0x0f, - 0x41, 0x6e, 0x79, 0x4f, 0x72, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x23, 0x0a, 0x03, 0x61, 0x6e, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, - 0x03, 0x61, 0x6e, 0x79, 0x12, 0x38, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x48, 0x00, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x07, - 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x88, 0x01, 0x0a, 0x08, 0x43, 0x61, 0x6c, 0x6c, - 0x62, 0x61, 0x63, 0x6b, 0x12, 0x2d, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 
0x2e, 0x76, 0x33, 0x2e, - 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x04, 0x70, - 0x61, 0x74, 0x68, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x22, 0x89, 0x01, 0x0a, 0x13, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x4f, - 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x63, 0x61, - 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, - 0x63, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x12, 0x35, - 0x0a, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, - 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, - 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x72, - 0x0a, 0x15, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, - 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x59, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, - 0x6b, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x14, 0x61, 0x64, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, - 0x65, 0x73, 0x22, 0xac, 0x05, 0x0a, 0x0a, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, - 0x73, 0x12, 0x39, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, - 0x63, 0x65, 0x73, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x3f, 0x0a, 0x09, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x21, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, 0x42, 0x0a, - 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, - 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, - 0x73, 0x12, 0x3c, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, - 
0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, - 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, - 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x12, - 0x4c, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x6f, 0x64, 0x69, 0x65, - 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, 0x64, 0x69, - 0x65, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x0d, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x12, 0x39, 0x0a, - 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, - 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x52, 0x0a, 0x10, 0x73, 0x65, 0x63, 0x75, - 0x72, 0x69, 0x74, 0x79, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, - 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x4f, - 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x0f, 0x73, 0x65, 0x63, - 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x33, 0x0a, 0x05, - 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x4f, 0x72, - 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, - 0x73, 0x12, 0x3f, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x73, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x33, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, - 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, - 0x6b, 0x73, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, - 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x22, 0x94, 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x75, 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, - 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 
0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, - 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x66, 0x0a, 0x0b, 0x44, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x12, 0x1a, 0x0a, 0x07, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x08, 0x48, 0x00, 0x52, 0x07, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x12, 0x18, 0x0a, - 0x06, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, - 0x06, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, - 0x22, 0xb2, 0x01, 0x0a, 0x0d, 0x44, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, - 0x6f, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x70, 0x65, - 0x72, 0x74, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x70, 0x69, - 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x07, 0x6d, - 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, - 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xc9, 0x03, 0x0a, 0x08, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, - 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x12, 0x24, 0x0a, 0x04, - 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, - 0x66, 0x6f, 0x12, 0x2c, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, - 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, - 0x12, 0x27, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x11, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, - 0x68, 0x73, 0x52, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x12, 0x36, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, - 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, - 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, - 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, - 0x73, 0x12, 0x3b, 0x0a, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, - 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 
0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x23, - 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x04, 0x74, - 0x61, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, - 0x64, 0x6f, 0x63, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, - 0x63, 0x73, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, - 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x22, 0x8e, 0x02, 0x0a, 0x08, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x21, - 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x39, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, - 0x63, 0x65, 0x73, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x74, 0x79, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x79, - 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x6c, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x70, 0x6c, 0x6f, 0x64, 0x65, 0x12, 0x25, 0x0a, 0x0e, - 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x64, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x22, 0x5b, 0x0a, 0x09, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x12, - 0x4e, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, - 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, - 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, - 0xe2, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 
0x18, 0x0a, 0x07, 0x73, - 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, - 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, - 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, - 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x85, 0x01, 0x0a, 0x12, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, - 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x65, - 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, - 0x65, 0x48, 0x00, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x35, 0x0a, 0x09, - 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x66, - 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, - 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x70, 0x0a, 0x14, - 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, - 0x6e, 0x63, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, - 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x4f, 0x72, 0x52, - 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x57, - 0x0a, 0x0a, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x15, - 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, - 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, - 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, - 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x0c, 0x45, 0x78, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, - 
0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, - 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x4d, 0x0a, 0x17, - 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, - 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x8a, 0x04, 0x0a, 0x06, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, - 0x69, 0x72, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, - 0x69, 0x72, 0x65, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6d, - 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x6c, 0x6f, 0x64, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x70, 0x6c, 0x6f, 0x64, 0x65, - 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x52, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x35, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, - 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x29, - 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x79, - 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x3c, 0x0a, 0x08, 0x65, 0x78, 0x61, - 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, - 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x08, 0x65, - 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x73, - 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, - 0x63, 0x69, 0x66, 0x69, 
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, - 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x81, 0x01, 0x0a, 0x11, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2c, - 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x09, - 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x66, - 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, - 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x6e, 0x0a, 0x13, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, - 0x63, 0x65, 0x73, 0x12, 0x57, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, - 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, - 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4f, 0x72, 0x52, 0x65, 0x66, - 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xc9, 0x02, 0x0a, - 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, - 0x10, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2d, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x2d, 0x0a, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, - 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 
0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, - 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, - 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x5a, 0x0a, 0x09, 0x49, 0x74, 0x65, 0x6d, - 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x4d, 0x0a, 0x13, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, - 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x52, 0x11, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, - 0x65, 0x6e, 0x63, 0x65, 0x22, 0x7e, 0x0a, 0x07, 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, - 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xe8, 0x02, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x23, 0x0a, - 0x0d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x66, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x3b, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, - 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x79, 0x4f, 0x72, 0x45, 0x78, 0x70, 0x72, - 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, - 0x72, 0x73, 0x12, 0x3e, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x6f, - 0x64, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x79, 0x4f, 0x72, 0x45, 0x78, 0x70, 0x72, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, - 0x64, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 
0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, - 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, - 0x79, 0x0a, 0x0f, 0x4c, 0x69, 0x6e, 0x6b, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, - 0x63, 0x65, 0x12, 0x26, 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x10, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, - 0x6e, 0x6b, 0x48, 0x00, 0x52, 0x04, 0x6c, 0x69, 0x6e, 0x6b, 0x12, 0x35, 0x0a, 0x09, 0x72, 0x65, - 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, - 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x6a, 0x0a, 0x11, 0x4c, 0x69, - 0x6e, 0x6b, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x12, - 0x55, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, - 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, - 0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, - 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, - 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xad, 0x02, 0x0a, 0x09, 0x4d, 0x65, 0x64, 0x69, 0x61, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x35, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, - 0x6e, 0x63, 0x65, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x29, 0x0a, 0x07, 0x65, - 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x65, - 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x3c, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, - 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x4f, 0x72, - 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x08, 0x65, 0x78, 0x61, 0x6d, - 0x70, 0x6c, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x08, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x08, 0x65, - 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, - 
0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x5d, 0x0a, 0x0a, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, - 0x79, 0x70, 0x65, 0x73, 0x12, 0x4f, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, - 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, - 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x45, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, - 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x33, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x65, 0x0a, 0x18, - 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x4f, 0x72, 0x52, - 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, - 0x6b, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x22, 0x4f, 0x0a, 0x0d, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x22, 0x63, 0x0a, 0x17, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x78, 0x61, - 0x6d, 0x70, 0x6c, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, - 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, - 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x61, 0x0a, 0x16, 0x4e, 0x61, 0x6d, - 0x65, 0x64, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, - 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x33, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, - 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5d, 0x0a, 0x14, - 0x4e, 0x61, 0x6d, 0x65, 
0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, - 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x31, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, - 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x51, 0x0a, 0x0e, 0x4e, - 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, - 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x67, - 0x0a, 0x19, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, - 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4f, 0x0a, 0x0d, 0x4e, 0x61, 0x6d, 0x65, 0x64, - 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, - 0x6d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x6b, 0x0a, 0x1b, 0x4e, 0x61, 0x6d, 0x65, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, 0x64, 0x79, 0x4f, 0x72, 0x52, 0x65, - 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, - 0x6f, 0x64, 0x79, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x65, 0x0a, 0x18, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x33, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, - 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x61, 0x0a, 0x16, - 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x63, 0x68, 
0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, - 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x33, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, - 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, - 0x71, 0x0a, 0x1e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x33, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, - 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x22, 0x5b, 0x0a, 0x13, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, - 0x37, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x55, 0x0a, 0x10, 0x4e, 0x61, 0x6d, 0x65, - 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, - 0xf2, 0x01, 0x0a, 0x09, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x2b, 0x0a, - 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, - 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x66, 0x72, 0x65, - 0x73, 0x68, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, - 0x66, 0x72, 0x65, 0x73, 0x68, 0x55, 0x72, 0x6c, 0x12, 0x2b, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, - 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 
0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x73, - 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, - 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xcd, 0x02, 0x0a, 0x0a, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x46, 0x6c, - 0x6f, 0x77, 0x73, 0x12, 0x31, 0x0a, 0x08, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x33, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x08, 0x69, 0x6d, - 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x12, 0x31, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, - 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x46, 0x6c, 0x6f, 0x77, 0x52, - 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x44, 0x0a, 0x12, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x33, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x11, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, - 0x44, 0x0a, 0x12, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x46, 0x6c, - 0x6f, 0x77, 0x52, 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, - 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x53, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x49, - 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, - 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, - 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, - 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x96, 0x05, 0x0a, 0x09, 0x4f, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, - 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, - 
[... removed vendored generated code elided: the remainder of the raw protobuf descriptor byte array for openapiv3/OpenAPIv3.proto (file_openapiv3_OpenAPIv3_proto_rawDesc), followed by the generated rawDescGZIP helper, the file_openapiv3_OpenAPIv3_proto_goTypes and file_openapiv3_OpenAPIv3_proto_depIdxs tables, and the per-message exporter closures registered in file_openapiv3_OpenAPIv3_proto_init; all of these lines are deletions ("-") belonging to the removal of this vendored OpenAPIv3 protobuf package ...]
func(v interface{}, i int) interface{} { - switch v := v.(*NamedParameterOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedPathItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedRequestBodyOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedResponseOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedSchemaOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedSecuritySchemeOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedServerVariable); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedString); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedStringArray); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OauthFlow); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OauthFlows); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Object); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Operation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Parameter); 
i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ParameterOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ParametersOrReferences); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PathItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Paths); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Properties); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Reference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RequestBodiesOrReferences); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RequestBody); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RequestBodyOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Response); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResponseOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Responses); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResponsesOrReferences); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - 
default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Schema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SchemaOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SchemasOrReferences); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SecurityRequirement); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SecurityScheme); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SecuritySchemeOrReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SecuritySchemesOrReferences); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Server); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerVariable); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerVariables); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SpecificationExtension); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StringArray); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Strings); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[76].Exporter = 
func(v interface{}, i int) interface{} { - switch v := v.(*Tag); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Xml); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_openapiv3_OpenAPIv3_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*AdditionalPropertiesItem_SchemaOrReference)(nil), - (*AdditionalPropertiesItem_Boolean)(nil), - } - file_openapiv3_OpenAPIv3_proto_msgTypes[2].OneofWrappers = []interface{}{ - (*AnyOrExpression_Any)(nil), - (*AnyOrExpression_Expression)(nil), - } - file_openapiv3_OpenAPIv3_proto_msgTypes[4].OneofWrappers = []interface{}{ - (*CallbackOrReference_Callback)(nil), - (*CallbackOrReference_Reference)(nil), - } - file_openapiv3_OpenAPIv3_proto_msgTypes[8].OneofWrappers = []interface{}{ - (*DefaultType_Number)(nil), - (*DefaultType_Boolean)(nil), - (*DefaultType_String_)(nil), - } - file_openapiv3_OpenAPIv3_proto_msgTypes[14].OneofWrappers = []interface{}{ - (*ExampleOrReference_Example)(nil), - (*ExampleOrReference_Reference)(nil), - } - file_openapiv3_OpenAPIv3_proto_msgTypes[19].OneofWrappers = []interface{}{ - (*HeaderOrReference_Header)(nil), - (*HeaderOrReference_Reference)(nil), - } - file_openapiv3_OpenAPIv3_proto_msgTypes[25].OneofWrappers = []interface{}{ - (*LinkOrReference_Link)(nil), - (*LinkOrReference_Reference)(nil), - } - file_openapiv3_OpenAPIv3_proto_msgTypes[50].OneofWrappers = []interface{}{ - (*ParameterOrReference_Parameter)(nil), - (*ParameterOrReference_Reference)(nil), - } - file_openapiv3_OpenAPIv3_proto_msgTypes[58].OneofWrappers = []interface{}{ - (*RequestBodyOrReference_RequestBody)(nil), - (*RequestBodyOrReference_Reference)(nil), - } - file_openapiv3_OpenAPIv3_proto_msgTypes[60].OneofWrappers = []interface{}{ - (*ResponseOrReference_Response)(nil), - (*ResponseOrReference_Reference)(nil), - } - file_openapiv3_OpenAPIv3_proto_msgTypes[64].OneofWrappers = []interface{}{ - (*SchemaOrReference_Schema)(nil), - (*SchemaOrReference_Reference)(nil), - } - file_openapiv3_OpenAPIv3_proto_msgTypes[68].OneofWrappers = []interface{}{ - (*SecuritySchemeOrReference_SecurityScheme)(nil), - (*SecuritySchemeOrReference_Reference)(nil), - } - file_openapiv3_OpenAPIv3_proto_msgTypes[73].OneofWrappers = []interface{}{ - (*SpecificationExtension_Number)(nil), - (*SpecificationExtension_Boolean)(nil), - (*SpecificationExtension_String_)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_openapiv3_OpenAPIv3_proto_rawDesc, - NumEnums: 0, - NumMessages: 78, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_openapiv3_OpenAPIv3_proto_goTypes, - DependencyIndexes: file_openapiv3_OpenAPIv3_proto_depIdxs, - MessageInfos: file_openapiv3_OpenAPIv3_proto_msgTypes, - }.Build() - File_openapiv3_OpenAPIv3_proto = out.File - file_openapiv3_OpenAPIv3_proto_rawDesc = nil - file_openapiv3_OpenAPIv3_proto_goTypes = nil - file_openapiv3_OpenAPIv3_proto_depIdxs = nil -} diff --git a/src/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto b/src/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto deleted file mode 100644 index 1be335b89..000000000 --- a/src/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto +++ /dev/null @@ -1,672 +0,0 @@ -// 
Copyright 2020 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// THIS FILE IS AUTOMATICALLY GENERATED. - -syntax = "proto3"; - -package openapi.v3; - -import "google/protobuf/any.proto"; - -// This option lets the proto compiler generate Java code inside the package -// name (see below) instead of inside an outer class. It creates a simpler -// developer experience by reducing one-level of name nesting and be -// consistent with most programming languages that don't support outer classes. -option java_multiple_files = true; - -// The Java outer classname should be the filename in UpperCamelCase. This -// class is only used to hold proto descriptor, so developers don't need to -// work with it directly. -option java_outer_classname = "OpenAPIProto"; - -// The Java package name must be proto package name with proper prefix. -option java_package = "org.openapi_v3"; - -// A reasonable prefix for the Objective-C symbols generated from the package. -// It should at a minimum be 3 characters long, all uppercase, and convention -// is to use an abbreviation of the package name. Something short, but -// hopefully unique enough to not conflict with things that may come along in -// the future. 'GPB' is reserved for the protocol buffer implementation itself. -option objc_class_prefix = "OAS"; - -// The Go package name. -option go_package = "./openapiv3;openapi_v3"; - -message AdditionalPropertiesItem { - oneof oneof { - SchemaOrReference schema_or_reference = 1; - bool boolean = 2; - } -} - -message Any { - google.protobuf.Any value = 1; - string yaml = 2; -} - -message AnyOrExpression { - oneof oneof { - Any any = 1; - Expression expression = 2; - } -} - -// A map of possible out-of band callbacks related to the parent operation. Each value in the map is a Path Item Object that describes a set of requests that may be initiated by the API provider and the expected responses. The key value used to identify the callback object is an expression, evaluated at runtime, that identifies a URL to use for the callback operation. -message Callback { - repeated NamedPathItem path = 1; - repeated NamedAny specification_extension = 2; -} - -message CallbackOrReference { - oneof oneof { - Callback callback = 1; - Reference reference = 2; - } -} - -message CallbacksOrReferences { - repeated NamedCallbackOrReference additional_properties = 1; -} - -// Holds a set of reusable objects for different aspects of the OAS. All objects defined within the components object will have no effect on the API unless they are explicitly referenced from properties outside the components object. 
-message Components { - SchemasOrReferences schemas = 1; - ResponsesOrReferences responses = 2; - ParametersOrReferences parameters = 3; - ExamplesOrReferences examples = 4; - RequestBodiesOrReferences request_bodies = 5; - HeadersOrReferences headers = 6; - SecuritySchemesOrReferences security_schemes = 7; - LinksOrReferences links = 8; - CallbacksOrReferences callbacks = 9; - repeated NamedAny specification_extension = 10; -} - -// Contact information for the exposed API. -message Contact { - string name = 1; - string url = 2; - string email = 3; - repeated NamedAny specification_extension = 4; -} - -message DefaultType { - oneof oneof { - double number = 1; - bool boolean = 2; - string string = 3; - } -} - -// When request bodies or response payloads may be one of a number of different schemas, a `discriminator` object can be used to aid in serialization, deserialization, and validation. The discriminator is a specific object in a schema which is used to inform the consumer of the specification of an alternative schema based on the value associated with it. When using the discriminator, _inline_ schemas will not be considered. -message Discriminator { - string property_name = 1; - Strings mapping = 2; - repeated NamedAny specification_extension = 3; -} - -message Document { - string openapi = 1; - Info info = 2; - repeated Server servers = 3; - Paths paths = 4; - Components components = 5; - repeated SecurityRequirement security = 6; - repeated Tag tags = 7; - ExternalDocs external_docs = 8; - repeated NamedAny specification_extension = 9; -} - -// A single encoding definition applied to a single schema property. -message Encoding { - string content_type = 1; - HeadersOrReferences headers = 2; - string style = 3; - bool explode = 4; - bool allow_reserved = 5; - repeated NamedAny specification_extension = 6; -} - -message Encodings { - repeated NamedEncoding additional_properties = 1; -} - -message Example { - string summary = 1; - string description = 2; - Any value = 3; - string external_value = 4; - repeated NamedAny specification_extension = 5; -} - -message ExampleOrReference { - oneof oneof { - Example example = 1; - Reference reference = 2; - } -} - -message ExamplesOrReferences { - repeated NamedExampleOrReference additional_properties = 1; -} - -message Expression { - repeated NamedAny additional_properties = 1; -} - -// Allows referencing an external resource for extended documentation. -message ExternalDocs { - string description = 1; - string url = 2; - repeated NamedAny specification_extension = 3; -} - -// The Header Object follows the structure of the Parameter Object with the following changes: 1. `name` MUST NOT be specified, it is given in the corresponding `headers` map. 1. `in` MUST NOT be specified, it is implicitly in `header`. 1. All traits that are affected by the location MUST be applicable to a location of `header` (for example, `style`). -message Header { - string description = 1; - bool required = 2; - bool deprecated = 3; - bool allow_empty_value = 4; - string style = 5; - bool explode = 6; - bool allow_reserved = 7; - SchemaOrReference schema = 8; - Any example = 9; - ExamplesOrReferences examples = 10; - MediaTypes content = 11; - repeated NamedAny specification_extension = 12; -} - -message HeaderOrReference { - oneof oneof { - Header header = 1; - Reference reference = 2; - } -} - -message HeadersOrReferences { - repeated NamedHeaderOrReference additional_properties = 1; -} - -// The object provides metadata about the API. 
The metadata MAY be used by the clients if needed, and MAY be presented in editing or documentation generation tools for convenience. -message Info { - string title = 1; - string description = 2; - string terms_of_service = 3; - Contact contact = 4; - License license = 5; - string version = 6; - repeated NamedAny specification_extension = 7; - string summary = 8; -} - -message ItemsItem { - repeated SchemaOrReference schema_or_reference = 1; -} - -// License information for the exposed API. -message License { - string name = 1; - string url = 2; - repeated NamedAny specification_extension = 3; -} - -// The `Link object` represents a possible design-time link for a response. The presence of a link does not guarantee the caller's ability to successfully invoke it, rather it provides a known relationship and traversal mechanism between responses and other operations. Unlike _dynamic_ links (i.e. links provided **in** the response payload), the OAS linking mechanism does not require link information in the runtime response. For computing links, and providing instructions to execute them, a runtime expression is used for accessing values in an operation and using them as parameters while invoking the linked operation. -message Link { - string operation_ref = 1; - string operation_id = 2; - AnyOrExpression parameters = 3; - AnyOrExpression request_body = 4; - string description = 5; - Server server = 6; - repeated NamedAny specification_extension = 7; -} - -message LinkOrReference { - oneof oneof { - Link link = 1; - Reference reference = 2; - } -} - -message LinksOrReferences { - repeated NamedLinkOrReference additional_properties = 1; -} - -// Each Media Type Object provides schema and examples for the media type identified by its key. -message MediaType { - SchemaOrReference schema = 1; - Any example = 2; - ExamplesOrReferences examples = 3; - Encodings encoding = 4; - repeated NamedAny specification_extension = 5; -} - -message MediaTypes { - repeated NamedMediaType additional_properties = 1; -} - -// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. -message NamedAny { - // Map key - string name = 1; - // Mapped value - Any value = 2; -} - -// Automatically-generated message used to represent maps of CallbackOrReference as ordered (name,value) pairs. -message NamedCallbackOrReference { - // Map key - string name = 1; - // Mapped value - CallbackOrReference value = 2; -} - -// Automatically-generated message used to represent maps of Encoding as ordered (name,value) pairs. -message NamedEncoding { - // Map key - string name = 1; - // Mapped value - Encoding value = 2; -} - -// Automatically-generated message used to represent maps of ExampleOrReference as ordered (name,value) pairs. -message NamedExampleOrReference { - // Map key - string name = 1; - // Mapped value - ExampleOrReference value = 2; -} - -// Automatically-generated message used to represent maps of HeaderOrReference as ordered (name,value) pairs. -message NamedHeaderOrReference { - // Map key - string name = 1; - // Mapped value - HeaderOrReference value = 2; -} - -// Automatically-generated message used to represent maps of LinkOrReference as ordered (name,value) pairs. -message NamedLinkOrReference { - // Map key - string name = 1; - // Mapped value - LinkOrReference value = 2; -} - -// Automatically-generated message used to represent maps of MediaType as ordered (name,value) pairs. 
-message NamedMediaType { - // Map key - string name = 1; - // Mapped value - MediaType value = 2; -} - -// Automatically-generated message used to represent maps of ParameterOrReference as ordered (name,value) pairs. -message NamedParameterOrReference { - // Map key - string name = 1; - // Mapped value - ParameterOrReference value = 2; -} - -// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. -message NamedPathItem { - // Map key - string name = 1; - // Mapped value - PathItem value = 2; -} - -// Automatically-generated message used to represent maps of RequestBodyOrReference as ordered (name,value) pairs. -message NamedRequestBodyOrReference { - // Map key - string name = 1; - // Mapped value - RequestBodyOrReference value = 2; -} - -// Automatically-generated message used to represent maps of ResponseOrReference as ordered (name,value) pairs. -message NamedResponseOrReference { - // Map key - string name = 1; - // Mapped value - ResponseOrReference value = 2; -} - -// Automatically-generated message used to represent maps of SchemaOrReference as ordered (name,value) pairs. -message NamedSchemaOrReference { - // Map key - string name = 1; - // Mapped value - SchemaOrReference value = 2; -} - -// Automatically-generated message used to represent maps of SecuritySchemeOrReference as ordered (name,value) pairs. -message NamedSecuritySchemeOrReference { - // Map key - string name = 1; - // Mapped value - SecuritySchemeOrReference value = 2; -} - -// Automatically-generated message used to represent maps of ServerVariable as ordered (name,value) pairs. -message NamedServerVariable { - // Map key - string name = 1; - // Mapped value - ServerVariable value = 2; -} - -// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. -message NamedString { - // Map key - string name = 1; - // Mapped value - string value = 2; -} - -// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. -message NamedStringArray { - // Map key - string name = 1; - // Mapped value - StringArray value = 2; -} - -// Configuration details for a supported OAuth Flow -message OauthFlow { - string authorization_url = 1; - string token_url = 2; - string refresh_url = 3; - Strings scopes = 4; - repeated NamedAny specification_extension = 5; -} - -// Allows configuration of the supported OAuth Flows. -message OauthFlows { - OauthFlow implicit = 1; - OauthFlow password = 2; - OauthFlow client_credentials = 3; - OauthFlow authorization_code = 4; - repeated NamedAny specification_extension = 5; -} - -message Object { - repeated NamedAny additional_properties = 1; -} - -// Describes a single API operation on a path. -message Operation { - repeated string tags = 1; - string summary = 2; - string description = 3; - ExternalDocs external_docs = 4; - string operation_id = 5; - repeated ParameterOrReference parameters = 6; - RequestBodyOrReference request_body = 7; - Responses responses = 8; - CallbacksOrReferences callbacks = 9; - bool deprecated = 10; - repeated SecurityRequirement security = 11; - repeated Server servers = 12; - repeated NamedAny specification_extension = 13; -} - -// Describes a single operation parameter. A unique parameter is defined by a combination of a name and location. 
-message Parameter { - string name = 1; - string in = 2; - string description = 3; - bool required = 4; - bool deprecated = 5; - bool allow_empty_value = 6; - string style = 7; - bool explode = 8; - bool allow_reserved = 9; - SchemaOrReference schema = 10; - Any example = 11; - ExamplesOrReferences examples = 12; - MediaTypes content = 13; - repeated NamedAny specification_extension = 14; -} - -message ParameterOrReference { - oneof oneof { - Parameter parameter = 1; - Reference reference = 2; - } -} - -message ParametersOrReferences { - repeated NamedParameterOrReference additional_properties = 1; -} - -// Describes the operations available on a single path. A Path Item MAY be empty, due to ACL constraints. The path itself is still exposed to the documentation viewer but they will not know which operations and parameters are available. -message PathItem { - string _ref = 1; - string summary = 2; - string description = 3; - Operation get = 4; - Operation put = 5; - Operation post = 6; - Operation delete = 7; - Operation options = 8; - Operation head = 9; - Operation patch = 10; - Operation trace = 11; - repeated Server servers = 12; - repeated ParameterOrReference parameters = 13; - repeated NamedAny specification_extension = 14; -} - -// Holds the relative paths to the individual endpoints and their operations. The path is appended to the URL from the `Server Object` in order to construct the full URL. The Paths MAY be empty, due to ACL constraints. -message Paths { - repeated NamedPathItem path = 1; - repeated NamedAny specification_extension = 2; -} - -message Properties { - repeated NamedSchemaOrReference additional_properties = 1; -} - -// A simple object to allow referencing other components in the specification, internally and externally. The Reference Object is defined by JSON Reference and follows the same structure, behavior and rules. For this specification, reference resolution is accomplished as defined by the JSON Reference specification and not by the JSON Schema specification. -message Reference { - string _ref = 1; - string summary = 2; - string description = 3; -} - -message RequestBodiesOrReferences { - repeated NamedRequestBodyOrReference additional_properties = 1; -} - -// Describes a single request body. -message RequestBody { - string description = 1; - MediaTypes content = 2; - bool required = 3; - repeated NamedAny specification_extension = 4; -} - -message RequestBodyOrReference { - oneof oneof { - RequestBody request_body = 1; - Reference reference = 2; - } -} - -// Describes a single response from an API Operation, including design-time, static `links` to operations based on the response. -message Response { - string description = 1; - HeadersOrReferences headers = 2; - MediaTypes content = 3; - LinksOrReferences links = 4; - repeated NamedAny specification_extension = 5; -} - -message ResponseOrReference { - oneof oneof { - Response response = 1; - Reference reference = 2; - } -} - -// A container for the expected responses of an operation. The container maps a HTTP response code to the expected response. The documentation is not necessarily expected to cover all possible HTTP response codes because they may not be known in advance. However, documentation is expected to cover a successful operation response and any known errors. The `default` MAY be used as a default response object for all HTTP codes that are not covered individually by the specification. 
The `Responses Object` MUST contain at least one response code, and it SHOULD be the response for a successful operation call. -message Responses { - ResponseOrReference default = 1; - repeated NamedResponseOrReference response_or_reference = 2; - repeated NamedAny specification_extension = 3; -} - -message ResponsesOrReferences { - repeated NamedResponseOrReference additional_properties = 1; -} - -// The Schema Object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. This object is an extended subset of the JSON Schema Specification Wright Draft 00. For more information about the properties, see JSON Schema Core and JSON Schema Validation. Unless stated otherwise, the property definitions follow the JSON Schema. -message Schema { - bool nullable = 1; - Discriminator discriminator = 2; - bool read_only = 3; - bool write_only = 4; - Xml xml = 5; - ExternalDocs external_docs = 6; - Any example = 7; - bool deprecated = 8; - string title = 9; - double multiple_of = 10; - double maximum = 11; - bool exclusive_maximum = 12; - double minimum = 13; - bool exclusive_minimum = 14; - int64 max_length = 15; - int64 min_length = 16; - string pattern = 17; - int64 max_items = 18; - int64 min_items = 19; - bool unique_items = 20; - int64 max_properties = 21; - int64 min_properties = 22; - repeated string required = 23; - repeated Any enum = 24; - string type = 25; - repeated SchemaOrReference all_of = 26; - repeated SchemaOrReference one_of = 27; - repeated SchemaOrReference any_of = 28; - Schema not = 29; - ItemsItem items = 30; - Properties properties = 31; - AdditionalPropertiesItem additional_properties = 32; - DefaultType default = 33; - string description = 34; - string format = 35; - repeated NamedAny specification_extension = 36; -} - -message SchemaOrReference { - oneof oneof { - Schema schema = 1; - Reference reference = 2; - } -} - -message SchemasOrReferences { - repeated NamedSchemaOrReference additional_properties = 1; -} - -// Lists the required security schemes to execute this operation. The name used for each property MUST correspond to a security scheme declared in the Security Schemes under the Components Object. Security Requirement Objects that contain multiple schemes require that all schemes MUST be satisfied for a request to be authorized. This enables support for scenarios where multiple query parameters or HTTP headers are required to convey security information. When a list of Security Requirement Objects is defined on the OpenAPI Object or Operation Object, only one of the Security Requirement Objects in the list needs to be satisfied to authorize the request. -message SecurityRequirement { - repeated NamedStringArray additional_properties = 1; -} - -// Defines a security scheme that can be used by the operations. Supported schemes are HTTP authentication, an API key (either as a header, a cookie parameter or as a query parameter), mutual TLS (use of a client certificate), OAuth2's common flows (implicit, password, application and access code) as defined in RFC6749, and OpenID Connect. Please note that currently (2019) the implicit flow is about to be deprecated OAuth 2.0 Security Best Current Practice. Recommended for most use case is Authorization Code Grant flow with PKCE. 
-message SecurityScheme { - string type = 1; - string description = 2; - string name = 3; - string in = 4; - string scheme = 5; - string bearer_format = 6; - OauthFlows flows = 7; - string open_id_connect_url = 8; - repeated NamedAny specification_extension = 9; -} - -message SecuritySchemeOrReference { - oneof oneof { - SecurityScheme security_scheme = 1; - Reference reference = 2; - } -} - -message SecuritySchemesOrReferences { - repeated NamedSecuritySchemeOrReference additional_properties = 1; -} - -// An object representing a Server. -message Server { - string url = 1; - string description = 2; - ServerVariables variables = 3; - repeated NamedAny specification_extension = 4; -} - -// An object representing a Server Variable for server URL template substitution. -message ServerVariable { - repeated string enum = 1; - string default = 2; - string description = 3; - repeated NamedAny specification_extension = 4; -} - -message ServerVariables { - repeated NamedServerVariable additional_properties = 1; -} - -// Any property starting with x- is valid. -message SpecificationExtension { - oneof oneof { - double number = 1; - bool boolean = 2; - string string = 3; - } -} - -message StringArray { - repeated string value = 1; -} - -message Strings { - repeated NamedString additional_properties = 1; -} - -// Adds metadata to a single tag that is used by the Operation Object. It is not mandatory to have a Tag Object per tag defined in the Operation Object instances. -message Tag { - string name = 1; - string description = 2; - ExternalDocs external_docs = 3; - repeated NamedAny specification_extension = 4; -} - -// A metadata object that allows for more fine-tuned XML model definitions. When using arrays, XML element names are *not* inferred (for singular/plural forms) and the `name` property SHOULD be used to add that information. See examples for expected behavior. -message Xml { - string name = 1; - string namespace = 2; - string prefix = 3; - bool attribute = 4; - bool wrapped = 5; - repeated NamedAny specification_extension = 6; -} - diff --git a/src/vendor/github.com/google/gnostic/openapiv3/README.md b/src/vendor/github.com/google/gnostic/openapiv3/README.md deleted file mode 100644 index 5ee12d92e..000000000 --- a/src/vendor/github.com/google/gnostic/openapiv3/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# OpenAPI v3 Protocol Buffer Models - -This directory contains a Protocol Buffer-language model and related code for -supporting OpenAPI v3. - -Gnostic applications and plugins can use OpenAPIv3.proto to generate Protocol -Buffer support code for their preferred languages. - -OpenAPIv3.go is used by Gnostic to read JSON and YAML OpenAPI descriptions into -the Protocol Buffer-based datastructures generated from OpenAPIv3.proto. - -OpenAPIv3.proto and OpenAPIv3.go are generated by the Gnostic compiler -generator, and OpenAPIv3.pb.go is generated by protoc, the Protocol Buffer -compiler, and protoc-gen-go, the Protocol Buffer Go code generation plugin. - -openapi-3.1.json is a JSON schema for OpenAPI 3.1 that is automatically -generated from the OpenAPI 3.1 specification. It is not an official JSON Schema -for OpenAPI. - -The schema-generator directory contains support code which generates -openapi-3.1.json from the OpenAPI 3.1 specification document (Markdown). 
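For context, a minimal, illustrative sketch (not part of this patch) of how the vendored gnostic openapi_v3 package being removed here is typically consumed. The ParseDocument and YAMLValue helpers appear verbatim in document.go in the next hunk; the input file name and the nil-safe generated getters (GetOpenapi, GetInfo, GetTitle) are assumptions based on the Document message defined in OpenAPIv3.proto above.

package main

import (
	"fmt"
	"os"

	openapi_v3 "github.com/google/gnostic/openapiv3"
)

func main() {
	// Read an OpenAPI v3 description (YAML or JSON) from disk.
	// "openapi.yaml" is a hypothetical file name used only for illustration.
	b, err := os.ReadFile("openapi.yaml")
	if err != nil {
		panic(err)
	}

	// ParseDocument builds the protobuf-backed Document from the raw bytes,
	// as defined in the deleted document.go below.
	doc, err := openapi_v3.ParseDocument(b)
	if err != nil {
		panic(err)
	}

	// Generated getters are assumed nil-safe, so this works for sparse documents.
	fmt.Println(doc.GetOpenapi(), doc.GetInfo().GetTitle())

	// Round-trip the document back to YAML with a leading comment.
	out, err := doc.YAMLValue("regenerated by example")
	if err != nil {
		panic(err)
	}
	_ = out
}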
diff --git a/src/vendor/github.com/google/gnostic/openapiv3/document.go b/src/vendor/github.com/google/gnostic/openapiv3/document.go deleted file mode 100644 index ef10d1d90..000000000 --- a/src/vendor/github.com/google/gnostic/openapiv3/document.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2020 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package openapi_v3 - -import ( - "gopkg.in/yaml.v3" - - "github.com/google/gnostic/compiler" -) - -// ParseDocument reads an OpenAPI v3 description from a YAML/JSON representation. -func ParseDocument(b []byte) (*Document, error) { - info, err := compiler.ReadInfoFromBytes("", b) - if err != nil { - return nil, err - } - root := info.Content[0] - return NewDocument(root, compiler.NewContextWithExtensions("$root", root, nil, nil)) -} - -// YAMLValue produces a serialized YAML representation of the document. -func (d *Document) YAMLValue(comment string) ([]byte, error) { - rawInfo := d.ToRawInfo() - rawInfo = &yaml.Node{ - Kind: yaml.DocumentNode, - Content: []*yaml.Node{rawInfo}, - HeadComment: comment, - } - return yaml.Marshal(rawInfo) -} diff --git a/src/vendor/github.com/google/gnostic/openapiv3/openapi-3.0.json b/src/vendor/github.com/google/gnostic/openapiv3/openapi-3.0.json deleted file mode 100644 index d5caed162..000000000 --- a/src/vendor/github.com/google/gnostic/openapiv3/openapi-3.0.json +++ /dev/null @@ -1,1251 +0,0 @@ -{ - "title": "A JSON Schema for OpenAPI 3.0.", - "id": "http://openapis.org/v3/schema.json#", - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "description": "This is the root document object of the OpenAPI document.", - "required": [ - "openapi", - "info", - "paths" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "openapi": { - "type": "string" - }, - "info": { - "$ref": "#/definitions/info" - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - }, - "paths": { - "$ref": "#/definitions/paths" - }, - "components": { - "$ref": "#/definitions/components" - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "tags": { - "type": "array", - "items": { - "$ref": "#/definitions/tag" - }, - "uniqueItems": true - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "definitions": { - "info": { - "type": "object", - "description": "The object provides metadata about the API. 
The metadata MAY be used by the clients if needed, and MAY be presented in editing or documentation generation tools for convenience.", - "required": [ - "title", - "version" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "termsOfService": { - "type": "string" - }, - "contact": { - "$ref": "#/definitions/contact" - }, - "license": { - "$ref": "#/definitions/license" - }, - "version": { - "type": "string" - } - } - }, - "contact": { - "type": "object", - "description": "Contact information for the exposed API.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string", - "format": "uri" - }, - "email": { - "type": "string", - "format": "email" - } - } - }, - "license": { - "type": "object", - "description": "License information for the exposed API.", - "required": [ - "name" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - } - } - }, - "server": { - "type": "object", - "description": "An object representing a Server.", - "required": [ - "url" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "url": { - "type": "string" - }, - "description": { - "type": "string" - }, - "variables": { - "$ref": "#/definitions/serverVariables" - } - } - }, - "serverVariable": { - "type": "object", - "description": "An object representing a Server Variable for server URL template substitution.", - "required": [ - "default" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "enum": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "default": { - "type": "string" - }, - "description": { - "type": "string" - } - } - }, - "components": { - "type": "object", - "description": "Holds a set of reusable objects for different aspects of the OAS. All objects defined within the components object will have no effect on the API unless they are explicitly referenced from properties outside the components object.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "schemas": { - "$ref": "#/definitions/schemasOrReferences" - }, - "responses": { - "$ref": "#/definitions/responsesOrReferences" - }, - "parameters": { - "$ref": "#/definitions/parametersOrReferences" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "requestBodies": { - "$ref": "#/definitions/requestBodiesOrReferences" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "securitySchemes": { - "$ref": "#/definitions/securitySchemesOrReferences" - }, - "links": { - "$ref": "#/definitions/linksOrReferences" - }, - "callbacks": { - "$ref": "#/definitions/callbacksOrReferences" - } - } - }, - "paths": { - "type": "object", - "description": "Holds the relative paths to the individual endpoints and their operations. 
The path is appended to the URL from the `Server Object` in order to construct the full URL. The Paths MAY be empty, due to ACL constraints.", - "additionalProperties": false, - "patternProperties": { - "^/": { - "$ref": "#/definitions/pathItem" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - } - }, - "pathItem": { - "type": "object", - "description": "Describes the operations available on a single path. A Path Item MAY be empty, due to ACL constraints. The path itself is still exposed to the documentation viewer but they will not know which operations and parameters are available.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "get": { - "$ref": "#/definitions/operation" - }, - "put": { - "$ref": "#/definitions/operation" - }, - "post": { - "$ref": "#/definitions/operation" - }, - "delete": { - "$ref": "#/definitions/operation" - }, - "options": { - "$ref": "#/definitions/operation" - }, - "head": { - "$ref": "#/definitions/operation" - }, - "patch": { - "$ref": "#/definitions/operation" - }, - "trace": { - "$ref": "#/definitions/operation" - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - }, - "parameters": { - "type": "array", - "items": { - "$ref": "#/definitions/parameterOrReference" - }, - "uniqueItems": true - } - } - }, - "operation": { - "type": "object", - "description": "Describes a single API operation on a path.", - "required": [ - "responses" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "operationId": { - "type": "string" - }, - "parameters": { - "type": "array", - "items": { - "$ref": "#/definitions/parameterOrReference" - }, - "uniqueItems": true - }, - "requestBody": { - "$ref": "#/definitions/requestBodyOrReference" - }, - "responses": { - "$ref": "#/definitions/responses" - }, - "callbacks": { - "$ref": "#/definitions/callbacksOrReferences" - }, - "deprecated": { - "type": "boolean" - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - } - } - }, - "externalDocs": { - "type": "object", - "description": "Allows referencing an external resource for extended documentation.", - "required": [ - "url" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "url": { - "type": "string" - } - } - }, - "parameter": { - "type": "object", - "description": "Describes a single operation parameter. 
A unique parameter is defined by a combination of a name and location.", - "required": [ - "name", - "in" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "in": { - "type": "string" - }, - "description": { - "type": "string" - }, - "required": { - "type": "boolean" - }, - "deprecated": { - "type": "boolean" - }, - "allowEmptyValue": { - "type": "boolean" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - }, - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - } - } - }, - "requestBody": { - "type": "object", - "description": "Describes a single request body.", - "required": [ - "content" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - }, - "required": { - "type": "boolean" - } - } - }, - "mediaType": { - "type": "object", - "description": "Each Media Type Object provides schema and examples for the media type identified by its key.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "encoding": { - "$ref": "#/definitions/encodings" - } - } - }, - "encoding": { - "type": "object", - "description": "A single encoding definition applied to a single schema property.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "contentType": { - "type": "string" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - } - } - }, - "responses": { - "type": "object", - "description": "A container for the expected responses of an operation. The container maps a HTTP response code to the expected response. The documentation is not necessarily expected to cover all possible HTTP response codes because they may not be known in advance. However, documentation is expected to cover a successful operation response and any known errors. The `default` MAY be used as a default response object for all HTTP codes that are not covered individually by the specification. 
The `Responses Object` MUST contain at least one response code, and it SHOULD be the response for a successful operation call.", - "additionalProperties": false, - "patternProperties": { - "^([0-9X]{3})$": { - "$ref": "#/definitions/responseOrReference" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "default": { - "$ref": "#/definitions/responseOrReference" - } - } - }, - "response": { - "type": "object", - "description": "Describes a single response from an API Operation, including design-time, static `links` to operations based on the response.", - "required": [ - "description" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - }, - "links": { - "$ref": "#/definitions/linksOrReferences" - } - } - }, - "callback": { - "type": "object", - "description": "A map of possible out-of band callbacks related to the parent operation. Each value in the map is a Path Item Object that describes a set of requests that may be initiated by the API provider and the expected responses. The key value used to identify the callback object is an expression, evaluated at runtime, that identifies a URL to use for the callback operation.", - "additionalProperties": false, - "patternProperties": { - "^": { - "$ref": "#/definitions/pathItem" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - } - }, - "example": { - "type": "object", - "description": "", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "value": { - "$ref": "#/definitions/any" - }, - "externalValue": { - "type": "string" - } - } - }, - "link": { - "type": "object", - "description": "The `Link object` represents a possible design-time link for a response. The presence of a link does not guarantee the caller's ability to successfully invoke it, rather it provides a known relationship and traversal mechanism between responses and other operations. Unlike _dynamic_ links (i.e. links provided **in** the response payload), the OAS linking mechanism does not require link information in the runtime response. For computing links, and providing instructions to execute them, a runtime expression is used for accessing values in an operation and using them as parameters while invoking the linked operation.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "operationRef": { - "type": "string" - }, - "operationId": { - "type": "string" - }, - "parameters": { - "$ref": "#/definitions/anysOrExpressions" - }, - "requestBody": { - "$ref": "#/definitions/anyOrExpression" - }, - "description": { - "type": "string" - }, - "server": { - "$ref": "#/definitions/server" - } - } - }, - "header": { - "type": "object", - "description": "The Header Object follows the structure of the Parameter Object with the following changes: 1. `name` MUST NOT be specified, it is given in the corresponding `headers` map. 1. `in` MUST NOT be specified, it is implicitly in `header`. 1. 
All traits that are affected by the location MUST be applicable to a location of `header` (for example, `style`).", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "required": { - "type": "boolean" - }, - "deprecated": { - "type": "boolean" - }, - "allowEmptyValue": { - "type": "boolean" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - }, - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - } - } - }, - "tag": { - "type": "object", - "description": "Adds metadata to a single tag that is used by the Operation Object. It is not mandatory to have a Tag Object per tag defined in the Operation Object instances.", - "required": [ - "name" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - } - }, - "reference": { - "type": "object", - "description": "A simple object to allow referencing other components in the specification, internally and externally. The Reference Object is defined by JSON Reference and follows the same structure, behavior and rules. For this specification, reference resolution is accomplished as defined by the JSON Reference specification and not by the JSON Schema specification.", - "required": [ - "$ref" - ], - "additionalProperties": false, - "properties": { - "$ref": { - "type": "string" - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - } - } - }, - "schema": { - "type": "object", - "description": "The Schema Object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. This object is an extended subset of the JSON Schema Specification Wright Draft 00. For more information about the properties, see JSON Schema Core and JSON Schema Validation. 
Unless stated otherwise, the property definitions follow the JSON Schema.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "nullable": { - "type": "boolean" - }, - "discriminator": { - "$ref": "#/definitions/discriminator" - }, - "readOnly": { - "type": "boolean" - }, - "writeOnly": { - "type": "boolean" - }, - "xml": { - "$ref": "#/definitions/xml" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": { - "$ref": "#/definitions/any" - }, - "deprecated": { - "type": "boolean" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxLength" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minLength" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxItems" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minItems" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "maxProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxProperties" - }, - "minProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minProperties" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/required" - }, - "enum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" - }, - "type": { - "type": "string" - }, - "allOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "oneOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "anyOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "not": { - "$ref": "#/definitions/schema" - }, - "items": { - "anyOf": [ - { - "$ref": "#/definitions/schemaOrReference" - }, - { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - } - ] - }, - "properties": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schemaOrReference" - } - }, - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/definitions/schemaOrReference" - }, - { - "type": "boolean" - } - ] - }, - "default": { - "$ref": "#/definitions/defaultType" - }, - "description": { - "type": "string" - }, - "format": { - "type": "string" - } - } - }, - "discriminator": { - "type": "object", - "description": "When request bodies or response payloads may be one of a number of different schemas, a `discriminator` object can be used to aid in serialization, deserialization, and validation. 
The discriminator is a specific object in a schema which is used to inform the consumer of the specification of an alternative schema based on the value associated with it. When using the discriminator, _inline_ schemas will not be considered.", - "required": [ - "propertyName" - ], - "additionalProperties": false, - "properties": { - "propertyName": { - "type": "string" - }, - "mapping": { - "$ref": "#/definitions/strings" - } - } - }, - "xml": { - "type": "object", - "description": "A metadata object that allows for more fine-tuned XML model definitions. When using arrays, XML element names are *not* inferred (for singular/plural forms) and the `name` property SHOULD be used to add that information. See examples for expected behavior.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - "prefix": { - "type": "string" - }, - "attribute": { - "type": "boolean" - }, - "wrapped": { - "type": "boolean" - } - } - }, - "securityScheme": { - "type": "object", - "description": "Defines a security scheme that can be used by the operations. Supported schemes are HTTP authentication, an API key (either as a header or as a query parameter), OAuth2's common flows (implicit, password, application and access code) as defined in RFC6749, and OpenID Connect Discovery.", - "required": [ - "type" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "type": { - "type": "string" - }, - "description": { - "type": "string" - }, - "name": { - "type": "string" - }, - "in": { - "type": "string" - }, - "scheme": { - "type": "string" - }, - "bearerFormat": { - "type": "string" - }, - "flows": { - "$ref": "#/definitions/oauthFlows" - }, - "openIdConnectUrl": { - "type": "string" - } - } - }, - "oauthFlows": { - "type": "object", - "description": "Allows configuration of the supported OAuth Flows.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "implicit": { - "$ref": "#/definitions/oauthFlow" - }, - "password": { - "$ref": "#/definitions/oauthFlow" - }, - "clientCredentials": { - "$ref": "#/definitions/oauthFlow" - }, - "authorizationCode": { - "$ref": "#/definitions/oauthFlow" - } - } - }, - "oauthFlow": { - "type": "object", - "description": "Configuration details for a supported OAuth Flow", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "authorizationUrl": { - "type": "string" - }, - "tokenUrl": { - "type": "string" - }, - "refreshUrl": { - "type": "string" - }, - "scopes": { - "$ref": "#/definitions/strings" - } - } - }, - "securityRequirement": { - "type": "object", - "description": "Lists the required security schemes to execute this operation. The name used for each property MUST correspond to a security scheme declared in the Security Schemes under the Components Object. Security Requirement Objects that contain multiple schemes require that all schemes MUST be satisfied for a request to be authorized. This enables support for scenarios where multiple query parameters or HTTP headers are required to convey security information. 
When a list of Security Requirement Objects is defined on the Open API object or Operation Object, only one of Security Requirement Objects in the list needs to be satisfied to authorize the request.", - "additionalProperties": false, - "patternProperties": { - "^[a-zA-Z0-9\\.\\-_]+$": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - } - } - }, - "anyOrExpression": { - "oneOf": [ - { - "$ref": "#/definitions/any" - }, - { - "$ref": "#/definitions/expression" - } - ] - }, - "callbackOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/callback" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "exampleOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/example" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "headerOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/header" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "linkOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/link" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "parameterOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/parameter" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "requestBodyOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/requestBody" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "responseOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/response" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "schemaOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "securitySchemeOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/securityScheme" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "anysOrExpressions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/anyOrExpression" - } - }, - "callbacksOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/callbackOrReference" - } - }, - "encodings": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/encoding" - } - }, - "examplesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/exampleOrReference" - } - }, - "headersOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/headerOrReference" - } - }, - "linksOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/linkOrReference" - } - }, - "mediaTypes": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/mediaType" - } - }, - "parametersOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/parameterOrReference" - } - }, - "requestBodiesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/requestBodyOrReference" - } - }, - "responsesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/responseOrReference" - } - }, - "schemasOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schemaOrReference" - } - }, - "securitySchemesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/securitySchemeOrReference" - } - }, - "serverVariables": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/serverVariable" - } - }, - "strings": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "object": { - "type": "object", - "additionalProperties": 
true - }, - "any": { - "additionalProperties": true - }, - "expression": { - "type": "object", - "additionalProperties": true - }, - "specificationExtension": { - "description": "Any property starting with x- is valid.", - "oneOf": [ - { - "type": "null" - }, - { - "type": "number" - }, - { - "type": "boolean" - }, - { - "type": "string" - }, - { - "type": "object" - }, - { - "type": "array" - } - ] - }, - "defaultType": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "array" - }, - { - "type": "object" - }, - { - "type": "number" - }, - { - "type": "boolean" - }, - { - "type": "string" - } - ] - } - } -} diff --git a/src/vendor/github.com/google/gnostic/openapiv3/openapi-3.1.json b/src/vendor/github.com/google/gnostic/openapiv3/openapi-3.1.json deleted file mode 100644 index ed0b83adf..000000000 --- a/src/vendor/github.com/google/gnostic/openapiv3/openapi-3.1.json +++ /dev/null @@ -1,1250 +0,0 @@ -{ - "title": "A JSON Schema for OpenAPI 3.0.", - "id": "http://openapis.org/v3/schema.json#", - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "description": "This is the root document object of the OpenAPI document.", - "required": [ - "openapi", - "info", - "paths" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "openapi": { - "type": "string" - }, - "info": { - "$ref": "#/definitions/info" - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - }, - "paths": { - "$ref": "#/definitions/paths" - }, - "components": { - "$ref": "#/definitions/components" - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "tags": { - "type": "array", - "items": { - "$ref": "#/definitions/tag" - }, - "uniqueItems": true - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "definitions": { - "info": { - "type": "object", - "description": "The object provides metadata about the API. 
The metadata MAY be used by the clients if needed, and MAY be presented in editing or documentation generation tools for convenience.", - "required": [ - "title", - "version" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "termsOfService": { - "type": "string" - }, - "contact": { - "$ref": "#/definitions/contact" - }, - "license": { - "$ref": "#/definitions/license" - }, - "version": { - "type": "string" - }, - "summary": { - "type": "string" - } - } - }, - "contact": { - "type": "object", - "description": "Contact information for the exposed API.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string", - "format": "uri" - }, - "email": { - "type": "string", - "format": "email" - } - } - }, - "license": { - "type": "object", - "description": "License information for the exposed API.", - "required": [ - "name" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - } - } - }, - "server": { - "type": "object", - "description": "An object representing a Server.", - "required": [ - "url" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "url": { - "type": "string" - }, - "description": { - "type": "string" - }, - "variables": { - "$ref": "#/definitions/serverVariables" - } - } - }, - "serverVariable": { - "type": "object", - "description": "An object representing a Server Variable for server URL template substitution.", - "required": [ - "default" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "enum": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "default": { - "type": "string" - }, - "description": { - "type": "string" - } - } - }, - "components": { - "type": "object", - "description": "Holds a set of reusable objects for different aspects of the OAS. All objects defined within the components object will have no effect on the API unless they are explicitly referenced from properties outside the components object.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "schemas": { - "$ref": "#/definitions/schemasOrReferences" - }, - "responses": { - "$ref": "#/definitions/responsesOrReferences" - }, - "parameters": { - "$ref": "#/definitions/parametersOrReferences" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "requestBodies": { - "$ref": "#/definitions/requestBodiesOrReferences" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "securitySchemes": { - "$ref": "#/definitions/securitySchemesOrReferences" - }, - "links": { - "$ref": "#/definitions/linksOrReferences" - }, - "callbacks": { - "$ref": "#/definitions/callbacksOrReferences" - } - } - }, - "paths": { - "type": "object", - "description": "Holds the relative paths to the individual endpoints and their operations. 
The path is appended to the URL from the `Server Object` in order to construct the full URL. The Paths MAY be empty, due to ACL constraints.", - "additionalProperties": false, - "patternProperties": { - "^/": { - "$ref": "#/definitions/pathItem" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - } - }, - "pathItem": { - "type": "object", - "description": "Describes the operations available on a single path. A Path Item MAY be empty, due to ACL constraints. The path itself is still exposed to the documentation viewer but they will not know which operations and parameters are available.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "get": { - "$ref": "#/definitions/operation" - }, - "put": { - "$ref": "#/definitions/operation" - }, - "post": { - "$ref": "#/definitions/operation" - }, - "delete": { - "$ref": "#/definitions/operation" - }, - "options": { - "$ref": "#/definitions/operation" - }, - "head": { - "$ref": "#/definitions/operation" - }, - "patch": { - "$ref": "#/definitions/operation" - }, - "trace": { - "$ref": "#/definitions/operation" - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - }, - "parameters": { - "type": "array", - "items": { - "$ref": "#/definitions/parameterOrReference" - }, - "uniqueItems": true - } - } - }, - "operation": { - "type": "object", - "description": "Describes a single API operation on a path.", - "required": [ - "responses" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "operationId": { - "type": "string" - }, - "parameters": { - "type": "array", - "items": { - "$ref": "#/definitions/parameterOrReference" - }, - "uniqueItems": true - }, - "requestBody": { - "$ref": "#/definitions/requestBodyOrReference" - }, - "responses": { - "$ref": "#/definitions/responses" - }, - "callbacks": { - "$ref": "#/definitions/callbacksOrReferences" - }, - "deprecated": { - "type": "boolean" - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - } - } - }, - "externalDocs": { - "type": "object", - "description": "Allows referencing an external resource for extended documentation.", - "required": [ - "url" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "url": { - "type": "string" - } - } - }, - "parameter": { - "type": "object", - "description": "Describes a single operation parameter. 
A unique parameter is defined by a combination of a name and location.", - "required": [ - "name", - "in" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "in": { - "type": "string" - }, - "description": { - "type": "string" - }, - "required": { - "type": "boolean" - }, - "deprecated": { - "type": "boolean" - }, - "allowEmptyValue": { - "type": "boolean" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - }, - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - } - } - }, - "requestBody": { - "type": "object", - "description": "Describes a single request body.", - "required": [ - "content" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - }, - "required": { - "type": "boolean" - } - } - }, - "mediaType": { - "type": "object", - "description": "Each Media Type Object provides schema and examples for the media type identified by its key.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "encoding": { - "$ref": "#/definitions/encodings" - } - } - }, - "encoding": { - "type": "object", - "description": "A single encoding definition applied to a single schema property.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "contentType": { - "type": "string" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - } - } - }, - "responses": { - "type": "object", - "description": "A container for the expected responses of an operation. The container maps a HTTP response code to the expected response. The documentation is not necessarily expected to cover all possible HTTP response codes because they may not be known in advance. However, documentation is expected to cover a successful operation response and any known errors. The `default` MAY be used as a default response object for all HTTP codes that are not covered individually by the specification. 
The `Responses Object` MUST contain at least one response code, and it SHOULD be the response for a successful operation call.", - "additionalProperties": false, - "patternProperties": { - "^([0-9X]{3})$": { - "$ref": "#/definitions/responseOrReference" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "default": { - "$ref": "#/definitions/responseOrReference" - } - } - }, - "response": { - "type": "object", - "description": "Describes a single response from an API Operation, including design-time, static `links` to operations based on the response.", - "required": [ - "description" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - }, - "links": { - "$ref": "#/definitions/linksOrReferences" - } - } - }, - "callback": { - "type": "object", - "description": "A map of possible out-of band callbacks related to the parent operation. Each value in the map is a Path Item Object that describes a set of requests that may be initiated by the API provider and the expected responses. The key value used to identify the callback object is an expression, evaluated at runtime, that identifies a URL to use for the callback operation.", - "additionalProperties": false, - "patternProperties": { - "^": { - "$ref": "#/definitions/pathItem" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - } - }, - "example": { - "type": "object", - "description": "", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "value": { - "$ref": "#/definitions/any" - }, - "externalValue": { - "type": "string" - } - } - }, - "link": { - "type": "object", - "description": "The `Link object` represents a possible design-time link for a response. The presence of a link does not guarantee the caller's ability to successfully invoke it, rather it provides a known relationship and traversal mechanism between responses and other operations. Unlike _dynamic_ links (i.e. links provided **in** the response payload), the OAS linking mechanism does not require link information in the runtime response. For computing links, and providing instructions to execute them, a runtime expression is used for accessing values in an operation and using them as parameters while invoking the linked operation.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "operationRef": { - "type": "string" - }, - "operationId": { - "type": "string" - }, - "parameters": { - "$ref": "#/definitions/anyOrExpression" - }, - "requestBody": { - "$ref": "#/definitions/anyOrExpression" - }, - "description": { - "type": "string" - }, - "server": { - "$ref": "#/definitions/server" - } - } - }, - "header": { - "type": "object", - "description": "The Header Object follows the structure of the Parameter Object with the following changes: 1. `name` MUST NOT be specified, it is given in the corresponding `headers` map. 1. `in` MUST NOT be specified, it is implicitly in `header`. 1. 
All traits that are affected by the location MUST be applicable to a location of `header` (for example, `style`).", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "required": { - "type": "boolean" - }, - "deprecated": { - "type": "boolean" - }, - "allowEmptyValue": { - "type": "boolean" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - }, - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - } - } - }, - "tag": { - "type": "object", - "description": "Adds metadata to a single tag that is used by the Operation Object. It is not mandatory to have a Tag Object per tag defined in the Operation Object instances.", - "required": [ - "name" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - } - }, - "reference": { - "type": "object", - "description": "A simple object to allow referencing other components in the specification, internally and externally. The Reference Object is defined by JSON Reference and follows the same structure, behavior and rules. For this specification, reference resolution is accomplished as defined by the JSON Reference specification and not by the JSON Schema specification.", - "required": [ - "$ref" - ], - "additionalProperties": false, - "properties": { - "$ref": { - "type": "string" - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - } - } - }, - "schema": { - "type": "object", - "description": "The Schema Object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. This object is an extended subset of the JSON Schema Specification Wright Draft 00. For more information about the properties, see JSON Schema Core and JSON Schema Validation. 
Unless stated otherwise, the property definitions follow the JSON Schema.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "nullable": { - "type": "boolean" - }, - "discriminator": { - "$ref": "#/definitions/discriminator" - }, - "readOnly": { - "type": "boolean" - }, - "writeOnly": { - "type": "boolean" - }, - "xml": { - "$ref": "#/definitions/xml" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": { - "$ref": "#/definitions/any" - }, - "deprecated": { - "type": "boolean" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxLength" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minLength" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxItems" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minItems" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "maxProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxProperties" - }, - "minProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minProperties" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/required" - }, - "enum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" - }, - "type": { - "type": "string" - }, - "allOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "oneOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "anyOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "not": { - "$ref": "#/definitions/schema" - }, - "items": { - "anyOf": [ - { - "$ref": "#/definitions/schemaOrReference" - }, - { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - } - ] - }, - "properties": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schemaOrReference" - } - }, - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/definitions/schemaOrReference" - }, - { - "type": "boolean" - } - ] - }, - "default": { - "$ref": "#/definitions/defaultType" - }, - "description": { - "type": "string" - }, - "format": { - "type": "string" - } - } - }, - "discriminator": { - "type": "object", - "description": "When request bodies or response payloads may be one of a number of different schemas, a `discriminator` object can be used to aid in serialization, deserialization, and validation. 
The discriminator is a specific object in a schema which is used to inform the consumer of the specification of an alternative schema based on the value associated with it. When using the discriminator, _inline_ schemas will not be considered.", - "required": [ - "propertyName" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "propertyName": { - "type": "string" - }, - "mapping": { - "$ref": "#/definitions/strings" - } - } - }, - "xml": { - "type": "object", - "description": "A metadata object that allows for more fine-tuned XML model definitions. When using arrays, XML element names are *not* inferred (for singular/plural forms) and the `name` property SHOULD be used to add that information. See examples for expected behavior.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - "prefix": { - "type": "string" - }, - "attribute": { - "type": "boolean" - }, - "wrapped": { - "type": "boolean" - } - } - }, - "securityScheme": { - "type": "object", - "description": "Defines a security scheme that can be used by the operations. Supported schemes are HTTP authentication, an API key (either as a header, a cookie parameter or as a query parameter), mutual TLS (use of a client certificate), OAuth2's common flows (implicit, password, application and access code) as defined in RFC6749, and OpenID Connect. Please note that currently (2019) the implicit flow is about to be deprecated OAuth 2.0 Security Best Current Practice. Recommended for most use case is Authorization Code Grant flow with PKCE.", - "required": [ - "type" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "type": { - "type": "string" - }, - "description": { - "type": "string" - }, - "name": { - "type": "string" - }, - "in": { - "type": "string" - }, - "scheme": { - "type": "string" - }, - "bearerFormat": { - "type": "string" - }, - "flows": { - "$ref": "#/definitions/oauthFlows" - }, - "openIdConnectUrl": { - "type": "string" - } - } - }, - "oauthFlows": { - "type": "object", - "description": "Allows configuration of the supported OAuth Flows.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "implicit": { - "$ref": "#/definitions/oauthFlow" - }, - "password": { - "$ref": "#/definitions/oauthFlow" - }, - "clientCredentials": { - "$ref": "#/definitions/oauthFlow" - }, - "authorizationCode": { - "$ref": "#/definitions/oauthFlow" - } - } - }, - "oauthFlow": { - "type": "object", - "description": "Configuration details for a supported OAuth Flow", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "authorizationUrl": { - "type": "string" - }, - "tokenUrl": { - "type": "string" - }, - "refreshUrl": { - "type": "string" - }, - "scopes": { - "$ref": "#/definitions/strings" - } - } - }, - "securityRequirement": { - "type": "object", - "description": "Lists the required security schemes to execute this operation. The name used for each property MUST correspond to a security scheme declared in the Security Schemes under the Components Object. 
Security Requirement Objects that contain multiple schemes require that all schemes MUST be satisfied for a request to be authorized. This enables support for scenarios where multiple query parameters or HTTP headers are required to convey security information. When a list of Security Requirement Objects is defined on the OpenAPI Object or Operation Object, only one of the Security Requirement Objects in the list needs to be satisfied to authorize the request.", - "additionalProperties": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - } - }, - "anyOrExpression": { - "oneOf": [ - { - "$ref": "#/definitions/any" - }, - { - "$ref": "#/definitions/expression" - } - ] - }, - "callbackOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/callback" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "exampleOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/example" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "headerOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/header" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "linkOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/link" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "parameterOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/parameter" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "requestBodyOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/requestBody" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "responseOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/response" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "schemaOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "securitySchemeOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/securityScheme" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "callbacksOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/callbackOrReference" - } - }, - "encodings": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/encoding" - } - }, - "examplesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/exampleOrReference" - } - }, - "headersOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/headerOrReference" - } - }, - "linksOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/linkOrReference" - } - }, - "mediaTypes": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/mediaType" - } - }, - "parametersOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/parameterOrReference" - } - }, - "requestBodiesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/requestBodyOrReference" - } - }, - "responsesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/responseOrReference" - } - }, - "schemasOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schemaOrReference" - } - }, - "securitySchemesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/securitySchemeOrReference" - } - }, - "serverVariables": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/serverVariable" - } - }, - "strings": { - "type": "object", - "additionalProperties": { - "type": 
"string" - } - }, - "object": { - "type": "object", - "additionalProperties": true - }, - "any": { - "additionalProperties": true - }, - "expression": { - "type": "object", - "additionalProperties": true - }, - "specificationExtension": { - "description": "Any property starting with x- is valid.", - "oneOf": [ - { - "type": "null" - }, - { - "type": "number" - }, - { - "type": "boolean" - }, - { - "type": "string" - }, - { - "type": "object" - }, - { - "type": "array" - } - ] - }, - "defaultType": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "array" - }, - { - "type": "object" - }, - { - "type": "number" - }, - { - "type": "boolean" - }, - { - "type": "string" - } - ] - } - } -} diff --git a/src/vendor/github.com/google/shlex/COPYING b/src/vendor/github.com/google/shlex/COPYING deleted file mode 100644 index d64569567..000000000 --- a/src/vendor/github.com/google/shlex/COPYING +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/src/vendor/github.com/google/shlex/README b/src/vendor/github.com/google/shlex/README deleted file mode 100644 index c86bcc066..000000000 --- a/src/vendor/github.com/google/shlex/README +++ /dev/null @@ -1,2 +0,0 @@ -go-shlex is a simple lexer for go that supports shell-style quoting, -commenting, and escaping. diff --git a/src/vendor/github.com/google/shlex/shlex.go b/src/vendor/github.com/google/shlex/shlex.go deleted file mode 100644 index d98308bce..000000000 --- a/src/vendor/github.com/google/shlex/shlex.go +++ /dev/null @@ -1,416 +0,0 @@ -/* -Copyright 2012 Google Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package shlex implements a simple lexer which splits input in to tokens using -shell-style rules for quoting and commenting. - -The basic use case uses the default ASCII lexer to split a string into sub-strings: - - shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"} - -To process a stream of strings: - - l := NewLexer(os.Stdin) - for ; token, err := l.Next(); err != nil { - // process token - } - -To access the raw token stream (which includes tokens for comments): - - t := NewTokenizer(os.Stdin) - for ; token, err := t.Next(); err != nil { - // process token - } - -*/ -package shlex - -import ( - "bufio" - "fmt" - "io" - "strings" -) - -// TokenType is a top-level token classification: A word, space, comment, unknown. -type TokenType int - -// runeTokenClass is the type of a UTF-8 character classification: A quote, space, escape. -type runeTokenClass int - -// the internal state used by the lexer state machine -type lexerState int - -// Token is a (type, value) pair representing a lexographical token. -type Token struct { - tokenType TokenType - value string -} - -// Equal reports whether tokens a, and b, are equal. 
-// Two tokens are equal if both their types and values are equal. A nil token can -// never be equal to another token. -func (a *Token) Equal(b *Token) bool { - if a == nil || b == nil { - return false - } - if a.tokenType != b.tokenType { - return false - } - return a.value == b.value -} - -// Named classes of UTF-8 runes -const ( - spaceRunes = " \t\r\n" - escapingQuoteRunes = `"` - nonEscapingQuoteRunes = "'" - escapeRunes = `\` - commentRunes = "#" -) - -// Classes of rune token -const ( - unknownRuneClass runeTokenClass = iota - spaceRuneClass - escapingQuoteRuneClass - nonEscapingQuoteRuneClass - escapeRuneClass - commentRuneClass - eofRuneClass -) - -// Classes of lexographic token -const ( - UnknownToken TokenType = iota - WordToken - SpaceToken - CommentToken -) - -// Lexer state machine states -const ( - startState lexerState = iota // no runes have been seen - inWordState // processing regular runes in a word - escapingState // we have just consumed an escape rune; the next rune is literal - escapingQuotedState // we have just consumed an escape rune within a quoted string - quotingEscapingState // we are within a quoted string that supports escaping ("...") - quotingState // we are within a string that does not support escaping ('...') - commentState // we are within a comment (everything following an unquoted or unescaped # -) - -// tokenClassifier is used for classifying rune characters. -type tokenClassifier map[rune]runeTokenClass - -func (typeMap tokenClassifier) addRuneClass(runes string, tokenType runeTokenClass) { - for _, runeChar := range runes { - typeMap[runeChar] = tokenType - } -} - -// newDefaultClassifier creates a new classifier for ASCII characters. -func newDefaultClassifier() tokenClassifier { - t := tokenClassifier{} - t.addRuneClass(spaceRunes, spaceRuneClass) - t.addRuneClass(escapingQuoteRunes, escapingQuoteRuneClass) - t.addRuneClass(nonEscapingQuoteRunes, nonEscapingQuoteRuneClass) - t.addRuneClass(escapeRunes, escapeRuneClass) - t.addRuneClass(commentRunes, commentRuneClass) - return t -} - -// ClassifyRune classifiees a rune -func (t tokenClassifier) ClassifyRune(runeVal rune) runeTokenClass { - return t[runeVal] -} - -// Lexer turns an input stream into a sequence of tokens. Whitespace and comments are skipped. -type Lexer Tokenizer - -// NewLexer creates a new lexer from an input stream. -func NewLexer(r io.Reader) *Lexer { - - return (*Lexer)(NewTokenizer(r)) -} - -// Next returns the next word, or an error. If there are no more words, -// the error will be io.EOF. -func (l *Lexer) Next() (string, error) { - for { - token, err := (*Tokenizer)(l).Next() - if err != nil { - return "", err - } - switch token.tokenType { - case WordToken: - return token.value, nil - case CommentToken: - // skip comments - default: - return "", fmt.Errorf("Unknown token type: %v", token.tokenType) - } - } -} - -// Tokenizer turns an input stream into a sequence of typed tokens -type Tokenizer struct { - input bufio.Reader - classifier tokenClassifier -} - -// NewTokenizer creates a new tokenizer from an input stream. -func NewTokenizer(r io.Reader) *Tokenizer { - input := bufio.NewReader(r) - classifier := newDefaultClassifier() - return &Tokenizer{ - input: *input, - classifier: classifier} -} - -// scanStream scans the stream for the next token using the internal state machine. -// It will panic if it encounters a rune which it does not know how to handle. 
-func (t *Tokenizer) scanStream() (*Token, error) { - state := startState - var tokenType TokenType - var value []rune - var nextRune rune - var nextRuneType runeTokenClass - var err error - - for { - nextRune, _, err = t.input.ReadRune() - nextRuneType = t.classifier.ClassifyRune(nextRune) - - if err == io.EOF { - nextRuneType = eofRuneClass - err = nil - } else if err != nil { - return nil, err - } - - switch state { - case startState: // no runes read yet - { - switch nextRuneType { - case eofRuneClass: - { - return nil, io.EOF - } - case spaceRuneClass: - { - } - case escapingQuoteRuneClass: - { - tokenType = WordToken - state = quotingEscapingState - } - case nonEscapingQuoteRuneClass: - { - tokenType = WordToken - state = quotingState - } - case escapeRuneClass: - { - tokenType = WordToken - state = escapingState - } - case commentRuneClass: - { - tokenType = CommentToken - state = commentState - } - default: - { - tokenType = WordToken - value = append(value, nextRune) - state = inWordState - } - } - } - case inWordState: // in a regular word - { - switch nextRuneType { - case eofRuneClass: - { - token := &Token{ - tokenType: tokenType, - value: string(value)} - return token, err - } - case spaceRuneClass: - { - token := &Token{ - tokenType: tokenType, - value: string(value)} - return token, err - } - case escapingQuoteRuneClass: - { - state = quotingEscapingState - } - case nonEscapingQuoteRuneClass: - { - state = quotingState - } - case escapeRuneClass: - { - state = escapingState - } - default: - { - value = append(value, nextRune) - } - } - } - case escapingState: // the rune after an escape character - { - switch nextRuneType { - case eofRuneClass: - { - err = fmt.Errorf("EOF found after escape character") - token := &Token{ - tokenType: tokenType, - value: string(value)} - return token, err - } - default: - { - state = inWordState - value = append(value, nextRune) - } - } - } - case escapingQuotedState: // the next rune after an escape character, in double quotes - { - switch nextRuneType { - case eofRuneClass: - { - err = fmt.Errorf("EOF found after escape character") - token := &Token{ - tokenType: tokenType, - value: string(value)} - return token, err - } - default: - { - state = quotingEscapingState - value = append(value, nextRune) - } - } - } - case quotingEscapingState: // in escaping double quotes - { - switch nextRuneType { - case eofRuneClass: - { - err = fmt.Errorf("EOF found when expecting closing quote") - token := &Token{ - tokenType: tokenType, - value: string(value)} - return token, err - } - case escapingQuoteRuneClass: - { - state = inWordState - } - case escapeRuneClass: - { - state = escapingQuotedState - } - default: - { - value = append(value, nextRune) - } - } - } - case quotingState: // in non-escaping single quotes - { - switch nextRuneType { - case eofRuneClass: - { - err = fmt.Errorf("EOF found when expecting closing quote") - token := &Token{ - tokenType: tokenType, - value: string(value)} - return token, err - } - case nonEscapingQuoteRuneClass: - { - state = inWordState - } - default: - { - value = append(value, nextRune) - } - } - } - case commentState: // in a comment - { - switch nextRuneType { - case eofRuneClass: - { - token := &Token{ - tokenType: tokenType, - value: string(value)} - return token, err - } - case spaceRuneClass: - { - if nextRune == '\n' { - state = startState - token := &Token{ - tokenType: tokenType, - value: string(value)} - return token, err - } else { - value = append(value, nextRune) - } - } - default: - { - value = 
append(value, nextRune) - } - } - } - default: - { - return nil, fmt.Errorf("Unexpected state: %v", state) - } - } - } -} - -// Next returns the next token in the stream. -func (t *Tokenizer) Next() (*Token, error) { - return t.scanStream() -} - -// Split partitions a string into a slice of strings. -func Split(s string) ([]string, error) { - l := NewLexer(strings.NewReader(s)) - subStrings := make([]string, 0) - for { - word, err := l.Next() - if err != nil { - if err == io.EOF { - return subStrings, nil - } - return subStrings, err - } - subStrings = append(subStrings, word) - } -} diff --git a/src/vendor/github.com/gregjones/httpcache/.travis.yml b/src/vendor/github.com/gregjones/httpcache/.travis.yml deleted file mode 100644 index b5ffbe03d..000000000 --- a/src/vendor/github.com/gregjones/httpcache/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -sudo: false -language: go -go: - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - master -matrix: - allow_failures: - - go: master - fast_finish: true -install: - - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d .) - - go tool vet . - - go test -v -race ./... diff --git a/src/vendor/github.com/gregjones/httpcache/LICENSE.txt b/src/vendor/github.com/gregjones/httpcache/LICENSE.txt deleted file mode 100644 index 81316beb0..000000000 --- a/src/vendor/github.com/gregjones/httpcache/LICENSE.txt +++ /dev/null @@ -1,7 +0,0 @@ -Copyright © 2012 Greg Jones (greg.jones@gmail.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/src/vendor/github.com/gregjones/httpcache/README.md b/src/vendor/github.com/gregjones/httpcache/README.md deleted file mode 100644 index 09c9e7c17..000000000 --- a/src/vendor/github.com/gregjones/httpcache/README.md +++ /dev/null @@ -1,25 +0,0 @@ -httpcache -========= - -[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache) - -Package httpcache provides a http.RoundTripper implementation that works as a mostly [RFC 7234](https://tools.ietf.org/html/rfc7234) compliant cache for http responses. - -It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy).
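For reference, the httpcache package being dropped from the vendor tree here is consumed through a caching http.Client. Below is a minimal sketch using only identifiers that appear in the deleted sources (NewMemoryCacheTransport, Transport.Client, XFromCache); the target URL and the loop are illustrative, and whether the second request is actually answered from the cache depends on the response's Cache-Control/Expires headers.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gregjones/httpcache"
)

func main() {
	// NewMemoryCacheTransport wires an in-memory Cache into a Transport
	// with MarkCachedResponses enabled, so cache hits are tagged.
	t := httpcache.NewMemoryCacheTransport()
	client := t.Client() // *http.Client whose RoundTripper is the caching Transport

	for i := 0; i < 2; i++ {
		resp, err := client.Get("https://example.com/") // illustrative URL
		if err != nil {
			panic(err)
		}
		resp.Body.Close()
		// X-From-Cache is "1" when the response was served from the cache.
		fmt.Println("from cache:", resp.Header.Get(httpcache.XFromCache))
	}
	_ = http.StatusOK // keep net/http import explicit in this sketch
}
```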
- -Cache Backends --------------- - -- The built-in 'memory' cache stores responses in an in-memory map. -- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library. -- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers. -- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage. -- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb). -- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries. -- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache. -- [`github.com/birkelund/boltdbcache`](https://github.com/birkelund/boltdbcache) provides a BoltDB implementation (based on the [bbolt](https://github.com/coreos/bbolt) fork). - -License -------- - -- [MIT License](LICENSE.txt) diff --git a/src/vendor/github.com/gregjones/httpcache/httpcache.go b/src/vendor/github.com/gregjones/httpcache/httpcache.go deleted file mode 100644 index f6a2ec4a5..000000000 --- a/src/vendor/github.com/gregjones/httpcache/httpcache.go +++ /dev/null @@ -1,551 +0,0 @@ -// Package httpcache provides a http.RoundTripper implementation that works as a -// mostly RFC-compliant cache for http responses. -// -// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client -// and not for a shared proxy). -// -package httpcache - -import ( - "bufio" - "bytes" - "errors" - "io" - "io/ioutil" - "net/http" - "net/http/httputil" - "strings" - "sync" - "time" -) - -const ( - stale = iota - fresh - transparent - // XFromCache is the header added to responses that are returned from the cache - XFromCache = "X-From-Cache" -) - -// A Cache interface is used by the Transport to store and retrieve responses. -type Cache interface { - // Get returns the []byte representation of a cached response and a bool - // set to true if the value isn't empty - Get(key string) (responseBytes []byte, ok bool) - // Set stores the []byte representation of a response against a key - Set(key string, responseBytes []byte) - // Delete removes the value associated with the key - Delete(key string) -} - -// cacheKey returns the cache key for req. -func cacheKey(req *http.Request) string { - if req.Method == http.MethodGet { - return req.URL.String() - } else { - return req.Method + " " + req.URL.String() - } -} - -// CachedResponse returns the cached http.Response for req if present, and nil -// otherwise. -func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) { - cachedVal, ok := c.Get(cacheKey(req)) - if !ok { - return - } - - b := bytes.NewBuffer(cachedVal) - return http.ReadResponse(bufio.NewReader(b), req) -} - -// MemoryCache is an implemtation of Cache that stores responses in an in-memory map. 
-type MemoryCache struct { - mu sync.RWMutex - items map[string][]byte -} - -// Get returns the []byte representation of the response and true if present, false if not -func (c *MemoryCache) Get(key string) (resp []byte, ok bool) { - c.mu.RLock() - resp, ok = c.items[key] - c.mu.RUnlock() - return resp, ok -} - -// Set saves response resp to the cache with key -func (c *MemoryCache) Set(key string, resp []byte) { - c.mu.Lock() - c.items[key] = resp - c.mu.Unlock() -} - -// Delete removes key from the cache -func (c *MemoryCache) Delete(key string) { - c.mu.Lock() - delete(c.items, key) - c.mu.Unlock() -} - -// NewMemoryCache returns a new Cache that will store items in an in-memory map -func NewMemoryCache() *MemoryCache { - c := &MemoryCache{items: map[string][]byte{}} - return c -} - -// Transport is an implementation of http.RoundTripper that will return values from a cache -// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since) -// to repeated requests allowing servers to return 304 / Not Modified -type Transport struct { - // The RoundTripper interface actually used to make requests - // If nil, http.DefaultTransport is used - Transport http.RoundTripper - Cache Cache - // If true, responses returned from the cache will be given an extra header, X-From-Cache - MarkCachedResponses bool -} - -// NewTransport returns a new Transport with the -// provided Cache implementation and MarkCachedResponses set to true -func NewTransport(c Cache) *Transport { - return &Transport{Cache: c, MarkCachedResponses: true} -} - -// Client returns an *http.Client that caches responses. -func (t *Transport) Client() *http.Client { - return &http.Client{Transport: t} -} - -// varyMatches will return false unless all of the cached values for the headers listed in Vary -// match the new request -func varyMatches(cachedResp *http.Response, req *http.Request) bool { - for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") { - header = http.CanonicalHeaderKey(header) - if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) { - return false - } - } - return true -} - -// RoundTrip takes a Request and returns a Response -// -// If there is a fresh Response already in cache, then it will be returned without connecting to -// the server. -// -// If there is a stale Response, then any validators it contains will be set on the new request -// to give the server a chance to respond with NotModified. If this happens, then the cached Response -// will be returned. 
-func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { - cacheKey := cacheKey(req) - cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == "" - var cachedResp *http.Response - if cacheable { - cachedResp, err = CachedResponse(t.Cache, req) - } else { - // Need to invalidate an existing value - t.Cache.Delete(cacheKey) - } - - transport := t.Transport - if transport == nil { - transport = http.DefaultTransport - } - - if cacheable && cachedResp != nil && err == nil { - if t.MarkCachedResponses { - cachedResp.Header.Set(XFromCache, "1") - } - - if varyMatches(cachedResp, req) { - // Can only use cached value if the new request doesn't Vary significantly - freshness := getFreshness(cachedResp.Header, req.Header) - if freshness == fresh { - return cachedResp, nil - } - - if freshness == stale { - var req2 *http.Request - // Add validators if caller hasn't already done so - etag := cachedResp.Header.Get("etag") - if etag != "" && req.Header.Get("etag") == "" { - req2 = cloneRequest(req) - req2.Header.Set("if-none-match", etag) - } - lastModified := cachedResp.Header.Get("last-modified") - if lastModified != "" && req.Header.Get("last-modified") == "" { - if req2 == nil { - req2 = cloneRequest(req) - } - req2.Header.Set("if-modified-since", lastModified) - } - if req2 != nil { - req = req2 - } - } - } - - resp, err = transport.RoundTrip(req) - if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified { - // Replace the 304 response with the one from cache, but update with some new headers - endToEndHeaders := getEndToEndHeaders(resp.Header) - for _, header := range endToEndHeaders { - cachedResp.Header[header] = resp.Header[header] - } - resp = cachedResp - } else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) && - req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) { - // In case of transport failure and stale-if-error activated, returns cached content - // when available - return cachedResp, nil - } else { - if err != nil || resp.StatusCode != http.StatusOK { - t.Cache.Delete(cacheKey) - } - if err != nil { - return nil, err - } - } - } else { - reqCacheControl := parseCacheControl(req.Header) - if _, ok := reqCacheControl["only-if-cached"]; ok { - resp = newGatewayTimeoutResponse(req) - } else { - resp, err = transport.RoundTrip(req) - if err != nil { - return nil, err - } - } - } - - if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) { - for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") { - varyKey = http.CanonicalHeaderKey(varyKey) - fakeHeader := "X-Varied-" + varyKey - reqValue := req.Header.Get(varyKey) - if reqValue != "" { - resp.Header.Set(fakeHeader, reqValue) - } - } - switch req.Method { - case "GET": - // Delay caching until EOF is reached. - resp.Body = &cachingReadCloser{ - R: resp.Body, - OnEOF: func(r io.Reader) { - resp := *resp - resp.Body = ioutil.NopCloser(r) - respBytes, err := httputil.DumpResponse(&resp, true) - if err == nil { - t.Cache.Set(cacheKey, respBytes) - } - }, - } - default: - respBytes, err := httputil.DumpResponse(resp, true) - if err == nil { - t.Cache.Set(cacheKey, respBytes) - } - } - } else { - t.Cache.Delete(cacheKey) - } - return resp, nil -} - -// ErrNoDateHeader indicates that the HTTP headers contained no Date header. -var ErrNoDateHeader = errors.New("no Date header") - -// Date parses and returns the value of the Date header. 
-func Date(respHeaders http.Header) (date time.Time, err error) { - dateHeader := respHeaders.Get("date") - if dateHeader == "" { - err = ErrNoDateHeader - return - } - - return time.Parse(time.RFC1123, dateHeader) -} - -type realClock struct{} - -func (c *realClock) since(d time.Time) time.Duration { - return time.Since(d) -} - -type timer interface { - since(d time.Time) time.Duration -} - -var clock timer = &realClock{} - -// getFreshness will return one of fresh/stale/transparent based on the cache-control -// values of the request and the response -// -// fresh indicates the response can be returned -// stale indicates that the response needs validating before it is returned -// transparent indicates the response should not be used to fulfil the request -// -// Because this is only a private cache, 'public' and 'private' in cache-control aren't -// signficant. Similarly, smax-age isn't used. -func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) { - respCacheControl := parseCacheControl(respHeaders) - reqCacheControl := parseCacheControl(reqHeaders) - if _, ok := reqCacheControl["no-cache"]; ok { - return transparent - } - if _, ok := respCacheControl["no-cache"]; ok { - return stale - } - if _, ok := reqCacheControl["only-if-cached"]; ok { - return fresh - } - - date, err := Date(respHeaders) - if err != nil { - return stale - } - currentAge := clock.since(date) - - var lifetime time.Duration - var zeroDuration time.Duration - - // If a response includes both an Expires header and a max-age directive, - // the max-age directive overrides the Expires header, even if the Expires header is more restrictive. - if maxAge, ok := respCacheControl["max-age"]; ok { - lifetime, err = time.ParseDuration(maxAge + "s") - if err != nil { - lifetime = zeroDuration - } - } else { - expiresHeader := respHeaders.Get("Expires") - if expiresHeader != "" { - expires, err := time.Parse(time.RFC1123, expiresHeader) - if err != nil { - lifetime = zeroDuration - } else { - lifetime = expires.Sub(date) - } - } - } - - if maxAge, ok := reqCacheControl["max-age"]; ok { - // the client is willing to accept a response whose age is no greater than the specified time in seconds - lifetime, err = time.ParseDuration(maxAge + "s") - if err != nil { - lifetime = zeroDuration - } - } - if minfresh, ok := reqCacheControl["min-fresh"]; ok { - // the client wants a response that will still be fresh for at least the specified number of seconds. - minfreshDuration, err := time.ParseDuration(minfresh + "s") - if err == nil { - currentAge = time.Duration(currentAge + minfreshDuration) - } - } - - if maxstale, ok := reqCacheControl["max-stale"]; ok { - // Indicates that the client is willing to accept a response that has exceeded its expiration time. - // If max-stale is assigned a value, then the client is willing to accept a response that has exceeded - // its expiration time by no more than the specified number of seconds. - // If no value is assigned to max-stale, then the client is willing to accept a stale response of any age. - // - // Responses served only because of a max-stale value are supposed to have a Warning header added to them, - // but that seems like a hassle, and is it actually useful? If so, then there needs to be a different - // return-value available here. 
- if maxstale == "" { - return fresh - } - maxstaleDuration, err := time.ParseDuration(maxstale + "s") - if err == nil { - currentAge = time.Duration(currentAge - maxstaleDuration) - } - } - - if lifetime > currentAge { - return fresh - } - - return stale -} - -// Returns true if either the request or the response includes the stale-if-error -// cache control extension: https://tools.ietf.org/html/rfc5861 -func canStaleOnError(respHeaders, reqHeaders http.Header) bool { - respCacheControl := parseCacheControl(respHeaders) - reqCacheControl := parseCacheControl(reqHeaders) - - var err error - lifetime := time.Duration(-1) - - if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok { - if staleMaxAge != "" { - lifetime, err = time.ParseDuration(staleMaxAge + "s") - if err != nil { - return false - } - } else { - return true - } - } - if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok { - if staleMaxAge != "" { - lifetime, err = time.ParseDuration(staleMaxAge + "s") - if err != nil { - return false - } - } else { - return true - } - } - - if lifetime >= 0 { - date, err := Date(respHeaders) - if err != nil { - return false - } - currentAge := clock.since(date) - if lifetime > currentAge { - return true - } - } - - return false -} - -func getEndToEndHeaders(respHeaders http.Header) []string { - // These headers are always hop-by-hop - hopByHopHeaders := map[string]struct{}{ - "Connection": struct{}{}, - "Keep-Alive": struct{}{}, - "Proxy-Authenticate": struct{}{}, - "Proxy-Authorization": struct{}{}, - "Te": struct{}{}, - "Trailers": struct{}{}, - "Transfer-Encoding": struct{}{}, - "Upgrade": struct{}{}, - } - - for _, extra := range strings.Split(respHeaders.Get("connection"), ",") { - // any header listed in connection, if present, is also considered hop-by-hop - if strings.Trim(extra, " ") != "" { - hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{} - } - } - endToEndHeaders := []string{} - for respHeader, _ := range respHeaders { - if _, ok := hopByHopHeaders[respHeader]; !ok { - endToEndHeaders = append(endToEndHeaders, respHeader) - } - } - return endToEndHeaders -} - -func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) { - if _, ok := respCacheControl["no-store"]; ok { - return false - } - if _, ok := reqCacheControl["no-store"]; ok { - return false - } - return true -} - -func newGatewayTimeoutResponse(req *http.Request) *http.Response { - var braw bytes.Buffer - braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n") - resp, err := http.ReadResponse(bufio.NewReader(&braw), req) - if err != nil { - panic(err) - } - return resp -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. 
-// (This function copyright goauth2 authors: https://code.google.com/p/goauth2) -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header) - for k, s := range r.Header { - r2.Header[k] = s - } - return r2 -} - -type cacheControl map[string]string - -func parseCacheControl(headers http.Header) cacheControl { - cc := cacheControl{} - ccHeader := headers.Get("Cache-Control") - for _, part := range strings.Split(ccHeader, ",") { - part = strings.Trim(part, " ") - if part == "" { - continue - } - if strings.ContainsRune(part, '=') { - keyval := strings.Split(part, "=") - cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",") - } else { - cc[part] = "" - } - } - return cc -} - -// headerAllCommaSepValues returns all comma-separated values (each -// with whitespace trimmed) for header name in headers. According to -// Section 4.2 of the HTTP/1.1 spec -// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2), -// values from multiple occurrences of a header should be concatenated, if -// the header's value is a comma-separated list. -func headerAllCommaSepValues(headers http.Header, name string) []string { - var vals []string - for _, val := range headers[http.CanonicalHeaderKey(name)] { - fields := strings.Split(val, ",") - for i, f := range fields { - fields[i] = strings.TrimSpace(f) - } - vals = append(vals, fields...) - } - return vals -} - -// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF -// handler with a full copy of the content read from R when EOF is -// reached. -type cachingReadCloser struct { - // Underlying ReadCloser. - R io.ReadCloser - // OnEOF is called with a copy of the content of R when EOF is reached. - OnEOF func(io.Reader) - - buf bytes.Buffer // buf stores a copy of the content of R. -} - -// Read reads the next len(p) bytes from R or until R is drained. The -// return value n is the number of bytes read. If R has no data to -// return, err is io.EOF and OnEOF is called with a full copy of what -// has been read so far. 
-func (r *cachingReadCloser) Read(p []byte) (n int, err error) { - n, err = r.R.Read(p) - r.buf.Write(p[:n]) - if err == io.EOF { - r.OnEOF(bytes.NewReader(r.buf.Bytes())) - } - return n, err -} - -func (r *cachingReadCloser) Close() error { - return r.R.Close() -} - -// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation -func NewMemoryCacheTransport() *Transport { - c := NewMemoryCache() - t := NewTransport(c) - return t -} diff --git a/src/vendor/github.com/imdario/mergo/.deepsource.toml b/src/vendor/github.com/imdario/mergo/.deepsource.toml deleted file mode 100644 index 8a0681af8..000000000 --- a/src/vendor/github.com/imdario/mergo/.deepsource.toml +++ /dev/null @@ -1,12 +0,0 @@ -version = 1 - -test_patterns = [ - "*_test.go" -] - -[[analyzers]] -name = "go" -enabled = true - - [analyzers.meta] - import_path = "github.com/imdario/mergo" \ No newline at end of file diff --git a/src/vendor/github.com/imdario/mergo/.gitignore b/src/vendor/github.com/imdario/mergo/.gitignore deleted file mode 100644 index 529c3412b..000000000 --- a/src/vendor/github.com/imdario/mergo/.gitignore +++ /dev/null @@ -1,33 +0,0 @@ -#### joe made this: http://goel.io/joe - -#### go #### -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ - -#### vim #### -# Swap -[._]*.s[a-v][a-z] -[._]*.sw[a-p] -[._]s[a-v][a-z] -[._]sw[a-p] - -# Session -Session.vim - -# Temporary -.netrwhist -*~ -# Auto-generated tag files -tags diff --git a/src/vendor/github.com/imdario/mergo/.travis.yml b/src/vendor/github.com/imdario/mergo/.travis.yml deleted file mode 100644 index d324c43ba..000000000 --- a/src/vendor/github.com/imdario/mergo/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -arch: - - amd64 - - ppc64le -install: - - go get -t - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls -script: - - go test -race -v ./... -after_script: - - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/src/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/src/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md deleted file mode 100644 index 469b44907..000000000 --- a/src/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,46 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/src/vendor/github.com/imdario/mergo/LICENSE b/src/vendor/github.com/imdario/mergo/LICENSE deleted file mode 100644 index 686680298..000000000 --- a/src/vendor/github.com/imdario/mergo/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2013 Dario Castañé. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/vendor/github.com/imdario/mergo/README.md b/src/vendor/github.com/imdario/mergo/README.md deleted file mode 100644 index aa8cbd7ce..000000000 --- a/src/vendor/github.com/imdario/mergo/README.md +++ /dev/null @@ -1,247 +0,0 @@ -# Mergo - - -[![GoDoc][3]][4] -[![GitHub release][5]][6] -[![GoCard][7]][8] -[![Build Status][1]][2] -[![Coverage Status][9]][10] -[![Sourcegraph][11]][12] -[![FOSSA Status][13]][14] - -[![GoCenter Kudos][15]][16] - -[1]: https://travis-ci.org/imdario/mergo.png -[2]: https://travis-ci.org/imdario/mergo -[3]: https://godoc.org/github.com/imdario/mergo?status.svg -[4]: https://godoc.org/github.com/imdario/mergo -[5]: https://img.shields.io/github/release/imdario/mergo.svg -[6]: https://github.com/imdario/mergo/releases -[7]: https://goreportcard.com/badge/imdario/mergo -[8]: https://goreportcard.com/report/github.com/imdario/mergo -[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master -[10]: https://coveralls.io/github/imdario/mergo?branch=master -[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg -[12]: https://sourcegraph.com/github.com/imdario/mergo?badge -[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield -[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield -[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo -[16]: https://search.gocenter.io/github.com/imdario/mergo - -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. - -## Status - -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). - -### Important note - -Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). 
I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds suppot for go modules. - -Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. - -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). - -### Donations - -If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes: - -Buy Me a Coffee at ko-fi.com -[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) -[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo) -Donate using Liberapay - -### Mergo in the wild - -- [moby/moby](https://github.com/moby/moby) -- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) -- [vmware/dispatch](https://github.com/vmware/dispatch) -- [Shopify/themekit](https://github.com/Shopify/themekit) -- [imdario/zas](https://github.com/imdario/zas) -- [matcornic/hermes](https://github.com/matcornic/hermes) -- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) -- [kataras/iris](https://github.com/kataras/iris) -- [michaelsauter/crane](https://github.com/michaelsauter/crane) -- [go-task/task](https://github.com/go-task/task) -- [sensu/uchiwa](https://github.com/sensu/uchiwa) -- [ory/hydra](https://github.com/ory/hydra) -- [sisatech/vcli](https://github.com/sisatech/vcli) -- [dairycart/dairycart](https://github.com/dairycart/dairycart) -- [projectcalico/felix](https://github.com/projectcalico/felix) -- [resin-os/balena](https://github.com/resin-os/balena) -- [go-kivik/kivik](https://github.com/go-kivik/kivik) -- [Telefonica/govice](https://github.com/Telefonica/govice) -- [supergiant/supergiant](supergiant/supergiant) -- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) -- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) -- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) -- [EagerIO/Stout](https://github.com/EagerIO/Stout) -- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) -- [russross/canvasassignments](https://github.com/russross/canvasassignments) -- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) -- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) -- [divshot/gitling](https://github.com/divshot/gitling) -- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) -- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) -- [elwinar/rambler](https://github.com/elwinar/rambler) -- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) -- [jfbus/impressionist](https://github.com/jfbus/impressionist) -- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) -- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) -- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) -- [thoas/picfit](https://github.com/thoas/picfit) -- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) -- 
[jnuthong/item_search](https://github.com/jnuthong/item_search) -- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) -- [containerssh/containerssh](https://github.com/containerssh/containerssh) - -## Install - - go get github.com/imdario/mergo - - // use in your .go code - import ( - "github.com/imdario/mergo" - ) - -## Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - -```go -if err := mergo.Merge(&dst, src); err != nil { - // ... -} -``` - -Also, you can merge overwriting values using the transformer `WithOverride`. - -```go -if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... -} -``` - -Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. - -```go -if err := mergo.Map(&dst, srcMap); err != nil { - // ... -} -``` - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. - -Here is a nice example: - -```go -package main - -import ( - "fmt" - "github.com/imdario/mergo" -) - -type Foo struct { - A string - B int64 -} - -func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} -} -``` - -Note: if test are failing due missing package, please execute: - - go get gopkg.in/yaml.v2 - -### Transformers - -Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? - -```go -package main - -import ( - "fmt" - "github.com/imdario/mergo" - "reflect" - "time" -) - -type timeTransformer struct { -} - -func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil -} - -type Snapshot struct { - Time time.Time - // ... -} - -func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } -} -``` - - -## Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) - -## About - -Written by [Dario Castañé](http://dario.im). 
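The Usage section above shows struct-to-struct merging; the struct-to-map direction it mentions is not exemplified there. A rough sketch, assuming the lower-cased-key behavior documented in map.go further down (the Foo type and its values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Foo struct {
	A string
	B int64
}

func main() {
	src := Foo{A: "one", B: 2}
	dst := map[string]interface{}{}
	// Map copies exported struct fields into the map; keys are the field
	// names with their first letter lower-cased, so "A" becomes "a".
	if err := mergo.Map(&dst, src); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(dst) // map[a:one b:2]
}
```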
- -## Top Contributors - -[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0) -[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1) -[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2) -[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3) -[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4) -[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5) -[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6) -[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7) - - -## License - -[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). - - -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/src/vendor/github.com/imdario/mergo/doc.go b/src/vendor/github.com/imdario/mergo/doc.go deleted file mode 100644 index fcd985f99..000000000 --- a/src/vendor/github.com/imdario/mergo/doc.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -Status - -It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc. - -Important note - -Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. . Also, this version adds suppot for go modules. - -Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code. - -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). - -Install - -Do your usual installation procedure: - - go get github.com/imdario/mergo - - // use in your .go code - import ( - "github.com/imdario/mergo" - ) - -Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. 
Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - - if err := mergo.Merge(&dst, src); err != nil { - // ... - } - -Also, you can merge overwriting values using the transformer WithOverride. - - if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... - } - -Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. - - if err := mergo.Map(&dst, srcMap); err != nil { - // ... - } - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. - -Here is a nice example: - - package main - - import ( - "fmt" - "github.com/imdario/mergo" - ) - - type Foo struct { - A string - B int64 - } - - func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} - } - -Transformers - -Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time? - - package main - - import ( - "fmt" - "github.com/imdario/mergo" - "reflect" - "time" - ) - - type timeTransformer struct { - } - - func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil - } - - type Snapshot struct { - Time time.Time - // ... - } - - func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } - } - -Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario - -About - -Written by Dario Castañé: https://da.rio.hn - -License - -BSD 3-Clause license, as Go language. - -*/ -package mergo diff --git a/src/vendor/github.com/imdario/mergo/map.go b/src/vendor/github.com/imdario/mergo/map.go deleted file mode 100644 index a13a7ee46..000000000 --- a/src/vendor/github.com/imdario/mergo/map.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2014 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. 
- -package mergo - -import ( - "fmt" - "reflect" - "unicode" - "unicode/utf8" -) - -func changeInitialCase(s string, mapper func(rune) rune) string { - if s == "" { - return s - } - r, n := utf8.DecodeRuneInString(s) - return string(mapper(r)) + s[n:] -} - -func isExported(field reflect.StructField) bool { - r, _ := utf8.DecodeRuneInString(field.Name) - return r >= 'A' && r <= 'Z' -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... - visited[h] = &visit{addr, typ, seen} - } - zeroValue := reflect.Value{} - switch dst.Kind() { - case reflect.Map: - dstMap := dst.Interface().(map[string]interface{}) - for i, n := 0, src.NumField(); i < n; i++ { - srcType := src.Type() - field := srcType.Field(i) - if !isExported(field) { - continue - } - fieldName := field.Name - fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { - dstMap[fieldName] = src.Field(i).Interface() - } - } - case reflect.Ptr: - if dst.IsNil() { - v := reflect.New(dst.Type().Elem()) - dst.Set(v) - } - dst = dst.Elem() - fallthrough - case reflect.Struct: - srcMap := src.Interface().(map[string]interface{}) - for key := range srcMap { - config.overwriteWithEmptyValue = true - srcValue := srcMap[key] - fieldName := changeInitialCase(key, unicode.ToUpper) - dstElement := dst.FieldByName(fieldName) - if dstElement == zeroValue { - // We discard it because the field doesn't exist. - continue - } - srcElement := reflect.ValueOf(srcValue) - dstKind := dstElement.Kind() - srcKind := srcElement.Kind() - if srcKind == reflect.Ptr && dstKind != reflect.Ptr { - srcElement = srcElement.Elem() - srcKind = reflect.TypeOf(srcElement.Interface()).Kind() - } else if dstKind == reflect.Ptr { - // Can this work? I guess it can't. - if srcKind != reflect.Ptr && srcElement.CanAddr() { - srcPtr := srcElement.Addr() - srcElement = reflect.ValueOf(srcPtr) - srcKind = reflect.Ptr - } - } - - if !srcElement.IsValid() { - continue - } - if srcKind == dstKind { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if srcKind == reflect.Map { - if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else { - return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) - } - } - } - return -} - -// Map sets fields' values in dst from src. -// src can be a map with string keys or a struct. dst must be the opposite: -// if src is a map, dst must be a valid pointer to struct. If src is a struct, -// dst must be map[string]interface{}. -// It won't merge unexported (private) fields and will do recursively -// any exported field. -// If dst is a map, keys will be src fields' names in lower camel case. 
-// Missing key in src that doesn't match a field in dst will be skipped. This -// doesn't apply if dst is a map. -// This is separated method from Merge because it is cleaner and it keeps sane -// semantics: merging equal types, mapping different (restricted) types. -func Map(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, opts...) -} - -// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by -// non-empty src attribute values. -// Deprecated: Use Map(…) with WithOverride -func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, append(opts, WithOverride)...) -} - -func _map(dst, src interface{}, opts ...func(*Config)) error { - if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument - } - var ( - vDst, vSrc reflect.Value - err error - ) - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - // To be friction-less, we redirect equal-type arguments - // to deepMerge. Only because arguments can be anything. - if vSrc.Kind() == vDst.Kind() { - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) - } - switch vSrc.Kind() { - case reflect.Struct: - if vDst.Kind() != reflect.Map { - return ErrExpectedMapAsDestination - } - case reflect.Map: - if vDst.Kind() != reflect.Struct { - return ErrExpectedStructAsDestination - } - default: - return ErrNotSupported - } - return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} diff --git a/src/vendor/github.com/imdario/mergo/merge.go b/src/vendor/github.com/imdario/mergo/merge.go deleted file mode 100644 index 8c2a8fcd9..000000000 --- a/src/vendor/github.com/imdario/mergo/merge.go +++ /dev/null @@ -1,380 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "fmt" - "reflect" -) - -func hasMergeableFields(dst reflect.Value) (exported bool) { - for i, n := 0, dst.NumField(); i < n; i++ { - field := dst.Type().Field(i) - if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { - exported = exported || hasMergeableFields(dst.Field(i)) - } else if isExportedComponent(&field) { - exported = exported || len(field.PkgPath) == 0 - } - } - return -} - -func isExportedComponent(field *reflect.StructField) bool { - pkgPath := field.PkgPath - if len(pkgPath) > 0 { - return false - } - c := field.Name[0] - if 'a' <= c && c <= 'z' || c == '_' { - return false - } - return true -} - -type Config struct { - Overwrite bool - AppendSlice bool - TypeCheck bool - Transformers Transformers - overwriteWithEmptyValue bool - overwriteSliceWithEmptyValue bool - sliceDeepCopy bool - debug bool -} - -type Transformers interface { - Transformer(reflect.Type) func(dst, src reflect.Value) error -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. 
-func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - typeCheck := config.TypeCheck - overwriteWithEmptySrc := config.overwriteWithEmptyValue - overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue - sliceDeepCopy := config.sliceDeepCopy - - if !src.IsValid() { - return - } - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... - visited[h] = &visit{addr, typ, seen} - } - - if config.Transformers != nil && !isEmptyValue(dst) { - if fn := config.Transformers.Transformer(dst.Type()); fn != nil { - err = fn(dst, src) - return - } - } - - switch dst.Kind() { - case reflect.Struct: - if hasMergeableFields(dst) { - for i, n := 0, dst.NumField(); i < n; i++ { - if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { - return - } - } - } else { - if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { - dst.Set(src) - } - } - case reflect.Map: - if dst.IsNil() && !src.IsNil() { - if dst.CanSet() { - dst.Set(reflect.MakeMap(dst.Type())) - } else { - dst = src - return - } - } - - if src.Kind() != reflect.Map { - if overwrite { - dst.Set(src) - } - return - } - - for _, key := range src.MapKeys() { - srcElement := src.MapIndex(key) - if !srcElement.IsValid() { - continue - } - dstElement := dst.MapIndex(key) - switch srcElement.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: - if srcElement.IsNil() { - if overwrite { - dst.SetMapIndex(key, srcElement) - } - continue - } - fallthrough - default: - if !srcElement.CanInterface() { - continue - } - switch reflect.TypeOf(srcElement.Interface()).Kind() { - case reflect.Struct: - fallthrough - case reflect.Ptr: - fallthrough - case reflect.Map: - srcMapElm := srcElement - dstMapElm := dstElement - if srcMapElm.CanInterface() { - srcMapElm = reflect.ValueOf(srcMapElm.Interface()) - if dstMapElm.IsValid() { - dstMapElm = reflect.ValueOf(dstMapElm.Interface()) - } - } - if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { - return - } - case reflect.Slice: - srcSlice := reflect.ValueOf(srcElement.Interface()) - - var dstSlice reflect.Value - if !dstElement.IsValid() || dstElement.IsNil() { - dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) - } else { - dstSlice = reflect.ValueOf(dstElement.Interface()) - } - - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { - if typeCheck && srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } - dstSlice = srcSlice - } else if config.AppendSlice { - if srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } - dstSlice = reflect.AppendSlice(dstSlice, srcSlice) - } else if sliceDeepCopy { - i := 0 - for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { - srcElement := srcSlice.Index(i) - dstElement := dstSlice.Index(i) - - if srcElement.CanInterface() { - srcElement = reflect.ValueOf(srcElement.Interface()) - } - if dstElement.CanInterface() { - dstElement = 
reflect.ValueOf(dstElement.Interface()) - } - - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } - - } - dst.SetMapIndex(key, dstSlice) - } - } - if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { - continue - } - - if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) { - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - dst.SetMapIndex(key, srcElement) - } - } - case reflect.Slice: - if !dst.CanSet() { - break - } - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { - dst.Set(src) - } else if config.AppendSlice { - if src.Type() != dst.Type() { - return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) - } - dst.Set(reflect.AppendSlice(dst, src)) - } else if sliceDeepCopy { - for i := 0; i < src.Len() && i < dst.Len(); i++ { - srcElement := src.Index(i) - dstElement := dst.Index(i) - if srcElement.CanInterface() { - srcElement = reflect.ValueOf(srcElement.Interface()) - } - if dstElement.CanInterface() { - dstElement = reflect.ValueOf(dstElement.Interface()) - } - - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } - } - case reflect.Ptr: - fallthrough - case reflect.Interface: - if isReflectNil(src) { - if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { - dst.Set(src) - } - break - } - - if src.Kind() != reflect.Interface { - if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { - dst.Set(src) - } - } else if src.Kind() == reflect.Ptr { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - } else if dst.Elem().Type() == src.Type() { - if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { - return - } - } else { - return ErrDifferentArgumentsTypes - } - break - } - - if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { - dst.Set(src) - } - break - } - - if dst.Elem().Kind() == src.Elem().Kind() { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - break - } - default: - mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) - if mustSet { - if dst.CanSet() { - dst.Set(src) - } else { - dst = src - } - } - } - - return -} - -// Merge will fill any empty for value type attributes on the dst struct using corresponding -// src attributes if they themselves are not empty. dst and src must be valid same-type structs -// and dst must be a pointer to struct. -// It won't merge unexported (private) fields and will do recursively any exported field. -func Merge(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, opts...) -} - -// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by -// non-empty src attribute values. -// Deprecated: use Merge(…) with WithOverride -func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, append(opts, WithOverride)...) 
-} - -// WithTransformers adds transformers to merge, allowing to customize the merging of some types. -func WithTransformers(transformers Transformers) func(*Config) { - return func(config *Config) { - config.Transformers = transformers - } -} - -// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. -func WithOverride(config *Config) { - config.Overwrite = true -} - -// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. -func WithOverwriteWithEmptyValue(config *Config) { - config.Overwrite = true - config.overwriteWithEmptyValue = true -} - -// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice. -func WithOverrideEmptySlice(config *Config) { - config.overwriteSliceWithEmptyValue = true -} - -// WithAppendSlice will make merge append slices instead of overwriting it. -func WithAppendSlice(config *Config) { - config.AppendSlice = true -} - -// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride). -func WithTypeCheck(config *Config) { - config.TypeCheck = true -} - -// WithSliceDeepCopy will merge slice element one by one with Overwrite flag. -func WithSliceDeepCopy(config *Config) { - config.sliceDeepCopy = true - config.Overwrite = true -} - -func merge(dst, src interface{}, opts ...func(*Config)) error { - if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument - } - var ( - vDst, vSrc reflect.Value - err error - ) - - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - if vDst.Type() != vSrc.Type() { - return ErrDifferentArgumentsTypes - } - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} - -// IsReflectNil is the reflect value provided nil -func isReflectNil(v reflect.Value) bool { - k := v.Kind() - switch k { - case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr: - // Both interface and slice are nil if first word is 0. - // Both are always bigger than a word; assume flagIndir. - return v.IsNil() - default: - return false - } -} diff --git a/src/vendor/github.com/imdario/mergo/mergo.go b/src/vendor/github.com/imdario/mergo/mergo.go deleted file mode 100644 index 3cc926c7f..000000000 --- a/src/vendor/github.com/imdario/mergo/mergo.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "errors" - "reflect" -) - -// Errors reported by Mergo when it finds invalid arguments. -var ( - ErrNilArguments = errors.New("src and dst must not be nil") - ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs and maps are supported") - ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") - ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") - ErrNonPointerAgument = errors.New("dst must be a pointer") -) - -// During deepMerge, must keep track of checks that are -// in progress. The comparison algorithm assumes that all -// checks in progress are true when it reencounters them. 
-// Visited are stored in a map indexed by 17 * a1 + a2; -type visit struct { - ptr uintptr - typ reflect.Type - next *visit -} - -// From src/pkg/encoding/json/encode.go. -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - if v.IsNil() { - return true - } - return isEmptyValue(v.Elem()) - case reflect.Func: - return v.IsNil() - case reflect.Invalid: - return true - } - return false -} - -func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { - if dst == nil || src == nil { - err = ErrNilArguments - return - } - vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { - err = ErrNotSupported - return - } - vSrc = reflect.ValueOf(src) - // We check if vSrc is a pointer to dereference it. - if vSrc.Kind() == reflect.Ptr { - vSrc = vSrc.Elem() - } - return -} diff --git a/src/vendor/github.com/inconshreveable/mousetrap/LICENSE b/src/vendor/github.com/inconshreveable/mousetrap/LICENSE deleted file mode 100644 index 5f0d1fb6a..000000000 --- a/src/vendor/github.com/inconshreveable/mousetrap/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2014 Alan Shreve - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/src/vendor/github.com/inconshreveable/mousetrap/README.md b/src/vendor/github.com/inconshreveable/mousetrap/README.md deleted file mode 100644 index 7a950d177..000000000 --- a/src/vendor/github.com/inconshreveable/mousetrap/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# mousetrap - -mousetrap is a tiny library that answers a single question. - -On a Windows machine, was the process invoked by someone double clicking on -the executable file while browsing in explorer? - -### Motivation - -Windows developers unfamiliar with command line tools will often "double-click" -the executable for a tool. Because most CLI tools print the help and then exit -when invoked without arguments, this is often very frustrating for those users. - -mousetrap provides a way to detect these invocations so that you can provide -more helpful behavior and instructions on how to run the CLI tool. 
To see what -this looks like, both from an organizational and a technical perspective, see -https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/ - -### The interface - -The library exposes a single interface: - - func StartedByExplorer() (bool) diff --git a/src/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/src/vendor/github.com/inconshreveable/mousetrap/trap_others.go deleted file mode 100644 index 9d2d8a4ba..000000000 --- a/src/vendor/github.com/inconshreveable/mousetrap/trap_others.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !windows - -package mousetrap - -// StartedByExplorer returns true if the program was invoked by the user -// double-clicking on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. It only can tell you -// whether it was launched from explorer.exe -// -// On non-Windows platforms, it always returns false. -func StartedByExplorer() bool { - return false -} diff --git a/src/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/src/vendor/github.com/inconshreveable/mousetrap/trap_windows.go deleted file mode 100644 index 336142a5e..000000000 --- a/src/vendor/github.com/inconshreveable/mousetrap/trap_windows.go +++ /dev/null @@ -1,98 +0,0 @@ -// +build windows -// +build !go1.4 - -package mousetrap - -import ( - "fmt" - "os" - "syscall" - "unsafe" -) - -const ( - // defined by the Win32 API - th32cs_snapprocess uintptr = 0x2 -) - -var ( - kernel = syscall.MustLoadDLL("kernel32.dll") - CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot") - Process32First = kernel.MustFindProc("Process32FirstW") - Process32Next = kernel.MustFindProc("Process32NextW") -) - -// ProcessEntry32 structure defined by the Win32 API -type processEntry32 struct { - dwSize uint32 - cntUsage uint32 - th32ProcessID uint32 - th32DefaultHeapID int - th32ModuleID uint32 - cntThreads uint32 - th32ParentProcessID uint32 - pcPriClassBase int32 - dwFlags uint32 - szExeFile [syscall.MAX_PATH]uint16 -} - -func getProcessEntry(pid int) (pe *processEntry32, err error) { - snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0)) - if snapshot == uintptr(syscall.InvalidHandle) { - err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1) - return - } - defer syscall.CloseHandle(syscall.Handle(snapshot)) - - var processEntry processEntry32 - processEntry.dwSize = uint32(unsafe.Sizeof(processEntry)) - ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32First: %v", e1) - return - } - - for { - if processEntry.th32ProcessID == uint32(pid) { - pe = &processEntry - return - } - - ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32Next: %v", e1) - return - } - } -} - -func getppid() (pid int, err error) { - pe, err := getProcessEntry(os.Getpid()) - if err != nil { - return - } - - pid = int(pe.th32ParentProcessID) - return -} - -// StartedByExplorer returns true if the program was invoked by the user double-clicking -// on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. 
It only can tell you -// whether it was launched from explorer.exe -func StartedByExplorer() bool { - ppid, err := getppid() - if err != nil { - return false - } - - pe, err := getProcessEntry(ppid) - if err != nil { - return false - } - - name := syscall.UTF16ToString(pe.szExeFile[:]) - return name == "explorer.exe" -} diff --git a/src/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/src/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go deleted file mode 100644 index 9a28e57c3..000000000 --- a/src/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build windows -// +build go1.4 - -package mousetrap - -import ( - "os" - "syscall" - "unsafe" -) - -func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { - snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) - if err != nil { - return nil, err - } - defer syscall.CloseHandle(snapshot) - var procEntry syscall.ProcessEntry32 - procEntry.Size = uint32(unsafe.Sizeof(procEntry)) - if err = syscall.Process32First(snapshot, &procEntry); err != nil { - return nil, err - } - for { - if procEntry.ProcessID == uint32(pid) { - return &procEntry, nil - } - err = syscall.Process32Next(snapshot, &procEntry) - if err != nil { - return nil, err - } - } -} - -// StartedByExplorer returns true if the program was invoked by the user double-clicking -// on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. It only can tell you -// whether it was launched from explorer.exe -func StartedByExplorer() bool { - pe, err := getProcessEntry(os.Getppid()) - if err != nil { - return false - } - return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) -} diff --git a/src/vendor/github.com/klauspost/compress/.gitattributes b/src/vendor/github.com/klauspost/compress/.gitattributes deleted file mode 100644 index 402433593..000000000 --- a/src/vendor/github.com/klauspost/compress/.gitattributes +++ /dev/null @@ -1,2 +0,0 @@ -* -text -*.bin -text -diff diff --git a/src/vendor/github.com/klauspost/compress/.gitignore b/src/vendor/github.com/klauspost/compress/.gitignore deleted file mode 100644 index b35f8449b..000000000 --- a/src/vendor/github.com/klauspost/compress/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof -/s2/cmd/_s2sx/sfx-exe diff --git a/src/vendor/github.com/klauspost/compress/.goreleaser.yml b/src/vendor/github.com/klauspost/compress/.goreleaser.yml deleted file mode 100644 index c9014ce1d..000000000 --- a/src/vendor/github.com/klauspost/compress/.goreleaser.yml +++ /dev/null @@ -1,137 +0,0 @@ -# This is an example goreleaser.yaml file with some sane defaults. 
-# Make sure to check the documentation at http://goreleaser.com -before: - hooks: - - ./gen.sh - -builds: - - - id: "s2c" - binary: s2c - main: ./s2/cmd/s2c/main.go - flags: - - -trimpath - env: - - CGO_ENABLED=0 - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - - - id: "s2d" - binary: s2d - main: ./s2/cmd/s2d/main.go - flags: - - -trimpath - env: - - CGO_ENABLED=0 - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - - - id: "s2sx" - binary: s2sx - main: ./s2/cmd/_s2sx/main.go - flags: - - -modfile=s2sx.mod - - -trimpath - env: - - CGO_ENABLED=0 - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - -archives: - - - id: s2-binaries - name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}" - replacements: - aix: AIX - darwin: OSX - linux: Linux - windows: Windows - 386: i386 - amd64: x86_64 - freebsd: FreeBSD - netbsd: NetBSD - format_overrides: - - goos: windows - format: zip - files: - - unpack/* - - s2/LICENSE - - s2/README.md -checksum: - name_template: 'checksums.txt' -snapshot: - name_template: "{{ .Tag }}-next" -changelog: - sort: asc - filters: - exclude: - - '^doc:' - - '^docs:' - - '^test:' - - '^tests:' - - '^Update\sREADME.md' - -nfpms: - - - file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}" - vendor: Klaus Post - homepage: https://github.com/klauspost/compress - maintainer: Klaus Post - description: S2 Compression Tool - license: BSD 3-Clause - formats: - - deb - - rpm - replacements: - darwin: Darwin - linux: Linux - freebsd: FreeBSD - amd64: x86_64 diff --git a/src/vendor/github.com/klauspost/compress/LICENSE b/src/vendor/github.com/klauspost/compress/LICENSE deleted file mode 100644 index 87d557477..000000000 --- a/src/vendor/github.com/klauspost/compress/LICENSE +++ /dev/null @@ -1,304 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2019 Klaus Post. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------- - -Files: gzhttp/* - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016-2017 The New York Times Company - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ------------------- - -Files: s2/cmd/internal/readahead/* - -The MIT License (MIT) - -Copyright (c) 2015 Klaus Post - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------------------- -Files: snappy/* -Files: internal/snapref/* - -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------ - -Files: s2/cmd/internal/filepathx/* - -Copyright 2016 The filepathx Authors - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/src/vendor/github.com/klauspost/compress/README.md b/src/vendor/github.com/klauspost/compress/README.md deleted file mode 100644 index 3429879eb..000000000 --- a/src/vendor/github.com/klauspost/compress/README.md +++ /dev/null @@ -1,438 +0,0 @@ -# compress - -This package provides various compression algorithms. - -* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. -* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. -* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). -* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. -* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. -* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. -* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. -* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here. 
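To make the "drop-in replacement" claim in the feature list above concrete, here is a minimal sketch. It is not part of the vendored files being removed by this patch; it only assumes what the vendored README itself states, namely that `github.com/klauspost/compress/gzip` mirrors the standard library's `compress/gzip` API, so only the import path changes. Names such as `main` and the payload string are illustrative.

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	// Assumption per the README above: identical Writer/Reader API to
	// the standard library's compress/gzip, only the import path differs.
	gzip "github.com/klauspost/compress/gzip"
)

func main() {
	var buf bytes.Buffer

	// Compress: same NewWriter / Write / Close sequence as compress/gzip.
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write([]byte("example payload")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	// Decompress: same NewReader / ReadAll / Close sequence.
	zr, err := gzip.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	defer zr.Close()

	plain, err := io.ReadAll(zr)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", plain)
}
```

Per the same list, the equivalent import-path-only swap is described for the zip and zlib packages as well.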
- -[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) -[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) -[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) - -# changelog - -* Aug 30, 2021 (v1.13.5) - * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) - * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) - * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) - * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) - -* Aug 12, 2021 (v1.13.4) - * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). - * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) - -* Aug 3, 2021 (v1.13.3) - * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) - * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) - * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) - * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) - * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) - * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) - -* Jun 14, 2021 (v1.13.1) - * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) - * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) - * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) - * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) - -* Jun 3, 2021 (v1.13.0) - * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. - * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) - * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) - -* May 25, 2021 (v1.12.3) - * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) - * deflate: Allocate less for history. 
[#375](https://github.com/klauspost/compress/pull/375) - * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) - -* Apr 27, 2021 (v1.12.2) - * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) - * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) - * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) - * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/358) - * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) - * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) - -* Apr 14, 2021 (v1.12.1) - * snappy package removed. Upstream added as dependency. - * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) - * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) - * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) - * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) - * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) - * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) - -
- See changes prior to v1.12.1 - -* Mar 26, 2021 (v1.11.13) - * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) - * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) - * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) - * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) - -* Mar 5, 2021 (v1.11.12) - * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). - * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) - -* Mar 1, 2021 (v1.11.9) - * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) - * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) - * s2: Fix binaries. - -* Feb 25, 2021 (v1.11.8) - * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended. - * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) - * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) - * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) - * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) - -* Jan 14, 2021 (v1.11.7) - * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) - * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) - * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311) - * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) - * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) - -* Jan 7, 2021 (v1.11.6) - * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) - * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) - -* Dec 20, 2020 (v1.11.4) - * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) - * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) - * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) - * Simplify/speed up small blocks with known max size. 
[#300](https://github.com/klauspost/compress/pull/300) - * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) - -* Nov 15, 2020 (v1.11.3) - * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) - * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) - -* Oct 11, 2020 (v1.11.2) - * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) - -* Oct 1, 2020 (v1.11.1) - * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) - -* Sept 8, 2020 (v1.11.0) - * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) - * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) - * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) -
- -
- See changes prior to v1.11.0 - -* July 8, 2020 (v1.10.11) - * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) - * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) - -* June 23, 2020 (v1.10.10) - * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) - -* June 16, 2020 (v1.10.9): - * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) - * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) - * Fuzzit tests removed. The service has been purchased and is no longer available. - -* June 5, 2020 (v1.10.8): - * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) - -* June 1, 2020 (v1.10.7): - * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) - * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) - * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) - -* May 21, 2020: (v1.10.6) - * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) - * zstd: Stricter decompression checks. - -* April 12, 2020: (v1.10.5) - * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) - -* Apr 8, 2020: (v1.10.4) - * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) -* Mar 11, 2020: (v1.10.3) - * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) - * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) - * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) - * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) - * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) - -* Feb 27, 2020: (v1.10.2) - * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) - * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) - -* Feb 18, 2020: (v1.10.1) - * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) - * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) - * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) - -* Feb 4, 2020: (v1.10.0) - * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. 
[#216](https://github.com/klauspost/compress/pull/216) - * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) - * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) - * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) - -
- -
- See changes prior to v1.10.0 - -* Jan 20,2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206). -* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204) -* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed. -* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases. -* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192) -* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder. -* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199) -* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features -* Dec 29, 2019: zstd: Single segment flag condintions tweaked. [#197](https://github.com/klauspost/compress/pull/197) -* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198) -* Dec 10, 2019: s2: Fix repeat length output when just above at 16MB limit. -* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191) -* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188) -* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187) -* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines. -* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate. -* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184) -* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate. -* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180) -* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB. -* Nov 11, 2019: Reduce inflate memory use by 1KB. -* Nov 10, 2019: Less allocations in deflate bit writer. -* Nov 10, 2019: Fix inconsistent error returned by zstd decoder. -* Oct 28, 2019 (v1.9.1) ztsd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174) -* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173) -* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172) -* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105) - -
- -
- See changes prior to v1.9.0 - -* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) -* Oct 3, 2019: Fix inconsistent results on broken zstd streams. -* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) -* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). -* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). -* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). -* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. -* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. -* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. -* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. -* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. -* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. -* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. -* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) -* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) -* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) -* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) -* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. -* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. -* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. -* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. -* June 17, 2019: zstd decompression bugfix. -* June 17, 2019: fix 32 bit builds. -* June 17, 2019: Easier use in modules (less dependencies). -* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. -* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. -* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. -* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! -* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. -* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. -* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). 
-* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below. -* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0). -* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change. -* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change. -* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function. -* May 28, 2017: Reduce allocations when resetting decoder. -* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7. -* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625). -* Oct 25, 2016: Level 2-4 have been rewritten and now offers significantly better performance than before. -* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. -* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level. -* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression. -* Mar 24, 2016: Small speedup for level 1-3. -* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. -* Feb 19, 2016: Handle small payloads faster in level 1-3. -* Feb 19, 2016: Added faster level 2 + 3 compression modes. -* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5. -* Feb 14, 2016: Snappy: Merge upstream changes. -* Feb 14, 2016: Snappy: Fix aggressive skipping. -* Feb 14, 2016: Snappy: Update benchmark. -* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression. -* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy to compress material run faster. Typical speedup is around 25%. -* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content. -* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup. -* Jan 16, 2016: Optimization on deflate level 1,2,3 compression. -* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives. -* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs. -* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms. -* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update! -* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet). -* Nov 20 2015: Small optimization to bit writer on 64 bit systems. -* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15). -* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate. 
-* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file -* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. - -
- -# deflate usage - -* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/). -* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/). -* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). -* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) - -The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: - -| old import | new import | Documentation -|--------------------|-----------------------------------------|--------------------| -| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) -| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) -| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) -| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) - -* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). - -You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop in replacement for gzip, which support multithreaded compression on big files and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. - -The packages contains the same as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). - -Currently there is only minor speedup on decompression (mostly CRC32 calculation). - -Memory usage is typically 1MB for a Writer. stdlib is in the same range. -If you expect to have a lot of concurrently allocated Writers consider using -the stateless compress described below. - -# Stateless compression - -This package offers stateless compression as a special option for gzip/deflate. -It will do compression but without maintaining any state between Write calls. - -This means there will be no memory kept between Write calls, but compression and speed will be suboptimal. - -This is only relevant in cases where you expect to run many thousands of compressors concurrently, -but with very little activity. This is *not* intended for regular web servers serving individual requests. - -Because of this, the size of actual Write calls will affect output size. - -In gzip, specify level `-3` / `gzip.StatelessCompression` to enable. - -For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter) - -A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: - -``` - // replace 'ioutil.Discard' with your output. 
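 // Assumed imports for this sketch (not shown in the original snippet):
 // "bufio", "io/ioutil", and the drop-in "github.com/klauspost/compress/gzip".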
- gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) - if err != nil { - return err - } - defer gzw.Close() - - w := bufio.NewWriterSize(gzw, 4096) - defer w.Flush() - - // Write to 'w' -``` - -This will only use up to 4KB in memory when the writer is idle. - -Compression is almost always worse than the fastest compression level -and each write will allocate (a little) memory. - -# Performance Update 2018 - -It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD. - -The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates i could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet. - -The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input. - -The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only uses one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) uses all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet). - - -## Overall differences. - -There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels. - -The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted by library to give a smoother transition between the compression levels than the standard library. - -This package attempts to provide a more smooth transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between gives something reasonable in between. The standard library has big differences in levels 1-4, but levels 5-9 having no significant gains - often spending a lot more time than can be justified by the achieved compression. - -There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab. - -## Web Content - -This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS. - -Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big. - -Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. 
You can draw your own conclusions on what would be the most expensive for your case. - -## Object files - -This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible. - -The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but is sacrificing quite a bit of compression. - -The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively. - -## Highly Compressible File - -This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc. - -It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5 and levels 7 and 8 offering great speed for the achieved compression. - -So if you know you content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground". - -## Medium-High Compressible - -This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams. - -We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both. - -## Medium Compressible - -I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario. - -The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior. - - -## Un-compressible Content - -This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections. - - -## Huffman only compression - -This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reduce the number of bits to represent each character. - -This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' takes more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM). - -Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core. 
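
As a rough sketch of what enabling this mode looks like (assuming the drop-in `github.com/klauspost/compress/flate` import from the table above; the standard library's `compress/flate` exposes the same `HuffmanOnly` constant):

```
package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer

	// HuffmanOnly disables match searching entirely; only the entropy coding
	// step runs, so throughput stays nearly independent of the input.
	fw, err := flate.NewWriter(&buf, flate.HuffmanOnly)
	if err != nil {
		log.Fatal(err)
	}
	in := bytes.Repeat([]byte("entropy codable text "), 100)
	if _, err := fw.Write(in); err != nil {
		log.Fatal(err)
	}
	if err := fw.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("huffman-only deflate: %d -> %d bytes", len(in), buf.Len())
}
```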
- -The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%). - -The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder to slow down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup. - -For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). - -This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip. - - -# license - -This code is licensed under the same conditions as the original Go code. See LICENSE file. diff --git a/src/vendor/github.com/klauspost/compress/compressible.go b/src/vendor/github.com/klauspost/compress/compressible.go deleted file mode 100644 index ea5a692d5..000000000 --- a/src/vendor/github.com/klauspost/compress/compressible.go +++ /dev/null @@ -1,85 +0,0 @@ -package compress - -import "math" - -// Estimate returns a normalized compressibility estimate of block b. -// Values close to zero are likely uncompressible. -// Values above 0.1 are likely to be compressible. -// Values above 0.5 are very compressible. -// Very small lengths will return 0. -func Estimate(b []byte) float64 { - if len(b) < 16 { - return 0 - } - - // Correctly predicted order 1 - hits := 0 - lastMatch := false - var o1 [256]byte - var hist [256]int - c1 := byte(0) - for _, c := range b { - if c == o1[c1] { - // We only count a hit if there was two correct predictions in a row. - if lastMatch { - hits++ - } - lastMatch = true - } else { - lastMatch = false - } - o1[c1] = c - c1 = c - hist[c]++ - } - - // Use x^0.6 to give better spread - prediction := math.Pow(float64(hits)/float64(len(b)), 0.6) - - // Calculate histogram distribution - variance := float64(0) - avg := float64(len(b)) / 256 - - for _, v := range hist { - Δ := float64(v) - avg - variance += Δ * Δ - } - - stddev := math.Sqrt(float64(variance)) / float64(len(b)) - exp := math.Sqrt(1 / float64(len(b))) - - // Subtract expected stddev - stddev -= exp - if stddev < 0 { - stddev = 0 - } - stddev *= 1 + exp - - // Use x^0.4 to give better spread - entropy := math.Pow(stddev, 0.4) - - // 50/50 weight between prediction and histogram distribution - return math.Pow((prediction+entropy)/2, 0.9) -} - -// ShannonEntropyBits returns the number of bits minimum required to represent -// an entropy encoding of the input bytes. -// https://en.wiktionary.org/wiki/Shannon_entropy -func ShannonEntropyBits(b []byte) int { - if len(b) == 0 { - return 0 - } - var hist [256]int - for _, c := range b { - hist[c]++ - } - shannon := float64(0) - invTotal := 1.0 / float64(len(b)) - for _, v := range hist[:] { - if v > 0 { - n := float64(v) - shannon += math.Ceil(-math.Log2(n*invTotal) * n) - } - } - return int(math.Ceil(shannon)) -} diff --git a/src/vendor/github.com/klauspost/compress/fse/README.md b/src/vendor/github.com/klauspost/compress/fse/README.md deleted file mode 100644 index ea7324da6..000000000 --- a/src/vendor/github.com/klauspost/compress/fse/README.md +++ /dev/null @@ -1,79 +0,0 @@ -# Finite State Entropy - -This package provides Finite State Entropy encoding and decoding. 
- -Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) -encoding provides a fast near-optimal symbol encoding/decoding -for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). - -This can be used for compressing input with a lot of similar input values to the smallest number of bytes. -This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, -but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. - -* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) - -## News - - * Feb 2018: First implementation released. Consider this beta software for now. - -# Usage - -This package provides a low level interface that allows to compress single independent blocks. - -Each block is separate, and there is no built in integrity checks. -This means that the caller should keep track of block sizes and also do checksums if needed. - -Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function. -You must provide input and will receive the output and maybe an error. - -These error values can be returned: - -| Error | Description | -|---------------------|-----------------------------------------------------------------------------| -| `` | Everything ok, output is returned | -| `ErrIncompressible` | Returned when input is judged to be too hard to compress | -| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | -| `(error)` | An internal error occurred. | - -As can be seen above there are errors that will be returned even under normal operation so it is important to handle these. - -To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object -that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same -object can be used for both. - -Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this -you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. - -Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function. -You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back -your input was likely corrupted. - -It is important to note that a successful decoding does *not* mean your output matches your original input. -There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. - -For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples). - -# Performance - -A lot of factors are affecting speed. Block sizes and compressibility of the material are primary factors. -All compression functions are currently only running on the calling goroutine so only one core will be used per block. - -The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input -is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be -beneficial to transpose all your input values down by 64. 
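
To make the Usage section above concrete, here is a minimal sketch of compressing and decompressing one block with a reused `Scratch`. The `Compress`/`Decompress` signatures, the sentinel errors and the `Out` field behave as documented above; the input is purely illustrative:

```
package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/fse"
)

func main() {
	// Skewed, illustrative input; real callers pass their own block.
	in := bytes.Repeat([]byte("aaaaaabbbcc dd e"), 64)

	var s fse.Scratch // reusable across calls; also owns the output buffer

	comp, err := fse.Compress(in, &s)
	if err == fse.ErrIncompressible || err == fse.ErrUseRLE {
		// Expected outcomes for some inputs: fall back to storing the block
		// raw (or as a single repeated byte for ErrUseRLE).
		log.Println("not entropy coded:", err)
		return
	}
	if err != nil {
		log.Fatal(err)
	}

	// The caller must track block sizes itself; there is no framing or checksum.
	// Detach the output buffer before reusing the scratch, otherwise the next
	// call will overwrite the bytes `comp` points to.
	s.Out = nil

	dec, err := fse.Decompress(comp, &s)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d -> %d bytes, roundtrip ok: %v", len(in), len(comp), bytes.Equal(dec, in))
}
```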
- -With moderate block sizes around 64k speed are typically 200MB/s per core for compression and -around 300MB/s decompression speed. - -The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. - -# Plans - -At one point, more internals will be exposed to facilitate more "expert" usage of the components. - -A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). - -# Contributing - -Contributions are always welcome. Be aware that adding public functions will require good justification and breaking -changes will likely not be accepted. If in doubt open an issue before writing the PR. \ No newline at end of file diff --git a/src/vendor/github.com/klauspost/compress/fse/bitreader.go b/src/vendor/github.com/klauspost/compress/fse/bitreader.go deleted file mode 100644 index f65eb3909..000000000 --- a/src/vendor/github.com/klauspost/compress/fse/bitreader.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import ( - "encoding/binary" - "errors" - "io" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBits(uint32(v))) - return nil -} - -// getBits will return n bits. n can be 0. -func (b *bitReader) getBits(n uint8) uint16 { - if n == 0 || b.bitsRead >= 64 { - return 0 - } - return b.getBitsFast(n) -} - -// getBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) getBitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - b.bitsRead += n - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - // 2 bounds checks. - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -// fill() will make sure at least 32 bits are available. 
-func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.bitsRead >= 64 && b.off == 0 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} diff --git a/src/vendor/github.com/klauspost/compress/fse/bitwriter.go b/src/vendor/github.com/klauspost/compress/fse/bitwriter.go deleted file mode 100644 index 43e463611..000000000 --- a/src/vendor/github.com/klauspost/compress/fse/bitwriter.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import "fmt" - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16ZeroNC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -// This is fastest if bits can be zero. -func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { - if bits == 0 { - return - } - value <<= (16 - bits) & 15 - value >>= (16 - bits) & 15 - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. 
-func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.bitContainer >>= v << 3 - b.nBits &= 7 -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() error { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() - return nil -} - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/src/vendor/github.com/klauspost/compress/fse/bytereader.go b/src/vendor/github.com/klauspost/compress/fse/bytereader.go deleted file mode 100644 index abade2d60..000000000 --- a/src/vendor/github.com/klauspost/compress/fse/bytereader.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - -// Uint32 returns a little endian uint32 starting at current offset. 
-func (b byteReader) Uint32() uint32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/src/vendor/github.com/klauspost/compress/fse/compress.go b/src/vendor/github.com/klauspost/compress/fse/compress.go deleted file mode 100644 index 6f341914c..000000000 --- a/src/vendor/github.com/klauspost/compress/fse/compress.go +++ /dev/null @@ -1,683 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import ( - "errors" - "fmt" -) - -// Compress the input bytes. Input must be < 2GB. -// Provide a Scratch buffer to avoid memory allocations. -// Note that the output is also kept in the scratch buffer. -// If input is too hard to compress, ErrIncompressible is returned. -// If input is a single byte value repeated ErrUseRLE is returned. -func Compress(in []byte, s *Scratch) ([]byte, error) { - if len(in) <= 1 { - return nil, ErrIncompressible - } - if len(in) > (2<<30)-1 { - return nil, errors.New("input too big, must be < 2GB") - } - s, err := s.prepare(in) - if err != nil { - return nil, err - } - - // Create histogram, if none was provided. - maxCount := s.maxCount - if maxCount == 0 { - maxCount = s.countSimple(in) - } - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount == len(in) { - // One symbol, use RLE - return nil, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return nil, ErrIncompressible - } - s.optimalTableLog() - err = s.normalizeCount() - if err != nil { - return nil, err - } - err = s.writeCount() - if err != nil { - return nil, err - } - - if false { - err = s.validateNorm() - if err != nil { - return nil, err - } - } - - err = s.buildCTable() - if err != nil { - return nil, err - } - err = s.compress(in) - if err != nil { - return nil, err - } - s.Out = s.bw.out - // Check if we compressed. - if len(s.Out) >= len(in) { - return nil, ErrIncompressible - } - return s.Out, nil -} - -// cState contains the compression state of a stream. -type cState struct { - bw *bitWriter - stateTable []uint16 - state uint16 -} - -// init will initialize the compression state to the first symbol of the stream. -func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { - c.bw = bw - c.stateTable = ct.stateTable - - nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 - im := int32((nbBitsOut << 16) - first.deltaNbBits) - lu := (im >> nbBitsOut) + first.deltaFindState - c.state = c.stateTable[lu] -} - -// encode the output symbol provided and write it to the bitstream. -func (c *cState) encode(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState - c.bw.addBits16NC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - -// encode the output symbol provided and write it to the bitstream. 
-func (c *cState) encodeZero(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState - c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - -// flush will write the tablelog to the output and flush the remaining full bytes. -func (c *cState) flush(tableLog uint8) { - c.bw.flush32() - c.bw.addBits16NC(c.state, tableLog) - c.bw.flush() -} - -// compress is the main compression loop that will encode the input from the last byte to the first. -func (s *Scratch) compress(src []byte) error { - if len(src) <= 2 { - return errors.New("compress: src too small") - } - tt := s.ct.symbolTT[:256] - s.bw.reset(s.Out) - - // Our two states each encodes every second byte. - // Last byte encoded (first byte decoded) will always be encoded by c1. - var c1, c2 cState - - // Encode so remaining size is divisible by 4. - ip := len(src) - if ip&1 == 1 { - c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) - c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) - c1.encodeZero(tt[src[ip-3]]) - ip -= 3 - } else { - c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) - c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) - ip -= 2 - } - if ip&2 != 0 { - c2.encodeZero(tt[src[ip-1]]) - c1.encodeZero(tt[src[ip-2]]) - ip -= 2 - } - - // Main compression loop. - switch { - case !s.zeroBits && s.actualTableLog <= 8: - // We can encode 4 symbols without requiring a flush. - // We do not need to check if any output is 0 bits. - for ip >= 4 { - s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] - c2.encode(tt[v0]) - c1.encode(tt[v1]) - c2.encode(tt[v2]) - c1.encode(tt[v3]) - ip -= 4 - } - case !s.zeroBits: - // We do not need to check if any output is 0 bits. - for ip >= 4 { - s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] - c2.encode(tt[v0]) - c1.encode(tt[v1]) - s.bw.flush32() - c2.encode(tt[v2]) - c1.encode(tt[v3]) - ip -= 4 - } - case s.actualTableLog <= 8: - // We can encode 4 symbols without requiring a flush - for ip >= 4 { - s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] - c2.encodeZero(tt[v0]) - c1.encodeZero(tt[v1]) - c2.encodeZero(tt[v2]) - c1.encodeZero(tt[v3]) - ip -= 4 - } - default: - for ip >= 4 { - s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] - c2.encodeZero(tt[v0]) - c1.encodeZero(tt[v1]) - s.bw.flush32() - c2.encodeZero(tt[v2]) - c1.encodeZero(tt[v3]) - ip -= 4 - } - } - - // Flush final state. - // Used to initialize state when decoding. - c2.flush(s.actualTableLog) - c1.flush(s.actualTableLog) - - return s.bw.close() -} - -// writeCount will write the normalized histogram count to header. -// This is read back by readNCount. 
-func (s *Scratch) writeCount() error { - var ( - tableLog = s.actualTableLog - tableSize = 1 << tableLog - previous0 bool - charnum uint16 - - maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 - - // Write Table Size - bitStream = uint32(tableLog - minTablelog) - bitCount = uint(4) - remaining = int16(tableSize + 1) /* +1 for extra accuracy */ - threshold = int16(tableSize) - nbBits = uint(tableLog + 1) - ) - if cap(s.Out) < maxHeaderSize { - s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) - } - outP := uint(0) - out := s.Out[:maxHeaderSize] - - // stops at 1 - for remaining > 1 { - if previous0 { - start := charnum - for s.norm[charnum] == 0 { - charnum++ - } - for charnum >= start+24 { - start += 24 - bitStream += uint32(0xFFFF) << bitCount - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - } - for charnum >= start+3 { - start += 3 - bitStream += 3 << bitCount - bitCount += 2 - } - bitStream += uint32(charnum-start) << bitCount - bitCount += 2 - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - count := s.norm[charnum] - charnum++ - max := (2*threshold - 1) - remaining - if count < 0 { - remaining += count - } else { - remaining -= count - } - count++ // +1 for extra accuracy - if count >= threshold { - count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ - } - bitStream += uint32(count) << bitCount - bitCount += nbBits - if count < max { - bitCount-- - } - - previous0 = count == 1 - if remaining < 1 { - return errors.New("internal error: remaining<1") - } - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += (bitCount + 7) / 8 - - if charnum > s.symbolLen { - return errors.New("internal error: charnum > s.symbolLen") - } - s.Out = out[:outP] - return nil -} - -// symbolTransform contains the state transform for a symbol. -type symbolTransform struct { - deltaFindState int32 - deltaNbBits uint32 -} - -// String prints values as a human readable string. -func (s symbolTransform) String() string { - return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState) -} - -// cTable contains tables used for compression. -type cTable struct { - tableSymbol []byte - stateTable []uint16 - symbolTT []symbolTransform -} - -// allocCtable will allocate tables needed for compression. -// If existing tables a re big enough, they are simply re-used. -func (s *Scratch) allocCtable() { - tableSize := 1 << s.actualTableLog - // get tableSymbol that is big enough. - if cap(s.ct.tableSymbol) < tableSize { - s.ct.tableSymbol = make([]byte, tableSize) - } - s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] - - ctSize := tableSize - if cap(s.ct.stateTable) < ctSize { - s.ct.stateTable = make([]uint16, ctSize) - } - s.ct.stateTable = s.ct.stateTable[:ctSize] - - if cap(s.ct.symbolTT) < 256 { - s.ct.symbolTT = make([]symbolTransform, 256) - } - s.ct.symbolTT = s.ct.symbolTT[:256] -} - -// buildCTable will populate the compression table so it is ready to be used. 
-func (s *Scratch) buildCTable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - var cumul [maxSymbolValue + 2]int16 - - s.allocCtable() - tableSymbol := s.ct.tableSymbol[:tableSize] - // symbol start positions - { - cumul[0] = 0 - for ui, v := range s.norm[:s.symbolLen-1] { - u := byte(ui) // one less than reference - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = u - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - } - // Encode last symbol separately to avoid overflowing u - u := int(s.symbolLen - 1) - v := s.norm[s.symbolLen-1] - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = byte(u) - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - if uint32(cumul[s.symbolLen]) != tableSize { - return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) - } - cumul[s.symbolLen] = int16(tableSize) + 1 - } - // Spread symbols - s.zeroBits = false - { - step := tableStep(tableSize) - tableMask := tableSize - 1 - var position uint32 - // if any symbol > largeLimit, we may have 0 bits output. - largeLimit := int16(1 << (s.actualTableLog - 1)) - for ui, v := range s.norm[:s.symbolLen] { - symbol := byte(ui) - if v > largeLimit { - s.zeroBits = true - } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { - tableSymbol[position] = symbol - position = (position + step) & tableMask - for position > highThreshold { - position = (position + step) & tableMask - } /* Low proba area */ - } - } - - // Check if we have gone through all positions - if position != 0 { - return errors.New("position!=0") - } - } - - // Build table - table := s.ct.stateTable - { - tsi := int(tableSize) - for u, v := range tableSymbol { - // TableU16 : sorted by symbol order; gives next state value - table[cumul[v]] = uint16(tsi + u) - cumul[v]++ - } - } - - // Build Symbol Transformation Table - { - total := int16(0) - symbolTT := s.ct.symbolTT[:s.symbolLen] - tableLog := s.actualTableLog - tl := (uint32(tableLog) << 16) - (1 << tableLog) - for i, v := range s.norm[:s.symbolLen] { - switch v { - case 0: - case -1, 1: - symbolTT[i].deltaNbBits = tl - symbolTT[i].deltaFindState = int32(total - 1) - total++ - default: - maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) - minStatePlus := uint32(v) << maxBitsOut - symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus - symbolTT[i].deltaFindState = int32(total - v) - total += v - } - } - if total != int16(tableSize) { - return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) - } - } - return nil -} - -// countSimple will create a simple histogram in s.count. -// Returns the biggest count. -// Does not update s.clearCount. -func (s *Scratch) countSimple(in []byte) (max int) { - for _, v := range in { - s.count[v]++ - } - m := uint32(0) - for i, v := range s.count[:] { - if v > m { - m = v - } - if v > 0 { - s.symbolLen = uint16(i) + 1 - } - } - return int(m) -} - -// minTableLog provides the minimum logSize to safely represent a distribution. 
-func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 - minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 - if minBitsSrc < minBitsSymbols { - return uint8(minBitsSrc) - } - return uint8(minBitsSymbols) -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *Scratch) optimalTableLog() { - tableLog := s.TableLog - minBits := s.minTableLog() - maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minTablelog { - tableLog = minTablelog - } - if tableLog > maxTableLog { - tableLog = maxTableLog - } - s.actualTableLog = tableLog -} - -var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} - -// normalizeCount will normalize the count of the symbols so -// the total is equal to the table size. -func (s *Scratch) normalizeCount() error { - var ( - tableLog = s.actualTableLog - scale = 62 - uint64(tableLog) - step = (1 << 62) / uint64(s.br.remain()) - vStep = uint64(1) << (scale - 20) - stillToDistribute = int16(1 << tableLog) - largest int - largestP int16 - lowThreshold = (uint32)(s.br.remain() >> tableLog) - ) - - for i, cnt := range s.count[:s.symbolLen] { - // already handled - // if (count[s] == s.length) return 0; /* rle special case */ - - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - stillToDistribute-- - } else { - proba := (int16)((uint64(cnt) * step) >> scale) - if proba < 8 { - restToBeat := vStep * uint64(rtbTable[proba]) - v := uint64(cnt)*step - (uint64(proba) << scale) - if v > restToBeat { - proba++ - } - } - if proba > largestP { - largestP = proba - largest = i - } - s.norm[i] = proba - stillToDistribute -= proba - } - } - - if -stillToDistribute >= (s.norm[largest] >> 1) { - // corner case, need another normalization method - return s.normalizeCount2() - } - s.norm[largest] += stillToDistribute - return nil -} - -// Secondary normalization method. -// To be used when primary method fails. 
-func (s *Scratch) normalizeCount2() error { - const notYetAssigned = -2 - var ( - distributed uint32 - total = uint32(s.br.remain()) - tableLog = s.actualTableLog - lowThreshold = total >> tableLog - lowOne = (total * 3) >> (tableLog + 1) - ) - for i, cnt := range s.count[:s.symbolLen] { - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - distributed++ - total -= cnt - continue - } - if cnt <= lowOne { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - s.norm[i] = notYetAssigned - } - toDistribute := (1 << tableLog) - distributed - - if (total / toDistribute) > lowOne { - // risk of rounding to zero - lowOne = (total * 3) / (toDistribute * 2) - for i, cnt := range s.count[:s.symbolLen] { - if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - } - toDistribute = (1 << tableLog) - distributed - } - if distributed == uint32(s.symbolLen)+1 { - // all values are pretty poor; - // probably incompressible data (should have already been detected); - // find max, then give all remaining points to max - var maxV int - var maxC uint32 - for i, cnt := range s.count[:s.symbolLen] { - if cnt > maxC { - maxV = i - maxC = cnt - } - } - s.norm[maxV] += int16(toDistribute) - return nil - } - - if total == 0 { - // all of the symbols were low enough for the lowOne or lowThreshold - for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { - if s.norm[i] > 0 { - toDistribute-- - s.norm[i]++ - } - } - return nil - } - - var ( - vStepLog = 62 - uint64(tableLog) - mid = uint64((1 << (vStepLog - 1)) - 1) - rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining - tmpTotal = mid - ) - for i, cnt := range s.count[:s.symbolLen] { - if s.norm[i] == notYetAssigned { - var ( - end = tmpTotal + uint64(cnt)*rStep - sStart = uint32(tmpTotal >> vStepLog) - sEnd = uint32(end >> vStepLog) - weight = sEnd - sStart - ) - if weight < 1 { - return errors.New("weight < 1") - } - s.norm[i] = int16(weight) - tmpTotal = end - } - } - return nil -} - -// validateNorm validates the normalized histogram table. 
-func (s *Scratch) validateNorm() (err error) { - var total int - for _, v := range s.norm[:s.symbolLen] { - if v >= 0 { - total += int(v) - } else { - total -= int(v) - } - } - defer func() { - if err == nil { - return - } - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) - for i, v := range s.norm[:s.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) - } - }() - if total != (1 << s.actualTableLog) { - return fmt.Errorf("warning: Total == %d != %d", total, 1< tablelogAbsoluteMax { - return errors.New("tableLog too large") - } - bitStream >>= 4 - bitCount := uint(4) - - s.actualTableLog = uint8(nbBits) - remaining := int32((1 << nbBits) + 1) - threshold := int32(1 << nbBits) - gotTotal := int32(0) - nbBits++ - - for remaining > 1 { - if previous0 { - n0 := charnum - for (bitStream & 0xFFFF) == 0xFFFF { - n0 += 24 - if b.off < iend-5 { - b.advance(2) - bitStream = b.Uint32() >> bitCount - } else { - bitStream >>= 16 - bitCount += 16 - } - } - for (bitStream & 3) == 3 { - n0 += 3 - bitStream >>= 2 - bitCount += 2 - } - n0 += uint16(bitStream & 3) - bitCount += 2 - if n0 > maxSymbolValue { - return errors.New("maxSymbolValue too small") - } - for charnum < n0 { - s.norm[charnum&0xff] = 0 - charnum++ - } - - if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { - b.advance(bitCount >> 3) - bitCount &= 7 - bitStream = b.Uint32() >> bitCount - } else { - bitStream >>= 2 - } - } - - max := (2*(threshold) - 1) - (remaining) - var count int32 - - if (int32(bitStream) & (threshold - 1)) < max { - count = int32(bitStream) & (threshold - 1) - bitCount += nbBits - 1 - } else { - count = int32(bitStream) & (2*threshold - 1) - if count >= threshold { - count -= max - } - bitCount += nbBits - } - - count-- // extra accuracy - if count < 0 { - // -1 means +1 - remaining += count - gotTotal -= count - } else { - remaining -= count - gotTotal += count - } - s.norm[charnum&0xff] = int16(count) - charnum++ - previous0 = count == 0 - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { - b.advance(bitCount >> 3) - bitCount &= 7 - } else { - bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) - b.off = len(b.b) - 4 - } - bitStream = b.Uint32() >> (bitCount & 31) - } - s.symbolLen = charnum - - if s.symbolLen <= 1 { - return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) - } - if s.symbolLen > maxSymbolValue+1 { - return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) - } - if remaining != 1 { - return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) - } - if bitCount > 32 { - return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) - } - if gotTotal != 1<> 3) - return nil -} - -// decSymbol contains information about a state entry, -// Including the state offset base, the output symbol and -// the number of bits to read for the low part of the destination state. -type decSymbol struct { - newState uint16 - symbol uint8 - nbBits uint8 -} - -// allocDtable will allocate decoding tables if they are not big enough. 
-func (s *Scratch) allocDtable() { - tableSize := 1 << s.actualTableLog - if cap(s.decTable) < tableSize { - s.decTable = make([]decSymbol, tableSize) - } - s.decTable = s.decTable[:tableSize] - - if cap(s.ct.tableSymbol) < 256 { - s.ct.tableSymbol = make([]byte, 256) - } - s.ct.tableSymbol = s.ct.tableSymbol[:256] - - if cap(s.ct.stateTable) < 256 { - s.ct.stateTable = make([]uint16, 256) - } - s.ct.stateTable = s.ct.stateTable[:256] -} - -// buildDtable will build the decoding table. -func (s *Scratch) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - s.allocDtable() - symbolNext := s.ct.stateTable[:256] - - // Init, lay down lowprob symbols - s.zeroBits = false - { - largeLimit := int16(1 << (s.actualTableLog - 1)) - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.decTable[highThreshold].symbol = uint8(i) - highThreshold-- - symbolNext[i] = 1 - } else { - if v >= largeLimit { - s.zeroBits = true - } - symbolNext[i] = uint16(v) - } - } - } - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.decTable[position].symbol = uint8(ss) - position = (position + step) & tableMask - for position > highThreshold { - // lowprob area - position = (position + step) & tableMask - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.decTable { - symbol := v.symbol - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.decTable[u].nbBits = nBits - newState := (nextState << nBits) - tableSize - if newState >= tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.decTable[u].newState = newState - } - } - return nil -} - -// decompress will decompress the bitstream. -// If the buffer is over-read an error is returned. -func (s *Scratch) decompress() error { - br := &s.bits - br.init(s.br.unread()) - - var s1, s2 decoder - // Initialize and decode first state and symbol. - s1.init(br, s.decTable, s.actualTableLog) - s2.init(br, s.decTable, s.actualTableLog) - - // Use temp table to avoid bound checks/append penalty. - var tmp = s.ct.tableSymbol[:256] - var off uint8 - - // Main part - if !s.zeroBits { - for br.off >= 8 { - br.fillFast() - tmp[off+0] = s1.nextFast() - tmp[off+1] = s2.nextFast() - br.fillFast() - tmp[off+2] = s1.nextFast() - tmp[off+3] = s2.nextFast() - off += 4 - // When off is 0, we have overflowed and should write. - if off == 0 { - s.Out = append(s.Out, tmp...) - if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - } - } else { - for br.off >= 8 { - br.fillFast() - tmp[off+0] = s1.next() - tmp[off+1] = s2.next() - br.fillFast() - tmp[off+2] = s1.next() - tmp[off+3] = s2.next() - off += 4 - if off == 0 { - s.Out = append(s.Out, tmp...) - // When off is 0, we have overflowed and should write. 
- if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - } - } - s.Out = append(s.Out, tmp[:off]...) - - // Final bits, a bit more expensive check - for { - if s1.finished() { - s.Out = append(s.Out, s1.final(), s2.final()) - break - } - br.fill() - s.Out = append(s.Out, s1.next()) - if s2.finished() { - s.Out = append(s.Out, s2.final(), s1.final()) - break - } - s.Out = append(s.Out, s2.next()) - if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - return br.close() -} - -// decoder keeps track of the current state and updates it from the bitstream. -type decoder struct { - state uint16 - br *bitReader - dt []decSymbol -} - -// init will initialize the decoder and read the first state from the stream. -func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { - d.dt = dt - d.br = in - d.state = in.getBits(tableLog) -} - -// next returns the next symbol and sets the next state. -// At least tablelog bits must be available in the bit reader. -func (d *decoder) next() uint8 { - n := &d.dt[d.state] - lowBits := d.br.getBits(n.nbBits) - d.state = n.newState + lowBits - return n.symbol -} - -// finished returns true if all bits have been read from the bitstream -// and the next state would require reading bits from the input. -func (d *decoder) finished() bool { - return d.br.finished() && d.dt[d.state].nbBits > 0 -} - -// final returns the current state symbol without decoding the next. -func (d *decoder) final() uint8 { - return d.dt[d.state].symbol -} - -// nextFast returns the next symbol and sets the next state. -// This can only be used if no symbols are 0 bits. -// At least tablelog bits must be available in the bit reader. -func (d *decoder) nextFast() uint8 { - n := d.dt[d.state] - lowBits := d.br.getBitsFast(n.nbBits) - d.state = n.newState + lowBits - return n.symbol -} diff --git a/src/vendor/github.com/klauspost/compress/fse/fse.go b/src/vendor/github.com/klauspost/compress/fse/fse.go deleted file mode 100644 index 535cbadfd..000000000 --- a/src/vendor/github.com/klauspost/compress/fse/fse.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -// Package fse provides Finite State Entropy encoding and decoding. -// -// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding -// for byte blocks as implemented in zstd. -// -// See https://github.com/klauspost/compress/tree/master/fse for more information. -package fse - -import ( - "errors" - "fmt" - "math/bits" -) - -const ( - /*!MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) - * Increasing memory usage improves compression ratio - * Reduced memory usage can improve speed, due to cache effect - * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ - maxMemoryUsage = 14 - defaultMemoryUsage = 13 - - maxTableLog = maxMemoryUsage - 2 - maxTablesize = 1 << maxTableLog - defaultTablelog = defaultMemoryUsage - 2 - minTablelog = 5 - maxSymbolValue = 255 -) - -var ( - // ErrIncompressible is returned when input is judged to be too hard to compress. 
- ErrIncompressible = errors.New("input is not compressible") - - // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. - ErrUseRLE = errors.New("input is single value repeated") -) - -// Scratch provides temporary storage for compression and decompression. -type Scratch struct { - // Private - count [maxSymbolValue + 1]uint32 - norm [maxSymbolValue + 1]int16 - br byteReader - bits bitReader - bw bitWriter - ct cTable // Compression tables. - decTable []decSymbol // Decompression table. - maxCount int // count of the most probable symbol - - // Per block parameters. - // These can be used to override compression parameters of the block. - // Do not touch, unless you know what you are doing. - - // Out is output buffer. - // If the scratch is re-used before the caller is done processing the output, - // set this field to nil. - // Otherwise the output buffer will be re-used for next Compression/Decompression step - // and allocation will be avoided. - Out []byte - - // DecompressLimit limits the maximum decoded size acceptable. - // If > 0 decompression will stop when approximately this many bytes - // has been decoded. - // If 0, maximum size will be 2GB. - DecompressLimit int - - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - zeroBits bool // no bits has prob > 50%. - clearCount bool // clear count - - // MaxSymbolValue will override the maximum symbol value of the next block. - MaxSymbolValue uint8 - - // TableLog will attempt to override the tablelog for the next block. - TableLog uint8 -} - -// Histogram allows to populate the histogram and skip that step in the compression, -// It otherwise allows to inspect the histogram when compression is done. -// To indicate that you have populated the histogram call HistogramFinished -// with the value of the highest populated symbol, as well as the number of entries -// in the most populated entry. These are accepted at face value. -// The returned slice will always be length 256. -func (s *Scratch) Histogram() []uint32 { - return s.count[:] -} - -// HistogramFinished can be called to indicate that the histogram has been populated. -// maxSymbol is the index of the highest set symbol of the next data segment. -// maxCount is the number of entries in the most populated entry. -// These are accepted at face value. -func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { - s.maxCount = maxCount - s.symbolLen = uint16(maxSymbol) + 1 - s.clearCount = maxCount != 0 -} - -// prepare will prepare and allocate scratch tables used for both compression and decompression. -func (s *Scratch) prepare(in []byte) (*Scratch, error) { - if s == nil { - s = &Scratch{} - } - if s.MaxSymbolValue == 0 { - s.MaxSymbolValue = 255 - } - if s.TableLog == 0 { - s.TableLog = defaultTablelog - } - if s.TableLog > maxTableLog { - return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) - } - if cap(s.Out) == 0 { - s.Out = make([]byte, 0, len(in)) - } - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - s.br.init(in) - if s.DecompressLimit == 0 { - // Max size 2GB. - s.DecompressLimit = (2 << 30) - 1 - } - - return s, nil -} - -// tableStep returns the next table index. 
-func tableStep(tableSize uint32) uint32 { - return (tableSize >> 1) + (tableSize >> 3) + 3 -} - -func highBits(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/src/vendor/github.com/klauspost/compress/gen.sh b/src/vendor/github.com/klauspost/compress/gen.sh deleted file mode 100644 index aff942205..000000000 --- a/src/vendor/github.com/klauspost/compress/gen.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh - -cd s2/cmd/_s2sx/ || exit 1 -go generate . diff --git a/src/vendor/github.com/klauspost/compress/huff0/.gitignore b/src/vendor/github.com/klauspost/compress/huff0/.gitignore deleted file mode 100644 index b3d262958..000000000 --- a/src/vendor/github.com/klauspost/compress/huff0/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/huff0-fuzz.zip diff --git a/src/vendor/github.com/klauspost/compress/huff0/README.md b/src/vendor/github.com/klauspost/compress/huff0/README.md deleted file mode 100644 index 8b6e5c663..000000000 --- a/src/vendor/github.com/klauspost/compress/huff0/README.md +++ /dev/null @@ -1,89 +0,0 @@ -# Huff0 entropy compression - -This package provides Huff0 encoding and decoding as used in zstd. - -[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders), -a Huffman codec designed for modern CPU, featuring OoO (Out of Order) operations on multiple ALU -(Arithmetic Logic Unit), achieving extremely fast compression and decompression speeds. - -This can be used for compressing input with a lot of similar input values to the smallest number of bytes. -This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, -but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. - -* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0) - -## News - -This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package. - -This ensures that most functionality is well tested. - -# Usage - -This package provides a low level interface that allows to compress single independent blocks. - -Each block is separate, and there is no built in integrity checks. -This means that the caller should keep track of block sizes and also do checksums if needed. - -Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and -[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions. -You must provide input and will receive the output and maybe an error. - -These error values can be returned: - -| Error | Description | -|---------------------|-----------------------------------------------------------------------------| -| `` | Everything ok, output is returned | -| `ErrIncompressible` | Returned when input is judged to be too hard to compress | -| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | -| `ErrTooBig` | Returned if the input block exceeds the maximum allowed size (128 Kib) | -| `(error)` | An internal error occurred. | - - -As can be seen above some of there are errors that will be returned even under normal operation so it is important to handle these. - -To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object -that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same -object can be used for both. 
- -Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this -you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. - -The `Scratch` object will retain state that allows to re-use previous tables for encoding and decoding. - -## Tables and re-use - -Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results. - -The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy) -that controls this behaviour. See the documentation for details. This can be altered between each block. - -Do however note that this information is *not* stored in the output block and it is up to the users of the package to -record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called, -based on the boolean reported back from the CompressXX call. - -If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the -[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object. - -## Decompressing - -The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable). -This will initialize the decoding tables. -You can supply the complete block to `ReadTable` and it will return the data part of the block -which can be given to the decompressor. - -Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X) -or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function. - -For concurrently decompressing content with a fixed table a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size. - -You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back -your input was likely corrupted. - -It is important to note that a successful decoding does *not* mean your output matches your original input. -There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. - -# Contributing - -Contributions are always welcome. Be aware that adding public functions will require good justification and breaking -changes will likely not be accepted. If in doubt open an issue before writing the PR. diff --git a/src/vendor/github.com/klauspost/compress/huff0/bitreader.go b/src/vendor/github.com/klauspost/compress/huff0/bitreader.go deleted file mode 100644 index a4979e886..000000000 --- a/src/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ /dev/null @@ -1,329 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -import ( - "encoding/binary" - "errors" - "io" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. 
-type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBit32(uint32(v))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) peekBitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -func (b *bitReader) advance(n uint8) { - b.bitsRead += n -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReaderBytes struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReaderBytes) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.advance(8 - uint8(highBit32(uint32(v)))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. 
-func (b *bitReaderBytes) peekByteFast() uint8 { - got := uint8(b.value >> 56) - return got -} - -func (b *bitReaderBytes) advance(n uint8) { - b.bitsRead += n - b.value <<= n & 63 -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReaderBytes) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << (b.bitsRead - 32) - b.bitsRead -= 32 - b.off -= 4 -} - -// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. -func (b *bitReaderBytes) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReaderBytes) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << (b.bitsRead - 32) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReaderBytes) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReaderBytes) close() error { - // Release reference. - b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - -// bitReaderShifted reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReaderShifted struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReaderShifted) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.advance(8 - uint8(highBit32(uint32(v)))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { - return uint16(b.value >> ((64 - n) & 63)) -} - -func (b *bitReaderShifted) advance(n uint8) { - b.bitsRead += n - b.value <<= n & 63 -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReaderShifted) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << ((b.bitsRead - 32) & 63) - b.bitsRead -= 32 - b.off -= 4 -} - -// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. -func (b *bitReaderShifted) fillFastStart() { - // Do single re-slice to avoid bounds checks. 
- b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReaderShifted) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << ((b.bitsRead - 32) & 63) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReaderShifted) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReaderShifted) close() error { - // Release reference. - b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} diff --git a/src/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/src/vendor/github.com/klauspost/compress/huff0/bitwriter.go deleted file mode 100644 index 6bce4e87d..000000000 --- a/src/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -import "fmt" - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// encSymbol will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) encSymbol(ct cTable, symbol byte) { - enc := ct[symbol] - b.bitContainer |= uint64(enc.val) << (b.nBits & 63) - if false { - if enc.nBits == 0 { - panic("nbits 0") - } - } - b.nBits += enc.nBits -} - -// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. 
-func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { - encA := ct[av] - encB := ct[bv] - sh := b.nBits & 63 - combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) - b.bitContainer |= combined << sh - if false { - if encA.nBits == 0 { - panic("nbitsA 0") - } - if encB.nBits == 0 { - panic("nbitsB 0") - } - } - b.nBits += encA.nBits + encB.nBits -} - -// addBits16ZeroNC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -// This is fastest if bits can be zero. -func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { - if bits == 0 { - return - } - value <<= (16 - bits) & 15 - value >>= (16 - bits) & 15 - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. -func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - return - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - b.bitContainer >>= 1 << 3 - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - b.bitContainer >>= 2 << 3 - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - b.bitContainer >>= 3 << 3 - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - b.bitContainer >>= 4 << 3 - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - b.bitContainer >>= 5 << 3 - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - b.bitContainer >>= 6 << 3 - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - b.bitContainer >>= 7 << 3 - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - b.bitContainer = 0 - b.nBits = 0 - return - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.nBits &= 7 -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() error { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. 
- b.flushAlign() - return nil -} - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/src/vendor/github.com/klauspost/compress/huff0/bytereader.go b/src/vendor/github.com/klauspost/compress/huff0/bytereader.go deleted file mode 100644 index 50bcdf6ea..000000000 --- a/src/vendor/github.com/klauspost/compress/huff0/bytereader.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - -// Int32 returns a little endian int32 starting at current offset. -func (b byteReader) Int32() int32 { - v3 := int32(b.b[b.off+3]) - v2 := int32(b.b[b.off+2]) - v1 := int32(b.b[b.off+1]) - v0 := int32(b.b[b.off]) - return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 -} - -// Uint32 returns a little endian uint32 starting at current offset. -func (b byteReader) Uint32() uint32 { - v3 := uint32(b.b[b.off+3]) - v2 := uint32(b.b[b.off+2]) - v1 := uint32(b.b[b.off+1]) - v0 := uint32(b.b[b.off]) - return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 -} - -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/src/vendor/github.com/klauspost/compress/huff0/compress.go b/src/vendor/github.com/klauspost/compress/huff0/compress.go deleted file mode 100644 index 8323dc053..000000000 --- a/src/vendor/github.com/klauspost/compress/huff0/compress.go +++ /dev/null @@ -1,720 +0,0 @@ -package huff0 - -import ( - "fmt" - "runtime" - "sync" -) - -// Compress1X will compress the input. -// The output can be decoded using Decompress1X. -// Supply a Scratch object. The scratch object contains state about re-use, -// So when sharing across independent encodes, be sure to set the re-use policy. -func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { - s, err = s.prepare(in) - if err != nil { - return nil, false, err - } - return compress(in, s, s.compress1X) -} - -// Compress4X will compress the input. The input is split into 4 independent blocks -// and compressed similar to Compress1X. -// The output can be decoded using Decompress4X. -// Supply a Scratch object. The scratch object contains state about re-use, -// So when sharing across independent encodes, be sure to set the re-use policy. -func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { - s, err = s.prepare(in) - if err != nil { - return nil, false, err - } - if false { - // TODO: compress4Xp only slightly faster. 
- const parallelThreshold = 8 << 10 - if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { - return compress(in, s, s.compress4X) - } - return compress(in, s, s.compress4Xp) - } - return compress(in, s, s.compress4X) -} - -func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { - // Nuke previous table if we cannot reuse anyway. - if s.Reuse == ReusePolicyNone { - s.prevTable = s.prevTable[:0] - } - - // Create histogram, if none was provided. - maxCount := s.maxCount - var canReuse = false - if maxCount == 0 { - maxCount, canReuse = s.countSimple(in) - } else { - canReuse = s.canUseTable(s.prevTable) - } - - // We want the output size to be less than this: - wantSize := len(in) - if s.WantLogLess > 0 { - wantSize -= wantSize >> s.WantLogLess - } - - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount >= len(in) { - if maxCount > len(in) { - return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) - } - if len(in) == 1 { - return nil, false, ErrIncompressible - } - // One symbol, use RLE - return nil, false, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return nil, false, ErrIncompressible - } - if s.Reuse == ReusePolicyMust && !canReuse { - // We must reuse, but we can't. - return nil, false, ErrIncompressible - } - if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { - keepTable := s.cTable - keepTL := s.actualTableLog - s.cTable = s.prevTable - s.actualTableLog = s.prevTableLog - s.Out, err = compressor(in) - s.cTable = keepTable - s.actualTableLog = keepTL - if err == nil && len(s.Out) < wantSize { - s.OutData = s.Out - return s.Out, true, nil - } - if s.Reuse == ReusePolicyMust { - return nil, false, ErrIncompressible - } - // Do not attempt to re-use later. - s.prevTable = s.prevTable[:0] - } - - // Calculate new table. - err = s.buildCTable() - if err != nil { - return nil, false, err - } - - if false && !s.canUseTable(s.cTable) { - panic("invalid table generated") - } - - if s.Reuse == ReusePolicyAllow && canReuse { - hSize := len(s.Out) - oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) - newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) - if oldSize <= hSize+newSize || hSize+12 >= wantSize { - // Retain cTable even if we re-use. - keepTable := s.cTable - keepTL := s.actualTableLog - - s.cTable = s.prevTable - s.actualTableLog = s.prevTableLog - s.Out, err = compressor(in) - - // Restore ctable. - s.cTable = keepTable - s.actualTableLog = keepTL - if err != nil { - return nil, false, err - } - if len(s.Out) >= wantSize { - return nil, false, ErrIncompressible - } - s.OutData = s.Out - return s.Out, true, nil - } - } - - // Use new table - err = s.cTable.write(s) - if err != nil { - s.OutTable = nil - return nil, false, err - } - s.OutTable = s.Out - - // Compress using new table - s.Out, err = compressor(in) - if err != nil { - s.OutTable = nil - return nil, false, err - } - if len(s.Out) >= wantSize { - s.OutTable = nil - return nil, false, ErrIncompressible - } - // Move current table into previous. 
- s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] - s.OutData = s.Out[len(s.OutTable):] - return s.Out, false, nil -} - -// EstimateSizes will estimate the data sizes -func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { - s, err = s.prepare(in) - if err != nil { - return 0, 0, 0, err - } - - // Create histogram, if none was provided. - tableSz, dataSz, reuseSz = -1, -1, -1 - maxCount := s.maxCount - var canReuse = false - if maxCount == 0 { - maxCount, canReuse = s.countSimple(in) - } else { - canReuse = s.canUseTable(s.prevTable) - } - - // We want the output size to be less than this: - wantSize := len(in) - if s.WantLogLess > 0 { - wantSize -= wantSize >> s.WantLogLess - } - - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount >= len(in) { - if maxCount > len(in) { - return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) - } - if len(in) == 1 { - return 0, 0, 0, ErrIncompressible - } - // One symbol, use RLE - return 0, 0, 0, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return 0, 0, 0, ErrIncompressible - } - - // Calculate new table. - err = s.buildCTable() - if err != nil { - return 0, 0, 0, err - } - - if false && !s.canUseTable(s.cTable) { - panic("invalid table generated") - } - - tableSz, err = s.cTable.estTableSize(s) - if err != nil { - return 0, 0, 0, err - } - if canReuse { - reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) - } - dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) - - // Restore - return tableSz, dataSz, reuseSz, nil -} - -func (s *Scratch) compress1X(src []byte) ([]byte, error) { - return s.compress1xDo(s.Out, src) -} - -func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { - var bw = bitWriter{out: dst} - - // N is length divisible by 4. - n := len(src) - n -= n & 3 - cTable := s.cTable[:256] - - // Encode last bytes. - for i := len(src) & 3; i > 0; i-- { - bw.encSymbol(cTable, src[n+i-1]) - } - n -= 4 - if s.actualTableLog <= 8 { - for ; n >= 0; n -= 4 { - tmp := src[n : n+4] - // tmp should be len 4 - bw.flush32() - bw.encTwoSymbols(cTable, tmp[3], tmp[2]) - bw.encTwoSymbols(cTable, tmp[1], tmp[0]) - } - } else { - for ; n >= 0; n -= 4 { - tmp := src[n : n+4] - // tmp should be len 4 - bw.flush32() - bw.encTwoSymbols(cTable, tmp[3], tmp[2]) - bw.flush32() - bw.encTwoSymbols(cTable, tmp[1], tmp[0]) - } - } - err := bw.close() - return bw.out, err -} - -var sixZeros [6]byte - -func (s *Scratch) compress4X(src []byte) ([]byte, error) { - if len(src) < 12 { - return nil, ErrIncompressible - } - segmentSize := (len(src) + 3) / 4 - - // Add placeholder for output length - offsetIdx := len(s.Out) - s.Out = append(s.Out, sixZeros[:]...) - - for i := 0; i < 4; i++ { - toDo := src - if len(toDo) > segmentSize { - toDo = toDo[:segmentSize] - } - src = src[len(toDo):] - - var err error - idx := len(s.Out) - s.Out, err = s.compress1xDo(s.Out, toDo) - if err != nil { - return nil, err - } - // Write compressed length as little endian before block. - if i < 3 { - // Last length is not written. - length := len(s.Out) - idx - s.Out[i*2+offsetIdx] = byte(length) - s.Out[i*2+offsetIdx+1] = byte(length >> 8) - } - } - - return s.Out, nil -} - -// compress4Xp will compress 4 streams using separate goroutines. 
-func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { - if len(src) < 12 { - return nil, ErrIncompressible - } - // Add placeholder for output length - s.Out = s.Out[:6] - - segmentSize := (len(src) + 3) / 4 - var wg sync.WaitGroup - var errs [4]error - wg.Add(4) - for i := 0; i < 4; i++ { - toDo := src - if len(toDo) > segmentSize { - toDo = toDo[:segmentSize] - } - src = src[len(toDo):] - - // Separate goroutine for each block. - go func(i int) { - s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) - wg.Done() - }(i) - } - wg.Wait() - for i := 0; i < 4; i++ { - if errs[i] != nil { - return nil, errs[i] - } - o := s.tmpOut[i] - // Write compressed length as little endian before block. - if i < 3 { - // Last length is not written. - s.Out[i*2] = byte(len(o)) - s.Out[i*2+1] = byte(len(o) >> 8) - } - - // Write output. - s.Out = append(s.Out, o...) - } - return s.Out, nil -} - -// countSimple will create a simple histogram in s.count. -// Returns the biggest count. -// Does not update s.clearCount. -func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { - reuse = true - for _, v := range in { - s.count[v]++ - } - m := uint32(0) - if len(s.prevTable) > 0 { - for i, v := range s.count[:] { - if v > m { - m = v - } - if v > 0 { - s.symbolLen = uint16(i) + 1 - if i >= len(s.prevTable) { - reuse = false - } else { - if s.prevTable[i].nBits == 0 { - reuse = false - } - } - } - } - return int(m), reuse - } - for i, v := range s.count[:] { - if v > m { - m = v - } - if v > 0 { - s.symbolLen = uint16(i) + 1 - } - } - return int(m), false -} - -func (s *Scratch) canUseTable(c cTable) bool { - if len(c) < int(s.symbolLen) { - return false - } - for i, v := range s.count[:s.symbolLen] { - if v != 0 && c[i].nBits == 0 { - return false - } - } - return true -} - -func (s *Scratch) validateTable(c cTable) bool { - if len(c) < int(s.symbolLen) { - return false - } - for i, v := range s.count[:s.symbolLen] { - if v != 0 { - if c[i].nBits == 0 { - return false - } - if c[i].nBits > s.actualTableLog { - return false - } - } - } - return true -} - -// minTableLog provides the minimum logSize to safely represent a distribution. 
-func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBit32(uint32(s.br.remain())) + 1 - minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 - if minBitsSrc < minBitsSymbols { - return uint8(minBitsSrc) - } - return uint8(minBitsSymbols) -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *Scratch) optimalTableLog() { - tableLog := s.TableLog - minBits := s.minTableLog() - maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minTablelog { - tableLog = minTablelog - } - if tableLog > tableLogMax { - tableLog = tableLogMax - } - s.actualTableLog = tableLog -} - -type cTableEntry struct { - val uint16 - nBits uint8 - // We have 8 bits extra -} - -const huffNodesMask = huffNodesLen - 1 - -func (s *Scratch) buildCTable() error { - s.optimalTableLog() - s.huffSort() - if cap(s.cTable) < maxSymbolValue+1 { - s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) - } else { - s.cTable = s.cTable[:s.symbolLen] - for i := range s.cTable { - s.cTable[i] = cTableEntry{} - } - } - - var startNode = int16(s.symbolLen) - nonNullRank := s.symbolLen - 1 - - nodeNb := startNode - huffNode := s.nodes[1 : huffNodesLen+1] - - // This overlays the slice above, but allows "-1" index lookups. - // Different from reference implementation. - huffNode0 := s.nodes[0 : huffNodesLen+1] - - for huffNode[nonNullRank].count == 0 { - nonNullRank-- - } - - lowS := int16(nonNullRank) - nodeRoot := nodeNb + lowS - 1 - lowN := nodeNb - huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count - huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb) - nodeNb++ - lowS -= 2 - for n := nodeNb; n <= nodeRoot; n++ { - huffNode[n].count = 1 << 30 - } - // fake entry, strong barrier - huffNode0[0].count = 1 << 31 - - // create parents - for nodeNb <= nodeRoot { - var n1, n2 int16 - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { - n1 = lowS - lowS-- - } else { - n1 = lowN - lowN++ - } - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { - n2 = lowS - lowS-- - } else { - n2 = lowN - lowN++ - } - - huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count - huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb) - nodeNb++ - } - - // distribute weights (unlimited tree height) - huffNode[nodeRoot].nbBits = 0 - for n := nodeRoot - 1; n >= startNode; n-- { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 - } - for n := uint16(0); n <= nonNullRank; n++ { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 - } - s.actualTableLog = s.setMaxHeight(int(nonNullRank)) - maxNbBits := s.actualTableLog - - // fill result into tree (val, nbBits) - if maxNbBits > tableLogMax { - return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) - } - var nbPerRank [tableLogMax + 1]uint16 - var valPerRank [16]uint16 - for _, v := range huffNode[:nonNullRank+1] { - nbPerRank[v.nbBits]++ - } - // determine stating value per rank - { - min := uint16(0) - for n := maxNbBits; n > 0; n-- { - // get starting value within each rank - valPerRank[n] = min - min += nbPerRank[n] - min >>= 1 - } - } - - // push nbBits per symbol, symbol order - for _, v := range huffNode[:nonNullRank+1] { - s.cTable[v.symbol].nBits = v.nbBits - } - - // assign value within 
rank, symbol order - t := s.cTable[:s.symbolLen] - for n, val := range t { - nbits := val.nBits & 15 - v := valPerRank[nbits] - t[n].val = v - valPerRank[nbits] = v + 1 - } - - return nil -} - -// huffSort will sort symbols, decreasing order. -func (s *Scratch) huffSort() { - type rankPos struct { - base uint32 - current uint32 - } - - // Clear nodes - nodes := s.nodes[:huffNodesLen+1] - s.nodes = nodes - nodes = nodes[1 : huffNodesLen+1] - - // Sort into buckets based on length of symbol count. - var rank [32]rankPos - for _, v := range s.count[:s.symbolLen] { - r := highBit32(v+1) & 31 - rank[r].base++ - } - // maxBitLength is log2(BlockSizeMax) + 1 - const maxBitLength = 18 + 1 - for n := maxBitLength; n > 0; n-- { - rank[n-1].base += rank[n].base - } - for n := range rank[:maxBitLength] { - rank[n].current = rank[n].base - } - for n, c := range s.count[:s.symbolLen] { - r := (highBit32(c+1) + 1) & 31 - pos := rank[r].current - rank[r].current++ - prev := nodes[(pos-1)&huffNodesMask] - for pos > rank[r].base && c > prev.count { - nodes[pos&huffNodesMask] = prev - pos-- - prev = nodes[(pos-1)&huffNodesMask] - } - nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} - } -} - -func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { - maxNbBits := s.actualTableLog - huffNode := s.nodes[1 : huffNodesLen+1] - //huffNode = huffNode[: huffNodesLen] - - largestBits := huffNode[lastNonNull].nbBits - - // early exit : no elt > maxNbBits - if largestBits <= maxNbBits { - return largestBits - } - totalCost := int(0) - baseCost := int(1) << (largestBits - maxNbBits) - n := uint32(lastNonNull) - - for huffNode[n].nbBits > maxNbBits { - totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)) - huffNode[n].nbBits = maxNbBits - n-- - } - // n stops at huffNode[n].nbBits <= maxNbBits - - for huffNode[n].nbBits == maxNbBits { - n-- - } - // n end at index of smallest symbol using < maxNbBits - - // renorm totalCost - totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ - - // repay normalized cost - { - const noSymbol = 0xF0F0F0F0 - var rankLast [tableLogMax + 2]uint32 - - for i := range rankLast[:] { - rankLast[i] = noSymbol - } - - // Get pos of last (smallest) symbol per rank - { - currentNbBits := maxNbBits - for pos := int(n); pos >= 0; pos-- { - if huffNode[pos].nbBits >= currentNbBits { - continue - } - currentNbBits = huffNode[pos].nbBits // < maxNbBits - rankLast[maxNbBits-currentNbBits] = uint32(pos) - } - } - - for totalCost > 0 { - nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 - - for ; nBitsToDecrease > 1; nBitsToDecrease-- { - highPos := rankLast[nBitsToDecrease] - lowPos := rankLast[nBitsToDecrease-1] - if highPos == noSymbol { - continue - } - if lowPos == noSymbol { - break - } - highTotal := huffNode[highPos].count - lowTotal := 2 * huffNode[lowPos].count - if highTotal <= lowTotal { - break - } - } - // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
- // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary - // FIXME: try to remove - for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { - nBitsToDecrease++ - } - totalCost -= 1 << (nBitsToDecrease - 1) - if rankLast[nBitsToDecrease-1] == noSymbol { - // this rank is no longer empty - rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] - } - huffNode[rankLast[nBitsToDecrease]].nbBits++ - if rankLast[nBitsToDecrease] == 0 { - /* special case, reached largest symbol */ - rankLast[nBitsToDecrease] = noSymbol - } else { - rankLast[nBitsToDecrease]-- - if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease { - rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ - } - } - } - - for totalCost < 0 { /* Sometimes, cost correction overshoot */ - if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ - for huffNode[n].nbBits == maxNbBits { - n-- - } - huffNode[n+1].nbBits-- - rankLast[1] = n + 1 - totalCost++ - continue - } - huffNode[rankLast[1]+1].nbBits-- - rankLast[1]++ - totalCost++ - } - } - return maxNbBits -} - -type nodeElt struct { - count uint32 - parent uint16 - symbol byte - nbBits uint8 -} diff --git a/src/vendor/github.com/klauspost/compress/huff0/decompress.go b/src/vendor/github.com/klauspost/compress/huff0/decompress.go deleted file mode 100644 index 9b7cc8e97..000000000 --- a/src/vendor/github.com/klauspost/compress/huff0/decompress.go +++ /dev/null @@ -1,1371 +0,0 @@ -package huff0 - -import ( - "errors" - "fmt" - "io" - - "github.com/klauspost/compress/fse" -) - -type dTable struct { - single []dEntrySingle - double []dEntryDouble -} - -// single-symbols decoding -type dEntrySingle struct { - entry uint16 -} - -// double-symbols decoding -type dEntryDouble struct { - seq uint16 - nBits uint8 - len uint8 -} - -// Uses special code for all tables that are < 8 bits. -const use8BitTables = true - -// ReadTable will read a table from the input. -// The size of the input may be larger than the table definition. -// Any content remaining after the table definition will be returned. -// If no Scratch is provided a new one is allocated. -// The returned Scratch can be used for encoding or decoding input using this table. 
-func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { - s, err = s.prepare(in) - if err != nil { - return s, nil, err - } - if len(in) <= 1 { - return s, nil, errors.New("input too small for table") - } - iSize := in[0] - in = in[1:] - if iSize >= 128 { - // Uncompressed - oSize := iSize - 127 - iSize = (oSize + 1) / 2 - if int(iSize) > len(in) { - return s, nil, errors.New("input too small for table") - } - for n := uint8(0); n < oSize; n += 2 { - v := in[n/2] - s.huffWeight[n] = v >> 4 - s.huffWeight[n+1] = v & 15 - } - s.symbolLen = uint16(oSize) - in = in[iSize:] - } else { - if len(in) < int(iSize) { - return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) - } - // FSE compressed weights - s.fse.DecompressLimit = 255 - hw := s.huffWeight[:] - s.fse.Out = hw - b, err := fse.Decompress(in[:iSize], s.fse) - s.fse.Out = nil - if err != nil { - return s, nil, err - } - if len(b) > 255 { - return s, nil, errors.New("corrupt input: output table too large") - } - s.symbolLen = uint16(len(b)) - in = in[iSize:] - } - - // collect weight stats - var rankStats [16]uint32 - weightTotal := uint32(0) - for _, v := range s.huffWeight[:s.symbolLen] { - if v > tableLogMax { - return s, nil, errors.New("corrupt input: weight too large") - } - v2 := v & 15 - rankStats[v2]++ - // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. - weightTotal += (1 << v2) >> 1 - } - if weightTotal == 0 { - return s, nil, errors.New("corrupt input: weights zero") - } - - // get last non-null symbol weight (implied, total must be 2^n) - { - tableLog := highBit32(weightTotal) + 1 - if tableLog > tableLogMax { - return s, nil, errors.New("corrupt input: tableLog too big") - } - s.actualTableLog = uint8(tableLog) - // determine last weight - { - total := uint32(1) << tableLog - rest := total - weightTotal - verif := uint32(1) << highBit32(rest) - lastWeight := highBit32(rest) + 1 - if verif != rest { - // last value must be a clean power of 2 - return s, nil, errors.New("corrupt input: last value not power of two") - } - s.huffWeight[s.symbolLen] = uint8(lastWeight) - s.symbolLen++ - rankStats[lastWeight]++ - } - } - - if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { - // by construction : at least 2 elts of rank 1, must be even - return s, nil, errors.New("corrupt input: min elt size, even check failed ") - } - - // TODO: Choose between single/double symbol decoding - - // Calculate starting value for each rank - { - var nextRankStart uint32 - for n := uint8(1); n < s.actualTableLog+1; n++ { - current := nextRankStart - nextRankStart += rankStats[n] << (n - 1) - rankStats[n] = current - } - } - - // fill DTable (always full size) - tSize := 1 << tableLogMax - if len(s.dt.single) != tSize { - s.dt.single = make([]dEntrySingle, tSize) - } - cTable := s.prevTable - if cap(cTable) < maxSymbolValue+1 { - cTable = make([]cTableEntry, 0, maxSymbolValue+1) - } - cTable = cTable[:maxSymbolValue+1] - s.prevTable = cTable[:s.symbolLen] - s.prevTableLog = s.actualTableLog - - for n, w := range s.huffWeight[:s.symbolLen] { - if w == 0 { - cTable[n] = cTableEntry{ - val: 0, - nBits: 0, - } - continue - } - length := (uint32(1) << w) >> 1 - d := dEntrySingle{ - entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), - } - - rank := &rankStats[w] - cTable[n] = cTableEntry{ - val: uint16(*rank >> (w - 1)), - nBits: uint8(d.entry), - } - - single := s.dt.single[*rank : *rank+length] - for i := range single { - single[i] = d - } - *rank += length - } - - 
return s, in, nil -} - -// Decompress1X will decompress a 1X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// Before this is called, the table must be initialized with ReadTable unless -// the encoder re-used the table. -// deprecated: Use the stateless Decoder() to get a concurrent version. -func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { - if cap(s.Out) < s.MaxDecodedSize { - s.Out = make([]byte, s.MaxDecodedSize) - } - s.Out = s.Out[:0:s.MaxDecodedSize] - s.Out, err = s.Decoder().Decompress1X(s.Out, in) - return s.Out, err -} - -// Decompress4X will decompress a 4X encoded stream. -// Before this is called, the table must be initialized with ReadTable unless -// the encoder re-used the table. -// The length of the supplied input must match the end of a block exactly. -// The destination size of the uncompressed data must be known and provided. -// deprecated: Use the stateless Decoder() to get a concurrent version. -func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { - if dstSize > s.MaxDecodedSize { - return nil, ErrMaxDecodedSizeExceeded - } - if cap(s.Out) < dstSize { - s.Out = make([]byte, s.MaxDecodedSize) - } - s.Out = s.Out[:0:dstSize] - s.Out, err = s.Decoder().Decompress4X(s.Out, in) - return s.Out, err -} - -// Decoder will return a stateless decoder that can be used by multiple -// decompressors concurrently. -// Before this is called, the table must be initialized with ReadTable. -// The Decoder is still linked to the scratch buffer so that cannot be reused. -// However, it is safe to discard the scratch. -func (s *Scratch) Decoder() *Decoder { - return &Decoder{ - dt: s.dt, - actualTableLog: s.actualTableLog, - } -} - -// Decoder provides stateless decoding. -type Decoder struct { - dt dTable - actualTableLog uint8 -} - -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress1X8Bit(dst, src) - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - dt := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - var buf [256]byte - var off uint8 - - for br.off >= 8 { - br.fillFast() - v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - // Refill - br.fillFast() - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - - if len(dst)+int(off) > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) 
- - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - if len(dst) >= maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) - } - return dst, br.close() -} - -// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { - if d.actualTableLog == 8 { - return d.decompress1X8BitExactly(dst, src) - } - var br bitReaderBytes - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - dt := d.dt.single[:256] - - // Use temp table to avoid bound checks/append penalty. - var buf [256]byte - var off uint8 - - switch d.actualTableLog { - case 8: - const shift = 8 - 8 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 7: - const shift = 8 - 7 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 6: - const shift = 8 - 6 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
- } - } - case 5: - const shift = 8 - 5 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 4: - const shift = 8 - 4 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 3: - const shift = 8 - 3 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 2: - const shift = 8 - 2 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 1: - const shift = 8 - 1 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - default: - return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) - } - - if len(dst)+int(off) > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) 
- - // br < 4, so uint8 is fine - bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) - shift := (8 - d.actualTableLog) & 7 - - for bitsLeft > 0 { - if br.bitsRead >= 64-8 { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - if len(dst) >= maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := dt[br.peekByteFast()>>shift] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= int8(nBits) - dst = append(dst, uint8(v.entry>>8)) - } - return dst, br.close() -} - -// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { - var br bitReaderBytes - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - dt := d.dt.single[:256] - - // Use temp table to avoid bound checks/append penalty. - var buf [256]byte - var off uint8 - - const shift = 56 - - //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - - if len(dst)+int(off) > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 4, so uint8 is fine - bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) - for bitsLeft > 0 { - if br.bitsRead >= 64-8 { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - if len(dst) >= maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := dt[br.peekByteFast()] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= int8(nBits) - dst = append(dst, uint8(v.entry>>8)) - } - return dst, br.close() -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. 
-func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - var buf [256]byte - var off uint8 - var decoded int - - // Decode 2 values from each decoder/loop. - const bufoff = 256 / 4 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - const stream = 0 - const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - br[stream].advance(uint8(v.entry)) - buf[off+bufoff*stream] = uint8(v.entry >> 8) - - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v2 := single[val2&tlMask] - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream2] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - br[stream].advance(uint8(v.entry)) - buf[off+bufoff*stream+1] = uint8(v.entry >> 8) - - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v2 = single[val2&tlMask] - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - br[stream].advance(uint8(v.entry)) - buf[off+bufoff*stream] = uint8(v.entry >> 8) - - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v2 := single[val2&tlMask] - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream2] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - br[stream].advance(uint8(v.entry)) - buf[off+bufoff*stream+1] = uint8(v.entry >> 8) - - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v2 = single[val2&tlMask] - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) - } - - off += 2 - - if off == bufoff { - if bufoff > dstEvery { - return nil, errors.New("corruption detected: stream overrun 1") - } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 - out = out[bufoff:] - decoded += 256 - // There must at least be 3 buffers left. 
- if len(out) < dstEvery*3 { - return nil, errors.New("corruption detected: stream overrun 2") - } - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - for i := range br { - offset := dstEvery * i - br := &br[i] - bitsLeft := br.off*8 + uint(64-br.bitsRead) - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= len(out) { - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { - if d.actualTableLog == 8 { - return d.decompress4X8bitExactly(dst, src) - } - - var br [4]bitReaderBytes - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - shift := (8 - d.actualTableLog) & 7 - - const tlSize = 1 << 8 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - var buf [256]byte - var off uint8 - var decoded int - - // Decode 4 values from each decoder/loop. - const bufoff = 256 / 4 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - // Interleave 2 decodes. 
- const stream = 0 - const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 := single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+1] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+2] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+3] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - } - - { - const stream = 2 - const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 := single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+1] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+2] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+3] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - } - - off += 4 - - if off == bufoff { - if bufoff > dstEvery { - return nil, errors.New("corruption detected: stream overrun 1") - } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 - out = out[bufoff:] - decoded += 256 - // There must at least be 3 buffers left. - if len(out) < dstEvery*3 { - return nil, errors.New("corruption detected: stream overrun 2") - } - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. 
- for i := range br { - offset := dstEvery * i - br := &br[i] - bitsLeft := int(br.off*8) + int(64-br.bitsRead) - for bitsLeft > 0 { - if br.finished() { - return nil, io.ErrUnexpectedEOF - } - if br.bitsRead >= 56 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value |= uint64(low) << (br.bitsRead - 32) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= len(out) { - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - v := single[br.peekByteFast()>>shift].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= int(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { - var br [4]bitReaderBytes - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const shift = 0 - const tlSize = 1 << 8 - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - var buf [256]byte - var off uint8 - var decoded int - - // Decode 4 values from each decoder/loop. - const bufoff = 256 / 4 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - // Interleave 2 decodes. 
- const stream = 0 - const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 := single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+1] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+2] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+3] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - } - - { - const stream = 2 - const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 := single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+1] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+2] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+3] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - } - - off += 4 - - if off == bufoff { - if bufoff > dstEvery { - return nil, errors.New("corruption detected: stream overrun 1") - } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 - out = out[bufoff:] - decoded += 256 - // There must at least be 3 buffers left. - if len(out) < dstEvery*3 { - return nil, errors.New("corruption detected: stream overrun 2") - } - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. 
- for i := range br { - offset := dstEvery * i - br := &br[i] - bitsLeft := int(br.off*8) + int(64-br.bitsRead) - for bitsLeft > 0 { - if br.finished() { - return nil, io.ErrUnexpectedEOF - } - if br.bitsRead >= 56 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value |= uint64(low) << (br.bitsRead - 32) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= len(out) { - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - v := single[br.peekByteFast()>>shift].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= int(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// matches will compare a decoding table to a coding table. -// Errors are written to the writer. -// Nothing will be written if table is ok. -func (s *Scratch) matches(ct cTable, w io.Writer) { - if s == nil || len(s.dt.single) == 0 { - return - } - dt := s.dt.single[:1<<s.actualTableLog] - tablelog := s.actualTableLog - ok := 0 - broken := 0 - for sym, enc := range ct { - errs := 0 - broken++ - if enc.nBits == 0 { - for _, dec := range dt { - if uint8(dec.entry>>8) == byte(sym) { - fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym) - errs++ - break - } - } - if errs == 0 { - broken-- - } - continue - } - // Unused bits in input - ub := tablelog - enc.nBits - top := enc.val << ub - // decoder looks at top bits. - dec := dt[top] - if uint8(dec.entry) != enc.nBits { - fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry)) - errs++ - } - if uint8(dec.entry>>8) != uint8(sym) { - fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8)) - errs++ - } - if errs > 0 { - fmt.Fprintf(w, "%d errros in base, stopping\n", errs) - continue - } - // Ensure that all combinations are covered. - for i := uint16(0); i < (1 << ub); i++ { - vval := top | i - dec := dt[vval] - if uint8(dec.entry) != enc.nBits { - fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry)) - errs++ - } - if uint8(dec.entry>>8) != uint8(sym) { - fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8)) - errs++ - } - if errs > 20 { - fmt.Fprintf(w, "%d errros, stopping\n", errs) - break - } - } - if errs == 0 { - ok++ - broken-- - } - } - if broken > 0 { - fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok) - } -} diff --git a/src/vendor/github.com/klauspost/compress/huff0/huff0.go b/src/vendor/github.com/klauspost/compress/huff0/huff0.go deleted file mode 100644 index 3ee00ecb4..000000000 --- a/src/vendor/github.com/klauspost/compress/huff0/huff0.go +++ /dev/null @@ -1,335 +0,0 @@ -// Package huff0 provides fast huffman encoding as used in zstd. -// -// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. 
-package huff0 - -import ( - "errors" - "fmt" - "math" - "math/bits" - - "github.com/klauspost/compress/fse" -) - -const ( - maxSymbolValue = 255 - - // zstandard limits tablelog to 11, see: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description - tableLogMax = 11 - tableLogDefault = 11 - minTablelog = 5 - huffNodesLen = 512 - - // BlockSizeMax is maximum input size for a single block uncompressed. - BlockSizeMax = 1<<18 - 1 -) - -var ( - // ErrIncompressible is returned when input is judged to be too hard to compress. - ErrIncompressible = errors.New("input is not compressible") - - // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. - ErrUseRLE = errors.New("input is single value repeated") - - // ErrTooBig is return if input is too large for a single block. - ErrTooBig = errors.New("input too big") - - // ErrMaxDecodedSizeExceeded is return if input is too large for a single block. - ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded") -) - -type ReusePolicy uint8 - -const ( - // ReusePolicyAllow will allow reuse if it produces smaller output. - ReusePolicyAllow ReusePolicy = iota - - // ReusePolicyPrefer will re-use aggressively if possible. - // This will not check if a new table will produce smaller output, - // except if the current table is impossible to use or - // compressed output is bigger than input. - ReusePolicyPrefer - - // ReusePolicyNone will disable re-use of tables. - // This is slightly faster than ReusePolicyAllow but may produce larger output. - ReusePolicyNone - - // ReusePolicyMust must allow reuse and produce smaller output. - ReusePolicyMust -) - -type Scratch struct { - count [maxSymbolValue + 1]uint32 - - // Per block parameters. - // These can be used to override compression parameters of the block. - // Do not touch, unless you know what you are doing. - - // Out is output buffer. - // If the scratch is re-used before the caller is done processing the output, - // set this field to nil. - // Otherwise the output buffer will be re-used for next Compression/Decompression step - // and allocation will be avoided. - Out []byte - - // OutTable will contain the table data only, if a new table has been generated. - // Slice of the returned data. - OutTable []byte - - // OutData will contain the compressed data. - // Slice of the returned data. - OutData []byte - - // MaxDecodedSize will set the maximum allowed output size. - // This value will automatically be set to BlockSizeMax if not set. - // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded. - MaxDecodedSize int - - br byteReader - - // MaxSymbolValue will override the maximum symbol value of the next block. - MaxSymbolValue uint8 - - // TableLog will attempt to override the tablelog for the next block. - // Must be <= 11 and >= 5. - TableLog uint8 - - // Reuse will specify the reuse policy - Reuse ReusePolicy - - // WantLogLess allows to specify a log 2 reduction that should at least be achieved, - // otherwise the block will be returned as incompressible. - // The reduction should then at least be (input size >> WantLogLess) - // If WantLogLess == 0 any improvement will do. - WantLogLess uint8 - - symbolLen uint16 // Length of active part of the symbol table. - maxCount int // count of the most probable symbol - clearCount bool // clear count - actualTableLog uint8 // Selected tablelog. 
- prevTableLog uint8 // Tablelog for previous table - prevTable cTable // Table used for previous compression. - cTable cTable // compression table - dt dTable // decompression table - nodes []nodeElt - tmpOut [4][]byte - fse *fse.Scratch - huffWeight [maxSymbolValue + 1]byte -} - -// TransferCTable will transfer the previously used compression table. -func (s *Scratch) TransferCTable(src *Scratch) { - if cap(s.prevTable) < len(src.prevTable) { - s.prevTable = make(cTable, 0, maxSymbolValue+1) - } - s.prevTable = s.prevTable[:len(src.prevTable)] - copy(s.prevTable, src.prevTable) - s.prevTableLog = src.prevTableLog -} - -func (s *Scratch) prepare(in []byte) (*Scratch, error) { - if len(in) > BlockSizeMax { - return nil, ErrTooBig - } - if s == nil { - s = &Scratch{} - } - if s.MaxSymbolValue == 0 { - s.MaxSymbolValue = maxSymbolValue - } - if s.TableLog == 0 { - s.TableLog = tableLogDefault - } - if s.TableLog > tableLogMax || s.TableLog < minTablelog { - return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) - } - if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { - s.MaxDecodedSize = BlockSizeMax - } - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - if cap(s.Out) == 0 { - s.Out = make([]byte, 0, len(in)) - } - s.Out = s.Out[:0] - - s.OutTable = nil - s.OutData = nil - if cap(s.nodes) < huffNodesLen+1 { - s.nodes = make([]nodeElt, 0, huffNodesLen+1) - } - s.nodes = s.nodes[:0] - if s.fse == nil { - s.fse = &fse.Scratch{} - } - s.br.init(in) - - return s, nil -} - -type cTable []cTableEntry - -func (c cTable) write(s *Scratch) error { - var ( - // precomputed conversion table - bitsToWeight [tableLogMax + 1]byte - huffLog = s.actualTableLog - // last weight is not saved. - maxSymbolValue = uint8(s.symbolLen - 1) - huffWeight = s.huffWeight[:256] - ) - const ( - maxFSETableLog = 6 - ) - // convert to weight - bitsToWeight[0] = 0 - for n := uint8(1); n < huffLog+1; n++ { - bitsToWeight[n] = huffLog + 1 - n - } - - // Acquire histogram for FSE. - hist := s.fse.Histogram() - hist = hist[:256] - for i := range hist[:16] { - hist[i] = 0 - } - for n := uint8(0); n < maxSymbolValue; n++ { - v := bitsToWeight[c[n].nBits] & 15 - huffWeight[n] = v - hist[v]++ - } - - // FSE compress if feasible. - if maxSymbolValue >= 2 { - huffMaxCnt := uint32(0) - huffMax := uint8(0) - for i, v := range hist[:16] { - if v == 0 { - continue - } - huffMax = byte(i) - if v > huffMaxCnt { - huffMaxCnt = v - } - } - s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) - s.fse.TableLog = maxFSETableLog - b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) - if err == nil && len(b) < int(s.symbolLen>>1) { - s.Out = append(s.Out, uint8(len(b))) - s.Out = append(s.Out, b...) - return nil - } - // Unable to compress (RLE/uncompressible) - } - // write raw values as 4-bits (max : 15) - if maxSymbolValue > (256 - 128) { - // should not happen : likely means source cannot be compressed - return ErrIncompressible - } - op := s.Out - // special case, pack weights 4 bits/weight. 
- op = append(op, 128|(maxSymbolValue-1)) - // be sure it doesn't cause msan issue in final combination - huffWeight[maxSymbolValue] = 0 - for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { - op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) - } - s.Out = op - return nil -} - -func (c cTable) estTableSize(s *Scratch) (sz int, err error) { - var ( - // precomputed conversion table - bitsToWeight [tableLogMax + 1]byte - huffLog = s.actualTableLog - // last weight is not saved. - maxSymbolValue = uint8(s.symbolLen - 1) - huffWeight = s.huffWeight[:256] - ) - const ( - maxFSETableLog = 6 - ) - // convert to weight - bitsToWeight[0] = 0 - for n := uint8(1); n < huffLog+1; n++ { - bitsToWeight[n] = huffLog + 1 - n - } - - // Acquire histogram for FSE. - hist := s.fse.Histogram() - hist = hist[:256] - for i := range hist[:16] { - hist[i] = 0 - } - for n := uint8(0); n < maxSymbolValue; n++ { - v := bitsToWeight[c[n].nBits] & 15 - huffWeight[n] = v - hist[v]++ - } - - // FSE compress if feasible. - if maxSymbolValue >= 2 { - huffMaxCnt := uint32(0) - huffMax := uint8(0) - for i, v := range hist[:16] { - if v == 0 { - continue - } - huffMax = byte(i) - if v > huffMaxCnt { - huffMaxCnt = v - } - } - s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) - s.fse.TableLog = maxFSETableLog - b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) - if err == nil && len(b) < int(s.symbolLen>>1) { - sz += 1 + len(b) - return sz, nil - } - // Unable to compress (RLE/uncompressible) - } - // write raw values as 4-bits (max : 15) - if maxSymbolValue > (256 - 128) { - // should not happen : likely means source cannot be compressed - return 0, ErrIncompressible - } - // special case, pack weights 4 bits/weight. - sz += 1 + int(maxSymbolValue/2) - return sz, nil -} - -// estimateSize returns the estimated size in bytes of the input represented in the -// histogram supplied. -func (c cTable) estimateSize(hist []uint32) int { - nbBits := uint32(7) - for i, v := range c[:len(hist)] { - nbBits += uint32(v.nBits) * hist[i] - } - return int(nbBits >> 3) -} - -// minSize returns the minimum possible size considering the shannon limit. -func (s *Scratch) minSize(total int) int { - nbBits := float64(7) - fTotal := float64(total) - for _, v := range s.count[:s.symbolLen] { - n := float64(v) - if n > 0 { - nbBits += math.Log2(fTotal/n) * n - } - } - return int(nbBits) >> 3 -} - -func highBit32(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/src/vendor/github.com/klauspost/compress/internal/snapref/LICENSE b/src/vendor/github.com/klauspost/compress/internal/snapref/LICENSE deleted file mode 100644 index 6050c10f4..000000000 --- a/src/vendor/github.com/klauspost/compress/internal/snapref/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/src/vendor/github.com/klauspost/compress/internal/snapref/decode.go deleted file mode 100644 index 40796a49d..000000000 --- a/src/vendor/github.com/klauspost/compress/internal/snapref/decode.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("snappy: corrupt input") - // ErrTooLarge reports that the uncompressed length is too large. - ErrTooLarge = errors.New("snappy: decoded block is too large") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrTooLarge - } - return int(v), n, nil -} - -const ( - decodeErrCodeCorrupt = 1 - decodeErrCodeUnsupportedLiteralLength = 2 -) - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// Decode handles the Snappy block format, not the Snappy stream format. 
-func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if dLen <= len(dst) { - dst = dst[:dLen] - } else { - dst = make([]byte, dLen) - } - switch decode(dst, src[s:]) { - case 0: - return dst, nil - case decodeErrCodeUnsupportedLiteralLength: - return nil, errUnsupportedLiteralLength - } - return nil, ErrCorrupt -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxBlockSize), - buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), - } -} - -// Reader is an io.Reader that can read Snappy-compressed bytes. -// -// Reader handles the Snappy stream format, not the Snappy block format. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. -func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrCorrupt - } - return false - } - return true -} - -func (r *Reader) fill() error { - for r.i >= r.j { - if !r.readFull(r.buf[:4], true) { - return r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - checksumSize - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.decoded[:n], false) { - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. 
Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.buf[:len(magicBody)], false) { - return r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return r.err - } - } - - return nil -} - -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil -} - -// ReadByte satisfies the io.ByteReader interface. -func (r *Reader) ReadByte() (byte, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - c := r.decoded[r.i] - r.i++ - return c, nil -} diff --git a/src/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/src/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go deleted file mode 100644 index 77395a6b8..000000000 --- a/src/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -// decode writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read, and that len(dst) -// equals that length. -// -// It returns 0 on success or a decodeErrCodeXxx error code on failure. -func decode(dst, src []byte) int { - var d, s, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - length = int(x) + 1 - if length <= 0 { - return decodeErrCodeUnsupportedLiteralLength - } - if length > len(dst)-d || length > len(src)-s { - return decodeErrCodeCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
- return decodeErrCodeCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - - case tagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - - case tagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - } - - if offset <= 0 || d < offset || length > len(dst)-d { - return decodeErrCodeCorrupt - } - // Copy from an earlier sub-slice of dst to a later sub-slice. - // If no overlap, use the built-in copy: - if offset >= length { - copy(dst[d:d+length], dst[d-offset:]) - d += length - continue - } - - // Unlike the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - // - // We align the slices into a and b and show the compiler they are the same size. - // This allows the loop to run without bounds checks. - a := dst[d : d+length] - b := dst[d-offset:] - b = b[:len(a)] - for i := range a { - a[i] = b[i] - } - d += length - } - if d != len(dst) { - return decodeErrCodeCorrupt - } - return 0 -} diff --git a/src/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/src/vendor/github.com/klauspost/compress/internal/snapref/encode.go deleted file mode 100644 index 13c6040a5..000000000 --- a/src/vendor/github.com/klauspost/compress/internal/snapref/encode.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -import ( - "encoding/binary" - "errors" - "io" -) - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// Encode handles the Snappy block format, not the Snappy stream format. -func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return dst[:d] -} - -// inputMargin is the minimum number of extra input bytes to keep, inside -// encodeBlock's inner loop. On some architectures, this margin lets us -// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) -// literals can be implemented as a single load to and store from a 16-byte -// register. 
That literal's actual length can be as short as 1 byte, so this -// can copy up to 15 bytes too much, but that's OK as subsequent iterations of -// the encoding loop will fix up the copy overrun, and this inputMargin ensures -// that we don't overrun the dst and src buffers. -const inputMargin = 16 - 1 - -// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that -// could be encoded with a copy tag. This is the minimum with respect to the -// algorithm used by encodeBlock, not a minimum enforced by the file format. -// -// The encoded output must start with at least a 1 byte literal, as there are -// no previous bytes to copy. A minimal (1 byte) copy after that, generated -// from an emitCopy call in encodeBlock's main loop, would require at least -// another inputMargin bytes, for the reason above: we want any emitLiteral -// calls inside encodeBlock's main loop to use the fast path if possible, which -// requires being able to overrun by inputMargin bytes. Thus, -// minNonLiteralBlockSize equals 1 + 1 + inputMargin. -// -// The C++ code doesn't use this exact threshold, but it could, as discussed at -// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion -// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an -// optimization. It should not affect the encoded form. This is tested by -// TestSameEncodingAsCppShortCopies. -const minNonLiteralBlockSize = 1 + 1 + inputMargin - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -// -// It will return a negative value if srcLen is too large to encode. -func MaxEncodedLen(srcLen int) int { - n := uint64(srcLen) - if n > 0xffffffff { - return -1 - } - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. - // - // This last factor dominates the blowup, so the final estimate is: - n = 32 + n + n/6 - if n > 0xffffffff { - return -1 - } - return int(n) -} - -var errClosed = errors.New("snappy: Writer is closed") - -// NewWriter returns a new Writer that compresses to w. -// -// The Writer returned does not buffer writes. There is no need to Flush or -// Close such a Writer. -// -// Deprecated: the Writer returned is not suitable for many small writes, only -// for few large writes. Use NewBufferedWriter instead, which is efficient -// regardless of the frequency and shape of the writes, and remember to Close -// that Writer when done. 
-func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - obuf: make([]byte, obufLen), - } -} - -// NewBufferedWriter returns a new Writer that compresses to w, using the -// framing format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -// -// The Writer returned buffers writes. Users must call Close to guarantee all -// data has been forwarded to the underlying io.Writer. They may also call -// Flush zero or more times before calling Close. -func NewBufferedWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - ibuf: make([]byte, 0, maxBlockSize), - obuf: make([]byte, obufLen), - } -} - -// Writer is an io.Writer that can write Snappy-compressed bytes. -// -// Writer handles the Snappy stream format, not the Snappy block format. -type Writer struct { - w io.Writer - err error - - // ibuf is a buffer for the incoming (uncompressed) bytes. - // - // Its use is optional. For backwards compatibility, Writers created by the - // NewWriter function have ibuf == nil, do not buffer incoming bytes, and - // therefore do not need to be Flush'ed or Close'd. - ibuf []byte - - // obuf is a buffer for the outgoing (compressed) bytes. - obuf []byte - - // wroteStreamHeader is whether we have written the stream header. - wroteStreamHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. This permits reusing a Writer rather than allocating a new one. -func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - if w.ibuf != nil { - w.ibuf = w.ibuf[:0] - } - w.wroteStreamHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (nRet int, errRet error) { - if w.ibuf == nil { - // Do not buffer incoming bytes. This does not perform or compress well - // if the caller of Writer.Write writes many small slices. This - // behavior is therefore deprecated, but still supported for backwards - // compatibility with code that doesn't explicitly Flush or Close. - return w.write(p) - } - - // The remainder of this method is based on bufio.Writer.Write from the - // standard library. - - for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { - var n int - if len(w.ibuf) == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. - n, _ = w.write(p) - } else { - n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - w.Flush() - } - nRet += n - p = p[n:] - } - if w.err != nil { - return nRet, w.err - } - n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - nRet += n - return nRet, nil -} - -func (w *Writer) write(p []byte) (nRet int, errRet error) { - if w.err != nil { - return 0, w.err - } - for len(p) > 0 { - obufStart := len(magicChunk) - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - copy(w.obuf, magicChunk) - obufStart = 0 - } - - var uncompressed []byte - if len(p) > maxBlockSize { - uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%. 
- compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) - chunkType := uint8(chunkTypeCompressedData) - chunkLen := 4 + len(compressed) - obufEnd := obufHeaderLen + len(compressed) - if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { - chunkType = chunkTypeUncompressedData - chunkLen = 4 + len(uncompressed) - obufEnd = obufHeaderLen - } - - // Fill in the per-chunk header that comes before the body. - w.obuf[len(magicChunk)+0] = chunkType - w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) - w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) - w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) - w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) - w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) - w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) - w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) - - if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { - w.err = err - return nRet, err - } - if chunkType == chunkTypeUncompressedData { - if _, err := w.w.Write(uncompressed); err != nil { - w.err = err - return nRet, err - } - } - nRet += len(uncompressed) - } - return nRet, nil -} - -// Flush flushes the Writer to its underlying io.Writer. -func (w *Writer) Flush() error { - if w.err != nil { - return w.err - } - if len(w.ibuf) == 0 { - return nil - } - w.write(w.ibuf) - w.ibuf = w.ibuf[:0] - return w.err -} - -// Close calls Flush and then closes the Writer. -func (w *Writer) Close() error { - w.Flush() - ret := w.err - if w.err == nil { - w.err = errClosed - } - return ret -} diff --git a/src/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/src/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go deleted file mode 100644 index 511bba65d..000000000 --- a/src/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -func load32(b []byte, i int) uint32 { - b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load64(b []byte, i int) uint64 { - b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. -// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= len(lit) && len(lit) <= 65536 -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - default: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - } - return i + copy(dst[i:], lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= 65535 -// 4 <= length && length <= 65535 -func emitCopy(dst []byte, offset, length int) int { - i := 0 - // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. 
The - // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is is a little lower (at 60 = 64 - 4), because - // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed - // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as - // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as - // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a - // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an - // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. - for length >= 68 { - // Emit a length 64 copy, encoded as 3 bytes. - dst[i+0] = 63<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 64 - } - if length > 64 { - // Emit a length 60 copy, encoded as 3 bytes. - dst[i+0] = 59<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 60 - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[i+0] = uint8(length-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - return i + 3 - } - // Emit the remaining copy, encoded as 2 bytes. - dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - return i + 2 -} - -// extendMatch returns the largest k such that k <= len(src) and that -// src[i:i+k-j] and src[j:k] have the same contents. -// -// It assumes that: -// 0 <= i && i < j && j <= len(src) -func extendMatch(src []byte, i, j int) int { - for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { - } - return j -} - -func hash(u, shift uint32) uint32 { - return (u * 0x1e35a7bd) >> shift -} - -// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlock(dst, src []byte) (d int) { - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - // The table element type is uint16, as s < sLimit and sLimit < len(src) - // and len(src) <= maxBlockSize and maxBlockSize == 65536. - const ( - maxTableSize = 1 << 14 - // tableMask is redundant, but helps the compiler eliminate bounds - // checks. - tableMask = maxTableSize - 1 - ) - shift := uint32(32 - 8) - for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - shift-- - } - // In Go, all array elements are zero-initialized, so there is no advantage - // to a smaller tableSize per se. However, it matches the C++ algorithm, - // and in the asm versions of this code, we can get away with zeroing only - // the first tableSize elements. - var table [maxTableSize]uint16 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. 
- s := 1 - nextHash := hash(load32(src, s), shift) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc.. When a match - // is found, immediately go back to looking at every byte. This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. - // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (ie. right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := 32 - - nextS := s - candidate := 0 - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidate = int(table[nextHash&tableMask]) - table[nextHash&tableMask] = uint16(s) - nextHash = hash(load32(src, nextS), shift) - if load32(src, s) == load32(src, candidate) { - break - } - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - d += emitLiteral(dst[d:], src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - - // Extend the 4-byte match as long as possible. - // - // This is an inlined version of: - // s = extendMatch(src, candidate+4, s+4) - s += 4 - for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { - } - - d += emitCopy(dst[d:], base-candidate, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. 
- x := load64(src, s-1) - prevHash := hash(uint32(x>>0), shift) - table[prevHash&tableMask] = uint16(s - 1) - currHash := hash(uint32(x>>8), shift) - candidate = int(table[currHash&tableMask]) - table[currHash&tableMask] = uint16(s) - if uint32(x>>8) != load32(src, candidate) { - nextHash = hash(uint32(x>>16), shift) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} diff --git a/src/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/src/vendor/github.com/klauspost/compress/internal/snapref/snappy.go deleted file mode 100644 index 34d01f4aa..000000000 --- a/src/vendor/github.com/klauspost/compress/internal/snapref/snappy.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snapref implements the Snappy compression format. It aims for very -// high speeds and reasonable compression. -// -// There are actually two Snappy formats: block and stream. They are related, -// but different: trying to decompress block-compressed data as a Snappy stream -// will fail, and vice versa. The block format is the Decode and Encode -// functions and the stream format is the Reader and Writer types. -// -// The block format, the more common case, is used when the complete size (the -// number of bytes) of the original data is known upfront, at the time -// compression starts. The stream format, also known as the framing format, is -// for when that isn't always true. -// -// The canonical, C++ implementation is at https://github.com/google/snappy and -// it only implements the block format. -package snapref - -import ( - "hash/crc32" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer issued by most - encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in - [1, 65). The length is 1 + m. The offset is the little-endian unsigned - integer denoted by the next 4 bytes. -*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicBody = "sNaPpY" - - // maxBlockSize is the maximum size of the input to encodeBlock. 
It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - maxBlockSize = 65536 - - // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. - maxEncodedLenOfMaxBlockSize = 76490 - - obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize - obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func crc(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/src/vendor/github.com/klauspost/compress/s2sx.mod b/src/vendor/github.com/klauspost/compress/s2sx.mod deleted file mode 100644 index 2263853fc..000000000 --- a/src/vendor/github.com/klauspost/compress/s2sx.mod +++ /dev/null @@ -1,4 +0,0 @@ -module github.com/klauspost/compress - -go 1.16 - diff --git a/src/vendor/github.com/klauspost/compress/s2sx.sum b/src/vendor/github.com/klauspost/compress/s2sx.sum deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/vendor/github.com/klauspost/compress/zstd/README.md b/src/vendor/github.com/klauspost/compress/zstd/README.md deleted file mode 100644 index c8f0f16fc..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/README.md +++ /dev/null @@ -1,441 +0,0 @@ -# zstd - -[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. -It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder. -A high performance compression algorithm is implemented. For now focused on speed. - -This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. - -This package is pure Go and without use of "unsafe". - -The `zstd` package is provided as open source software using a Go standard license. - -Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. - -## Installation - -Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. - -[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd) - -## Compressor - -### Status: - -STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively -used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates. - -There may still be specific combinations of data types/size/settings that could lead to edge cases, -so as always, testing is recommended. - -For now, a high speed (fastest) and medium-fast (default) compressor has been implemented. - -* The "Fastest" compression ratio is roughly equivalent to zstd level 1. 
-* The "Default" compression ratio is roughly equivalent to zstd level 3 (default). -* The "Better" compression ratio is roughly equivalent to zstd level 7. -* The "Best" compression ratio is roughly equivalent to zstd level 11. - -In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. -The compression ratio compared to stdlib is around level 3, but usually 3x as fast. - - -### Usage - -An Encoder can be used for either compressing a stream via the -`io.WriteCloser` interface supported by the Encoder or as multiple independent -tasks via the `EncodeAll` function. -Smaller encodes are encouraged to use the EncodeAll function. -Use `NewWriter` to create a new instance that can be used for both. - -To create a writer with default options, do like this: - -```Go -// Compress input to output. -func Compress(in io.Reader, out io.Writer) error { - enc, err := zstd.NewWriter(out) - if err != nil { - return err - } - _, err = io.Copy(enc, in) - if err != nil { - enc.Close() - return err - } - return enc.Close() -} -``` - -Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called. -Even if your encode fails, you should still call `Close()` to release any resources that may be held up. - -The above is fine for big encodes. However, whenever possible try to *reuse* the writer. - -To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output. -This will allow the encoder to reuse all resources and avoid wasteful allocations. - -Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part -of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is likely to change -in the future. So if you want to limit concurrency for future updates, specify the concurrency -you would like. - -You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined -compression settings can be specified. - -#### Future Compatibility Guarantees - -This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change. - -The goal will be to keep the default efficiency at the default zstd (level 3). -However the encoding should never be assumed to remain the same, -and you should not use hashes of compressed output for similarity checks. - -The Encoder can be assumed to produce the same output from the exact same code version. -However, the may be modes in the future that break this, -although they will not be enabled without an explicit option. - -This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder. - -Also note, that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59), -[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43) -and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames). - -#### Blocks - -For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`. - -`EncodeAll` will encode all input in src and append it to dst. -This function can be called concurrently, but each call will only run on a single goroutine. 
- -Encoded blocks can be concatenated and the result will be the combined input stream. -Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`. - -Especially when encoding blocks you should take special care to reuse the encoder. -This will effectively make it run without allocations after a warmup period. -To make it run completely without allocations, supply a destination buffer with space for all content. - -```Go -import "github.com/klauspost/compress/zstd" - -// Create a writer that caches compressors. -// For this operation type we supply a nil Reader. -var encoder, _ = zstd.NewWriter(nil) - -// Compress a buffer. -// If you have a destination buffer, the allocation in the call can also be eliminated. -func Compress(src []byte) []byte { - return encoder.EncodeAll(src, make([]byte, 0, len(src))) -} -``` - -You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)` -option when creating the writer. - -Using the Encoder for both a stream and individual blocks concurrently is safe. - -### Performance - -I have collected some speed examples to compare speed and compression against other compressors. - -* `file` is the input file. -* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library. -* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default"; 3 is "better", 4 is "best". -* `insize`/`outsize` is the input/output size. -* `millis` is the number of milliseconds used for compression. -* `mb/s` is megabytes (2^20 bytes) per second. - -``` -Silesia Corpus: -http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip - -This package: -file out level insize outsize millis mb/s -silesia.tar zskp 1 211947520 73101992 643 313.87 -silesia.tar zskp 2 211947520 67504318 969 208.38 -silesia.tar zskp 3 211947520 64595893 2007 100.68 -silesia.tar zskp 4 211947520 60995370 8825 22.90 - -cgo zstd: -silesia.tar zstd 1 211947520 73605392 543 371.56 -silesia.tar zstd 3 211947520 66793289 864 233.68 -silesia.tar zstd 6 211947520 62916450 1913 105.66 -silesia.tar zstd 9 211947520 60212393 5063 39.92 - -gzip, stdlib/this package: -silesia.tar gzstd 1 211947520 80007735 1654 122.21 -silesia.tar gzkp 1 211947520 80136201 1152 175.45 - -GOB stream of binary data. Highly compressible. -https://files.klauspost.com/compress/gob-stream.7z - -file out level insize outsize millis mb/s -gob-stream zskp 1 1911399616 235022249 3088 590.30 -gob-stream zskp 2 1911399616 205669791 3786 481.34 -gob-stream zskp 3 1911399616 175034659 9636 189.17 -gob-stream zskp 4 1911399616 165609838 50369 36.19 - -gob-stream zstd 1 1911399616 249810424 2637 691.26 -gob-stream zstd 3 1911399616 208192146 3490 522.31 -gob-stream zstd 6 1911399616 193632038 6687 272.56 -gob-stream zstd 9 1911399616 177620386 16175 112.70 - -gob-stream gzstd 1 1911399616 357382641 10251 177.82 -gob-stream gzkp 1 1911399616 359753026 5438 335.20 - -The test data for the Large Text Compression Benchmark is the first -10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. 
-http://mattmahoney.net/dc/textdata.html - -file out level insize outsize millis mb/s -enwik9 zskp 1 1000000000 343848582 3609 264.18 -enwik9 zskp 2 1000000000 317276632 5746 165.97 -enwik9 zskp 3 1000000000 292243069 12162 78.41 -enwik9 zskp 4 1000000000 262183768 82837 11.51 - -enwik9 zstd 1 1000000000 358072021 3110 306.65 -enwik9 zstd 3 1000000000 313734672 4784 199.35 -enwik9 zstd 6 1000000000 295138875 10290 92.68 -enwik9 zstd 9 1000000000 278348700 28549 33.40 - -enwik9 gzstd 1 1000000000 382578136 9604 99.30 -enwik9 gzkp 1 1000000000 383825945 6544 145.73 - -Highly compressible JSON file. -https://files.klauspost.com/compress/github-june-2days-2019.json.zst - -file out level insize outsize millis mb/s -github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40 -github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96 -github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75 -github-june-2days-2019.json zskp 4 6273951764 470320075 170190 35.16 - -github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 -github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 -github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 -github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 - -github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79 -github-june-2days-2019.json gzkp 1 6273951764 1125417694 21788 274.61 - -VM Image, Linux mint with a few installed applications: -https://files.klauspost.com/compress/rawstudio-mint14.7z - -file out level insize outsize millis mb/s -rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84 -rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07 -rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08 -rawstudio-mint14.tar zskp 4 8558382592 2965110639 857750 9.52 - -rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 -rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 -rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 -rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 - -rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40 -rawstudio-mint14.tar gzkp 1 8558382592 3962605659 45113 180.92 - -CSV data: -https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst - -file out level insize outsize millis mb/s -nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35 -nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44 -nyc-taxi-data-10M.csv zskp 3 3325605752 530289687 25239 125.66 -nyc-taxi-data-10M.csv zskp 4 3325605752 476268884 135958 23.33 - -nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 -nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 -nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 -nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 - -nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83 -nyc-taxi-data-10M.csv gzkp 1 3325605752 922257165 16780 189.00 -``` - -## Decompressor - -Staus: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. - -This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), -kindly supplied by [fuzzit.dev](https://fuzzit.dev/). -The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder, -or run it past its limits with ANY input provided. - -### Usage - -The package has been designed for two main usages, big streams of data and smaller in-memory buffers. 
-There are two main usages of the package for these. Both of them are accessed by creating a `Decoder`. - -For streaming use a simple setup could look like this: - -```Go -import "github.com/klauspost/compress/zstd" - -func Decompress(in io.Reader, out io.Writer) error { - d, err := zstd.NewReader(in) - if err != nil { - return err - } - defer d.Close() - - // Copy content... - _, err = io.Copy(out, d) - return err -} -``` - -It is important to use the "Close" function when you no longer need the Reader to stop running goroutines. -See "Allocation-less operation" below. - -For decoding buffers, it could look something like this: - -```Go -import "github.com/klauspost/compress/zstd" - -// Create a reader that caches decompressors. -// For this operation type we supply a nil Reader. -var decoder, _ = zstd.NewReader(nil) - -// Decompress a buffer. We don't supply a destination buffer, -// so it will be allocated by the decoder. -func Decompress(src []byte) ([]byte, error) { - return decoder.DecodeAll(src, nil) -} -``` - -Both of these cases should provide the functionality needed. -The decoder can be used for *concurrent* decompression of multiple buffers. -It will only allow a certain number of concurrent operations to run. -To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder. - -### Dictionaries - -Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed. - -Dictionaries are added individually to Decoders. -Dictionaries are generated by the `zstd --train` command and contains an initial state for the decoder. -To add a dictionary use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data. -Several dictionaries can be added at once. - -The dictionary will be used automatically for the data that specifies them. -A re-used Decoder will still contain the dictionaries registered. - -When registering multiple dictionaries with the same ID, the last one will be used. - -It is possible to use dictionaries when compressing data. - -To enable a dictionary use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used -and it will likely be used even if it doesn't improve compression. - -The used dictionary must be used to decompress the content. - -For any real gains, the dictionary should be built with similar data. -If an unsuitable dictionary is used the output may be slightly larger than using no dictionary. -Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data. -For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression). - -For now there is a fixed startup performance penalty for compressing content with dictionaries. -This will likely be improved over time. Just be aware to test performance when implementing. - -### Allocation-less operation - -The decoder has been designed to operate without allocations after a warmup. - -This means that you should *store* the decoder for best performance. -To re-use a stream decoder, use the `Reset(r io.Reader) error` to switch to another stream. -A decoder can safely be re-used even if the previous stream failed. - -To release the resources, you must call the `Close()` function on a decoder. -After this it can *no longer be reused*, but all running goroutines will be stopped. -So you *must* use this if you will no longer need the Reader. - -For decompressing smaller buffers a single decoder can be used. 
-When decoding buffers, you can supply a destination slice with length 0 and your expected capacity. -In this case no unneeded allocations should be made. - -### Concurrency - -The buffer decoder does everything on the same goroutine and does nothing concurrently. -It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that. - -The stream decoder operates on - -* One goroutine reads input and splits the input to several block decoders. -* A number of decoders will decode blocks. -* A goroutine coordinates these blocks and sends history from one to the next. - -So effectively this also means the decoder will "read ahead" and prepare data to always be available for output. - -Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency. - -In practice this means that concurrency is often limited to utilizing about 2 cores effectively. - - -### Benchmarks - -These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd). - -The first two are streaming decodes and the last are smaller inputs. - -``` -BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op -BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op - -BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op -BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op - -Concurrent performance: - -BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op - -BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 
allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op -``` - -This reflects the performance around May 2020, but this may be out of date. - -## Zstd inside ZIP files - -It is possible to use zstandard to compress individual files inside zip archives. -While this isn't widely supported it can be useful for internal files. - -To support the compression and decompression of these files you must register a compressor and decompressor. - -It is highly recommended registering the (de)compressors on individual zip Reader/Writer and NOT -use the global registration functions. The main reason for this is that 2 registrations from -different packages will result in a panic. - -It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip -files concurrently, and using a single instance will allow reusing some resources. - -See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for -how to compress and decompress files inside zip archives. - -# Contributions - -Contributions are always welcome. -For new features/fixes, remember to add tests and for performance enhancements include benchmarks. - -For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). - -This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/src/vendor/github.com/klauspost/compress/zstd/bitreader.go b/src/vendor/github.com/klauspost/compress/zstd/bitreader.go deleted file mode 100644 index 854458537..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "errors" - "io" - "math/bits" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 // Maybe use [16]byte, but shifting is awkward. - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBits(uint32(v))) - return nil -} - -// getBits will return n bits. n can be 0. 
-func (b *bitReader) getBits(n uint8) int { - if n == 0 /*|| b.bitsRead >= 64 */ { - return 0 - } - return b.getBitsFast(n) -} - -// getBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) getBitsFast(n uint8) int { - const regMask = 64 - 1 - v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - b.bitsRead += n - return int(v) -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - // 2 bounds checks. - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off >= 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -// overread returns true if more bits have been requested than is on the stream. -func (b *bitReader) overread() bool { - return b.bitsRead > 64 -} - -// remain returns the number of bits remaining. -func (b *bitReader) remain() uint { - return b.off*8 + 64 - uint(b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - -func highBits(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/src/vendor/github.com/klauspost/compress/zstd/bitwriter.go deleted file mode 100644 index 303ae90f9..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package zstd - -import "fmt" - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// bitMask16 is bitmasks. Has extra to avoid bounds check. 
-var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - -var bitMask32 = [32]uint32{ - 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, - 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, - 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, - 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, -} // up to 32 bits - -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits32NC will add up to 32 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits32NC(value uint32, bits uint8) { - b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. -func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.bitContainer >>= v << 3 - b.nBits &= 7 -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. 
-func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() error { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() - return nil -} - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/blockdec.go b/src/vendor/github.com/klauspost/compress/zstd/blockdec.go deleted file mode 100644 index 8a98c4562..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ /dev/null @@ -1,736 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "io" - "sync" - - "github.com/klauspost/compress/huff0" - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -type blockType uint8 - -//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex - -const ( - blockTypeRaw blockType = iota - blockTypeRLE - blockTypeCompressed - blockTypeReserved -) - -type literalsBlockType uint8 - -const ( - literalsBlockRaw literalsBlockType = iota - literalsBlockRLE - literalsBlockCompressed - literalsBlockTreeless -) - -const ( - // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) - maxCompressedBlockSize = 128 << 10 - - // Maximum possible block size (all Raw+Uncompressed). - maxBlockSize = (1 << 21) - 1 - - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header - maxCompressedLiteralSize = 1 << 18 - maxRLELiteralSize = 1 << 20 - maxMatchLen = 131074 - maxSequences = 0x7f00 + 0xffff - - // We support slightly less than the reference decoder to be able to - // use ints on 32 bit archs. - maxOffsetBits = 30 -) - -var ( - huffDecoderPool = sync.Pool{New: func() interface{} { - return &huff0.Scratch{} - }} - - fseDecoderPool = sync.Pool{New: func() interface{} { - return &fseDecoder{} - }} -) - -type blockDec struct { - // Raw source data of the block. - data []byte - dataStorage []byte - - // Destination of the decoded data. - dst []byte - - // Buffer for literals data. - literalBuf []byte - - // Window size of the block. - WindowSize uint64 - - history chan *history - input chan struct{} - result chan decodeOutput - sequenceBuf []seq - err error - decWG sync.WaitGroup - - // Frame to use for singlethreaded decoding. - // Should not be used by the decoder itself since parent may be another frame. - localFrame *frameDec - - // Block is RLE, this is the size. - RLESize uint32 - tmp [4]byte - - Type blockType - - // Is this the last block of a frame? - Last bool - - // Use less memory - lowMem bool -} - -func (b *blockDec) String() string { - if b == nil { - return "" - } - return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize) -} - -func newBlockDec(lowMem bool) *blockDec { - b := blockDec{ - lowMem: lowMem, - result: make(chan decodeOutput, 1), - input: make(chan struct{}, 1), - history: make(chan *history, 1), - } - b.decWG.Add(1) - go b.startDecoder() - return &b -} - -// reset will reset the block. 
-// Input must be a start of a block and will be at the end of the block when returned. -func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { - b.WindowSize = windowSize - tmp, err := br.readSmall(3) - if err != nil { - println("Reading block header:", err) - return err - } - bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) - b.Last = bh&1 != 0 - b.Type = blockType((bh >> 1) & 3) - // find size. - cSize := int(bh >> 3) - maxSize := maxBlockSize - switch b.Type { - case blockTypeReserved: - return ErrReservedBlockType - case blockTypeRLE: - b.RLESize = uint32(cSize) - if b.lowMem { - maxSize = cSize - } - cSize = 1 - case blockTypeCompressed: - if debugDecoder { - println("Data size on stream:", cSize) - } - b.RLESize = 0 - maxSize = maxCompressedBlockSize - if windowSize < maxCompressedBlockSize && b.lowMem { - maxSize = int(windowSize) - } - if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { - if debugDecoder { - printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) - } - return ErrCompressedSizeTooBig - } - case blockTypeRaw: - b.RLESize = 0 - // We do not need a destination for raw blocks. - maxSize = -1 - default: - panic("Invalid block type") - } - - // Read block data. - if cap(b.dataStorage) < cSize { - if b.lowMem || cSize > maxCompressedBlockSize { - b.dataStorage = make([]byte, 0, cSize) - } else { - b.dataStorage = make([]byte, 0, maxCompressedBlockSize) - } - } - if cap(b.dst) <= maxSize { - b.dst = make([]byte, 0, maxSize+1) - } - b.data, err = br.readBig(cSize, b.dataStorage) - if err != nil { - if debugDecoder { - println("Reading block:", err, "(", cSize, ")", len(b.data)) - printf("%T", br) - } - return err - } - return nil -} - -// sendEOF will make the decoder send EOF on this frame. -func (b *blockDec) sendErr(err error) { - b.Last = true - b.Type = blockTypeReserved - b.err = err - b.input <- struct{}{} -} - -// Close will release resources. -// Closed blockDec cannot be reset. -func (b *blockDec) Close() { - close(b.input) - close(b.history) - close(b.result) - b.decWG.Wait() -} - -// decodeAsync will prepare decoding the block when it receives input. -// This will separate output and history. -func (b *blockDec) startDecoder() { - defer b.decWG.Done() - for range b.input { - //println("blockDec: Got block input") - switch b.Type { - case blockTypeRLE: - if cap(b.dst) < int(b.RLESize) { - if b.lowMem { - b.dst = make([]byte, b.RLESize) - } else { - b.dst = make([]byte, maxBlockSize) - } - } - o := decodeOutput{ - d: b, - b: b.dst[:b.RLESize], - err: nil, - } - v := b.data[0] - for i := range o.b { - o.b[i] = v - } - hist := <-b.history - hist.append(o.b) - b.result <- o - case blockTypeRaw: - o := decodeOutput{ - d: b, - b: b.data, - err: nil, - } - hist := <-b.history - hist.append(o.b) - b.result <- o - case blockTypeCompressed: - b.dst = b.dst[:0] - err := b.decodeCompressed(nil) - o := decodeOutput{ - d: b, - b: b.dst, - err: err, - } - if debugDecoder { - println("Decompressed to", len(b.dst), "bytes, error:", err) - } - b.result <- o - case blockTypeReserved: - // Used for returning errors. - <-b.history - b.result <- decodeOutput{ - d: b, - b: nil, - err: b.err, - } - default: - panic("Invalid block type") - } - if debugDecoder { - println("blockDec: Finished block") - } - } -} - -// decodeAsync will prepare decoding the block when it receives the history. -// If history is provided, it will not fetch it from the channel. 
-func (b *blockDec) decodeBuf(hist *history) error { - switch b.Type { - case blockTypeRLE: - if cap(b.dst) < int(b.RLESize) { - if b.lowMem { - b.dst = make([]byte, b.RLESize) - } else { - b.dst = make([]byte, maxBlockSize) - } - } - b.dst = b.dst[:b.RLESize] - v := b.data[0] - for i := range b.dst { - b.dst[i] = v - } - hist.appendKeep(b.dst) - return nil - case blockTypeRaw: - hist.appendKeep(b.data) - return nil - case blockTypeCompressed: - saved := b.dst - b.dst = hist.b - hist.b = nil - err := b.decodeCompressed(hist) - if debugDecoder { - println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) - } - hist.b = b.dst - b.dst = saved - return err - case blockTypeReserved: - // Used for returning errors. - return b.err - default: - panic("Invalid block type") - } -} - -// decodeCompressed will start decompressing a block. -// If no history is supplied the decoder will decodeAsync as much as possible -// before fetching from blockDec.history -func (b *blockDec) decodeCompressed(hist *history) error { - in := b.data - delayedHistory := hist == nil - - if delayedHistory { - // We must always grab history. - defer func() { - if hist == nil { - <-b.history - } - }() - } - // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header - if len(in) < 2 { - return ErrBlockTooSmall - } - litType := literalsBlockType(in[0] & 3) - var litRegenSize int - var litCompSize int - sizeFormat := (in[0] >> 2) & 3 - var fourStreams bool - switch litType { - case literalsBlockRaw, literalsBlockRLE: - switch sizeFormat { - case 0, 2: - // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. - litRegenSize = int(in[0] >> 3) - in = in[1:] - case 1: - // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. - litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) - in = in[2:] - case 3: - // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. - if len(in) < 3 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall - } - litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) - in = in[3:] - } - case literalsBlockCompressed, literalsBlockTreeless: - switch sizeFormat { - case 0, 1: - // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
- if len(in) < 3 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) - litRegenSize = int(n & 1023) - litCompSize = int(n >> 10) - fourStreams = sizeFormat == 1 - in = in[3:] - case 2: - fourStreams = true - if len(in) < 4 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) - litRegenSize = int(n & 16383) - litCompSize = int(n >> 14) - in = in[4:] - case 3: - fourStreams = true - if len(in) < 5 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) - litRegenSize = int(n & 262143) - litCompSize = int(n >> 18) - in = in[5:] - } - } - if debugDecoder { - println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) - } - var literals []byte - var huff *huff0.Scratch - switch litType { - case literalsBlockRaw: - if len(in) < litRegenSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) - return ErrBlockTooSmall - } - literals = in[:litRegenSize] - in = in[litRegenSize:] - //printf("Found %d uncompressed literals\n", litRegenSize) - case literalsBlockRLE: - if len(in) < 1 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) - return ErrBlockTooSmall - } - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, litRegenSize) - } else { - if litRegenSize > maxCompressedLiteralSize { - // Exceptional - b.literalBuf = make([]byte, litRegenSize) - } else { - b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize) - - } - } - } - literals = b.literalBuf[:litRegenSize] - v := in[0] - for i := range literals { - literals[i] = v - } - in = in[1:] - if debugDecoder { - printf("Found %d RLE compressed literals\n", litRegenSize) - } - case literalsBlockTreeless: - if len(in) < litCompSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return ErrBlockTooSmall - } - // Store compressed literals, so we defer decoding until we get history. - literals = in[:litCompSize] - in = in[litCompSize:] - if debugDecoder { - printf("Found %d compressed literals\n", litCompSize) - } - case literalsBlockCompressed: - if len(in) < litCompSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return ErrBlockTooSmall - } - literals = in[:litCompSize] - in = in[litCompSize:] - huff = huffDecoderPool.Get().(*huff0.Scratch) - var err error - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize) - } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) - } - } - if huff == nil { - huff = &huff0.Scratch{} - } - huff, literals, err = huff0.ReadTable(literals, huff) - if err != nil { - println("reading huffman table:", err) - return err - } - // Use our out buffer. 
- if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - if err != nil { - println("decoding compressed literals:", err) - return err - } - // Make sure we don't leak our literals buffer - if len(literals) != litRegenSize { - return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - if debugDecoder { - printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) - } - } - - // Decode Sequences - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section - if len(in) < 1 { - return ErrBlockTooSmall - } - seqHeader := in[0] - nSeqs := 0 - switch { - case seqHeader == 0: - in = in[1:] - case seqHeader < 128: - nSeqs = int(seqHeader) - in = in[1:] - case seqHeader < 255: - if len(in) < 2 { - return ErrBlockTooSmall - } - nSeqs = int(seqHeader-128)<<8 | int(in[1]) - in = in[2:] - case seqHeader == 255: - if len(in) < 3 { - return ErrBlockTooSmall - } - nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) - in = in[3:] - } - // Allocate sequences - if cap(b.sequenceBuf) < nSeqs { - if b.lowMem { - b.sequenceBuf = make([]seq, nSeqs) - } else { - // Allocate max - b.sequenceBuf = make([]seq, nSeqs, maxSequences) - } - } else { - // Reuse buffer - b.sequenceBuf = b.sequenceBuf[:nSeqs] - } - var seqs = &sequenceDecs{} - if nSeqs > 0 { - if len(in) < 1 { - return ErrBlockTooSmall - } - br := byteReader{b: in, off: 0} - compMode := br.Uint8() - br.advance(1) - if debugDecoder { - printf("Compression modes: 0b%b", compMode) - } - for i := uint(0); i < 3; i++ { - mode := seqCompMode((compMode >> (6 - i*2)) & 3) - if debugDecoder { - println("Table", tableIndex(i), "is", mode) - } - var seq *sequenceDec - switch tableIndex(i) { - case tableLiteralLengths: - seq = &seqs.litLengths - case tableOffsets: - seq = &seqs.offsets - case tableMatchLengths: - seq = &seqs.matchLengths - default: - panic("unknown table") - } - switch mode { - case compModePredefined: - seq.fse = &fsePredef[i] - case compModeRLE: - if br.remain() < 1 { - return ErrBlockTooSmall - } - v := br.Uint8() - br.advance(1) - dec := fseDecoderPool.Get().(*fseDecoder) - symb, err := decSymbolValue(v, symbolTableX[i]) - if err != nil { - printf("RLE Transform table (%v) error: %v", tableIndex(i), err) - return err - } - dec.setRLE(symb) - seq.fse = dec - if debugDecoder { - printf("RLE set to %+v, code: %v", symb, v) - } - case compModeFSE: - println("Reading table for", tableIndex(i)) - dec := fseDecoderPool.Get().(*fseDecoder) - err := dec.readNCount(&br, uint16(maxTableSymbol[i])) - if err != nil { - println("Read table error:", err) - return err - } - err = dec.transform(symbolTableX[i]) - if err != nil { - println("Transform table error:", err) - return err - } - if debugDecoder { - println("Read table ok", "symbolLen:", dec.symbolLen) - } - seq.fse = dec - case compModeRepeat: - seq.repeat = true - } - if br.overread() { - return io.ErrUnexpectedEOF - } - } - in = br.unread() - } - - // Wait for history. - // All time spent after this is critical since it is strictly sequential. - if hist == nil { - hist = <-b.history - if hist.error { - return ErrDecoderClosed - } - } - - // Decode treeless literal block. - if litType == literalsBlockTreeless { - // TODO: We could send the history early WITHOUT the stream history. - // This would allow decoding treeless literals before the byte history is available. 
- // Silencia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless. - // So not much obvious gain here. - - if hist.huffTree == nil { - return errors.New("literal block was treeless, but no history was defined") - } - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize) - } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) - } - } - var err error - // Use our out buffer. - huff = hist.huffTree - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - // Make sure we don't leak our literals buffer - if err != nil { - println("decompressing literals:", err) - return err - } - if len(literals) != litRegenSize { - return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - } else { - if hist.huffTree != nil && huff != nil { - if hist.dict == nil || hist.dict.litEnc != hist.huffTree { - huffDecoderPool.Put(hist.huffTree) - } - hist.huffTree = nil - } - } - if huff != nil { - hist.huffTree = huff - } - if debugDecoder { - println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.") - } - - if nSeqs == 0 { - // Decompressed content is defined entirely as Literals Section content. - b.dst = append(b.dst, literals...) - if delayedHistory { - hist.append(literals) - } - return nil - } - - seqs, err := seqs.mergeHistory(&hist.decoders) - if err != nil { - return err - } - if debugDecoder { - println("History merged ok") - } - br := &bitReader{} - if err := br.init(in); err != nil { - return err - } - - // TODO: Investigate if sending history without decoders are faster. - // This would allow the sequences to be decoded async and only have to construct stream history. - // If only recent offsets were not transferred, this would be an obvious win. - // Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded. - - hbytes := hist.b - if len(hbytes) > hist.windowSize { - hbytes = hbytes[len(hbytes)-hist.windowSize:] - // We do not need history any more. - if hist.dict != nil { - hist.dict.content = nil - } - } - - if err := seqs.initialize(br, hist, literals, b.dst); err != nil { - println("initializing sequences:", err) - return err - } - - err = seqs.decode(nSeqs, br, hbytes) - if err != nil { - return err - } - if !br.finished() { - return fmt.Errorf("%d extra bits on block, should be 0", br.remain()) - } - - err = br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } - if len(b.data) > maxCompressedBlockSize { - return fmt.Errorf("compressed block size too large (%d)", len(b.data)) - } - // Set output and release references. - b.dst = seqs.out - seqs.out, seqs.literals, seqs.hist = nil, nil, nil - - if !delayedHistory { - // If we don't have delayed history, no need to update. - hist.recentOffsets = seqs.prevOffset - return nil - } - if b.Last { - // if last block we don't care about history. 
- println("Last block, no history returned") - hist.b = hist.b[:0] - return nil - } - hist.append(b.dst) - hist.recentOffsets = seqs.prevOffset - if debugDecoder { - println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.") - } - - return nil -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/blockenc.go b/src/vendor/github.com/klauspost/compress/zstd/blockenc.go deleted file mode 100644 index 3df185ee4..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ /dev/null @@ -1,871 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "math" - "math/bits" - - "github.com/klauspost/compress/huff0" -) - -type blockEnc struct { - size int - literals []byte - sequences []seq - coders seqCoders - litEnc *huff0.Scratch - dictLitEnc *huff0.Scratch - wr bitWriter - - extraLits int - output []byte - recentOffsets [3]uint32 - prevRecentOffsets [3]uint32 - - last bool - lowMem bool -} - -// init should be used once the block has been created. -// If called more than once, the effect is the same as calling reset. -func (b *blockEnc) init() { - if b.lowMem { - // 1K literals - if cap(b.literals) < 1<<10 { - b.literals = make([]byte, 0, 1<<10) - } - const defSeqs = 20 - if cap(b.sequences) < defSeqs { - b.sequences = make([]seq, 0, defSeqs) - } - // 1K - if cap(b.output) < 1<<10 { - b.output = make([]byte, 0, 1<<10) - } - } else { - if cap(b.literals) < maxCompressedBlockSize { - b.literals = make([]byte, 0, maxCompressedBlockSize) - } - const defSeqs = 200 - if cap(b.sequences) < defSeqs { - b.sequences = make([]seq, 0, defSeqs) - } - if cap(b.output) < maxCompressedBlockSize { - b.output = make([]byte, 0, maxCompressedBlockSize) - } - } - - if b.coders.mlEnc == nil { - b.coders.mlEnc = &fseEncoder{} - b.coders.mlPrev = &fseEncoder{} - b.coders.ofEnc = &fseEncoder{} - b.coders.ofPrev = &fseEncoder{} - b.coders.llEnc = &fseEncoder{} - b.coders.llPrev = &fseEncoder{} - } - b.litEnc = &huff0.Scratch{WantLogLess: 4} - b.reset(nil) -} - -// initNewEncode can be used to reset offsets and encoders to the initial state. -func (b *blockEnc) initNewEncode() { - b.recentOffsets = [3]uint32{1, 4, 8} - b.litEnc.Reuse = huff0.ReusePolicyNone - b.coders.setPrev(nil, nil, nil) -} - -// reset will reset the block for a new encode, but in the same stream, -// meaning that state will be carried over, but the block content is reset. -// If a previous block is provided, the recent offsets are carried over. -func (b *blockEnc) reset(prev *blockEnc) { - b.extraLits = 0 - b.literals = b.literals[:0] - b.size = 0 - b.sequences = b.sequences[:0] - b.output = b.output[:0] - b.last = false - if prev != nil { - b.recentOffsets = prev.prevRecentOffsets - } - b.dictLitEnc = nil -} - -// reset will reset the block for a new encode, but in the same stream, -// meaning that state will be carried over, but the block content is reset. -// If a previous block is provided, the recent offsets are carried over. -func (b *blockEnc) swapEncoders(prev *blockEnc) { - b.coders.swap(&prev.coders) - b.litEnc, prev.litEnc = prev.litEnc, b.litEnc -} - -// blockHeader contains the information for a block header. -type blockHeader uint32 - -// setLast sets the 'last' indicator on a block. 
-func (h *blockHeader) setLast(b bool) { - if b { - *h = *h | 1 - } else { - const mask = (1 << 24) - 2 - *h = *h & mask - } -} - -// setSize will store the compressed size of a block. -func (h *blockHeader) setSize(v uint32) { - const mask = 7 - *h = (*h)&mask | blockHeader(v<<3) -} - -// setType sets the block type. -func (h *blockHeader) setType(t blockType) { - const mask = 1 | (((1 << 24) - 1) ^ 7) - *h = (*h & mask) | blockHeader(t<<1) -} - -// appendTo will append the block header to a slice. -func (h blockHeader) appendTo(b []byte) []byte { - return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) -} - -// String returns a string representation of the block. -func (h blockHeader) String() string { - return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) -} - -// literalsHeader contains literals header information. -type literalsHeader uint64 - -// setType can be used to set the type of literal block. -func (h *literalsHeader) setType(t literalsBlockType) { - const mask = math.MaxUint64 - 3 - *h = (*h & mask) | literalsHeader(t) -} - -// setSize can be used to set a single size, for uncompressed and RLE content. -func (h *literalsHeader) setSize(regenLen int) { - inBits := bits.Len32(uint32(regenLen)) - // Only retain 2 bits - const mask = 3 - lh := uint64(*h & mask) - switch { - case inBits < 5: - lh |= (uint64(regenLen) << 3) | (1 << 60) - if debugEncoder { - got := int(lh>>3) & 0xff - if got != regenLen { - panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) - } - } - case inBits < 12: - lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) - case inBits < 20: - lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) - default: - panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) - } - *h = literalsHeader(lh) -} - -// setSizes will set the size of a compressed literals section and the input length. -func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { - compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) - // Only retain 2 bits - const mask = 3 - lh := uint64(*h & mask) - switch { - case compBits <= 10 && inBits <= 10: - if !single { - lh |= 1 << 2 - } - lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) - if debugEncoder { - const mmask = (1 << 24) - 1 - n := (lh >> 4) & mmask - if int(n&1023) != inLen { - panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) - } - if int(n>>10) != compLen { - panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) - } - } - case compBits <= 14 && inBits <= 14: - lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) - if single { - panic("single stream used with more than 10 bits length.") - } - case compBits <= 18 && inBits <= 18: - lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) - if single { - panic("single stream used with more than 10 bits length.") - } - default: - panic("internal error: block too big") - } - *h = literalsHeader(lh) -} - -// appendTo will append the literals header to a byte slice. 
-func (h literalsHeader) appendTo(b []byte) []byte { - size := uint8(h >> 60) - switch size { - case 1: - b = append(b, uint8(h)) - case 2: - b = append(b, uint8(h), uint8(h>>8)) - case 3: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) - case 4: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) - case 5: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) - default: - panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) - } - return b -} - -// size returns the output size with currently set values. -func (h literalsHeader) size() int { - return int(h >> 60) -} - -func (h literalsHeader) String() string { - return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) -} - -// pushOffsets will push the recent offsets to the backup store. -func (b *blockEnc) pushOffsets() { - b.prevRecentOffsets = b.recentOffsets -} - -// pushOffsets will push the recent offsets to the backup store. -func (b *blockEnc) popOffsets() { - b.recentOffsets = b.prevRecentOffsets -} - -// matchOffset will adjust recent offsets and return the adjusted one, -// if it matches a previous offset. -func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { - // Check if offset is one of the recent offsets. - // Adjusts the output offset accordingly. - // Gives a tiny bit of compression, typically around 1%. - if true { - if lits > 0 { - switch offset { - case b.recentOffsets[0]: - offset = 1 - case b.recentOffsets[1]: - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 2 - case b.recentOffsets[2]: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 3 - default: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset += 3 - } - } else { - switch offset { - case b.recentOffsets[1]: - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 1 - case b.recentOffsets[2]: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 2 - case b.recentOffsets[0] - 1: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 3 - default: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset += 3 - } - } - } else { - offset += 3 - } - return offset -} - -// encodeRaw can be used to set the output to a raw representation of supplied bytes. -func (b *blockEnc) encodeRaw(a []byte) { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(a))) - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output[:0]) - b.output = append(b.output, a...) - if debugEncoder { - println("Adding RAW block, length", len(a), "last:", b.last) - } -} - -// encodeRaw can be used to set the output to a raw representation of supplied bytes. -func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(src))) - bh.setType(blockTypeRaw) - dst = bh.appendTo(dst) - dst = append(dst, src...) - if debugEncoder { - println("Adding RAW block, length", len(src), "last:", b.last) - } - return dst -} - -// encodeLits can be used if the block is only litLen. 
-func (b *blockEnc) encodeLits(lits []byte, raw bool) error { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(lits))) - - // Don't compress extremely small blocks - if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { - if debugEncoder { - println("Adding RAW block, length", len(lits), "last:", b.last) - } - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits...) - return nil - } - - var ( - out []byte - reUsed, single bool - err error - ) - if b.dictLitEnc != nil { - b.litEnc.TransferCTable(b.dictLitEnc) - b.litEnc.Reuse = huff0.ReusePolicyAllow - b.dictLitEnc = nil - } - if len(lits) >= 1024 { - // Use 4 Streams. - out, reUsed, err = huff0.Compress4X(lits, b.litEnc) - } else if len(lits) > 32 { - // Use 1 stream - single = true - out, reUsed, err = huff0.Compress1X(lits, b.litEnc) - } else { - err = huff0.ErrIncompressible - } - - switch err { - case huff0.ErrIncompressible: - if debugEncoder { - println("Adding RAW block, length", len(lits), "last:", b.last) - } - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits...) - return nil - case huff0.ErrUseRLE: - if debugEncoder { - println("Adding RLE block, length", len(lits)) - } - bh.setType(blockTypeRLE) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits[0]) - return nil - case nil: - default: - return err - } - // Compressed... - // Now, allow reuse - b.litEnc.Reuse = huff0.ReusePolicyAllow - bh.setType(blockTypeCompressed) - var lh literalsHeader - if reUsed { - if debugEncoder { - println("Reused tree, compressed to", len(out)) - } - lh.setType(literalsBlockTreeless) - } else { - if debugEncoder { - println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) - } - lh.setType(literalsBlockCompressed) - } - // Set sizes - lh.setSizes(len(out), len(lits), single) - bh.setSize(uint32(len(out) + lh.size() + 1)) - - // Write block headers. - b.output = bh.appendTo(b.output) - b.output = lh.appendTo(b.output) - // Add compressed data. - b.output = append(b.output, out...) - // No sequences. - b.output = append(b.output, 0) - return nil -} - -// fuzzFseEncoder can be used to fuzz the FSE encoder. -func fuzzFseEncoder(data []byte) int { - if len(data) > maxSequences || len(data) < 2 { - return 0 - } - enc := fseEncoder{} - hist := enc.Histogram()[:256] - maxSym := uint8(0) - for i, v := range data { - v = v & 63 - data[i] = v - hist[v]++ - if v > maxSym { - maxSym = v - } - } - if maxSym == 0 { - // All 0 - return 0 - } - maxCount := func(a []uint32) int { - var max uint32 - for _, v := range a { - if v > max { - max = v - } - } - return int(max) - } - cnt := maxCount(hist[:maxSym]) - if cnt == len(data) { - // RLE - return 0 - } - enc.HistogramFinished(maxSym, cnt) - err := enc.normalizeCount(len(data)) - if err != nil { - return 0 - } - _, err = enc.writeCount(nil) - if err != nil { - panic(err) - } - return 1 -} - -// encode will encode the block and append the output in b.output. -// Previous offset codes must be pushed if more blocks are expected. -func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { - if len(b.sequences) == 0 { - return b.encodeLits(b.literals, rawAllLits) - } - // We want some difference to at least account for the headers. 
- saved := b.size - len(b.literals) - (b.size >> 5) - if saved < 16 { - if org == nil { - return errIncompressible - } - b.popOffsets() - return b.encodeLits(org, rawAllLits) - } - - var bh blockHeader - var lh literalsHeader - bh.setLast(b.last) - bh.setType(blockTypeCompressed) - // Store offset of the block header. Needed when we know the size. - bhOffset := len(b.output) - b.output = bh.appendTo(b.output) - - var ( - out []byte - reUsed, single bool - err error - ) - if b.dictLitEnc != nil { - b.litEnc.TransferCTable(b.dictLitEnc) - b.litEnc.Reuse = huff0.ReusePolicyAllow - b.dictLitEnc = nil - } - if len(b.literals) >= 1024 && !raw { - // Use 4 Streams. - out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) - } else if len(b.literals) > 32 && !raw { - // Use 1 stream - single = true - out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) - } else { - err = huff0.ErrIncompressible - } - - switch err { - case huff0.ErrIncompressible: - lh.setType(literalsBlockRaw) - lh.setSize(len(b.literals)) - b.output = lh.appendTo(b.output) - b.output = append(b.output, b.literals...) - if debugEncoder { - println("Adding literals RAW, length", len(b.literals)) - } - case huff0.ErrUseRLE: - lh.setType(literalsBlockRLE) - lh.setSize(len(b.literals)) - b.output = lh.appendTo(b.output) - b.output = append(b.output, b.literals[0]) - if debugEncoder { - println("Adding literals RLE") - } - case nil: - // Compressed litLen... - if reUsed { - if debugEncoder { - println("reused tree") - } - lh.setType(literalsBlockTreeless) - } else { - if debugEncoder { - println("new tree, size:", len(b.litEnc.OutTable)) - } - lh.setType(literalsBlockCompressed) - if debugEncoder { - _, _, err := huff0.ReadTable(out, nil) - if err != nil { - panic(err) - } - } - } - lh.setSizes(len(out), len(b.literals), single) - if debugEncoder { - printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) - println("Adding literal header:", lh) - } - b.output = lh.appendTo(b.output) - b.output = append(b.output, out...) - b.litEnc.Reuse = huff0.ReusePolicyAllow - if debugEncoder { - println("Adding literals compressed") - } - default: - if debugEncoder { - println("Adding literals ERROR:", err) - } - return err - } - // Sequence compression - - // Write the number of sequences - switch { - case len(b.sequences) < 128: - b.output = append(b.output, uint8(len(b.sequences))) - case len(b.sequences) < 0x7f00: // TODO: this could be wrong - n := len(b.sequences) - b.output = append(b.output, 128+uint8(n>>8), uint8(n)) - default: - n := len(b.sequences) - 0x7f00 - b.output = append(b.output, 255, uint8(n), uint8(n>>8)) - } - if debugEncoder { - println("Encoding", len(b.sequences), "sequences") - } - b.genCodes() - llEnc := b.coders.llEnc - ofEnc := b.coders.ofEnc - mlEnc := b.coders.mlEnc - err = llEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - err = ofEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - err = mlEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - - // Choose the best compression mode for each type. - // Will evaluate the new vs predefined and previous. - chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { - // See if predefined/previous is better - hist := cur.count[:cur.symbolLen] - nSize := cur.approxSize(hist) + cur.maxHeaderSize() - predefSize := preDef.approxSize(hist) - prevSize := prev.approxSize(hist) - - // Add a small penalty for new encoders. 
- // Don't bother with extremely small (<2 byte gains). - nSize = nSize + (nSize+2*8*16)>>4 - switch { - case predefSize <= prevSize && predefSize <= nSize || forcePreDef: - if debugEncoder { - println("Using predefined", predefSize>>3, "<=", nSize>>3) - } - return preDef, compModePredefined - case prevSize <= nSize: - if debugEncoder { - println("Using previous", prevSize>>3, "<=", nSize>>3) - } - return prev, compModeRepeat - default: - if debugEncoder { - println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") - println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) - } - return cur, compModeFSE - } - } - - // Write compression mode - var mode uint8 - if llEnc.useRLE { - mode |= uint8(compModeRLE) << 6 - llEnc.setRLE(b.sequences[0].llCode) - if debugEncoder { - println("llEnc.useRLE") - } - } else { - var m seqCompMode - llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) - mode |= uint8(m) << 6 - } - if ofEnc.useRLE { - mode |= uint8(compModeRLE) << 4 - ofEnc.setRLE(b.sequences[0].ofCode) - if debugEncoder { - println("ofEnc.useRLE") - } - } else { - var m seqCompMode - ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) - mode |= uint8(m) << 4 - } - - if mlEnc.useRLE { - mode |= uint8(compModeRLE) << 2 - mlEnc.setRLE(b.sequences[0].mlCode) - if debugEncoder { - println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) - } - } else { - var m seqCompMode - mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) - mode |= uint8(m) << 2 - } - b.output = append(b.output, mode) - if debugEncoder { - printf("Compression modes: 0b%b", mode) - } - b.output, err = llEnc.writeCount(b.output) - if err != nil { - return err - } - start := len(b.output) - b.output, err = ofEnc.writeCount(b.output) - if err != nil { - return err - } - if false { - println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) - for i, v := range ofEnc.norm[:ofEnc.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) - } - } - b.output, err = mlEnc.writeCount(b.output) - if err != nil { - return err - } - - // Maybe in block? - wr := &b.wr - wr.reset(b.output) - - var ll, of, ml cState - - // Current sequence - seq := len(b.sequences) - 1 - s := b.sequences[seq] - llEnc.setBits(llBitsTable[:]) - mlEnc.setBits(mlBitsTable[:]) - ofEnc.setBits(nil) - - llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] - - // We have 3 bounds checks here (and in the loop). - // Since we are iterating backwards it is kinda hard to avoid. - llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] - ll.init(wr, &llEnc.ct, llB) - of.init(wr, &ofEnc.ct, ofB) - wr.flush32() - ml.init(wr, &mlEnc.ct, mlB) - - // Each of these lookups also generates a bounds check. 
- wr.addBits32NC(s.litLen, llB.outBits) - wr.addBits32NC(s.matchLen, mlB.outBits) - wr.flush32() - wr.addBits32NC(s.offset, ofB.outBits) - if debugSequences { - println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) - } - seq-- - if llEnc.maxBits+mlEnc.maxBits+ofEnc.maxBits <= 32 { - // No need to flush (common) - for seq >= 0 { - s = b.sequences[seq] - wr.flush32() - llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] - // tabelog max is 8 for all. - of.encode(ofB) - ml.encode(mlB) - ll.encode(llB) - wr.flush32() - - // We checked that all can stay within 32 bits - wr.addBits32NC(s.litLen, llB.outBits) - wr.addBits32NC(s.matchLen, mlB.outBits) - wr.addBits32NC(s.offset, ofB.outBits) - - if debugSequences { - println("Encoded seq", seq, s) - } - - seq-- - } - } else { - for seq >= 0 { - s = b.sequences[seq] - wr.flush32() - llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] - // tabelog max is below 8 for each. - of.encode(ofB) - ml.encode(mlB) - ll.encode(llB) - wr.flush32() - - // ml+ll = max 32 bits total - wr.addBits32NC(s.litLen, llB.outBits) - wr.addBits32NC(s.matchLen, mlB.outBits) - wr.flush32() - wr.addBits32NC(s.offset, ofB.outBits) - - if debugSequences { - println("Encoded seq", seq, s) - } - - seq-- - } - } - ml.flush(mlEnc.actualTableLog) - of.flush(ofEnc.actualTableLog) - ll.flush(llEnc.actualTableLog) - err = wr.close() - if err != nil { - return err - } - b.output = wr.out - - if len(b.output)-3-bhOffset >= b.size { - // Maybe even add a bigger margin. - b.litEnc.Reuse = huff0.ReusePolicyNone - return errIncompressible - } - - // Size is output minus block header. - bh.setSize(uint32(len(b.output)-bhOffset) - 3) - if debugEncoder { - println("Rewriting block header", bh) - } - _ = bh.appendTo(b.output[bhOffset:bhOffset]) - b.coders.setPrev(llEnc, mlEnc, ofEnc) - return nil -} - -var errIncompressible = errors.New("incompressible") - -func (b *blockEnc) genCodes() { - if len(b.sequences) == 0 { - // nothing to do - return - } - - if len(b.sequences) > math.MaxUint16 { - panic("can only encode up to 64K sequences") - } - // No bounds checks after here: - llH := b.coders.llEnc.Histogram()[:256] - ofH := b.coders.ofEnc.Histogram()[:256] - mlH := b.coders.mlEnc.Histogram()[:256] - for i := range llH { - llH[i] = 0 - } - for i := range ofH { - ofH[i] = 0 - } - for i := range mlH { - mlH[i] = 0 - } - - var llMax, ofMax, mlMax uint8 - for i, seq := range b.sequences { - v := llCode(seq.litLen) - seq.llCode = v - llH[v]++ - if v > llMax { - llMax = v - } - - v = ofCode(seq.offset) - seq.ofCode = v - ofH[v]++ - if v > ofMax { - ofMax = v - } - - v = mlCode(seq.matchLen) - seq.mlCode = v - mlH[v]++ - if v > mlMax { - mlMax = v - if debugAsserts && mlMax > maxMatchLengthSymbol { - panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen)) - } - } - b.sequences[i] = seq - } - maxCount := func(a []uint32) int { - var max uint32 - for _, v := range a { - if v > max { - max = v - } - } - return int(max) - } - if debugAsserts && mlMax > maxMatchLengthSymbol { - panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) - } - if debugAsserts && ofMax > maxOffsetBits { - panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax)) - } - if debugAsserts && llMax > maxLiteralLengthSymbol { - panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) - } - - b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1])) - 
b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1])) - b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1])) -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/src/vendor/github.com/klauspost/compress/zstd/blocktype_string.go deleted file mode 100644 index 01a01e486..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/blocktype_string.go +++ /dev/null @@ -1,85 +0,0 @@ -// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. - -package zstd - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[blockTypeRaw-0] - _ = x[blockTypeRLE-1] - _ = x[blockTypeCompressed-2] - _ = x[blockTypeReserved-3] -} - -const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" - -var _blockType_index = [...]uint8{0, 12, 24, 43, 60} - -func (i blockType) String() string { - if i >= blockType(len(_blockType_index)-1) { - return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[literalsBlockRaw-0] - _ = x[literalsBlockRLE-1] - _ = x[literalsBlockCompressed-2] - _ = x[literalsBlockTreeless-3] -} - -const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" - -var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} - -func (i literalsBlockType) String() string { - if i >= literalsBlockType(len(_literalsBlockType_index)-1) { - return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[compModePredefined-0] - _ = x[compModeRLE-1] - _ = x[compModeFSE-2] - _ = x[compModeRepeat-3] -} - -const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" - -var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} - -func (i seqCompMode) String() string { - if i >= seqCompMode(len(_seqCompMode_index)-1) { - return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[tableLiteralLengths-0] - _ = x[tableOffsets-1] - _ = x[tableMatchLengths-2] -} - -const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" - -var _tableIndex_index = [...]uint8{0, 19, 31, 48} - -func (i tableIndex) String() string { - if i >= tableIndex(len(_tableIndex_index)-1) { - return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/src/vendor/github.com/klauspost/compress/zstd/bytebuf.go deleted file mode 100644 index aab71c6cf..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" - "io" - "io/ioutil" -) - -type byteBuffer interface { - // Read up to 8 bytes. - // Returns io.ErrUnexpectedEOF if this cannot be satisfied. - readSmall(n int) ([]byte, error) - - // Read >8 bytes. - // MAY use the destination slice. - readBig(n int, dst []byte) ([]byte, error) - - // Read a single byte. - readByte() (byte, error) - - // Skip n bytes. - skipN(n int) error -} - -// in-memory buffer -type byteBuf []byte - -func (b *byteBuf) readSmall(n int) ([]byte, error) { - if debugAsserts && n > 8 { - panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) - } - bb := *b - if len(bb) < n { - return nil, io.ErrUnexpectedEOF - } - r := bb[:n] - *b = bb[n:] - return r, nil -} - -func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { - bb := *b - if len(bb) < n { - return nil, io.ErrUnexpectedEOF - } - r := bb[:n] - *b = bb[n:] - return r, nil -} - -func (b *byteBuf) remain() []byte { - return *b -} - -func (b *byteBuf) readByte() (byte, error) { - bb := *b - if len(bb) < 1 { - return 0, nil - } - r := bb[0] - *b = bb[1:] - return r, nil -} - -func (b *byteBuf) skipN(n int) error { - bb := *b - if len(bb) < n { - return io.ErrUnexpectedEOF - } - *b = bb[n:] - return nil -} - -// wrapper around a reader. -type readerWrapper struct { - r io.Reader - tmp [8]byte -} - -func (r *readerWrapper) readSmall(n int) ([]byte, error) { - if debugAsserts && n > 8 { - panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) - } - n2, err := io.ReadFull(r.r, r.tmp[:n]) - // We only really care about the actual bytes read. 
- if err != nil { - if err == io.EOF { - return nil, io.ErrUnexpectedEOF - } - if debugDecoder { - println("readSmall: got", n2, "want", n, "err", err) - } - return nil, err - } - return r.tmp[:n], nil -} - -func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { - if cap(dst) < n { - dst = make([]byte, n) - } - n2, err := io.ReadFull(r.r, dst[:n]) - if err == io.EOF && n > 0 { - err = io.ErrUnexpectedEOF - } - return dst[:n2], err -} - -func (r *readerWrapper) readByte() (byte, error) { - n2, err := r.r.Read(r.tmp[:1]) - if err != nil { - return 0, err - } - if n2 != 1 { - return 0, io.ErrUnexpectedEOF - } - return r.tmp[0], nil -} - -func (r *readerWrapper) skipN(n int) error { - n2, err := io.CopyN(ioutil.Discard, r.r, int64(n)) - if n2 != int64(n) { - err = io.ErrUnexpectedEOF - } - return err -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/bytereader.go b/src/vendor/github.com/klauspost/compress/zstd/bytereader.go deleted file mode 100644 index 2c4fca17f..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/bytereader.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - -// overread returns whether we have advanced too far. -func (b *byteReader) overread() bool { - return b.off > len(b.b) -} - -// Int32 returns a little endian int32 starting at current offset. -func (b byteReader) Int32() int32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := int32(b2[3]) - v2 := int32(b2[2]) - v1 := int32(b2[1]) - v0 := int32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// Uint8 returns the next byte -func (b *byteReader) Uint8() uint8 { - v := b.b[b.off] - return v -} - -// Uint32 returns a little endian uint32 starting at current offset. -func (b byteReader) Uint32() uint32 { - if r := b.remain(); r < 4 { - // Very rare - v := uint32(0) - for i := 1; i <= r; i++ { - v = (v << 8) | uint32(b.b[len(b.b)-i]) - } - return v - } - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// Uint32NC returns a little endian uint32 starting at current offset. -// The caller must be sure if there are at least 4 bytes left. -func (b byteReader) Uint32NC() uint32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - -// remain will return the number of bytes remaining. 
-func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/src/vendor/github.com/klauspost/compress/zstd/decodeheader.go deleted file mode 100644 index 69736e8d4..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2020+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "bytes" - "errors" - "io" -) - -// HeaderMaxSize is the maximum size of a Frame and Block Header. -// If less is sent to Header.Decode it *may* still contain enough information. -const HeaderMaxSize = 14 + 3 - -// Header contains information about the first frame and block within that. -type Header struct { - // Window Size the window of data to keep while decoding. - // Will only be set if HasFCS is false. - WindowSize uint64 - - // Frame content size. - // Expected size of the entire frame. - FrameContentSize uint64 - - // Dictionary ID. - // If 0, no dictionary. - DictionaryID uint32 - - // First block information. - FirstBlock struct { - // OK will be set if first block could be decoded. - OK bool - - // Is this the last block of a frame? - Last bool - - // Is the data compressed? - // If true CompressedSize will be populated. - // Unfortunately DecompressedSize cannot be determined - // without decoding the blocks. - Compressed bool - - // DecompressedSize is the expected decompressed size of the block. - // Will be 0 if it cannot be determined. - DecompressedSize int - - // CompressedSize of the data in the block. - // Does not include the block header. - // Will be equal to DecompressedSize if not Compressed. - CompressedSize int - } - - // Skippable will be true if the frame is meant to be skipped. - // No other information will be populated. - Skippable bool - - // If set there is a checksum present for the block content. - HasCheckSum bool - - // If this is true FrameContentSize will have a valid value - HasFCS bool - - SingleSegment bool -} - -// Decode the header from the beginning of the stream. -// This will decode the frame header and the first block header if enough bytes are provided. -// It is recommended to provide at least HeaderMaxSize bytes. -// If the frame header cannot be read an error will be returned. -// If there isn't enough input, io.ErrUnexpectedEOF is returned. -// The FirstBlock.OK will indicate if enough information was available to decode the first block header. 
-func (h *Header) Decode(in []byte) error { - if len(in) < 4 { - return io.ErrUnexpectedEOF - } - b, in := in[:4], in[4:] - if !bytes.Equal(b, frameMagic) { - if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { - return ErrMagicMismatch - } - *h = Header{Skippable: true} - return nil - } - if len(in) < 1 { - return io.ErrUnexpectedEOF - } - - // Clear output - *h = Header{} - fhd, in := in[0], in[1:] - h.SingleSegment = fhd&(1<<5) != 0 - h.HasCheckSum = fhd&(1<<2) != 0 - - if fhd&(1<<3) != 0 { - return errors.New("reserved bit set on frame header") - } - - // Read Window_Descriptor - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor - if !h.SingleSegment { - if len(in) < 1 { - return io.ErrUnexpectedEOF - } - var wd byte - wd, in = in[0], in[1:] - windowLog := 10 + (wd >> 3) - windowBase := uint64(1) << windowLog - windowAdd := (windowBase / 8) * uint64(wd&0x7) - h.WindowSize = windowBase + windowAdd - } - - // Read Dictionary_ID - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - if size := fhd & 3; size != 0 { - if size == 3 { - size = 4 - } - if len(in) < int(size) { - return io.ErrUnexpectedEOF - } - b, in = in[:size], in[size:] - if b == nil { - return io.ErrUnexpectedEOF - } - switch size { - case 1: - h.DictionaryID = uint32(b[0]) - case 2: - h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) - case 4: - h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - } - } - - // Read Frame_Content_Size - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size - var fcsSize int - v := fhd >> 6 - switch v { - case 0: - if h.SingleSegment { - fcsSize = 1 - } - default: - fcsSize = 1 << v - } - - if fcsSize > 0 { - h.HasFCS = true - if len(in) < fcsSize { - return io.ErrUnexpectedEOF - } - b, in = in[:fcsSize], in[fcsSize:] - if b == nil { - return io.ErrUnexpectedEOF - } - switch fcsSize { - case 1: - h.FrameContentSize = uint64(b[0]) - case 2: - // When FCS_Field_Size is 2, the offset of 256 is added. - h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 - case 4: - h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) - case 8: - d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) - h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) - } - } - - // Frame Header done, we will not fail from now on. - if len(in) < 3 { - return nil - } - tmp := in[:3] - bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) - h.FirstBlock.Last = bh&1 != 0 - blockType := blockType((bh >> 1) & 3) - // find size. 
- cSize := int(bh >> 3) - switch blockType { - case blockTypeReserved: - return nil - case blockTypeRLE: - h.FirstBlock.Compressed = true - h.FirstBlock.DecompressedSize = cSize - h.FirstBlock.CompressedSize = 1 - case blockTypeCompressed: - h.FirstBlock.Compressed = true - h.FirstBlock.CompressedSize = cSize - case blockTypeRaw: - h.FirstBlock.DecompressedSize = cSize - h.FirstBlock.CompressedSize = cSize - default: - panic("Invalid block type") - } - - h.FirstBlock.OK = true - return nil -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/decoder.go b/src/vendor/github.com/klauspost/compress/zstd/decoder.go deleted file mode 100644 index f430f58b5..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/decoder.go +++ /dev/null @@ -1,555 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "io" - "sync" -) - -// Decoder provides decoding of zstandard streams. -// The decoder has been designed to operate without allocations after a warmup. -// This means that you should store the decoder for best performance. -// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. -// A decoder can safely be re-used even if the previous stream failed. -// To release the resources, you must call the Close() function on a decoder. -type Decoder struct { - o decoderOptions - - // Unreferenced decoders, ready for use. - decoders chan *blockDec - - // Streams ready to be decoded. - stream chan decodeStream - - // Current read position used for Reader functionality. - current decoderState - - // Custom dictionaries. - // Always uses copies. - dicts map[uint32]dict - - // streamWg is the waitgroup for all streams - streamWg sync.WaitGroup -} - -// decoderState is used for maintaining state when the decoder -// is used for streaming. -type decoderState struct { - // current block being written to stream. - decodeOutput - - // output in order to be written to stream. - output chan decodeOutput - - // cancel remaining output. - cancel chan struct{} - - flushed bool -} - -var ( - // Check the interfaces we want to support. - _ = io.WriterTo(&Decoder{}) - _ = io.Reader(&Decoder{}) -) - -// NewReader creates a new decoder. -// A nil Reader can be provided in which case Reset can be used to start a decode. -// -// A Decoder can be used in two modes: -// -// 1) As a stream, or -// 2) For stateless decoding using DecodeAll. -// -// Only a single stream can be decoded concurrently, but the same decoder -// can run multiple concurrent stateless decodes. It is even possible to -// use stateless decodes while a stream is being decoded. -// -// The Reset function can be used to initiate a new stream, which is will considerably -// reduce the allocations normally caused by NewReader. -func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { - initPredefined() - var d Decoder - d.o.setDefault() - for _, o := range opts { - err := o(&d.o) - if err != nil { - return nil, err - } - } - d.current.output = make(chan decodeOutput, d.o.concurrent) - d.current.flushed = true - - if r == nil { - d.current.err = ErrDecoderNilInput - } - - // Transfer option dicts. 
- d.dicts = make(map[uint32]dict, len(d.o.dicts)) - for _, dc := range d.o.dicts { - d.dicts[dc.id] = dc - } - d.o.dicts = nil - - // Create decoders - d.decoders = make(chan *blockDec, d.o.concurrent) - for i := 0; i < d.o.concurrent; i++ { - dec := newBlockDec(d.o.lowMem) - dec.localFrame = newFrameDec(d.o) - d.decoders <- dec - } - - if r == nil { - return &d, nil - } - return &d, d.Reset(r) -} - -// Read bytes from the decompressed stream into p. -// Returns the number of bytes written and any error that occurred. -// When the stream is done, io.EOF will be returned. -func (d *Decoder) Read(p []byte) (int, error) { - var n int - for { - if len(d.current.b) > 0 { - filled := copy(p, d.current.b) - p = p[filled:] - d.current.b = d.current.b[filled:] - n += filled - } - if len(p) == 0 { - break - } - if len(d.current.b) == 0 { - // We have an error and no more data - if d.current.err != nil { - break - } - if !d.nextBlock(n == 0) { - return n, nil - } - } - } - if len(d.current.b) > 0 { - if debugDecoder { - println("returning", n, "still bytes left:", len(d.current.b)) - } - // Only return error at end of block - return n, nil - } - if d.current.err != nil { - d.drainOutput() - } - if debugDecoder { - println("returning", n, d.current.err, len(d.decoders)) - } - return n, d.current.err -} - -// Reset will reset the decoder the supplied stream after the current has finished processing. -// Note that this functionality cannot be used after Close has been called. -// Reset can be called with a nil reader to release references to the previous reader. -// After being called with a nil reader, no other operations than Reset or DecodeAll or Close -// should be used. -func (d *Decoder) Reset(r io.Reader) error { - if d.current.err == ErrDecoderClosed { - return d.current.err - } - - d.drainOutput() - - if r == nil { - d.current.err = ErrDecoderNilInput - if len(d.current.b) > 0 { - d.current.b = d.current.b[:0] - } - d.current.flushed = true - return nil - } - - // If bytes buffer and < 5MB, do sync decoding anyway. - if bb, ok := r.(byter); ok && bb.Len() < 5<<20 { - bb2 := bb - if debugDecoder { - println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) - } - b := bb2.Bytes() - var dst []byte - if cap(d.current.b) > 0 { - dst = d.current.b - } - - dst, err := d.DecodeAll(b, dst[:0]) - if err == nil { - err = io.EOF - } - d.current.b = dst - d.current.err = err - d.current.flushed = true - if debugDecoder { - println("sync decode to", len(dst), "bytes, err:", err) - } - return nil - } - - if d.stream == nil { - d.stream = make(chan decodeStream, 1) - d.streamWg.Add(1) - go d.startStreamDecoder(d.stream) - } - - // Remove current block. - d.current.decodeOutput = decodeOutput{} - d.current.err = nil - d.current.cancel = make(chan struct{}) - d.current.flushed = false - d.current.d = nil - - d.stream <- decodeStream{ - r: r, - output: d.current.output, - cancel: d.current.cancel, - } - return nil -} - -// drainOutput will drain the output until errEndOfStream is sent. 
-func (d *Decoder) drainOutput() { - if d.current.cancel != nil { - println("cancelling current") - close(d.current.cancel) - d.current.cancel = nil - } - if d.current.d != nil { - if debugDecoder { - printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) - } - d.decoders <- d.current.d - d.current.d = nil - d.current.b = nil - } - if d.current.output == nil || d.current.flushed { - println("current already flushed") - return - } - for v := range d.current.output { - if v.d != nil { - if debugDecoder { - printf("re-adding decoder %p", v.d) - } - d.decoders <- v.d - } - if v.err == errEndOfStream { - println("current flushed") - d.current.flushed = true - return - } - } -} - -// WriteTo writes data to w until there's no more data to write or when an error occurs. -// The return value n is the number of bytes written. -// Any error encountered during the write is also returned. -func (d *Decoder) WriteTo(w io.Writer) (int64, error) { - var n int64 - for { - if len(d.current.b) > 0 { - n2, err2 := w.Write(d.current.b) - n += int64(n2) - if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { - d.current.err = err2 - } else if n2 != len(d.current.b) { - d.current.err = io.ErrShortWrite - } - } - if d.current.err != nil { - break - } - d.nextBlock(true) - } - err := d.current.err - if err != nil { - d.drainOutput() - } - if err == io.EOF { - err = nil - } - return n, err -} - -// DecodeAll allows stateless decoding of a blob of bytes. -// Output will be appended to dst, so if the destination size is known -// you can pre-allocate the destination slice to avoid allocations. -// DecodeAll can be used concurrently. -// The Decoder concurrency limits will be respected. -func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { - if d.current.err == ErrDecoderClosed { - return dst, ErrDecoderClosed - } - - // Grab a block decoder and frame decoder. - block := <-d.decoders - frame := block.localFrame - defer func() { - if debugDecoder { - printf("re-adding decoder: %p", block) - } - frame.rawInput = nil - frame.bBuf = nil - d.decoders <- block - }() - frame.bBuf = input - - for { - frame.history.reset() - err := frame.reset(&frame.bBuf) - if err == io.EOF { - if debugDecoder { - println("frame reset return EOF") - } - return dst, nil - } - if frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - return nil, ErrUnknownDictionary - } - frame.history.setDict(&dict) - } - if err != nil { - return dst, err - } - if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { - return dst, ErrDecoderSizeExceeded - } - if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 { - // Never preallocate moe than 1 GB up front. - if cap(dst)-len(dst) < int(frame.FrameContentSize) { - dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)) - copy(dst2, dst) - dst = dst2 - } - } - if cap(dst) == 0 { - // Allocate len(input) * 2 by default if nothing is provided - // and we didn't get frame content size. - size := len(input) * 2 - // Cap to 1 MB. - if size > 1<<20 { - size = 1 << 20 - } - if uint64(size) > d.o.maxDecodedSize { - size = int(d.o.maxDecodedSize) - } - dst = make([]byte, 0, size) - } - - dst, err = frame.runDecoder(dst, block) - if err != nil { - return dst, err - } - if len(frame.bBuf) == 0 { - if debugDecoder { - println("frame dbuf empty") - } - break - } - } - return dst, nil -} - -// nextBlock returns the next block. -// If an error occurs d.err will be set. 
-// Optionally the function can block for new output. -// If non-blocking mode is used the returned boolean will be false -// if no data was available without blocking. -func (d *Decoder) nextBlock(blocking bool) (ok bool) { - if d.current.d != nil { - if debugDecoder { - printf("re-adding current decoder %p", d.current.d) - } - d.decoders <- d.current.d - d.current.d = nil - } - if d.current.err != nil { - // Keep error state. - return blocking - } - - if blocking { - d.current.decodeOutput = <-d.current.output - } else { - select { - case d.current.decodeOutput = <-d.current.output: - default: - return false - } - } - if debugDecoder { - println("got", len(d.current.b), "bytes, error:", d.current.err) - } - return true -} - -// Close will release all resources. -// It is NOT possible to reuse the decoder after this. -func (d *Decoder) Close() { - if d.current.err == ErrDecoderClosed { - return - } - d.drainOutput() - if d.stream != nil { - close(d.stream) - d.streamWg.Wait() - d.stream = nil - } - if d.decoders != nil { - close(d.decoders) - for dec := range d.decoders { - dec.Close() - } - d.decoders = nil - } - if d.current.d != nil { - d.current.d.Close() - d.current.d = nil - } - d.current.err = ErrDecoderClosed -} - -// IOReadCloser returns the decoder as an io.ReadCloser for convenience. -// Any changes to the decoder will be reflected, so the returned ReadCloser -// can be reused along with the decoder. -// io.WriterTo is also supported by the returned ReadCloser. -func (d *Decoder) IOReadCloser() io.ReadCloser { - return closeWrapper{d: d} -} - -// closeWrapper wraps a function call as a closer. -type closeWrapper struct { - d *Decoder -} - -// WriteTo forwards WriteTo calls to the decoder. -func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { - return c.d.WriteTo(w) -} - -// Read forwards read calls to the decoder. -func (c closeWrapper) Read(p []byte) (n int, err error) { - return c.d.Read(p) -} - -// Close closes the decoder. -func (c closeWrapper) Close() error { - c.d.Close() - return nil -} - -type decodeOutput struct { - d *blockDec - b []byte - err error -} - -type decodeStream struct { - r io.Reader - - // Blocks ready to be written to output. - output chan decodeOutput - - // cancel reading from the input - cancel chan struct{} -} - -// errEndOfStream indicates that everything from the stream was read. -var errEndOfStream = errors.New("end-of-stream") - -// Create Decoder: -// Spawn n block decoders. These accept tasks to decode a block. -// Create goroutine that handles stream processing, this will send history to decoders as they are available. -// Decoders update the history as they decode. -// When a block is returned: -// a) history is sent to the next decoder, -// b) content written to CRC. -// c) return data to WRITER. -// d) wait for next block to return data. -// Once WRITTEN, the decoders reused by the writer frame decoder for re-use. 
-func (d *Decoder) startStreamDecoder(inStream chan decodeStream) { - defer d.streamWg.Done() - frame := newFrameDec(d.o) - for stream := range inStream { - if debugDecoder { - println("got new stream") - } - br := readerWrapper{r: stream.r} - decodeStream: - for { - frame.history.reset() - err := frame.reset(&br) - if debugDecoder && err != nil { - println("Frame decoder returned", err) - } - if err == nil && frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - err = ErrUnknownDictionary - } else { - frame.history.setDict(&dict) - } - } - if err != nil { - stream.output <- decodeOutput{ - err: err, - } - break - } - if debugDecoder { - println("starting frame decoder") - } - - // This goroutine will forward history between frames. - frame.frameDone.Add(1) - frame.initAsync() - - go frame.startDecoder(stream.output) - decodeFrame: - // Go through all blocks of the frame. - for { - dec := <-d.decoders - select { - case <-stream.cancel: - if !frame.sendErr(dec, io.EOF) { - // To not let the decoder dangle, send it back. - stream.output <- decodeOutput{d: dec} - } - break decodeStream - default: - } - err := frame.next(dec) - switch err { - case io.EOF: - // End of current frame, no error - println("EOF on next block") - break decodeFrame - case nil: - continue - default: - println("block decoder returned", err) - break decodeStream - } - } - // All blocks have started decoding, check if there are more frames. - println("waiting for done") - frame.frameDone.Wait() - println("done waiting...") - } - frame.frameDone.Wait() - println("Sending EOS") - stream.output <- decodeOutput{err: errEndOfStream} - } -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/src/vendor/github.com/klauspost/compress/zstd/decoder_options.go deleted file mode 100644 index 95cc9b8b8..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "runtime" -) - -// DOption is an option for creating a decoder. -type DOption func(*decoderOptions) error - -// options retains accumulated state of multiple options. -type decoderOptions struct { - lowMem bool - concurrent int - maxDecodedSize uint64 - maxWindowSize uint64 - dicts []dict -} - -func (o *decoderOptions) setDefault() { - *o = decoderOptions{ - // use less ram: true for now, but may change. - lowMem: true, - concurrent: runtime.GOMAXPROCS(0), - maxWindowSize: MaxWindowSize, - } - o.maxDecodedSize = 1 << 63 -} - -// WithDecoderLowmem will set whether to use a lower amount of memory, -// but possibly have to allocate more while running. -func WithDecoderLowmem(b bool) DOption { - return func(o *decoderOptions) error { o.lowMem = b; return nil } -} - -// WithDecoderConcurrency will set the concurrency, -// meaning the maximum number of decoders to run concurrently. -// The value supplied must be at least 1. -// By default this will be set to GOMAXPROCS. -func WithDecoderConcurrency(n int) DOption { - return func(o *decoderOptions) error { - if n <= 0 { - return errors.New("concurrency must be at least 1") - } - o.concurrent = n - return nil - } -} - -// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory -// non-streaming operations or maximum window size for streaming operations. 
-// This can be used to control memory usage of potentially hostile content. -// Maximum and default is 1 << 63 bytes. -func WithDecoderMaxMemory(n uint64) DOption { - return func(o *decoderOptions) error { - if n == 0 { - return errors.New("WithDecoderMaxMemory must be at least 1") - } - if n > 1<<63 { - return errors.New("WithDecoderMaxmemory must be less than 1 << 63") - } - o.maxDecodedSize = n - return nil - } -} - -// WithDecoderDicts allows to register one or more dictionaries for the decoder. -// If several dictionaries with the same ID is provided the last one will be used. -func WithDecoderDicts(dicts ...[]byte) DOption { - return func(o *decoderOptions) error { - for _, b := range dicts { - d, err := loadDict(b) - if err != nil { - return err - } - o.dicts = append(o.dicts, *d) - } - return nil - } -} - -// WithDecoderMaxWindow allows to set a maximum window size for decodes. -// This allows rejecting packets that will cause big memory usage. -// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. -// If WithDecoderMaxMemory is set to a lower value, that will be used. -// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec. -func WithDecoderMaxWindow(size uint64) DOption { - return func(o *decoderOptions) error { - if size < MinWindowSize { - return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes") - } - if size > (1<<41)+7*(1<<38) { - return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB") - } - o.maxWindowSize = size - return nil - } -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/dict.go b/src/vendor/github.com/klauspost/compress/zstd/dict.go deleted file mode 100644 index a36ae83ef..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/dict.go +++ /dev/null @@ -1,122 +0,0 @@ -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - - "github.com/klauspost/compress/huff0" -) - -type dict struct { - id uint32 - - litEnc *huff0.Scratch - llDec, ofDec, mlDec sequenceDec - //llEnc, ofEnc, mlEnc []*fseEncoder - offsets [3]int - content []byte -} - -var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec} - -// ID returns the dictionary id or 0 if d is nil. -func (d *dict) ID() uint32 { - if d == nil { - return 0 - } - return d.id -} - -// DictContentSize returns the dictionary content size or 0 if d is nil. -func (d *dict) DictContentSize() int { - if d == nil { - return 0 - } - return len(d.content) -} - -// Load a dictionary as described in -// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format -func loadDict(b []byte) (*dict, error) { - // Check static field size. 
- if len(b) <= 8+(3*4) { - return nil, io.ErrUnexpectedEOF - } - d := dict{ - llDec: sequenceDec{fse: &fseDecoder{}}, - ofDec: sequenceDec{fse: &fseDecoder{}}, - mlDec: sequenceDec{fse: &fseDecoder{}}, - } - if !bytes.Equal(b[:4], dictMagic[:]) { - return nil, ErrMagicMismatch - } - d.id = binary.LittleEndian.Uint32(b[4:8]) - if d.id == 0 { - return nil, errors.New("dictionaries cannot have ID 0") - } - - // Read literal table - var err error - d.litEnc, b, err = huff0.ReadTable(b[8:], nil) - if err != nil { - return nil, err - } - d.litEnc.Reuse = huff0.ReusePolicyMust - - br := byteReader{ - b: b, - off: 0, - } - readDec := func(i tableIndex, dec *fseDecoder) error { - if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { - return err - } - if br.overread() { - return io.ErrUnexpectedEOF - } - err = dec.transform(symbolTableX[i]) - if err != nil { - println("Transform table error:", err) - return err - } - if debugDecoder || debugEncoder { - println("Read table ok", "symbolLen:", dec.symbolLen) - } - // Set decoders as predefined so they aren't reused. - dec.preDefined = true - return nil - } - - if err := readDec(tableOffsets, d.ofDec.fse); err != nil { - return nil, err - } - if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { - return nil, err - } - if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { - return nil, err - } - if br.remain() < 12 { - return nil, io.ErrUnexpectedEOF - } - - d.offsets[0] = int(br.Uint32()) - br.advance(4) - d.offsets[1] = int(br.Uint32()) - br.advance(4) - d.offsets[2] = int(br.Uint32()) - br.advance(4) - if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { - return nil, errors.New("invalid offset in dictionary") - } - d.content = make([]byte, br.remain()) - copy(d.content, br.unread()) - if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { - return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) - } - - return &d, nil -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/enc_base.go b/src/vendor/github.com/klauspost/compress/zstd/enc_base.go deleted file mode 100644 index 295cd602a..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ /dev/null @@ -1,178 +0,0 @@ -package zstd - -import ( - "fmt" - "math/bits" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -const ( - dictShardBits = 6 -) - -type fastBase struct { - // cur is the offset at the start of hist - cur int32 - // maximum offset. Should be at least 2x block size. - maxMatchOff int32 - hist []byte - crc *xxhash.Digest - tmp [8]byte - blk *blockEnc - lastDictID uint32 - lowMem bool -} - -// CRC returns the underlying CRC writer. -func (e *fastBase) CRC() *xxhash.Digest { - return e.crc -} - -// AppendCRC will append the CRC to the destination slice and return it. -func (e *fastBase) AppendCRC(dst []byte) []byte { - crc := e.crc.Sum(e.tmp[:0]) - dst = append(dst, crc[7], crc[6], crc[5], crc[4]) - return dst -} - -// WindowSize returns the window size of the encoder, -// or a window size small enough to contain the input size, if > 0. -func (e *fastBase) WindowSize(size int64) int32 { - if size > 0 && size < int64(e.maxMatchOff) { - b := int32(1) << uint(bits.Len(uint(size))) - // Keep minimum window. - if b < 1024 { - b = 1024 - } - return b - } - return e.maxMatchOff -} - -// Block returns the current block. 
-func (e *fastBase) Block() *blockEnc { - return e.blk -} - -func (e *fastBase) addBlock(src []byte) int32 { - if debugAsserts && e.cur > bufferReset { - panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset)) - } - // check if we have space already - if len(e.hist)+len(src) > cap(e.hist) { - if cap(e.hist) == 0 { - e.ensureHist(len(src)) - } else { - if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) { - panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff)) - } - // Move down - offset := int32(len(e.hist)) - e.maxMatchOff - copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) - e.cur += offset - e.hist = e.hist[:e.maxMatchOff] - } - } - s := int32(len(e.hist)) - e.hist = append(e.hist, src...) - return s -} - -// ensureHist will ensure that history can keep at least this many bytes. -func (e *fastBase) ensureHist(n int) { - if cap(e.hist) >= n { - return - } - l := e.maxMatchOff - if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize { - l += maxCompressedBlockSize - } else { - l += e.maxMatchOff - } - // Make it at least 1MB. - if l < 1<<20 && !e.lowMem { - l = 1 << 20 - } - // Make it at least the requested size. - if l < int32(n) { - l = int32(n) - } - e.hist = make([]byte, 0, l) -} - -// useBlock will replace the block with the provided one, -// but transfer recent offsets from the previous. -func (e *fastBase) UseBlock(enc *blockEnc) { - enc.reset(e.blk) - e.blk = enc -} - -func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 { - // Extend the match to be as long as possible. - return int32(matchLen(src[s:], src[t:])) -} - -func (e *fastBase) matchlen(s, t int32, src []byte) int32 { - if debugAsserts { - if s < 0 { - err := fmt.Sprintf("s (%d) < 0", s) - panic(err) - } - if t < 0 { - err := fmt.Sprintf("s (%d) < 0", s) - panic(err) - } - if s-t > e.maxMatchOff { - err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) - panic(err) - } - if len(src)-int(s) > maxCompressedBlockSize { - panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) - } - } - - // Extend the match to be as long as possible. - return int32(matchLen(src[s:], src[t:])) -} - -// Reset the encoding table. -func (e *fastBase) resetBase(d *dict, singleBlock bool) { - if e.blk == nil { - e.blk = &blockEnc{lowMem: e.lowMem} - e.blk.init() - } else { - e.blk.reset(nil) - } - e.blk.initNewEncode() - if e.crc == nil { - e.crc = xxhash.New() - } else { - e.crc.Reset() - } - if d != nil { - low := e.lowMem - if singleBlock { - e.lowMem = true - } - e.ensureHist(d.DictContentSize() + maxCompressedBlockSize) - e.lowMem = low - } - - // We offset current position so everything will be out of reach. - // If above reset line, history will be purged. - if e.cur < bufferReset { - e.cur += e.maxMatchOff + int32(len(e.hist)) - } - e.hist = e.hist[:0] - if d != nil { - // Set offsets (currently not used) - for i, off := range d.offsets { - e.blk.recentOffsets[i] = uint32(off) - e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i] - } - // Transfer litenc. - e.blk.dictLitEnc = d.litEnc - e.hist = append(e.hist, d.content...) 
- } -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/enc_best.go b/src/vendor/github.com/klauspost/compress/zstd/enc_best.go deleted file mode 100644 index 96028ecd8..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ /dev/null @@ -1,558 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "bytes" - "fmt" - - "github.com/klauspost/compress" -) - -const ( - bestLongTableBits = 22 // Bits used in the long match table - bestLongTableSize = 1 << bestLongTableBits // Size of the table - bestLongLen = 8 // Bytes used for table hash - - // Note: Increasing the short table bits or making the hash shorter - // can actually lead to compression degradation since it will 'steal' more from the - // long match table and match offsets are quite big. - // This greatly depends on the type of input. - bestShortTableBits = 18 // Bits used in the short match table - bestShortTableSize = 1 << bestShortTableBits // Size of the table - bestShortLen = 4 // Bytes used for table hash - -) - -type match struct { - offset int32 - s int32 - length int32 - rep int32 - est int32 -} - -const highScore = 25000 - -// estBits will estimate output bits from predefined tables. -func (m *match) estBits(bitsPerByte int32) { - mlc := mlCode(uint32(m.length - zstdMinMatch)) - var ofc uint8 - if m.rep < 0 { - ofc = ofCode(uint32(m.s-m.offset) + 3) - } else { - ofc = ofCode(uint32(m.rep)) - } - // Cost, excluding - ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] - - // Add cost of match encoding... - m.est = int32(ofTT.outBits + mlTT.outBits) - m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) - // Subtract savings compared to literal encoding... - m.est -= (m.length * bitsPerByte) >> 10 - if m.est > 0 { - // Unlikely gain.. - m.length = 0 - m.est = highScore - } -} - -// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. -// The long match table contains the previous entry with the same hash, -// effectively making it a "chain" of length 2. -// When we find a long match we choose between the two values and select the longest. -// When we find a short match, after checking the long, we check if we can find a long at n+1 -// and that it is longer (lazy matching). -type bestFastEncoder struct { - fastBase - table [bestShortTableSize]prevEntry - longTable [bestLongTableSize]prevEntry - dictTable []prevEntry - dictLongTable []prevEntry -} - -// Encode improves compression... -func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 4 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = prevEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - v2 := e.table[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.table[i] = prevEntry{ - offset: v, - prev: v2, - } - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Use this to estimate literal cost. - // Scaled by 10 bits. - bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) - // Huffman can never go < 1 bit/byte - if bitsPerByte < 1024 { - bitsPerByte = 1024 - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - const kSearchStrength = 10 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - offset3 := int32(blk.recentOffsets[2]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - _ = addLiterals - - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - bestOf := func(a, b match) match { - if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 { - return a - } - return b - } - const goodEnough = 100 - - nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) - nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - matchAt := func(offset int32, s int32, first uint32, rep int32) match { - if s-offset >= e.maxMatchOff || load3232(src, offset) != first { - return match{s: s, est: highScore} - } - if debugAsserts { - if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { - panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) - } - } - m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep} - m.estBits(bitsPerByte) - return m - } - - best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)) - - if canRepeat && best.length < goodEnough { - cv32 := uint32(cv >> 8) - spp := s + 1 - best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) - best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) - best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) - if best.length > 0 { - cv32 = uint32(cv >> 24) - spp += 2 - best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) - best = bestOf(best, 
matchAt(spp-offset2, spp, cv32, 2)) - best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) - } - } - // Load next and check... - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} - e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} - - // Look far ahead, unless we have a really long match already... - if best.length < goodEnough { - // No match found, move forward on input, no need to check forward... - if best.length < 4 { - s += 1 + (s-nextEmit)>>(kSearchStrength-1) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - continue - } - - s++ - candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] - cv = load6432(src, s) - cv2 := load6432(src, s+1) - candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] - candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] - - // Short at s+1 - best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) - // Long at s+1, s+2 - best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)) - best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)) - if false { - // Short at s+3. - // Too often worse... - best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)) - } - // See if we can find a better match by checking where the current best ends. - // Use that offset to see if we can find a better full match. - if sAt := best.s + best.length; sAt < sLimit { - nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) - candidateEnd := e.longTable[nextHashL] - if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 { - bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1)) - if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 { - bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1)) - } - best = bestEnd - } - } - } - - if debugAsserts { - if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { - panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) - } - } - - // We have a match, we can store the forward value - if best.rep > 0 { - s = best.s - var seq seq - seq.matchLen = uint32(best.length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := best.s - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - repIndex := best.offset - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = uint32(best.rep) - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Index match start+1 (long) -> s - 1 - index0 := s - s = best.s + best.length - - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, best.length) - - } - break encodeLoop - } - // Index skipped... 
- off := index0 + e.cur - for index0 < s-1 { - cv0 := load6432(src, index0) - h0 := hashLen(cv0, bestLongTableBits, bestLongLen) - h1 := hashLen(cv0, bestShortTableBits, bestShortLen) - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} - off++ - index0++ - } - switch best.rep { - case 2: - offset1, offset2 = offset2, offset1 - case 3: - offset1, offset2, offset3 = offset3, offset1, offset2 - } - cv = load6432(src, s) - continue - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - s = best.s - t := best.offset - offset1, offset2, offset3 = s-t, offset1, offset2 - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the n-byte match as long as possible. - l := best.length - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 - // every entry - for index0 < s-1 { - cv0 := load6432(src, index0) - h0 := hashLen(cv0, bestLongTableBits, bestLongLen) - h1 := hashLen(cv0, bestShortTableBits, bestShortLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} - index0++ - } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) - nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - blk.recentOffsets[2] = uint32(offset3) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. 
-// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - e.ensureHist(len(src)) - e.Encode(blk, src) -} - -// Reset will reset and set a dictionary if not nil -func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]prevEntry, len(e.table)) - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff; i < end; i += 4 { - const hashLog = bestShortTableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 - nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 - nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 - nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 - e.dictTable[nextHash] = prevEntry{ - prev: e.dictTable[nextHash].offset, - offset: i, - } - e.dictTable[nextHash1] = prevEntry{ - prev: e.dictTable[nextHash1].offset, - offset: i + 1, - } - e.dictTable[nextHash2] = prevEntry{ - prev: e.dictTable[nextHash2].offset, - offset: i + 2, - } - e.dictTable[nextHash3] = prevEntry{ - prev: e.dictTable[nextHash3].offset, - offset: i + 3, - } - } - e.lastDictID = d.id - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]prevEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - h := hashLen(cv, bestLongTableBits, bestLongLen) - e.dictLongTable[h] = prevEntry{ - offset: e.maxMatchOff, - prev: e.dictLongTable[h].offset, - } - - end := int32(len(d.content)) - 8 + e.maxMatchOff - off := 8 // First to read - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hashLen(cv, bestLongTableBits, bestLongLen) - e.dictLongTable[h] = prevEntry{ - offset: i, - prev: e.dictLongTable[h].offset, - } - off++ - } - } - e.lastDictID = d.id - } - // Reset table to initial state - copy(e.longTable[:], e.dictLongTable) - - e.cur = e.maxMatchOff - // Reset table to initial state - copy(e.table[:], e.dictTable) -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/enc_better.go b/src/vendor/github.com/klauspost/compress/zstd/enc_better.go deleted file mode 100644 index 602c05ee0..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ /dev/null @@ -1,1237 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "fmt" - -const ( - betterLongTableBits = 19 // Bits used in the long match table - betterLongTableSize = 1 << betterLongTableBits // Size of the table - betterLongLen = 8 // Bytes used for table hash - - // Note: Increasing the short table bits or making the hash shorter - // can actually lead to compression degradation since it will 'steal' more from the - // long match table and match offsets are quite big. - // This greatly depends on the type of input. 
- betterShortTableBits = 13 // Bits used in the short match table - betterShortTableSize = 1 << betterShortTableBits // Size of the table - betterShortLen = 5 // Bytes used for table hash - - betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table - betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard - - betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table - betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard -) - -type prevEntry struct { - offset int32 - prev int32 -} - -// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. -// The long match table contains the previous entry with the same hash, -// effectively making it a "chain" of length 2. -// When we find a long match we choose between the two values and select the longest. -// When we find a short match, after checking the long, we check if we can find a long at n+1 -// and that it is longer (lazy matching). -type betterFastEncoder struct { - fastBase - table [betterShortTableSize]tableEntry - longTable [betterLongTableSize]prevEntry -} - -type betterFastEncoderDict struct { - betterFastEncoder - dictTable []tableEntry - dictLongTable []prevEntry - shortTableShardDirty [betterShortTableShardCnt]bool - longTableShardDirty [betterLongTableShardCnt]bool - allDirty bool -} - -// Encode improves compression... -func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 9 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) 
- s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - var matched int32 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - off := s + e.cur - e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} - e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Index match start+1 (long) -> s - 1 - index0 := s + repOff - s += lenght + repOff - - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - cv = load6432(src, s) - continue - } - const repOff2 = 1 - - // We deviate from the reference encoder and also check offset 2. - // Still slower and not much better, so disabled. - // repIndex = s - offset2 + repOff2 - if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { - // Consider history as well. - var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 2 - seq.offset = 2 - if debugSequences { - println("repeat sequence 2", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - index0 := s + repOff2 - s += lenght + repOff2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - - // Index skipped... 
- for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - cv = load6432(src, s) - // Swap offsets - offset1, offset2 = offset2, offset1 - continue - } - } - // Find the offsets of our two matches. - coffsetL := candidateL.offset - e.cur - coffsetLP := candidateL.prev - e.cur - - // Check if we have a long match. - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetL+8, src) + 8 - t = coffsetL - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 - if prevMatch > matched { - matched = prevMatch - t = coffsetLP - } - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - } - break - } - - // Check if we have a long match on prev. - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetLP+8, src) + 8 - t = coffsetLP - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - coffsetS := candidateS.offset - e.cur - - // Check if we have a short match. - if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - matched = e.matchlen(s+4, coffsetS+4, src) + 4 - - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = candidateL.offset - e.cur - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("long match (after short)") - } - break - } - } - - // Check prev long... - coffsetL = candidateL.prev - e.cur - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. 
- matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("prev long match (after short)") - } - break - } - } - t = coffsetS - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // Try to find a better match by searching for a long match at the end of the current best match - if s+matched < sLimit { - nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) - cv := load3232(src, s) - candidateL := e.longTable[nextHashL] - coffsetL := candidateL.offset - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - matched = matchedNext - if debugMatches { - println("long match at end-of-match") - } - } - } - - // Check prev long... - if true { - coffsetL = candidateL.prev - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - matched = matchedNext - if debugMatches { - println("prev long match at end-of-match") - } - } - } - } - } - // A match has been found. Update recent offsets. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the n-byte match as long as possible. - l := matched - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - - // We have at least 4 byte match. - // No need to check backwards. 
We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - e.ensureHist(len(src)) - e.Encode(blk, src) -} - -// Encode improves compression... -func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } - e.cur = e.maxMatchOff - e.allDirty = true - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.allDirty = true - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 9 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) 
- s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - var matched int32 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - off := s + e.cur - e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} - e.markShortShardDirty(nextHashS) - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Index match start+1 (long) -> s - 1 - index0 := s + repOff - s += lenght + repOff - - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - } - cv = load6432(src, s) - continue - } - const repOff2 = 1 - - // We deviate from the reference encoder and also check offset 2. - // Still slower and not much better, so disabled. - // repIndex = s - offset2 + repOff2 - if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { - // Consider history as well. - var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 2 - seq.offset = 2 - if debugSequences { - println("repeat sequence 2", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - index0 := s + repOff2 - s += lenght + repOff2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - } - cv = load6432(src, s) - // Swap offsets - offset1, offset2 = offset2, offset1 - continue - } - } - // Find the offsets of our two matches. - coffsetL := candidateL.offset - e.cur - coffsetLP := candidateL.prev - e.cur - - // Check if we have a long match. - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetL+8, src) + 8 - t = coffsetL - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 - if prevMatch > matched { - matched = prevMatch - t = coffsetLP - } - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - } - break - } - - // Check if we have a long match on prev. - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetLP+8, src) + 8 - t = coffsetLP - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - coffsetS := candidateS.offset - e.cur - - // Check if we have a short match. - if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - matched = e.matchlen(s+4, coffsetS+4, src) + 4 - - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = candidateL.offset - e.cur - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} - e.markLongShardDirty(nextHashL) - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. 
- matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("long match (after short)") - } - break - } - } - - // Check prev long... - coffsetL = candidateL.prev - e.cur - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("prev long match (after short)") - } - break - } - } - t = coffsetS - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // Try to find a better match by searching for a long match at the end of the current best match - if s+matched < sLimit { - nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) - cv := load3232(src, s) - candidateL := e.longTable[nextHashL] - coffsetL := candidateL.offset - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - matched = matchedNext - if debugMatches { - println("long match at end-of-match") - } - } - } - - // Check prev long... - if true { - coffsetL = candidateL.prev - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - matched = matchedNext - if debugMatches { - println("prev long match at end-of-match") - } - } - } - } - } - // A match has been found. Update recent offsets. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the n-byte match as long as possible. - l := matched - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShortShardDirty(nextHashS) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d != nil { - panic("betterFastEncoder: Reset with dict") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]tableEntry, len(e.table)) - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff; i < end; i += 4 { - const hashLog = betterShortTableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4 - nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5 - nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6 - nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7 - e.dictTable[nextHash] = tableEntry{ - val: uint32(cv), - offset: i, - } - e.dictTable[nextHash1] = tableEntry{ - val: uint32(cv >> 8), - offset: i + 1, - } - e.dictTable[nextHash2] = tableEntry{ - val: uint32(cv >> 16), - offset: i + 2, - } - e.dictTable[nextHash3] = tableEntry{ - val: uint32(cv >> 24), - offset: i + 3, - } - } - e.lastDictID = d.id - e.allDirty = true - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]prevEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - h := hashLen(cv, betterLongTableBits, betterLongLen) - e.dictLongTable[h] = prevEntry{ - offset: e.maxMatchOff, - prev: e.dictLongTable[h].offset, - } - - end := int32(len(d.content)) - 8 + e.maxMatchOff - off := 8 // First to read - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hashLen(cv, betterLongTableBits, betterLongLen) - e.dictLongTable[h] = prevEntry{ - offset: i, - prev: e.dictLongTable[h].offset, - } - off++ - } - } - e.lastDictID = d.id - e.allDirty = true - } - - // Reset table to initial state - { - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.shortTableShardDirty { - if e.shortTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - const shardCnt = betterShortTableShardCnt - const shardSize = betterShortTableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - copy(e.table[:], e.dictTable) - for i := range e.shortTableShardDirty { - e.shortTableShardDirty[i] = false - } - } else { - for i := range e.shortTableShardDirty { - if !e.shortTableShardDirty[i] { - continue - } - - copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) - e.shortTableShardDirty[i] = false - } - } - } - { - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.shortTableShardDirty { - if e.shortTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - const shardCnt = betterLongTableShardCnt - const shardSize = betterLongTableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - copy(e.longTable[:], e.dictLongTable) - for i := range e.longTableShardDirty { - e.longTableShardDirty[i] = false - } - } else { - 
for i := range e.longTableShardDirty { - if !e.longTableShardDirty[i] { - continue - } - - copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize]) - e.longTableShardDirty[i] = false - } - } - } - e.cur = e.maxMatchOff - e.allDirty = false -} - -func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) { - e.longTableShardDirty[entryNum/betterLongTableShardSize] = true -} - -func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) { - e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/src/vendor/github.com/klauspost/compress/zstd/enc_dfast.go deleted file mode 100644 index d6b310424..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ /dev/null @@ -1,1124 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "fmt" - -const ( - dFastLongTableBits = 17 // Bits used in the long match table - dFastLongTableSize = 1 << dFastLongTableBits // Size of the table - dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. - dFastLongLen = 8 // Bytes used for table hash - - dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table - dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard - - dFastShortTableBits = tableBits // Bits used in the short match table - dFastShortTableSize = 1 << dFastShortTableBits // Size of the table - dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. - dFastShortLen = 5 // Bytes used for table hash - -) - -type doubleFastEncoder struct { - fastEncoder - longTable [dFastLongTableSize]tableEntry -} - -type doubleFastEncoderDict struct { - fastEncoderDict - longTable [dFastLongTableSize]tableEntry - dictLongTable []tableEntry - longTableShardDirty [dLongTableShardCnt]bool -} - -// Encode mimmics functionality in zstd_dfast.c -func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.longTable[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. 
- const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += lenght + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. 
- // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 - e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 - e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 - - cv = load6432(src, s) - - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. 
- offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - if e.cur >= bufferReset { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } - e.cur = e.maxMatchOff - } - - s := int32(0) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - for { - - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - - if len(blk.sequences) > 2 { - if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - // Extend the 4-byte match as long as possible. - //l := e.matchlen(s+4, t+4, src) + 4 - l := int32(matchLen(src[s+4:], src[t+4:])) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 - e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 - e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 - - cv = load6432(src, s) - - if len(blk.sequences) <= 2 { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - //l := 4 + e.matchlen(s+4, o2+4, src) - l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - - // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < bufferReset { - e.cur += int32(len(src)) - } -} - -// Encode will encode the content, with a dictionary if initialized for it. -func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } - e.markAllShardsDirty() - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.longTable[i].offset = v - } - e.markAllShardsDirty() - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = entry - e.markShardDirty(nextHashS) - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += lenght + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
- t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - e.markLongShardDirty(nextHashL) - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) - longHash2 := hashLen(cv0, dFastLongTableBits, dFastLongLen) - e.longTable[longHash1] = te0 - e.longTable[longHash2] = te1 - e.markLongShardDirty(longHash1) - e.markLongShardDirty(longHash2) - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) - hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) - e.table[hashVal1] = te0 - e.markShardDirty(hashVal1) - e.table[hashVal2] = te1 - e.markShardDirty(hashVal2) - - cv = load6432(src, s) - - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = entry - e.markShardDirty(nextHashS) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - // If we encoded more than 64K mark all dirty. 
- if len(src) > 64<<10 { - e.markAllShardsDirty() - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { - e.fastEncoder.Reset(d, singleBlock) - if d != nil { - panic("doubleFastEncoder: Reset with dict not supported") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { - allDirty := e.allDirty - e.fastEncoderDict.Reset(d, singleBlock) - if d == nil { - return - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]tableEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ - val: uint32(cv), - offset: e.maxMatchOff, - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) - e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ - val: uint32(cv), - offset: i, - } - } - } - e.lastDictID = d.id - e.allDirty = true - } - // Reset table to initial state - e.cur = e.maxMatchOff - - dirtyShardCnt := 0 - if !allDirty { - for i := range e.longTableShardDirty { - if e.longTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - - if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { - copy(e.longTable[:], e.dictLongTable) - for i := range e.longTableShardDirty { - e.longTableShardDirty[i] = false - } - return - } - for i := range e.longTableShardDirty { - if !e.longTableShardDirty[i] { - continue - } - - copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) - e.longTableShardDirty[i] = false - } -} - -func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { - e.longTableShardDirty[entryNum/dLongTableShardSize] = true -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/src/vendor/github.com/klauspost/compress/zstd/enc_fast.go deleted file mode 100644 index f2502629b..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ /dev/null @@ -1,1019 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" - "math" - "math/bits" -) - -const ( - tableBits = 15 // Bits used in the table - tableSize = 1 << tableBits // Size of the table - tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table - tableShardSize = tableSize / tableShardCnt // Size of an individual shard - tableFastHashLen = 6 - tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. - maxMatchLength = 131074 -) - -type tableEntry struct { - val uint32 - offset int32 -} - -type fastEncoder struct { - fastBase - table [tableSize]tableEntry -} - -type fastEncoderDict struct { - fastEncoder - dictTable []tableEntry - tableShardDirty [tableShardCnt]bool - allDirty bool -} - -// Encode mimmics functionality in zstd_fast.c -func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - - // Protect against e.cur wraparound. 
- for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 7 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. - // By not using them for the first 3 matches - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHash := hashLen(cv, hashLog, tableFastHashLen) - nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - - if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - var length int32 - // length = 4 + e.matchlen(s+6, repIndex+4, src) - { - a := src[s+6:] - b := src[repIndex+4:] - endI := len(a) & (math.MaxInt32 - 7) - length = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - //l := e.matchlen(s+4, t+4, src) + 4 - var l int32 - { - a := src[s+4:] - b := src[t+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - //l := 4 + e.matchlen(s+4, o2+4, src) - var l int32 - { - a := src[s+4:] - b := src[o2+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. 
- seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - if debugEncoder { - if len(src) > maxBlockSize { - panic("src too big") - } - } - - // Protect against e.cur wraparound. - if e.cur >= bufferReset { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = e.maxMatchOff - } - - s := int32(0) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. - // By not using them for the first 3 matches - - for { - nextHash := hashLen(cv, hashLog, tableFastHashLen) - nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - - if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - // length := 4 + e.matchlen(s+6, repIndex+4, src) - // length := 4 + int32(matchLen(src[s+6:], src[repIndex+4:])) - var length int32 - { - a := src[s+6:] - b := src[repIndex+4:] - endI := len(a) & (math.MaxInt32 - 7) - length = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. 
- start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && t < 0 { - panic(fmt.Sprintf("t (%d) < 0 ", t)) - } - // Extend the 4-byte match as long as possible. - //l := e.matchlenNoHist(s+4, t+4, src) + 4 - // l := int32(matchLen(src[s+4:], src[t+4:])) + 4 - var l int32 - { - a := src[s+4:] - b := src[t+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. 
We come straight from a match - //l := 4 + e.matchlenNoHist(s+4, o2+4, src) - // l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) - var l int32 - { - a := src[s+4:] - b := src[o2+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < bufferReset { - e.cur += int32(len(src)) - } -} - -// Encode will encode the content, with a dictionary if initialized for it. -func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - if e.allDirty || len(src) > 32<<10 { - e.fastEncoder.Encode(blk, src) - e.allDirty = true - return - } - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 7 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. 
- // By not using them for the first 3 matches - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHash := hashLen(cv, hashLog, tableFastHashLen) - nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShardDirty(nextHash) - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - e.markShardDirty(nextHash2) - - if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - var length int32 - // length = 4 + e.matchlen(s+6, repIndex+4, src) - { - a := src[s+6:] - b := src[repIndex+4:] - endI := len(a) & (math.MaxInt32 - 7) - length = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. 
- //l := e.matchlen(s+4, t+4, src) + 4 - var l int32 - { - a := src[s+4:] - b := src[t+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - //l := 4 + e.matchlen(s+4, o2+4, src) - var l int32 - { - a := src[s+4:] - b := src[o2+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShardDirty(nextHash) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *fastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d != nil { - panic("fastEncoder: Reset with dict") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]tableEntry, len(e.table)) - } - if true { - end := e.maxMatchOff + int32(len(d.content)) - 8 - for i := e.maxMatchOff; i < end; i += 3 { - const hashLog = tableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 5 - nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 6 - nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7 - e.dictTable[nextHash] = tableEntry{ - val: uint32(cv), - offset: i, - } - e.dictTable[nextHash1] = tableEntry{ - val: uint32(cv >> 8), - offset: i + 1, - } - e.dictTable[nextHash2] = tableEntry{ - val: uint32(cv >> 16), - offset: i + 2, - } - } - } - e.lastDictID = d.id - e.allDirty = true - } - - e.cur = e.maxMatchOff - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.tableShardDirty { - if e.tableShardDirty[i] { - dirtyShardCnt++ - } - } - } - - const shardCnt = tableShardCnt - const shardSize = tableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - copy(e.table[:], e.dictTable) - for i := range e.tableShardDirty { - e.tableShardDirty[i] = false - } - e.allDirty = false - return - } - for i := range e.tableShardDirty { - if !e.tableShardDirty[i] { - continue - } - - copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) - e.tableShardDirty[i] = false - } - e.allDirty = false -} - -func (e *fastEncoderDict) markAllShardsDirty() { - e.allDirty = true -} - -func (e *fastEncoderDict) markShardDirty(entryNum uint32) { - e.tableShardDirty[entryNum/tableShardSize] = true -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/encoder.go b/src/vendor/github.com/klauspost/compress/zstd/encoder.go deleted file mode 100644 index e6e315969..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/encoder.go +++ /dev/null @@ -1,599 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "crypto/rand" - "fmt" - "io" - rdebug "runtime/debug" - "sync" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -// Encoder provides encoding to Zstandard. -// An Encoder can be used for either compressing a stream via the -// io.WriteCloser interface supported by the Encoder or as multiple independent -// tasks via the EncodeAll function. -// Smaller encodes are encouraged to use the EncodeAll function. -// Use NewWriter to create a new instance. 
-type Encoder struct { - o encoderOptions - encoders chan encoder - state encoderState - init sync.Once -} - -type encoder interface { - Encode(blk *blockEnc, src []byte) - EncodeNoHist(blk *blockEnc, src []byte) - Block() *blockEnc - CRC() *xxhash.Digest - AppendCRC([]byte) []byte - WindowSize(size int64) int32 - UseBlock(*blockEnc) - Reset(d *dict, singleBlock bool) -} - -type encoderState struct { - w io.Writer - filling []byte - current []byte - previous []byte - encoder encoder - writing *blockEnc - err error - writeErr error - nWritten int64 - nInput int64 - frameContentSize int64 - headerWritten bool - eofWritten bool - fullFrameWritten bool - - // This waitgroup indicates an encode is running. - wg sync.WaitGroup - // This waitgroup indicates we have a block encoding/writing. - wWg sync.WaitGroup -} - -// NewWriter will create a new Zstandard encoder. -// If the encoder will be used for encoding blocks a nil writer can be used. -func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { - initPredefined() - var e Encoder - e.o.setDefault() - for _, o := range opts { - err := o(&e.o) - if err != nil { - return nil, err - } - } - if w != nil { - e.Reset(w) - } - return &e, nil -} - -func (e *Encoder) initialize() { - if e.o.concurrent == 0 { - e.o.setDefault() - } - e.encoders = make(chan encoder, e.o.concurrent) - for i := 0; i < e.o.concurrent; i++ { - enc := e.o.encoder() - e.encoders <- enc - } -} - -// Reset will re-initialize the writer and new writes will encode to the supplied writer -// as a new, independent stream. -func (e *Encoder) Reset(w io.Writer) { - s := &e.state - s.wg.Wait() - s.wWg.Wait() - if cap(s.filling) == 0 { - s.filling = make([]byte, 0, e.o.blockSize) - } - if cap(s.current) == 0 { - s.current = make([]byte, 0, e.o.blockSize) - } - if cap(s.previous) == 0 { - s.previous = make([]byte, 0, e.o.blockSize) - } - if s.encoder == nil { - s.encoder = e.o.encoder() - } - if s.writing == nil { - s.writing = &blockEnc{lowMem: e.o.lowMem} - s.writing.init() - } - s.writing.initNewEncode() - s.filling = s.filling[:0] - s.current = s.current[:0] - s.previous = s.previous[:0] - s.encoder.Reset(e.o.dict, false) - s.headerWritten = false - s.eofWritten = false - s.fullFrameWritten = false - s.w = w - s.err = nil - s.nWritten = 0 - s.nInput = 0 - s.writeErr = nil - s.frameContentSize = 0 -} - -// ResetContentSize will reset and set a content size for the next stream. -// If the bytes written does not match the size given an error will be returned -// when calling Close(). -// This is removed when Reset is called. -// Sizes <= 0 results in no content size set. -func (e *Encoder) ResetContentSize(w io.Writer, size int64) { - e.Reset(w) - if size >= 0 { - e.state.frameContentSize = size - } -} - -// Write data to the encoder. -// Input data will be buffered and as the buffer fills up -// content will be compressed and written to the output. -// When done writing, use Close to flush the remaining output -// and write CRC if requested. -func (e *Encoder) Write(p []byte) (n int, err error) { - s := &e.state - for len(p) > 0 { - if len(p)+len(s.filling) < e.o.blockSize { - if e.o.crc { - _, _ = s.encoder.CRC().Write(p) - } - s.filling = append(s.filling, p...) - return n + len(p), nil - } - add := p - if len(p)+len(s.filling) > e.o.blockSize { - add = add[:e.o.blockSize-len(s.filling)] - } - if e.o.crc { - _, _ = s.encoder.CRC().Write(add) - } - s.filling = append(s.filling, add...) 
- p = p[len(add):] - n += len(add) - if len(s.filling) < e.o.blockSize { - return n, nil - } - err := e.nextBlock(false) - if err != nil { - return n, err - } - if debugAsserts && len(s.filling) > 0 { - panic(len(s.filling)) - } - } - return n, nil -} - -// nextBlock will synchronize and start compressing input in e.state.filling. -// If an error has occurred during encoding it will be returned. -func (e *Encoder) nextBlock(final bool) error { - s := &e.state - // Wait for current block. - s.wg.Wait() - if s.err != nil { - return s.err - } - if len(s.filling) > e.o.blockSize { - return fmt.Errorf("block > maxStoreBlockSize") - } - if !s.headerWritten { - // If we have a single block encode, do a sync compression. - if final && len(s.filling) == 0 && !e.o.fullZero { - s.headerWritten = true - s.fullFrameWritten = true - s.eofWritten = true - return nil - } - if final && len(s.filling) > 0 { - s.current = e.EncodeAll(s.filling, s.current[:0]) - var n2 int - n2, s.err = s.w.Write(s.current) - if s.err != nil { - return s.err - } - s.nWritten += int64(n2) - s.nInput += int64(len(s.filling)) - s.current = s.current[:0] - s.filling = s.filling[:0] - s.headerWritten = true - s.fullFrameWritten = true - s.eofWritten = true - return nil - } - - var tmp [maxHeaderSize]byte - fh := frameHeader{ - ContentSize: uint64(s.frameContentSize), - WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), - SingleSegment: false, - Checksum: e.o.crc, - DictID: e.o.dict.ID(), - } - - dst, err := fh.appendTo(tmp[:0]) - if err != nil { - return err - } - s.headerWritten = true - s.wWg.Wait() - var n2 int - n2, s.err = s.w.Write(dst) - if s.err != nil { - return s.err - } - s.nWritten += int64(n2) - } - if s.eofWritten { - // Ensure we only write it once. - final = false - } - - if len(s.filling) == 0 { - // Final block, but no data. - if final { - enc := s.encoder - blk := enc.Block() - blk.reset(nil) - blk.last = true - blk.encodeRaw(nil) - s.wWg.Wait() - _, s.err = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - s.eofWritten = true - } - return s.err - } - - // Move blocks forward. - s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current - s.nInput += int64(len(s.current)) - s.wg.Add(1) - go func(src []byte) { - if debugEncoder { - println("Adding block,", len(src), "bytes, final:", final) - } - defer func() { - if r := recover(); r != nil { - s.err = fmt.Errorf("panic while encoding: %v", r) - rdebug.PrintStack() - } - s.wg.Done() - }() - enc := s.encoder - blk := enc.Block() - enc.Encode(blk, src) - blk.last = final - if final { - s.eofWritten = true - } - // Wait for pending writes. - s.wWg.Wait() - if s.writeErr != nil { - s.err = s.writeErr - return - } - // Transfer encoders from previous write block. - blk.swapEncoders(s.writing) - // Transfer recent offsets to next. - enc.UseBlock(s.writing) - s.writing = blk - s.wWg.Add(1) - go func() { - defer func() { - if r := recover(); r != nil { - s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) - rdebug.PrintStack() - } - s.wWg.Done() - }() - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. 
- if len(src) != len(blk.literals) || len(src) != e.o.blockSize { - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - blk.encodeRaw(src) - // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. - case nil: - default: - s.writeErr = err - return - } - _, s.writeErr = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - }() - }(s.current) - return nil -} - -// ReadFrom reads data from r until EOF or error. -// The return value n is the number of bytes read. -// Any error except io.EOF encountered during the read is also returned. -// -// The Copy function uses ReaderFrom if available. -func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { - if debugEncoder { - println("Using ReadFrom") - } - - // Flush any current writes. - if len(e.state.filling) > 0 { - if err := e.nextBlock(false); err != nil { - return 0, err - } - } - e.state.filling = e.state.filling[:e.o.blockSize] - src := e.state.filling - for { - n2, err := r.Read(src) - if e.o.crc { - _, _ = e.state.encoder.CRC().Write(src[:n2]) - } - // src is now the unfilled part... - src = src[n2:] - n += int64(n2) - switch err { - case io.EOF: - e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] - if debugEncoder { - println("ReadFrom: got EOF final block:", len(e.state.filling)) - } - return n, nil - case nil: - default: - if debugEncoder { - println("ReadFrom: got error:", err) - } - e.state.err = err - return n, err - } - if len(src) > 0 { - if debugEncoder { - println("ReadFrom: got space left in source:", len(src)) - } - continue - } - err = e.nextBlock(false) - if err != nil { - return n, err - } - e.state.filling = e.state.filling[:e.o.blockSize] - src = e.state.filling - } -} - -// Flush will send the currently written data to output -// and block until everything has been written. -// This should only be used on rare occasions where pushing the currently queued data is critical. -func (e *Encoder) Flush() error { - s := &e.state - if len(s.filling) > 0 { - err := e.nextBlock(false) - if err != nil { - return err - } - } - s.wg.Wait() - s.wWg.Wait() - if s.err != nil { - return s.err - } - return s.writeErr -} - -// Close will flush the final output and close the stream. -// The function will block until everything has been written. -// The Encoder can still be re-used after calling this. -func (e *Encoder) Close() error { - s := &e.state - if s.encoder == nil { - return nil - } - err := e.nextBlock(true) - if err != nil { - return err - } - if s.frameContentSize > 0 { - if s.nInput != s.frameContentSize { - return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) - } - } - if e.state.fullFrameWritten { - return s.err - } - s.wg.Wait() - s.wWg.Wait() - - if s.err != nil { - return s.err - } - if s.writeErr != nil { - return s.writeErr - } - - // Write CRC - if e.o.crc && s.err == nil { - // heap alloc. - var tmp [4]byte - _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) - s.nWritten += 4 - } - - // Add padding with content from crypto/rand.Reader - if s.err == nil && e.o.pad > 0 { - add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) - frame, err := skippableFrame(s.filling[:0], add, rand.Reader) - if err != nil { - return err - } - _, s.err = s.w.Write(frame) - } - return s.err -} - -// EncodeAll will encode all input in src and append it to dst. 
-// This function can be called concurrently, but each call will only run on a single goroutine. -// If empty input is given, nothing is returned, unless WithZeroFrames is specified. -// Encoded blocks can be concatenated and the result will be the combined input stream. -// Data compressed with EncodeAll can be decoded with the Decoder, -// using either a stream or DecodeAll. -func (e *Encoder) EncodeAll(src, dst []byte) []byte { - if len(src) == 0 { - if e.o.fullZero { - // Add frame header. - fh := frameHeader{ - ContentSize: 0, - WindowSize: MinWindowSize, - SingleSegment: true, - // Adding a checksum would be a waste of space. - Checksum: false, - DictID: 0, - } - dst, _ = fh.appendTo(dst) - - // Write raw block as last one only. - var blk blockHeader - blk.setSize(0) - blk.setType(blockTypeRaw) - blk.setLast(true) - dst = blk.appendTo(dst) - } - return dst - } - e.init.Do(e.initialize) - enc := <-e.encoders - defer func() { - // Release encoder reference to last block. - // If a non-single block is needed the encoder will reset again. - e.encoders <- enc - }() - // Use single segments when above minimum window and below 1MB. - single := len(src) < 1<<20 && len(src) > MinWindowSize - if e.o.single != nil { - single = *e.o.single - } - fh := frameHeader{ - ContentSize: uint64(len(src)), - WindowSize: uint32(enc.WindowSize(int64(len(src)))), - SingleSegment: single, - Checksum: e.o.crc, - DictID: e.o.dict.ID(), - } - - // If less than 1MB, allocate a buffer up front. - if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { - dst = make([]byte, 0, len(src)) - } - dst, err := fh.appendTo(dst) - if err != nil { - panic(err) - } - - // If we can do everything in one block, prefer that. - if len(src) <= maxCompressedBlockSize { - enc.Reset(e.o.dict, true) - // Slightly faster with no history and everything in one block. - if e.o.crc { - _, _ = enc.CRC().Write(src) - } - blk := enc.Block() - blk.last = true - if e.o.dict == nil { - enc.EncodeNoHist(blk, src) - } else { - enc.Encode(blk, src) - } - - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - err := errIncompressible - oldout := blk.output - if len(blk.literals) != len(src) || len(src) != e.o.blockSize { - // Output directly to dst - blk.output = dst - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } - - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - dst = blk.encodeRawTo(dst, src) - case nil: - dst = blk.output - default: - panic(err) - } - blk.output = oldout - } else { - enc.Reset(e.o.dict, false) - blk := enc.Block() - for len(src) > 0 { - todo := src - if len(todo) > e.o.blockSize { - todo = todo[:e.o.blockSize] - } - src = src[len(todo):] - if e.o.crc { - _, _ = enc.CRC().Write(todo) - } - blk.pushOffsets() - enc.Encode(blk, todo) - if len(src) == 0 { - blk.last = true - } - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize { - err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) - } - - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - dst = blk.encodeRawTo(dst, todo) - blk.popOffsets() - case nil: - dst = append(dst, blk.output...) 
- default: - panic(err) - } - blk.reset(nil) - } - } - if e.o.crc { - dst = enc.AppendCRC(dst) - } - // Add padding with content from crypto/rand.Reader - if e.o.pad > 0 { - add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) - dst, err = skippableFrame(dst, add, rand.Reader) - if err != nil { - panic(err) - } - } - return dst -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/src/vendor/github.com/klauspost/compress/zstd/encoder_options.go deleted file mode 100644 index 7d29e1d68..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ /dev/null @@ -1,312 +0,0 @@ -package zstd - -import ( - "errors" - "fmt" - "runtime" - "strings" -) - -// EOption is an option for creating a encoder. -type EOption func(*encoderOptions) error - -// options retains accumulated state of multiple options. -type encoderOptions struct { - concurrent int - level EncoderLevel - single *bool - pad int - blockSize int - windowSize int - crc bool - fullZero bool - noEntropy bool - allLitEntropy bool - customWindow bool - customALEntropy bool - lowMem bool - dict *dict -} - -func (o *encoderOptions) setDefault() { - *o = encoderOptions{ - concurrent: runtime.GOMAXPROCS(0), - crc: true, - single: nil, - blockSize: 1 << 16, - windowSize: 8 << 20, - level: SpeedDefault, - allLitEntropy: true, - lowMem: false, - } -} - -// encoder returns an encoder with the selected options. -func (o encoderOptions) encoder() encoder { - switch o.level { - case SpeedFastest: - if o.dict != nil { - return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} - } - return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} - - case SpeedDefault: - if o.dict != nil { - return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}} - } - return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} - case SpeedBetterCompression: - if o.dict != nil { - return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} - } - return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} - case SpeedBestCompression: - return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} - } - panic("unknown compression level") -} - -// WithEncoderCRC will add CRC value to output. -// Output will be 4 bytes larger. -func WithEncoderCRC(b bool) EOption { - return func(o *encoderOptions) error { o.crc = b; return nil } -} - -// WithEncoderConcurrency will set the concurrency, -// meaning the maximum number of encoders to run concurrently. -// The value supplied must be at least 1. -// By default this will be set to GOMAXPROCS. -func WithEncoderConcurrency(n int) EOption { - return func(o *encoderOptions) error { - if n <= 0 { - return fmt.Errorf("concurrency must be at least 1") - } - o.concurrent = n - return nil - } -} - -// WithWindowSize will set the maximum allowed back-reference distance. -// The value must be a power of two between MinWindowSize and MaxWindowSize. -// A larger value will enable better compression but allocate more memory and, -// for above-default values, take considerably longer. -// The default value is determined by the compression level. 
-func WithWindowSize(n int) EOption { - return func(o *encoderOptions) error { - switch { - case n < MinWindowSize: - return fmt.Errorf("window size must be at least %d", MinWindowSize) - case n > MaxWindowSize: - return fmt.Errorf("window size must be at most %d", MaxWindowSize) - case (n & (n - 1)) != 0: - return errors.New("window size must be a power of 2") - } - - o.windowSize = n - o.customWindow = true - if o.blockSize > o.windowSize { - o.blockSize = o.windowSize - } - return nil - } -} - -// WithEncoderPadding will add padding to all output so the size will be a multiple of n. -// This can be used to obfuscate the exact output size or make blocks of a certain size. -// The contents will be a skippable frame, so it will be invisible by the decoder. -// n must be > 0 and <= 1GB, 1<<30 bytes. -// The padded area will be filled with data from crypto/rand.Reader. -// If `EncodeAll` is used with data already in the destination, the total size will be multiple of this. -func WithEncoderPadding(n int) EOption { - return func(o *encoderOptions) error { - if n <= 0 { - return fmt.Errorf("padding must be at least 1") - } - // No need to waste our time. - if n == 1 { - o.pad = 0 - } - if n > 1<<30 { - return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ") - } - o.pad = n - return nil - } -} - -// EncoderLevel predefines encoder compression levels. -// Only use the constants made available, since the actual mapping -// of these values are very likely to change and your compression could change -// unpredictably when upgrading the library. -type EncoderLevel int - -const ( - speedNotSet EncoderLevel = iota - - // SpeedFastest will choose the fastest reasonable compression. - // This is roughly equivalent to the fastest Zstandard mode. - SpeedFastest - - // SpeedDefault is the default "pretty fast" compression option. - // This is roughly equivalent to the default Zstandard mode (level 3). - SpeedDefault - - // SpeedBetterCompression will yield better compression than the default. - // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage. - // By using this, notice that CPU usage may go up in the future. - SpeedBetterCompression - - // SpeedBestCompression will choose the best available compression option. - // This will offer the best compression no matter the CPU cost. - SpeedBestCompression - - // speedLast should be kept as the last actual compression option. - // The is not for external usage, but is used to keep track of the valid options. - speedLast -) - -// EncoderLevelFromString will convert a string representation of an encoding level back -// to a compression level. The compare is not case sensitive. -// If the string wasn't recognized, (false, SpeedDefault) will be returned. -func EncoderLevelFromString(s string) (bool, EncoderLevel) { - for l := speedNotSet + 1; l < speedLast; l++ { - if strings.EqualFold(s, l.String()) { - return true, l - } - } - return false, SpeedDefault -} - -// EncoderLevelFromZstd will return an encoder level that closest matches the compression -// ratio of a specific zstd compression level. -// Many input values will provide the same compression level. -func EncoderLevelFromZstd(level int) EncoderLevel { - switch { - case level < 3: - return SpeedFastest - case level >= 3 && level < 6: - return SpeedDefault - case level >= 6 && level < 10: - return SpeedBetterCompression - case level >= 10: - return SpeedBestCompression - } - return SpeedDefault -} - -// String provides a string representation of the compression level. 
-func (e EncoderLevel) String() string { - switch e { - case SpeedFastest: - return "fastest" - case SpeedDefault: - return "default" - case SpeedBetterCompression: - return "better" - case SpeedBestCompression: - return "best" - default: - return "invalid" - } -} - -// WithEncoderLevel specifies a predefined compression level. -func WithEncoderLevel(l EncoderLevel) EOption { - return func(o *encoderOptions) error { - switch { - case l <= speedNotSet || l >= speedLast: - return fmt.Errorf("unknown encoder level") - } - o.level = l - if !o.customWindow { - switch o.level { - case SpeedFastest: - o.windowSize = 4 << 20 - case SpeedDefault: - o.windowSize = 8 << 20 - case SpeedBetterCompression: - o.windowSize = 16 << 20 - case SpeedBestCompression: - o.windowSize = 32 << 20 - } - } - if !o.customALEntropy { - o.allLitEntropy = l > SpeedFastest - } - - return nil - } -} - -// WithZeroFrames will encode 0 length input as full frames. -// This can be needed for compatibility with zstandard usage, -// but is not needed for this package. -func WithZeroFrames(b bool) EOption { - return func(o *encoderOptions) error { - o.fullZero = b - return nil - } -} - -// WithAllLitEntropyCompression will apply entropy compression if no matches are found. -// Disabling this will skip incompressible data faster, but in cases with no matches but -// skewed character distribution compression is lost. -// Default value depends on the compression level selected. -func WithAllLitEntropyCompression(b bool) EOption { - return func(o *encoderOptions) error { - o.customALEntropy = true - o.allLitEntropy = b - return nil - } -} - -// WithNoEntropyCompression will always skip entropy compression of literals. -// This can be useful if content has matches, but unlikely to benefit from entropy -// compression. Usually the slight speed improvement is not worth enabling this. -func WithNoEntropyCompression(b bool) EOption { - return func(o *encoderOptions) error { - o.noEntropy = b - return nil - } -} - -// WithSingleSegment will set the "single segment" flag when EncodeAll is used. -// If this flag is set, data must be regenerated within a single continuous memory segment. -// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. -// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. -// In order to preserve the decoder from unreasonable memory requirements, -// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. -// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. -// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. -// If this is not specified, block encodes will automatically choose this based on the input size. -// This setting has no effect on streamed encodes. -func WithSingleSegment(b bool) EOption { - return func(o *encoderOptions) error { - o.single = &b - return nil - } -} - -// WithLowerEncoderMem will trade in some memory cases trade less memory usage for -// slower encoding speed. -// This will not change the window size which is the primary function for reducing -// memory usage. See WithWindowSize. -func WithLowerEncoderMem(b bool) EOption { - return func(o *encoderOptions) error { - o.lowMem = b - return nil - } -} - -// WithEncoderDict allows to register a dictionary that will be used for the encode. 
-// The encoder *may* choose to use no dictionary instead for certain payloads. -func WithEncoderDict(dict []byte) EOption { - return func(o *encoderOptions) error { - d, err := loadDict(dict) - if err != nil { - return err - } - o.dict = d - return nil - } -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/framedec.go b/src/vendor/github.com/klauspost/compress/zstd/framedec.go deleted file mode 100644 index 989c79f8c..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/framedec.go +++ /dev/null @@ -1,521 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "bytes" - "encoding/hex" - "errors" - "hash" - "io" - "sync" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -type frameDec struct { - o decoderOptions - crc hash.Hash64 - offset int64 - - WindowSize uint64 - - // In order queue of blocks being decoded. - decoding chan *blockDec - - // Frame history passed between blocks - history history - - rawInput byteBuffer - - // Byte buffer that can be reused for small input blocks. - bBuf byteBuf - - FrameContentSize uint64 - frameDone sync.WaitGroup - - DictionaryID *uint32 - HasCheckSum bool - SingleSegment bool - - // asyncRunning indicates whether the async routine processes input on 'decoding'. - asyncRunningMu sync.Mutex - asyncRunning bool -} - -const ( - // MinWindowSize is the minimum Window Size, which is 1 KB. - MinWindowSize = 1 << 10 - - // MaxWindowSize is the maximum encoder window size - // and the default decoder maximum window size. - MaxWindowSize = 1 << 29 -) - -var ( - frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} - skippableFrameMagic = []byte{0x2a, 0x4d, 0x18} -) - -func newFrameDec(o decoderOptions) *frameDec { - if o.maxWindowSize > o.maxDecodedSize { - o.maxWindowSize = o.maxDecodedSize - } - d := frameDec{ - o: o, - } - return &d -} - -// reset will read the frame header and prepare for block decoding. -// If nothing can be read from the input, io.EOF will be returned. -// Any other error indicated that the stream contained data, but -// there was a problem. -func (d *frameDec) reset(br byteBuffer) error { - d.HasCheckSum = false - d.WindowSize = 0 - var signature [4]byte - for { - var err error - // Check if we can read more... - b, err := br.readSmall(1) - switch err { - case io.EOF, io.ErrUnexpectedEOF: - return io.EOF - default: - return err - case nil: - signature[0] = b[0] - } - // Read the rest, don't allow io.ErrUnexpectedEOF - b, err = br.readSmall(3) - switch err { - case io.EOF: - return io.EOF - default: - return err - case nil: - copy(signature[1:], b) - } - - if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 { - if debugDecoder { - println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic)) - } - // Break if not skippable frame. 
- break - } - // Read size to skip - b, err = br.readSmall(4) - if err != nil { - if debugDecoder { - println("Reading Frame Size", err) - } - return err - } - n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - println("Skipping frame with", n, "bytes.") - err = br.skipN(int(n)) - if err != nil { - if debugDecoder { - println("Reading discarded frame", err) - } - return err - } - } - if !bytes.Equal(signature[:], frameMagic) { - if debugDecoder { - println("Got magic numbers: ", signature, "want:", frameMagic) - } - return ErrMagicMismatch - } - - // Read Frame_Header_Descriptor - fhd, err := br.readByte() - if err != nil { - if debugDecoder { - println("Reading Frame_Header_Descriptor", err) - } - return err - } - d.SingleSegment = fhd&(1<<5) != 0 - - if fhd&(1<<3) != 0 { - return errors.New("reserved bit set on frame header") - } - - // Read Window_Descriptor - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor - d.WindowSize = 0 - if !d.SingleSegment { - wd, err := br.readByte() - if err != nil { - if debugDecoder { - println("Reading Window_Descriptor", err) - } - return err - } - printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) - windowLog := 10 + (wd >> 3) - windowBase := uint64(1) << windowLog - windowAdd := (windowBase / 8) * uint64(wd&0x7) - d.WindowSize = windowBase + windowAdd - } - - // Read Dictionary_ID - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - d.DictionaryID = nil - if size := fhd & 3; size != 0 { - if size == 3 { - size = 4 - } - - b, err := br.readSmall(int(size)) - if err != nil { - println("Reading Dictionary_ID", err) - return err - } - var id uint32 - switch size { - case 1: - id = uint32(b[0]) - case 2: - id = uint32(b[0]) | (uint32(b[1]) << 8) - case 4: - id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - } - if debugDecoder { - println("Dict size", size, "ID:", id) - } - if id > 0 { - // ID 0 means "sorry, no dictionary anyway". - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format - d.DictionaryID = &id - } - } - - // Read Frame_Content_Size - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size - var fcsSize int - v := fhd >> 6 - switch v { - case 0: - if d.SingleSegment { - fcsSize = 1 - } - default: - fcsSize = 1 << v - } - d.FrameContentSize = 0 - if fcsSize > 0 { - b, err := br.readSmall(fcsSize) - if err != nil { - println("Reading Frame content", err) - return err - } - switch fcsSize { - case 1: - d.FrameContentSize = uint64(b[0]) - case 2: - // When FCS_Field_Size is 2, the offset of 256 is added. - d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 - case 4: - d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) - case 8: - d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) - d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) - } - if debugDecoder { - println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize) - } - } - // Move this to shared. 
- d.HasCheckSum = fhd&(1<<2) != 0 - if d.HasCheckSum { - if d.crc == nil { - d.crc = xxhash.New() - } - d.crc.Reset() - } - - if d.WindowSize == 0 && d.SingleSegment { - // We may not need window in this case. - d.WindowSize = d.FrameContentSize - if d.WindowSize < MinWindowSize { - d.WindowSize = MinWindowSize - } - } - - if d.WindowSize > uint64(d.o.maxWindowSize) { - if debugDecoder { - printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) - } - return ErrWindowSizeExceeded - } - // The minimum Window_Size is 1 KB. - if d.WindowSize < MinWindowSize { - if debugDecoder { - println("got window size: ", d.WindowSize) - } - return ErrWindowSizeTooSmall - } - d.history.windowSize = int(d.WindowSize) - if d.o.lowMem && d.history.windowSize < maxBlockSize { - d.history.maxSize = d.history.windowSize * 2 - } else { - d.history.maxSize = d.history.windowSize + maxBlockSize - } - // history contains input - maybe we do something - d.rawInput = br - return nil -} - -// next will start decoding the next block from stream. -func (d *frameDec) next(block *blockDec) error { - if debugDecoder { - printf("decoding new block %p:%p", block, block.data) - } - err := block.reset(d.rawInput, d.WindowSize) - if err != nil { - println("block error:", err) - // Signal the frame decoder we have a problem. - d.sendErr(block, err) - return err - } - block.input <- struct{}{} - if debugDecoder { - println("next block:", block) - } - d.asyncRunningMu.Lock() - defer d.asyncRunningMu.Unlock() - if !d.asyncRunning { - return nil - } - if block.Last { - // We indicate the frame is done by sending io.EOF - d.decoding <- block - return io.EOF - } - d.decoding <- block - return nil -} - -// sendEOF will queue an error block on the frame. -// This will cause the frame decoder to return when it encounters the block. -// Returns true if the decoder was added. -func (d *frameDec) sendErr(block *blockDec, err error) bool { - d.asyncRunningMu.Lock() - defer d.asyncRunningMu.Unlock() - if !d.asyncRunning { - return false - } - - println("sending error", err.Error()) - block.sendErr(err) - d.decoding <- block - return true -} - -// checkCRC will check the checksum if the frame has one. -// Will return ErrCRCMismatch if crc check failed, otherwise nil. -func (d *frameDec) checkCRC() error { - if !d.HasCheckSum { - return nil - } - var tmp [4]byte - got := d.crc.Sum64() - // Flip to match file order. - tmp[0] = byte(got >> 0) - tmp[1] = byte(got >> 8) - tmp[2] = byte(got >> 16) - tmp[3] = byte(got >> 24) - - // We can overwrite upper tmp now - want, err := d.rawInput.readSmall(4) - if err != nil { - println("CRC missing?", err) - return err - } - - if !bytes.Equal(tmp[:], want) { - if debugDecoder { - println("CRC Check Failed:", tmp[:], "!=", want) - } - return ErrCRCMismatch - } - if debugDecoder { - println("CRC ok", tmp[:]) - } - return nil -} - -func (d *frameDec) initAsync() { - if !d.o.lowMem && !d.SingleSegment { - // set max extra size history to 2MB. - d.history.maxSize = d.history.windowSize + maxBlockSize - } - // re-alloc if more than one extra block size. - if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize { - d.history.b = make([]byte, 0, d.history.maxSize) - } - if cap(d.history.b) < d.history.maxSize { - d.history.b = make([]byte, 0, d.history.maxSize) - } - if cap(d.decoding) < d.o.concurrent { - d.decoding = make(chan *blockDec, d.o.concurrent) - } - if debugDecoder { - h := d.history - printf("history init. 
len: %d, cap: %d", len(h.b), cap(h.b)) - } - d.asyncRunningMu.Lock() - d.asyncRunning = true - d.asyncRunningMu.Unlock() -} - -// startDecoder will start decoding blocks and write them to the writer. -// The decoder will stop as soon as an error occurs or at end of frame. -// When the frame has finished decoding the *bufio.Reader -// containing the remaining input will be sent on frameDec.frameDone. -func (d *frameDec) startDecoder(output chan decodeOutput) { - written := int64(0) - - defer func() { - d.asyncRunningMu.Lock() - d.asyncRunning = false - d.asyncRunningMu.Unlock() - - // Drain the currently decoding. - d.history.error = true - flushdone: - for { - select { - case b := <-d.decoding: - b.history <- &d.history - output <- <-b.result - default: - break flushdone - } - } - println("frame decoder done, signalling done") - d.frameDone.Done() - }() - // Get decoder for first block. - block := <-d.decoding - block.history <- &d.history - for { - var next *blockDec - // Get result - r := <-block.result - if r.err != nil { - println("Result contained error", r.err) - output <- r - return - } - if debugDecoder { - println("got result, from ", d.offset, "to", d.offset+int64(len(r.b))) - d.offset += int64(len(r.b)) - } - if !block.Last { - // Send history to next block - select { - case next = <-d.decoding: - if debugDecoder { - println("Sending ", len(d.history.b), "bytes as history") - } - next.history <- &d.history - default: - // Wait until we have sent the block, so - // other decoders can potentially get the decoder. - next = nil - } - } - - // Add checksum, async to decoding. - if d.HasCheckSum { - n, err := d.crc.Write(r.b) - if err != nil { - r.err = err - if n != len(r.b) { - r.err = io.ErrShortWrite - } - output <- r - return - } - } - written += int64(len(r.b)) - if d.SingleSegment && uint64(written) > d.FrameContentSize { - println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize) - r.err = ErrFrameSizeExceeded - output <- r - return - } - if block.Last { - r.err = d.checkCRC() - output <- r - return - } - output <- r - if next == nil { - // There was no decoder available, we wait for one now that we have sent to the writer. - if debugDecoder { - println("Sending ", len(d.history.b), " bytes as history") - } - next = <-d.decoding - next.history <- &d.history - } - block = next - } -} - -// runDecoder will create a sync decoder that will decode a block of data. -func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { - saved := d.history.b - - // We use the history for output to avoid copying it. - d.history.b = dst - // Store input length, so we only check new data. 
- crcStart := len(dst) - var err error - for { - err = dec.reset(d.rawInput, d.WindowSize) - if err != nil { - break - } - if debugDecoder { - println("next block:", dec) - } - err = dec.decodeBuf(&d.history) - if err != nil || dec.Last { - break - } - if uint64(len(d.history.b)) > d.o.maxDecodedSize { - err = ErrDecoderSizeExceeded - break - } - if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize { - println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize) - err = ErrFrameSizeExceeded - break - } - } - dst = d.history.b - if err == nil { - if d.HasCheckSum { - var n int - n, err = d.crc.Write(dst[crcStart:]) - if err == nil { - if n != len(dst)-crcStart { - err = io.ErrShortWrite - } else { - err = d.checkCRC() - } - } - } - } - d.history.b = saved - return dst, err -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/frameenc.go b/src/vendor/github.com/klauspost/compress/zstd/frameenc.go deleted file mode 100644 index 4ef7f5a3e..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/frameenc.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "fmt" - "io" - "math" - "math/bits" -) - -type frameHeader struct { - ContentSize uint64 - WindowSize uint32 - SingleSegment bool - Checksum bool - DictID uint32 -} - -const maxHeaderSize = 14 - -func (f frameHeader) appendTo(dst []byte) ([]byte, error) { - dst = append(dst, frameMagic...) - var fhd uint8 - if f.Checksum { - fhd |= 1 << 2 - } - if f.SingleSegment { - fhd |= 1 << 5 - } - - var dictIDContent []byte - if f.DictID > 0 { - var tmp [4]byte - if f.DictID < 256 { - fhd |= 1 - tmp[0] = uint8(f.DictID) - dictIDContent = tmp[:1] - } else if f.DictID < 1<<16 { - fhd |= 2 - binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID)) - dictIDContent = tmp[:2] - } else { - fhd |= 3 - binary.LittleEndian.PutUint32(tmp[:4], f.DictID) - dictIDContent = tmp[:4] - } - } - var fcs uint8 - if f.ContentSize >= 256 { - fcs++ - } - if f.ContentSize >= 65536+256 { - fcs++ - } - if f.ContentSize >= 0xffffffff { - fcs++ - } - - fhd |= fcs << 6 - - dst = append(dst, fhd) - if !f.SingleSegment { - const winLogMin = 10 - windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3 - dst = append(dst, uint8(windowLog)) - } - if f.DictID > 0 { - dst = append(dst, dictIDContent...) - } - switch fcs { - case 0: - if f.SingleSegment { - dst = append(dst, uint8(f.ContentSize)) - } - // Unless SingleSegment is set, framessizes < 256 are nto stored. - case 1: - f.ContentSize -= 256 - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) - case 2: - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24)) - case 3: - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24), - uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56)) - default: - panic("invalid fcs") - } - return dst, nil -} - -const skippableFrameHeader = 4 + 4 - -// calcSkippableFrame will return a total size to be added for written -// to be divisible by multiple. -// The value will always be > skippableFrameHeader. -// The function will panic if written < 0 or wantMultiple <= 0. 
-func calcSkippableFrame(written, wantMultiple int64) int { - if wantMultiple <= 0 { - panic("wantMultiple <= 0") - } - if written < 0 { - panic("written < 0") - } - leftOver := written % wantMultiple - if leftOver == 0 { - return 0 - } - toAdd := wantMultiple - leftOver - for toAdd < skippableFrameHeader { - toAdd += wantMultiple - } - return int(toAdd) -} - -// skippableFrame will add a skippable frame with a total size of bytes. -// total should be >= skippableFrameHeader and < math.MaxUint32. -func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { - if total == 0 { - return dst, nil - } - if total < skippableFrameHeader { - return dst, fmt.Errorf("requested skippable frame (%d) < 8", total) - } - if int64(total) > math.MaxUint32 { - return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total) - } - dst = append(dst, 0x50, 0x2a, 0x4d, 0x18) - f := uint32(total - skippableFrameHeader) - dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)) - start := len(dst) - dst = append(dst, make([]byte, f)...) - _, err := io.ReadFull(r, dst[start:]) - return dst, err -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/src/vendor/github.com/klauspost/compress/zstd/fse_decoder.go deleted file mode 100644 index e6d3d49b3..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/fse_decoder.go +++ /dev/null @@ -1,385 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" -) - -const ( - tablelogAbsoluteMax = 9 -) - -const ( - /*!MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) - * Increasing memory usage improves compression ratio - * Reduced memory usage can improve speed, due to cache effect - * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ - maxMemoryUsage = tablelogAbsoluteMax + 2 - - maxTableLog = maxMemoryUsage - 2 - maxTablesize = 1 << maxTableLog - maxTableMask = (1 << maxTableLog) - 1 - minTablelog = 5 - maxSymbolValue = 255 -) - -// fseDecoder provides temporary storage for compression and decompression. -type fseDecoder struct { - dt [maxTablesize]decSymbol // Decompression table. - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - maxBits uint8 // Maximum number of additional bits - - // used for table creation to avoid allocations. - stateTable [256]uint16 - norm [maxSymbolValue + 1]int16 - preDefined bool -} - -// tableStep returns the next table index. -func tableStep(tableSize uint32) uint32 { - return (tableSize >> 1) + (tableSize >> 3) + 3 -} - -// readNCount will read the symbol distribution so decoding tables can be constructed. 
-func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { - var ( - charnum uint16 - previous0 bool - ) - if b.remain() < 4 { - return errors.New("input too small") - } - bitStream := b.Uint32NC() - nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog - if nbBits > tablelogAbsoluteMax { - println("Invalid tablelog:", nbBits) - return errors.New("tableLog too large") - } - bitStream >>= 4 - bitCount := uint(4) - - s.actualTableLog = uint8(nbBits) - remaining := int32((1 << nbBits) + 1) - threshold := int32(1 << nbBits) - gotTotal := int32(0) - nbBits++ - - for remaining > 1 && charnum <= maxSymbol { - if previous0 { - //println("prev0") - n0 := charnum - for (bitStream & 0xFFFF) == 0xFFFF { - //println("24 x 0") - n0 += 24 - if r := b.remain(); r > 5 { - b.advance(2) - // The check above should make sure we can read 32 bits - bitStream = b.Uint32NC() >> bitCount - } else { - // end of bit stream - bitStream >>= 16 - bitCount += 16 - } - } - //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) - for (bitStream & 3) == 3 { - n0 += 3 - bitStream >>= 2 - bitCount += 2 - } - n0 += uint16(bitStream & 3) - bitCount += 2 - - if n0 > maxSymbolValue { - return errors.New("maxSymbolValue too small") - } - //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) - for charnum < n0 { - s.norm[uint8(charnum)] = 0 - charnum++ - } - - if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { - b.advance(bitCount >> 3) - bitCount &= 7 - // The check above should make sure we can read 32 bits - bitStream = b.Uint32NC() >> bitCount - } else { - bitStream >>= 2 - } - } - - max := (2*threshold - 1) - remaining - var count int32 - - if int32(bitStream)&(threshold-1) < max { - count = int32(bitStream) & (threshold - 1) - if debugAsserts && nbBits < 1 { - panic("nbBits underflow") - } - bitCount += nbBits - 1 - } else { - count = int32(bitStream) & (2*threshold - 1) - if count >= threshold { - count -= max - } - bitCount += nbBits - } - - // extra accuracy - count-- - if count < 0 { - // -1 means +1 - remaining += count - gotTotal -= count - } else { - remaining -= count - gotTotal += count - } - s.norm[charnum&0xff] = int16(count) - charnum++ - previous0 = count == 0 - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - - if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { - b.advance(bitCount >> 3) - bitCount &= 7 - // The check above should make sure we can read 32 bits - bitStream = b.Uint32NC() >> (bitCount & 31) - } else { - bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) - b.off = len(b.b) - 4 - bitStream = b.Uint32() >> (bitCount & 31) - } - } - s.symbolLen = charnum - if s.symbolLen <= 1 { - return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) - } - if s.symbolLen > maxSymbolValue+1 { - return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) - } - if remaining != 1 { - return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) - } - if bitCount > 32 { - return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) - } - if gotTotal != 1<> 3) - // println(s.norm[:s.symbolLen], s.symbolLen) - return s.buildDtable() -} - -// decSymbol contains information about a state entry, -// Including the state offset base, the output symbol and -// the number of bits to read for the low part of the destination state. -// Using a composite uint64 is faster than a struct with separate members. 
-type decSymbol uint64 - -func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol { - return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) -} - -func (d decSymbol) nbBits() uint8 { - return uint8(d) -} - -func (d decSymbol) addBits() uint8 { - return uint8(d >> 8) -} - -func (d decSymbol) newState() uint16 { - return uint16(d >> 16) -} - -func (d decSymbol) baseline() uint32 { - return uint32(d >> 32) -} - -func (d decSymbol) baselineInt() int { - return int(d >> 32) -} - -func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) { - *d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) -} - -func (d *decSymbol) setNBits(nBits uint8) { - const mask = 0xffffffffffffff00 - *d = (*d & mask) | decSymbol(nBits) -} - -func (d *decSymbol) setAddBits(addBits uint8) { - const mask = 0xffffffffffff00ff - *d = (*d & mask) | (decSymbol(addBits) << 8) -} - -func (d *decSymbol) setNewState(state uint16) { - const mask = 0xffffffff0000ffff - *d = (*d & mask) | decSymbol(state)<<16 -} - -func (d *decSymbol) setBaseline(baseline uint32) { - const mask = 0xffffffff - *d = (*d & mask) | decSymbol(baseline)<<32 -} - -func (d *decSymbol) setExt(addBits uint8, baseline uint32) { - const mask = 0xffff00ff - *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) -} - -// decSymbolValue returns the transformed decSymbol for the given symbol. -func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) { - if int(symb) >= len(t) { - return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t)) - } - lu := t[symb] - return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil -} - -// setRLE will set the decoder til RLE mode. -func (s *fseDecoder) setRLE(symbol decSymbol) { - s.actualTableLog = 0 - s.maxBits = symbol.addBits() - s.dt[0] = symbol -} - -// buildDtable will build the decoding table. -func (s *fseDecoder) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - symbolNext := s.stateTable[:256] - - // Init, lay down lowprob symbols - { - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.dt[highThreshold].setAddBits(uint8(i)) - highThreshold-- - symbolNext[i] = 1 - } else { - symbolNext[i] = uint16(v) - } - } - } - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.dt[position].setAddBits(uint8(ss)) - position = (position + step) & tableMask - for position > highThreshold { - // lowprob area - position = (position + step) & tableMask - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.dt[:tableSize] { - symbol := v.addBits() - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.dt[u&maxTableMask].setNBits(nBits) - newState := (nextState << nBits) - tableSize - if newState > tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. 
- return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.dt[u&maxTableMask].setNewState(newState) - } - } - return nil -} - -// transform will transform the decoder table into a table usable for -// decoding without having to apply the transformation while decoding. -// The state will contain the base value and the number of bits to read. -func (s *fseDecoder) transform(t []baseOffset) error { - tableSize := uint16(1 << s.actualTableLog) - s.maxBits = 0 - for i, v := range s.dt[:tableSize] { - add := v.addBits() - if int(add) >= len(t) { - return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) - } - lu := t[add] - if lu.addBits > s.maxBits { - s.maxBits = lu.addBits - } - v.setExt(lu.addBits, lu.baseLine) - s.dt[i] = v - } - return nil -} - -type fseState struct { - dt []decSymbol - state decSymbol -} - -// Initialize and decodeAsync first state and symbol. -func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { - s.dt = dt - br.fill() - s.state = dt[br.getBits(tableLog)] -} - -// next returns the current symbol and sets the next state. -// At least tablelog bits must be available in the bit reader. -func (s *fseState) next(br *bitReader) { - lowBits := uint16(br.getBits(s.state.nbBits())) - s.state = s.dt[s.state.newState()+lowBits] -} - -// finished returns true if all bits have been read from the bitstream -// and the next state would require reading bits from the input. -func (s *fseState) finished(br *bitReader) bool { - return br.finished() && s.state.nbBits() > 0 -} - -// final returns the current state symbol without decoding the next. -func (s *fseState) final() (int, uint8) { - return s.state.baselineInt(), s.state.addBits() -} - -// final returns the current state symbol without decoding the next. -func (s decSymbol) final() (int, uint8) { - return s.baselineInt(), s.addBits() -} - -// nextFast returns the next symbol and sets the next state. -// This can only be used if no symbols are 0 bits. -// At least tablelog bits must be available in the bit reader. -func (s *fseState) nextFast(br *bitReader) (uint32, uint8) { - lowBits := uint16(br.getBitsFast(s.state.nbBits())) - s.state = s.dt[s.state.newState()+lowBits] - return s.state.baseline(), s.state.addBits() -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/src/vendor/github.com/klauspost/compress/zstd/fse_encoder.go deleted file mode 100644 index b4757ee3f..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ /dev/null @@ -1,725 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "math" -) - -const ( - // For encoding we only support up to - maxEncTableLog = 8 - maxEncTablesize = 1 << maxTableLog - maxEncTableMask = (1 << maxTableLog) - 1 - minEncTablelog = 5 - maxEncSymbolValue = maxMatchLengthSymbol -) - -// Scratch provides temporary storage for compression and decompression. -type fseEncoder struct { - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - ct cTable // Compression tables. - maxCount int // count of the most probable symbol - zeroBits bool // no bits has prob > 50%. - clearCount bool // clear count - useRLE bool // This encoder is for RLE - preDefined bool // This encoder is predefined. 
- reUsed bool // Set to know when the encoder has been reused. - rleVal uint8 // RLE Symbol - maxBits uint8 // Maximum output bits after transform. - - // TODO: Technically zstd should be fine with 64 bytes. - count [256]uint32 - norm [256]int16 -} - -// cTable contains tables used for compression. -type cTable struct { - tableSymbol []byte - stateTable []uint16 - symbolTT []symbolTransform -} - -// symbolTransform contains the state transform for a symbol. -type symbolTransform struct { - deltaNbBits uint32 - deltaFindState int16 - outBits uint8 -} - -// String prints values as a human readable string. -func (s symbolTransform) String() string { - return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) -} - -// Histogram allows to populate the histogram and skip that step in the compression, -// It otherwise allows to inspect the histogram when compression is done. -// To indicate that you have populated the histogram call HistogramFinished -// with the value of the highest populated symbol, as well as the number of entries -// in the most populated entry. These are accepted at face value. -// The returned slice will always be length 256. -func (s *fseEncoder) Histogram() []uint32 { - return s.count[:] -} - -// HistogramFinished can be called to indicate that the histogram has been populated. -// maxSymbol is the index of the highest set symbol of the next data segment. -// maxCount is the number of entries in the most populated entry. -// These are accepted at face value. -func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { - s.maxCount = maxCount - s.symbolLen = uint16(maxSymbol) + 1 - s.clearCount = maxCount != 0 -} - -// prepare will prepare and allocate scratch tables used for both compression and decompression. -func (s *fseEncoder) prepare() (*fseEncoder, error) { - if s == nil { - s = &fseEncoder{} - } - s.useRLE = false - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - return s, nil -} - -// allocCtable will allocate tables needed for compression. -// If existing tables a re big enough, they are simply re-used. -func (s *fseEncoder) allocCtable() { - tableSize := 1 << s.actualTableLog - // get tableSymbol that is big enough. - if cap(s.ct.tableSymbol) < tableSize { - s.ct.tableSymbol = make([]byte, tableSize) - } - s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] - - ctSize := tableSize - if cap(s.ct.stateTable) < ctSize { - s.ct.stateTable = make([]uint16, ctSize) - } - s.ct.stateTable = s.ct.stateTable[:ctSize] - - if cap(s.ct.symbolTT) < 256 { - s.ct.symbolTT = make([]symbolTransform, 256) - } - s.ct.symbolTT = s.ct.symbolTT[:256] -} - -// buildCTable will populate the compression table so it is ready to be used. 
-func (s *fseEncoder) buildCTable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - var cumul [256]int16 - - s.allocCtable() - tableSymbol := s.ct.tableSymbol[:tableSize] - // symbol start positions - { - cumul[0] = 0 - for ui, v := range s.norm[:s.symbolLen-1] { - u := byte(ui) // one less than reference - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = u - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - } - // Encode last symbol separately to avoid overflowing u - u := int(s.symbolLen - 1) - v := s.norm[s.symbolLen-1] - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = byte(u) - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - if uint32(cumul[s.symbolLen]) != tableSize { - return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) - } - cumul[s.symbolLen] = int16(tableSize) + 1 - } - // Spread symbols - s.zeroBits = false - { - step := tableStep(tableSize) - tableMask := tableSize - 1 - var position uint32 - // if any symbol > largeLimit, we may have 0 bits output. - largeLimit := int16(1 << (s.actualTableLog - 1)) - for ui, v := range s.norm[:s.symbolLen] { - symbol := byte(ui) - if v > largeLimit { - s.zeroBits = true - } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { - tableSymbol[position] = symbol - position = (position + step) & tableMask - for position > highThreshold { - position = (position + step) & tableMask - } /* Low proba area */ - } - } - - // Check if we have gone through all positions - if position != 0 { - return errors.New("position!=0") - } - } - - // Build table - table := s.ct.stateTable - { - tsi := int(tableSize) - for u, v := range tableSymbol { - // TableU16 : sorted by symbol order; gives next state value - table[cumul[v]] = uint16(tsi + u) - cumul[v]++ - } - } - - // Build Symbol Transformation Table - { - total := int16(0) - symbolTT := s.ct.symbolTT[:s.symbolLen] - tableLog := s.actualTableLog - tl := (uint32(tableLog) << 16) - (1 << tableLog) - for i, v := range s.norm[:s.symbolLen] { - switch v { - case 0: - case -1, 1: - symbolTT[i].deltaNbBits = tl - symbolTT[i].deltaFindState = total - 1 - total++ - default: - maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) - minStatePlus := uint32(v) << maxBitsOut - symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus - symbolTT[i].deltaFindState = total - v - total += v - } - } - if total != int16(tableSize) { - return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) - } - } - return nil -} - -var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} - -func (s *fseEncoder) setRLE(val byte) { - s.allocCtable() - s.actualTableLog = 0 - s.ct.stateTable = s.ct.stateTable[:1] - s.ct.symbolTT[val] = symbolTransform{ - deltaFindState: 0, - deltaNbBits: 0, - } - if debugEncoder { - println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) - } - s.rleVal = val - s.useRLE = true -} - -// setBits will set output bits for the transform. -// if nil is provided, the number of bits is equal to the index. 
-func (s *fseEncoder) setBits(transform []byte) { - if s.reUsed || s.preDefined { - return - } - if s.useRLE { - if transform == nil { - s.ct.symbolTT[s.rleVal].outBits = s.rleVal - s.maxBits = s.rleVal - return - } - s.maxBits = transform[s.rleVal] - s.ct.symbolTT[s.rleVal].outBits = s.maxBits - return - } - if transform == nil { - for i := range s.ct.symbolTT[:s.symbolLen] { - s.ct.symbolTT[i].outBits = uint8(i) - } - s.maxBits = uint8(s.symbolLen - 1) - return - } - s.maxBits = 0 - for i, v := range transform[:s.symbolLen] { - s.ct.symbolTT[i].outBits = v - if v > s.maxBits { - // We could assume bits always going up, but we play safe. - s.maxBits = v - } - } -} - -// normalizeCount will normalize the count of the symbols so -// the total is equal to the table size. -// If successful, compression tables will also be made ready. -func (s *fseEncoder) normalizeCount(length int) error { - if s.reUsed { - return nil - } - s.optimalTableLog(length) - var ( - tableLog = s.actualTableLog - scale = 62 - uint64(tableLog) - step = (1 << 62) / uint64(length) - vStep = uint64(1) << (scale - 20) - stillToDistribute = int16(1 << tableLog) - largest int - largestP int16 - lowThreshold = (uint32)(length >> tableLog) - ) - if s.maxCount == length { - s.useRLE = true - return nil - } - s.useRLE = false - for i, cnt := range s.count[:s.symbolLen] { - // already handled - // if (count[s] == s.length) return 0; /* rle special case */ - - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - stillToDistribute-- - } else { - proba := (int16)((uint64(cnt) * step) >> scale) - if proba < 8 { - restToBeat := vStep * uint64(rtbTable[proba]) - v := uint64(cnt)*step - (uint64(proba) << scale) - if v > restToBeat { - proba++ - } - } - if proba > largestP { - largestP = proba - largest = i - } - s.norm[i] = proba - stillToDistribute -= proba - } - } - - if -stillToDistribute >= (s.norm[largest] >> 1) { - // corner case, need another normalization method - err := s.normalizeCount2(length) - if err != nil { - return err - } - if debugAsserts { - err = s.validateNorm() - if err != nil { - return err - } - } - return s.buildCTable() - } - s.norm[largest] += stillToDistribute - if debugAsserts { - err := s.validateNorm() - if err != nil { - return err - } - } - return s.buildCTable() -} - -// Secondary normalization method. -// To be used when primary method fails. 
-func (s *fseEncoder) normalizeCount2(length int) error { - const notYetAssigned = -2 - var ( - distributed uint32 - total = uint32(length) - tableLog = s.actualTableLog - lowThreshold = total >> tableLog - lowOne = (total * 3) >> (tableLog + 1) - ) - for i, cnt := range s.count[:s.symbolLen] { - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - distributed++ - total -= cnt - continue - } - if cnt <= lowOne { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - s.norm[i] = notYetAssigned - } - toDistribute := (1 << tableLog) - distributed - - if (total / toDistribute) > lowOne { - // risk of rounding to zero - lowOne = (total * 3) / (toDistribute * 2) - for i, cnt := range s.count[:s.symbolLen] { - if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - } - toDistribute = (1 << tableLog) - distributed - } - if distributed == uint32(s.symbolLen)+1 { - // all values are pretty poor; - // probably incompressible data (should have already been detected); - // find max, then give all remaining points to max - var maxV int - var maxC uint32 - for i, cnt := range s.count[:s.symbolLen] { - if cnt > maxC { - maxV = i - maxC = cnt - } - } - s.norm[maxV] += int16(toDistribute) - return nil - } - - if total == 0 { - // all of the symbols were low enough for the lowOne or lowThreshold - for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { - if s.norm[i] > 0 { - toDistribute-- - s.norm[i]++ - } - } - return nil - } - - var ( - vStepLog = 62 - uint64(tableLog) - mid = uint64((1 << (vStepLog - 1)) - 1) - rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining - tmpTotal = mid - ) - for i, cnt := range s.count[:s.symbolLen] { - if s.norm[i] == notYetAssigned { - var ( - end = tmpTotal + uint64(cnt)*rStep - sStart = uint32(tmpTotal >> vStepLog) - sEnd = uint32(end >> vStepLog) - weight = sEnd - sStart - ) - if weight < 1 { - return errors.New("weight < 1") - } - s.norm[i] = int16(weight) - tmpTotal = end - } - } - return nil -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *fseEncoder) optimalTableLog(length int) { - tableLog := uint8(maxEncTableLog) - minBitsSrc := highBit(uint32(length)) + 1 - minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 - minBits := uint8(minBitsSymbols) - if minBitsSrc < minBitsSymbols { - minBits = uint8(minBitsSrc) - } - - maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minEncTablelog { - tableLog = minEncTablelog - } - if tableLog > maxEncTableLog { - tableLog = maxEncTableLog - } - s.actualTableLog = tableLog -} - -// validateNorm validates the normalized histogram table. 
-func (s *fseEncoder) validateNorm() (err error) { - var total int - for _, v := range s.norm[:s.symbolLen] { - if v >= 0 { - total += int(v) - } else { - total -= int(v) - } - } - defer func() { - if err == nil { - return - } - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) - for i, v := range s.norm[:s.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) - } - }() - if total != (1 << s.actualTableLog) { - return fmt.Errorf("warning: Total == %d != %d", total, 1<> 3) + 3 + 2 - - // Write Table Size - bitStream = uint32(tableLog - minEncTablelog) - bitCount = uint(4) - remaining = int16(tableSize + 1) /* +1 for extra accuracy */ - threshold = int16(tableSize) - nbBits = uint(tableLog + 1) - outP = len(out) - ) - if cap(out) < outP+maxHeaderSize { - out = append(out, make([]byte, maxHeaderSize*3)...) - out = out[:len(out)-maxHeaderSize*3] - } - out = out[:outP+maxHeaderSize] - - // stops at 1 - for remaining > 1 { - if previous0 { - start := charnum - for s.norm[charnum] == 0 { - charnum++ - } - for charnum >= start+24 { - start += 24 - bitStream += uint32(0xFFFF) << bitCount - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - } - for charnum >= start+3 { - start += 3 - bitStream += 3 << bitCount - bitCount += 2 - } - bitStream += uint32(charnum-start) << bitCount - bitCount += 2 - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - count := s.norm[charnum] - charnum++ - max := (2*threshold - 1) - remaining - if count < 0 { - remaining += count - } else { - remaining -= count - } - count++ // +1 for extra accuracy - if count >= threshold { - count += max // [0..max[ [max..threshold[ (...) 
[threshold+max 2*threshold[ - } - bitStream += uint32(count) << bitCount - bitCount += nbBits - if count < max { - bitCount-- - } - - previous0 = count == 1 - if remaining < 1 { - return nil, errors.New("internal error: remaining < 1") - } - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - if outP+2 > len(out) { - return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) - } - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += int((bitCount + 7) / 8) - - if charnum > s.symbolLen { - return nil, errors.New("internal error: charnum > s.symbolLen") - } - return out[:outP], nil -} - -// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) -// note 1 : assume symbolValue is valid (<= maxSymbolValue) -// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * -func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { - minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 - threshold := (minNbBits + 1) << 16 - if debugAsserts { - if !(s.actualTableLog < 16) { - panic("!s.actualTableLog < 16") - } - // ensure enough room for renormalization double shift - if !(uint8(accuracyLog) < 31-s.actualTableLog) { - panic("!uint8(accuracyLog) < 31-s.actualTableLog") - } - } - tableSize := uint32(1) << s.actualTableLog - deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) - // linear interpolation (very approximate) - normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog - bitMultiplier := uint32(1) << accuracyLog - if debugAsserts { - if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { - panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") - } - if normalizedDeltaFromThreshold > bitMultiplier { - panic("normalizedDeltaFromThreshold > bitMultiplier") - } - } - return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold -} - -// Returns the cost in bits of encoding the distribution in count using ctable. -// Histogram should only be up to the last non-zero symbol. -// Returns an -1 if ctable cannot represent all the symbols in count. -func (s *fseEncoder) approxSize(hist []uint32) uint32 { - if int(s.symbolLen) < len(hist) { - // More symbols than we have. - return math.MaxUint32 - } - if s.useRLE { - // We will never reuse RLE encoders. - return math.MaxUint32 - } - const kAccuracyLog = 8 - badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog - var cost uint32 - for i, v := range hist { - if v == 0 { - continue - } - if s.norm[i] == 0 { - return math.MaxUint32 - } - bitCost := s.bitCost(uint8(i), kAccuracyLog) - if bitCost > badCost { - return math.MaxUint32 - } - cost += v * bitCost - } - return cost >> kAccuracyLog -} - -// maxHeaderSize returns the maximum header size in bits. -// This is not exact size, but we want a penalty for new tables anyway. -func (s *fseEncoder) maxHeaderSize() uint32 { - if s.preDefined { - return 0 - } - if s.useRLE { - return 8 - } - return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 -} - -// cState contains the compression state of a stream. 
-type cState struct { - bw *bitWriter - stateTable []uint16 - state uint16 -} - -// init will initialize the compression state to the first symbol of the stream. -func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { - c.bw = bw - c.stateTable = ct.stateTable - if len(c.stateTable) == 1 { - // RLE - c.stateTable[0] = uint16(0) - c.state = 0 - return - } - nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 - im := int32((nbBitsOut << 16) - first.deltaNbBits) - lu := (im >> nbBitsOut) + int32(first.deltaFindState) - c.state = c.stateTable[lu] -} - -// encode the output symbol provided and write it to the bitstream. -func (c *cState) encode(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState) - c.bw.addBits16NC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - -// flush will write the tablelog to the output and flush the remaining full bytes. -func (c *cState) flush(tableLog uint8) { - c.bw.flush32() - c.bw.addBits16NC(c.state, tableLog) -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/src/vendor/github.com/klauspost/compress/zstd/fse_predefined.go deleted file mode 100644 index 474cb77d2..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/fse_predefined.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" - "math" - "sync" -) - -var ( - // fsePredef are the predefined fse tables as defined here: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - // These values are already transformed. - fsePredef [3]fseDecoder - - // fsePredefEnc are the predefined encoder based on fse tables as defined here: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - // These values are already transformed. - fsePredefEnc [3]fseEncoder - - // symbolTableX contain the transformations needed for each type as defined in - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets - symbolTableX [3][]baseOffset - - // maxTableSymbol is the biggest supported symbol for each table type - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets - maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} - - // bitTables is the bits table for each table. - bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} -) - -type tableIndex uint8 - -const ( - // indexes for fsePredef and symbolTableX - tableLiteralLengths tableIndex = 0 - tableOffsets tableIndex = 1 - tableMatchLengths tableIndex = 2 - - maxLiteralLengthSymbol = 35 - maxOffsetLengthSymbol = 30 - maxMatchLengthSymbol = 52 -) - -// baseOffset is used for calculating transformations. -type baseOffset struct { - baseLine uint32 - addBits uint8 -} - -// fillBase will precalculate base offsets with the given bit distributions. 
-func fillBase(dst []baseOffset, base uint32, bits ...uint8) { - if len(bits) != len(dst) { - panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) - } - for i, bit := range bits { - if base > math.MaxInt32 { - panic("invalid decoding table, base overflows int32") - } - - dst[i] = baseOffset{ - baseLine: base, - addBits: bit, - } - base += 1 << bit - } -} - -var predef sync.Once - -func initPredefined() { - predef.Do(func() { - // Literals length codes - tmp := make([]baseOffset, 36) - for i := range tmp[:16] { - tmp[i] = baseOffset{ - baseLine: uint32(i), - addBits: 0, - } - } - fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) - symbolTableX[tableLiteralLengths] = tmp - - // Match length codes - tmp = make([]baseOffset, 53) - for i := range tmp[:32] { - tmp[i] = baseOffset{ - // The transformation adds the 3 length. - baseLine: uint32(i) + 3, - addBits: 0, - } - } - fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) - symbolTableX[tableMatchLengths] = tmp - - // Offset codes - tmp = make([]baseOffset, maxOffsetBits+1) - tmp[1] = baseOffset{ - baseLine: 1, - addBits: 1, - } - fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) - symbolTableX[tableOffsets] = tmp - - // Fill predefined tables and transform them. - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - for i := range fsePredef[:] { - f := &fsePredef[i] - switch tableIndex(i) { - case tableLiteralLengths: - // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 - f.actualTableLog = 6 - copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, - -1, -1, -1, -1}) - f.symbolLen = 36 - case tableOffsets: - // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 - f.actualTableLog = 5 - copy(f.norm[:], []int16{ - 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) - f.symbolLen = 29 - case tableMatchLengths: - //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 - f.actualTableLog = 6 - copy(f.norm[:], []int16{ - 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, - -1, -1, -1, -1, -1}) - f.symbolLen = 53 - } - if err := f.buildDtable(); err != nil { - panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) - } - if err := f.transform(symbolTableX[i]); err != nil { - panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) - } - f.preDefined = true - - // Create encoder as well - enc := &fsePredefEnc[i] - copy(enc.norm[:], f.norm[:]) - enc.symbolLen = f.symbolLen - enc.actualTableLog = f.actualTableLog - if err := enc.buildCTable(); err != nil { - panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) - } - enc.setBits(bitTables[i]) - enc.preDefined = true - } - }) -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/hash.go b/src/vendor/github.com/klauspost/compress/zstd/hash.go deleted file mode 100644 index cf33f29a1..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/hash.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2019+ Klaus Post. 
All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -const ( - prime3bytes = 506832829 - prime4bytes = 2654435761 - prime5bytes = 889523592379 - prime6bytes = 227718039650203 - prime7bytes = 58295818150454627 - prime8bytes = 0xcf1bbcdcb7a56463 -) - -// hashLen returns a hash of the lowest mls bytes of with length output bits. -// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. -// length should always be < 32. -// Preferably length and mls should be a constant for inlining. -func hashLen(u uint64, length, mls uint8) uint32 { - switch mls { - case 3: - return (uint32(u<<8) * prime3bytes) >> (32 - length) - case 5: - return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) - case 6: - return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) - case 7: - return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) - case 8: - return uint32((u * prime8bytes) >> (64 - length)) - default: - return (uint32(u) * prime4bytes) >> (32 - length) - } -} - -// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash3(u uint32, h uint8) uint32 { - return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31) -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/history.go b/src/vendor/github.com/klauspost/compress/zstd/history.go deleted file mode 100644 index f783e32d2..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/history.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "github.com/klauspost/compress/huff0" -) - -// history contains the information transferred between blocks. -type history struct { - b []byte - huffTree *huff0.Scratch - recentOffsets [3]int - decoders sequenceDecs - windowSize int - maxSize int - error bool - dict *dict -} - -// reset will reset the history to initial state of a frame. -// The history must already have been initialized to the desired size. -func (h *history) reset() { - h.b = h.b[:0] - h.error = false - h.recentOffsets = [3]int{1, 4, 8} - if f := h.decoders.litLengths.fse; f != nil && !f.preDefined { - fseDecoderPool.Put(f) - } - if f := h.decoders.offsets.fse; f != nil && !f.preDefined { - fseDecoderPool.Put(f) - } - if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined { - fseDecoderPool.Put(f) - } - h.decoders = sequenceDecs{} - if h.huffTree != nil { - if h.dict == nil || h.dict.litEnc != h.huffTree { - huffDecoderPool.Put(h.huffTree) - } - } - h.huffTree = nil - h.dict = nil - //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) -} - -func (h *history) setDict(dict *dict) { - if dict == nil { - return - } - h.dict = dict - h.decoders.litLengths = dict.llDec - h.decoders.offsets = dict.ofDec - h.decoders.matchLengths = dict.mlDec - h.recentOffsets = dict.offsets - h.huffTree = dict.litEnc -} - -// append bytes to history. -// This function will make sure there is space for it, -// if the buffer has been allocated with enough extra space. -func (h *history) append(b []byte) { - if len(b) >= h.windowSize { - // Discard all history by simply overwriting - h.b = h.b[:h.windowSize] - copy(h.b, b[len(b)-h.windowSize:]) - return - } - - // If there is space, append it. 
- if len(b) < cap(h.b)-len(h.b) { - h.b = append(h.b, b...) - return - } - - // Move data down so we only have window size left. - // We know we have less than window size in b at this point. - discard := len(b) + len(h.b) - h.windowSize - copy(h.b, h.b[discard:]) - h.b = h.b[:h.windowSize] - copy(h.b[h.windowSize-len(b):], b) -} - -// append bytes to history without ever discarding anything. -func (h *history) appendKeep(b []byte) { - h.b = append(h.b, b...) -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt deleted file mode 100644 index 24b53065f..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md deleted file mode 100644 index 69aa3bb58..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# xxhash - -VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. - - -[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) -[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) - -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a -high-quality hashing algorithm that is much faster than anything in the Go -standard library. - -This package provides a straightforward API: - -``` -func Sum64(b []byte) uint64 -func Sum64String(s string) uint64 -type Digest struct{ ... } - func New() *Digest -``` - -The `Digest` type implements hash.Hash64. Its key methods are: - -``` -func (*Digest) Write([]byte) (int, error) -func (*Digest) WriteString(string) (int, error) -func (*Digest) Sum64() uint64 -``` - -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. - -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64. 
- -| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | - -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: - -``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) -- [FreeCache](https://github.com/coocood/freecache) diff --git a/src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go deleted file mode 100644 index 2c112a0ab..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go +++ /dev/null @@ -1,237 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. -// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. - -package xxhash - -import ( - "encoding/binary" - "errors" - "math/bits" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) - -// Digest implements hash.Hash64. -type Digest struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total uint64 - mem [32]byte - n int // how much of mem is used -} - -// New creates a new Digest that computes the 64-bit xxHash algorithm. -func New() *Digest { - var d Digest - d.Reset() - return &d -} - -// Reset clears the Digest's state so that it can be reused. -func (d *Digest) Reset() { - d.v1 = prime1v + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -prime1v - d.total = 0 - d.n = 0 -} - -// Size always returns 8 bytes. -func (d *Digest) Size() int { return 8 } - -// BlockSize always returns 32 bytes. -func (d *Digest) BlockSize() int { return 32 } - -// Write adds more data to d. It always returns len(b), nil. -func (d *Digest) Write(b []byte) (n int, err error) { - n = len(b) - d.total += uint64(n) - - if d.n+n < 32 { - // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) - d.n += n - return - } - - if d.n > 0 { - // Finish off the partial block. - copy(d.mem[d.n:], b) - d.v1 = round(d.v1, u64(d.mem[0:8])) - d.v2 = round(d.v2, u64(d.mem[8:16])) - d.v3 = round(d.v3, u64(d.mem[16:24])) - d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] - d.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - nw := writeBlocks(d, b) - b = b[nw:] - } - - // Store any remaining partial block. - copy(d.mem[:], b) - d.n = len(b) - - return -} - -// Sum appends the current hash to b and returns the resulting slice. 
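The vendored xxhash README above lists the package's public surface: one-shot Sum64/Sum64String helpers and a streaming Digest that implements hash.Hash64. As a point of reference for reviewers, here is a minimal usage sketch; it assumes the upstream github.com/cespare/xxhash/v2 module (the copy removed here is an internal fork of it), and the input strings are only illustrative.

```
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a byte slice.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello world")))

	// Streaming: Digest implements hash.Hash64, so data can be fed
	// incrementally and the final value read with Sum64.
	d := xxhash.New()
	d.WriteString("hello ")
	d.WriteString("world")
	fmt.Printf("%016x\n", d.Sum64())
}
```

Both calls hash the same bytes, so they print the same 64-bit value.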
-func (d *Digest) Sum(b []byte) []byte { - s := d.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -// Sum64 returns the current hash. -func (d *Digest) Sum64() uint64 { - var h uint64 - - if d.total >= 32 { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = d.v3 + prime5 - } - - h += d.total - - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for i < end { - h ^= uint64(d.mem[i]) * prime5 - h = rol11(h) * prime1 - i++ - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -const ( - magic = "xxh\x06" - marshaledSize = len(magic) + 8*5 + 32 -) - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d *Digest) MarshalBinary() ([]byte, error) { - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) - b = appendUint64(b, d.v1) - b = appendUint64(b, d.v2) - b = appendUint64(b, d.v3) - b = appendUint64(b, d.v4) - b = appendUint64(b, d.total) - b = append(b, d.mem[:d.n]...) - b = b[:len(b)+len(d.mem)-d.n] - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -func (d *Digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("xxhash: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("xxhash: invalid hash state size") - } - b = b[len(magic):] - b, d.v1 = consumeUint64(b) - b, d.v2 = consumeUint64(b) - b, d.v3 = consumeUint64(b) - b, d.v4 = consumeUint64(b) - b, d.total = consumeUint64(b) - copy(d.mem[:], b) - d.n = int(d.total % uint64(len(d.mem))) - return nil -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.LittleEndian.PutUint64(a[:], x) - return append(b, a[:]...) 
-} - -func consumeUint64(b []byte) ([]byte, uint64) { - x := u64(b) - return b[8:], x -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go b/src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go deleted file mode 100644 index 0ae847f75..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build !appengine && gc && !purego -// +build !appengine,gc,!purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -// -//go:noescape -func Sum64(b []byte) uint64 - -//go:noescape -func writeBlocks(d *Digest, b []byte) int diff --git a/src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s deleted file mode 100644 index be8db5bf7..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s +++ /dev/null @@ -1,215 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -#include "textflag.h" - -// Register allocation: -// AX h -// SI pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// DI prime4v - -// round reads from and advances the buffer pointer in SI. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (SI), R12 \ - ADDQ $8, SI \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ DI, acc - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 - // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), DI - - // Load slice. - MOVQ b_base+0(FP), SI - MOVQ b_len+8(FP), DX - LEAQ (SI)(DX*1), BX - - // The first loop limit will be len(b)-32. - SUBQ $32, BX - - // Check whether we have at least one block. - CMPQ DX, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until SI > BX. 
-blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) - - JMP afterBlocks - -noBlocks: - MOVQ ·prime5v(SB), AX - -afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. - ADDQ $24, BX - - CMPQ SI, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (SI), R8 - ADDQ $8, SI - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ DI, AX - - CMPQ SI, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ SI, BX - JG singles - - MOVL (SI), R8 - ADDQ $4, SI - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ SI, BX - JGE finalize - -singlesLoop: - MOVBQZX (SI), R12 - ADDQ $1, SI - IMULQ ·prime5v(SB), R12 - XORQ R12, AX - - ROLQ $11, AX - IMULQ R13, AX - - CMPQ SI, BX - JL singlesLoop - -finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) - RET - -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - -// func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 - // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - - // Load slice. - MOVQ b_base+8(FP), SI - MOVQ b_len+16(FP), DX - LEAQ (SI)(DX*1), BX - SUBQ $32, BX - - // Load vN from d. - MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop - - // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is SI minus the old base pointer. - SUBQ b_base+8(FP), SI - MOVQ SI, ret+32(FP) - - RET diff --git a/src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go deleted file mode 100644 index 1f52f296e..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go +++ /dev/null @@ -1,77 +0,0 @@ -//go:build !amd64 || appengine || !gc || purego -// +build !amd64 appengine !gc purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -func Sum64(b []byte) uint64 { - // A simpler version would be - // d := New() - // d.Write(b) - // return d.Sum64() - // but this is faster, particularly for small inputs. 
- - n := len(b) - var h uint64 - - if n >= 32 { - v1 := prime1v + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -prime1v - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(d *Digest, b []byte) int { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - n := len(b) - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 - return n - len(b) -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go deleted file mode 100644 index 6f3b0cb10..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go +++ /dev/null @@ -1,11 +0,0 @@ -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} - -// WriteString adds more data to d. It always returns len(s), nil. -func (d *Digest) WriteString(s string) (n int, err error) { - return d.Write([]byte(s)) -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/seqdec.go b/src/vendor/github.com/klauspost/compress/zstd/seqdec.go deleted file mode 100644 index 1dd39e63b..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ /dev/null @@ -1,492 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "io" -) - -type seq struct { - litLen uint32 - matchLen uint32 - offset uint32 - - // Codes are stored here for the encoder - // so they only have to be looked up once. - llCode, mlCode, ofCode uint8 -} - -func (s seq) String() string { - if s.offset <= 3 { - if s.offset == 0 { - return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") - } - return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") - } - return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") -} - -type seqCompMode uint8 - -const ( - compModePredefined seqCompMode = iota - compModeRLE - compModeFSE - compModeRepeat -) - -type sequenceDec struct { - // decoder keeps track of the current state and updates it from the bitstream. - fse *fseDecoder - state fseState - repeat bool -} - -// init the state of the decoder with input from stream. 
-func (s *sequenceDec) init(br *bitReader) error {
-    if s.fse == nil {
-        return errors.New("sequence decoder not defined")
-    }
-    s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<<s.fse.actualTableLog])
-    return nil
-}
-
-// sequenceDecs contains all 3 decoders and their state.
-type sequenceDecs struct {
-    litLengths   sequenceDec
-    offsets      sequenceDec
-    matchLengths sequenceDec
-    prevOffset   [3]int
-    hist         []byte
-    dict         []byte
-    literals     []byte
-    out          []byte
-    windowSize   int
-    maxBits      uint8
-}
-
-// initialize all 3 decoders from the stream input.
-func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error {
-    if err := s.litLengths.init(br); err != nil {
-        return errors.New("litLengths:" + err.Error())
-    }
-    if err := s.offsets.init(br); err != nil {
-        return errors.New("offsets:" + err.Error())
-    }
-    if err := s.matchLengths.init(br); err != nil {
-        return errors.New("matchLengths:" + err.Error())
-    }
-    s.literals = literals
-    s.hist = hist.b
-    s.prevOffset = hist.recentOffsets
-    s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits
-    s.windowSize = hist.windowSize
-    s.out = out
-    s.dict = nil
-    if hist.dict != nil {
-        s.dict = hist.dict.content
-    }
-    return nil
-}
-
-// decode sequences from the stream with the provided history.
-func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
-    startSize := len(s.out)
-    // Grab full sizes tables, to avoid bounds checks.
-    llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
-    llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
-
-    for i := seqs - 1; i >= 0; i-- {
-        if br.overread() {
-            printf("reading sequence %d, exceeded available data\n", seqs-i)
-            return io.ErrUnexpectedEOF
-        }
-        var ll, mo, ml int
-        if br.off > 4+((maxOffsetBits+16+16)>>3) {
-            // inlined function:
-            // ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
-
-            // Final will not read from stream.
-            var llB, mlB, moB uint8
-            ll, llB = llState.final()
-            ml, mlB = mlState.final()
-            mo, moB = ofState.final()
-
-            // extra bits are stored in reverse order.
-            br.fillFast()
-            mo += br.getBits(moB)
-            if s.maxBits > 32 {
-                br.fillFast()
-            }
-            ml += br.getBits(mlB)
-            ll += br.getBits(llB)
-
-            if moB > 1 {
-                s.prevOffset[2] = s.prevOffset[1]
-                s.prevOffset[1] = s.prevOffset[0]
-                s.prevOffset[0] = mo
-            } else {
-                // mo = s.adjustOffset(mo, ll, moB)
-                // Inlined for rather big speedup
-                if ll == 0 {
-                    // There is an exception though, when current sequence's literals_length = 0.
-                    // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
-                    // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
-                    mo++
-                }
-
-                if mo == 0 {
-                    mo = s.prevOffset[0]
-                } else {
-                    var temp int
-                    if mo == 3 {
-                        temp = s.prevOffset[0] - 1
-                    } else {
-                        temp = s.prevOffset[mo]
-                    }
-
-                    if temp == 0 {
-                        // 0 is not valid; input is corrupted; force offset to 1
-                        println("temp was 0")
-                        temp = 1
-                    }
-
-                    if mo != 1 {
-                        s.prevOffset[2] = s.prevOffset[1]
-                    }
-                    s.prevOffset[1] = s.prevOffset[0]
-                    s.prevOffset[0] = temp
-                    mo = temp
-                }
-            }
-            br.fillFast()
-        } else {
-            ll, mo, ml = s.next(br, llState, mlState, ofState)
-            br.fill()
-        }
-
-        if debugSequences {
-            println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
-        }
-
-        if ll > len(s.literals) {
-            return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals))
-        }
-        size := ll + ml + len(s.out)
-        if size-startSize > maxBlockSize {
-            return fmt.Errorf("output (%d) bigger than max block size", size)
-        }
-        if size > cap(s.out) {
-            // Not enough size, which can happen under high volume block streaming conditions
-            // but could be if destination slice is too small for sync operations.
-            // over-allocating here can create a large amount of GC pressure so we try to keep
-            // it as contained as possible
-            used := len(s.out) - startSize
-            addBytes := 256 + ll + ml + used>>2
-            // Clamp to max block size.
-            if used+addBytes > maxBlockSize {
-                addBytes = maxBlockSize - used
-            }
-            s.out = append(s.out, make([]byte, addBytes)...)
-            s.out = s.out[:len(s.out)-addBytes]
-        }
-        if ml > maxMatchLen {
-            return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
-        }
-
-        // Add literals
-        s.out = append(s.out, s.literals[:ll]...)
-        s.literals = s.literals[ll:]
-        out := s.out
-
-        if mo == 0 && ml > 0 {
-            return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
-        }
-
-        if mo > len(s.out)+len(hist) || mo > s.windowSize {
-            if len(s.dict) == 0 {
-                return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
-            }
-
-            // we may be in dictionary.
- dictO := len(s.dict) - (mo - (len(s.out) + len(hist))) - if dictO < 0 || dictO >= len(s.dict) { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) - } - end := dictO + ml - if end > len(s.dict) { - out = append(out, s.dict[dictO:]...) - mo -= len(s.dict) - dictO - ml -= len(s.dict) - dictO - } else { - out = append(out, s.dict[dictO:end]...) - mo = 0 - ml = 0 - } - } - - // Copy from history. - // TODO: Blocks without history could be made to ignore this completely. - if v := mo - len(s.out); v > 0 { - // v is the start position in history from end. - start := len(s.hist) - v - if ml > v { - // Some goes into current block. - // Copy remainder of history - out = append(out, s.hist[start:]...) - mo -= v - ml -= v - } else { - out = append(out, s.hist[start:start+ml]...) - ml = 0 - } - } - // We must be in current buffer now - if ml > 0 { - start := len(s.out) - mo - if ml <= len(s.out)-start { - // No overlap - out = append(out, s.out[start:start+ml]...) - } else { - // Overlapping copy - // Extend destination slice and copy one byte at the time. - out = out[:len(out)+ml] - src := out[start : start+ml] - // Destination is the space we just added. - dst := out[len(out)-ml:] - dst = dst[:len(src)] - for i := range src { - dst[i] = src[i] - } - } - } - s.out = out - if i == 0 { - // This is the last sequence, so we shouldn't update state. - break - } - - // Manually inlined, ~ 5-20% faster - // Update all 3 states at once. Approx 20% faster. - nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() - if nBits == 0 { - llState = llTable[llState.newState()&maxTableMask] - mlState = mlTable[mlState.newState()&maxTableMask] - ofState = ofTable[ofState.newState()&maxTableMask] - } else { - bits := br.getBitsFast(nBits) - lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) - llState = llTable[(llState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits >> (ofState.nbBits() & 31)) - lowBits &= bitMask[mlState.nbBits()&15] - mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] - ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] - } - } - - // Add final literals - s.out = append(s.out, s.literals...) - return nil -} - -// update states, at least 27 bits must be available. -func (s *sequenceDecs) update(br *bitReader) { - // Max 8 bits - s.litLengths.state.next(br) - // Max 9 bits - s.matchLengths.state.next(br) - // Max 8 bits - s.offsets.state.next(br) -} - -var bitMask [16]uint16 - -func init() { - for i := range bitMask[:] { - bitMask[i] = uint16((1 << uint(i)) - 1) - } -} - -// update states, at least 27 bits must be available. -func (s *sequenceDecs) updateAlt(br *bitReader) { - // Update all 3 states at once. Approx 20% faster. 
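The sequence-execution code above has to special-case matches whose offset is smaller than their length: source and destination ranges overlap, so the bytes are copied one at a time rather than with a bulk copy that could read bytes not yet written. The following standalone sketch illustrates that LZ77-style expansion; expandMatch is an illustrative helper, not part of the vendored package.

```
package main

import "fmt"

// expandMatch appends a match of the given length that starts `offset` bytes
// before the current end of dst. When offset < length the source and
// destination overlap, so the copy must proceed one byte at a time; a bulk
// copy would read bytes that have not been written yet.
func expandMatch(dst []byte, offset, length int) []byte {
	start := len(dst) - offset
	for i := 0; i < length; i++ {
		dst = append(dst, dst[start+i])
	}
	return dst
}

func main() {
	out := []byte("ab")
	// offset 2, length 6: the two-byte pattern is repeated three times.
	out = expandMatch(out, 2, 6)
	fmt.Println(string(out)) // abababab
}
```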
- a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - - nBits := a.nbBits() + b.nbBits() + c.nbBits() - if nBits == 0 { - s.litLengths.state.state = s.litLengths.state.dt[a.newState()] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()] - s.offsets.state.state = s.offsets.state.dt[c.newState()] - return - } - bits := br.getBitsFast(nBits) - lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31)) - s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits] - - lowBits = uint16(bits >> (c.nbBits() & 31)) - lowBits &= bitMask[b.nbBits()&15] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits] - - lowBits = uint16(bits) & bitMask[c.nbBits()&15] - s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits] -} - -// nextFast will return new states when there are at least 4 unused bytes left on the stream when done. -func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { - // Final will not read from stream. - ll, llB := llState.final() - ml, mlB := mlState.final() - mo, moB := ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - return - } - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - return - } - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - return -} - -func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { - // Final will not read from stream. - ll, llB := llState.final() - ml, mlB := mlState.final() - mo, moB := ofState.final() - - // extra bits are stored in reverse order. - br.fill() - if s.maxBits <= 32 { - mo += br.getBits(moB) - ml += br.getBits(mlB) - ll += br.getBits(llB) - } else { - mo += br.getBits(moB) - br.fill() - // matchlength+literal length, max 32 bits - ml += br.getBits(mlB) - ll += br.getBits(llB) - - } - mo = s.adjustOffset(mo, ll, moB) - return -} - -func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { - if offsetB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = offset - return offset - } - - if litLen == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
- offset++ - } - - if offset == 0 { - return s.prevOffset[0] - } - var temp int - if offset == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[offset] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") - temp = 1 - } - - if offset != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - return temp -} - -// mergeHistory will merge history. -func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) { - for i := uint(0); i < 3; i++ { - var sNew, sHist *sequenceDec - switch i { - default: - // same as "case 0": - sNew = &s.litLengths - sHist = &hist.litLengths - case 1: - sNew = &s.offsets - sHist = &hist.offsets - case 2: - sNew = &s.matchLengths - sHist = &hist.matchLengths - } - if sNew.repeat { - if sHist.fse == nil { - return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i) - } - continue - } - if sNew.fse == nil { - return nil, fmt.Errorf("sequence stream %d, no fse found", i) - } - if sHist.fse != nil && !sHist.fse.preDefined { - fseDecoderPool.Put(sHist.fse) - } - sHist.fse = sNew.fse - } - return hist, nil -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/seqenc.go b/src/vendor/github.com/klauspost/compress/zstd/seqenc.go deleted file mode 100644 index 8014174a7..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/seqenc.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "math/bits" - -type seqCoders struct { - llEnc, ofEnc, mlEnc *fseEncoder - llPrev, ofPrev, mlPrev *fseEncoder -} - -// swap coders with another (block). -func (s *seqCoders) swap(other *seqCoders) { - *s, *other = *other, *s -} - -// setPrev will update the previous encoders to the actually used ones -// and make sure a fresh one is in the main slot. -func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { - compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { - // We used the new one, more current to history and reuse the previous history - if *current == used { - *prev, *current = *current, *prev - c := *current - p := *prev - c.reUsed = false - p.reUsed = true - return - } - if used == *prev { - return - } - // Ensure we cannot reuse by accident - prevEnc := *prev - prevEnc.symbolLen = 0 - } - compareSwap(ll, &s.llEnc, &s.llPrev) - compareSwap(ml, &s.mlEnc, &s.mlPrev) - compareSwap(of, &s.ofEnc, &s.ofPrev) -} - -func highBit(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} - -var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, - 8, 9, 10, 11, 12, 13, 14, 15, - 16, 16, 17, 17, 18, 18, 19, 19, - 20, 20, 20, 20, 21, 21, 21, 21, - 22, 22, 22, 22, 22, 22, 22, 22, - 23, 23, 23, 23, 23, 23, 23, 23, - 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 24, 24, 24, 24, 24} - -// Up to 6 bits -const maxLLCode = 35 - -// llBitsTable translates from ll code to number of bits. -var llBitsTable = [maxLLCode + 1]byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 2, 2, 3, 3, - 4, 6, 7, 8, 9, 10, 11, 12, - 13, 14, 15, 16} - -// llCode returns the code that represents the literal length requested. 
-func llCode(litLength uint32) uint8 { - const llDeltaCode = 19 - if litLength <= 63 { - // Compiler insists on bounds check (Go 1.12) - return llCodeTable[litLength&63] - } - return uint8(highBit(litLength)) + llDeltaCode -} - -var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, - 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, - 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, - 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} - -// Up to 6 bits -const maxMLCode = 52 - -// mlBitsTable translates from ml code to number of bits. -var mlBitsTable = [maxMLCode + 1]byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 2, 2, 3, 3, - 4, 4, 5, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16} - -// note : mlBase = matchLength - MINMATCH; -// because it's the format it's stored in seqStore->sequences -func mlCode(mlBase uint32) uint8 { - const mlDeltaCode = 36 - if mlBase <= 127 { - // Compiler insists on bounds check (Go 1.12) - return mlCodeTable[mlBase&127] - } - return uint8(highBit(mlBase)) + mlDeltaCode -} - -func ofCode(offset uint32) uint8 { - // A valid offset will always be > 0. - return uint8(bits.Len32(offset) - 1) -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/snappy.go b/src/vendor/github.com/klauspost/compress/zstd/snappy.go deleted file mode 100644 index 9e1baad73..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/snappy.go +++ /dev/null @@ -1,435 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "errors" - "hash/crc32" - "io" - - "github.com/klauspost/compress/huff0" - snappy "github.com/klauspost/compress/internal/snapref" -) - -const ( - snappyTagLiteral = 0x00 - snappyTagCopy1 = 0x01 - snappyTagCopy2 = 0x02 - snappyTagCopy4 = 0x03 -) - -const ( - snappyChecksumSize = 4 - snappyMagicBody = "sNaPpY" - - // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - snappyMaxBlockSize = 65536 - - // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. - snappyMaxEncodedLenOfMaxBlockSize = 76490 -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var ( - // ErrSnappyCorrupt reports that the input is invalid. - ErrSnappyCorrupt = errors.New("snappy: corrupt input") - // ErrSnappyTooLarge reports that the uncompressed length is too large. 
- ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") - // ErrSnappyUnsupported reports that the input isn't supported. - ErrSnappyUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. -// Conversion is done by converting the stream directly from Snappy without intermediate -// full decoding. -// Therefore the compression ratio is much less than what can be done by a full decompression -// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without -// any errors being generated. -// No CRC value is being generated and not all CRC values of the Snappy stream are checked. -// However, it provides really fast recompression of Snappy streams. -// The converter can be reused to avoid allocations, even after errors. -type SnappyConverter struct { - r io.Reader - err error - buf []byte - block *blockEnc -} - -// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. -// If any error is detected on the Snappy stream it is returned. -// The number of bytes written is returned. -func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { - initPredefined() - r.err = nil - r.r = in - if r.block == nil { - r.block = &blockEnc{} - r.block.init() - } - r.block.initNewEncode() - if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { - r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) - } - r.block.litEnc.Reuse = huff0.ReusePolicyNone - var written int64 - var readHeader bool - { - var header []byte - var n int - header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) - - n, r.err = w.Write(header) - if r.err != nil { - return written, r.err - } - written += int64(n) - } - - for { - if !r.readFull(r.buf[:4], true) { - // Add empty last block - r.block.reset(nil) - r.block.last = true - err := r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - n, err := w.Write(r.block.output) - if err != nil { - return written, err - } - written += int64(n) - - return written, r.err - } - chunkType := r.buf[0] - if !readHeader { - if chunkType != chunkTypeStreamIdentifier { - println("chunkType != chunkTypeStreamIdentifier", chunkType) - r.err = ErrSnappyCorrupt - return written, r.err - } - readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - println("chunkLen > len(r.buf)", chunkType) - r.err = ErrSnappyUnsupported - return written, r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). 
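The SnappyConverter comments above describe its contract: it transcodes a Snappy framing-format stream into Zstandard frames without a full decompress/recompress cycle, trading compression ratio for speed. A minimal sketch of driving it through the package's exported API follows; the file names are placeholders, and note that, per the code above, Convert reports io.EOF once the Snappy stream has been consumed cleanly.

```
package main

import (
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	in, err := os.Open("input.snappy") // assumed to be a Snappy framing-format stream
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	out, err := os.Create("output.zst")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	var conv zstd.SnappyConverter
	n, err := conv.Convert(in, out)
	// A clean end of the Snappy stream surfaces as io.EOF.
	if err != nil && err != io.EOF {
		log.Fatalf("conversion failed after %d bytes: %v", n, err)
	}
	log.Printf("wrote %d bytes of zstd data", n)
}
```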
- if chunkLen < snappyChecksumSize { - println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return written, r.err - } - //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[snappyChecksumSize:] - - n, hdr, err := snappyDecodedLen(buf) - if err != nil { - r.err = err - return written, r.err - } - buf = buf[hdr:] - if n > snappyMaxBlockSize { - println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.reset(nil) - r.block.pushOffsets() - if err := decodeSnappy(r.block, buf); err != nil { - r.err = err - return written, r.err - } - if r.block.size+r.block.extraLits != n { - printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) - r.err = ErrSnappyCorrupt - return written, r.err - } - err = r.block.encode(nil, false, false) - switch err { - case errIncompressible: - r.block.popOffsets() - r.block.reset(nil) - r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) - if err != nil { - return written, err - } - err = r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - case nil: - default: - return written, err - } - - n, r.err = w.Write(r.block.output) - if r.err != nil { - return written, err - } - written += int64(n) - continue - case chunkTypeUncompressedData: - if debugEncoder { - println("Uncompressed, chunklen", chunkLen) - } - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < snappyChecksumSize { - println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.reset(nil) - buf := r.buf[:snappyChecksumSize] - if !r.readFull(buf, false) { - return written, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - snappyChecksumSize - if n > snappyMaxBlockSize { - println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.literals = r.block.literals[:n] - if !r.readFull(r.block.literals, false) { - return written, r.err - } - if snappyCRC(r.block.literals) != checksum { - println("literals crc mismatch") - r.err = ErrSnappyCorrupt - return written, r.err - } - err := r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - n, r.err = w.Write(r.block.output) - if r.err != nil { - return written, err - } - written += int64(n) - continue - - case chunkTypeStreamIdentifier: - if debugEncoder { - println("stream id", chunkLen, len(snappyMagicBody)) - } - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(snappyMagicBody) { - println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) - r.err = ErrSnappyCorrupt - return written, r.err - } - if !r.readFull(r.buf[:len(snappyMagicBody)], false) { - return written, r.err - } - for i := 0; i < len(snappyMagicBody); i++ { - if r.buf[i] != snappyMagicBody[i] { - println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) - r.err = ErrSnappyCorrupt - return written, r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
- println("chunkType <= 0x7f") - r.err = ErrSnappyUnsupported - return written, r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return written, r.err - } - } -} - -// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read. -func decodeSnappy(blk *blockEnc, src []byte) error { - //decodeRef(make([]byte, snappyMaxBlockSize), src) - var s, length int - lits := blk.extraLits - var offset uint32 - for s < len(src) { - switch src[s] & 0x03 { - case snappyTagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - if x > snappyMaxBlockSize { - println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) - return ErrSnappyCorrupt - } - length = int(x) + 1 - if length <= 0 { - println("length <= 0 ", length) - - return errUnsupportedLiteralLength - } - //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { - // return ErrSnappyCorrupt - //} - - blk.literals = append(blk.literals, src[s:s+length]...) - //println(length, "litLen") - lits += length - s += length - continue - - case snappyTagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) - - case snappyTagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = uint32(src[s-2]) | uint32(src[s-1])<<8 - - case snappyTagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - - if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { - println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) - - return ErrSnappyCorrupt - } - - // Check if offset is one of the recent offsets. - // Adjusts the output offset accordingly. - // Gives a tiny bit of compression, typically around 1%. 
- if false { - offset = blk.matchOffset(offset, uint32(lits)) - } else { - offset += 3 - } - - blk.sequences = append(blk.sequences, seq{ - litLen: uint32(lits), - offset: offset, - matchLen: uint32(length) - zstdMinMatch, - }) - blk.size += length + lits - lits = 0 - } - blk.extraLits = lits - return nil -} - -func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrSnappyCorrupt - } - return false - } - return true -} - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func snappyCRC(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return c>>15 | c<<17 + 0xa282ead8 -} - -// snappyDecodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrSnappyCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrSnappyTooLarge - } - return int(v), n, nil -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/zip.go b/src/vendor/github.com/klauspost/compress/zstd/zip.go deleted file mode 100644 index 967f29b31..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/zip.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "errors" - "io" - "sync" -) - -// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. -// See https://www.winzip.com/win/en/comp_info.html -const ZipMethodWinZip = 93 - -// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. -// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. -// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT -const ZipMethodPKWare = 20 - -var zipReaderPool sync.Pool - -// newZipReader cannot be used since we would leak goroutines... 
-func newZipReader(r io.Reader) io.ReadCloser { - dec, ok := zipReaderPool.Get().(*Decoder) - if ok { - dec.Reset(r) - } else { - d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true)) - if err != nil { - panic(err) - } - dec = d - } - return &pooledZipReader{dec: dec} -} - -type pooledZipReader struct { - mu sync.Mutex // guards Close and Read - dec *Decoder -} - -func (r *pooledZipReader) Read(p []byte) (n int, err error) { - r.mu.Lock() - defer r.mu.Unlock() - if r.dec == nil { - return 0, errors.New("Read after Close") - } - dec, err := r.dec.Read(p) - - return dec, err -} - -func (r *pooledZipReader) Close() error { - r.mu.Lock() - defer r.mu.Unlock() - var err error - if r.dec != nil { - err = r.dec.Reset(nil) - zipReaderPool.Put(r.dec) - r.dec = nil - } - return err -} - -type pooledZipWriter struct { - mu sync.Mutex // guards Close and Read - enc *Encoder - pool *sync.Pool -} - -func (w *pooledZipWriter) Write(p []byte) (n int, err error) { - w.mu.Lock() - defer w.mu.Unlock() - if w.enc == nil { - return 0, errors.New("Write after Close") - } - return w.enc.Write(p) -} - -func (w *pooledZipWriter) Close() error { - w.mu.Lock() - defer w.mu.Unlock() - var err error - if w.enc != nil { - err = w.enc.Close() - w.pool.Put(w.enc) - w.enc = nil - } - return err -} - -// ZipCompressor returns a compressor that can be registered with zip libraries. -// The provided encoder options will be used on all encodes. -func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { - var pool sync.Pool - return func(w io.Writer) (io.WriteCloser, error) { - enc, ok := pool.Get().(*Encoder) - if ok { - enc.Reset(w) - } else { - var err error - enc, err = NewWriter(w, opts...) - if err != nil { - return nil, err - } - } - return &pooledZipWriter{enc: enc, pool: &pool}, nil - } -} - -// ZipDecompressor returns a decompressor that can be registered with zip libraries. -// See ZipCompressor for example. -func ZipDecompressor() func(r io.Reader) io.ReadCloser { - return func(r io.Reader) io.ReadCloser { - d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true)) - if err != nil { - panic(err) - } - return d.IOReadCloser() - } -} diff --git a/src/vendor/github.com/klauspost/compress/zstd/zstd.go b/src/vendor/github.com/klauspost/compress/zstd/zstd.go deleted file mode 100644 index ef1d49a00..000000000 --- a/src/vendor/github.com/klauspost/compress/zstd/zstd.go +++ /dev/null @@ -1,152 +0,0 @@ -// Package zstd provides decompression of zstandard files. -// -// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "log" - "math" - "math/bits" -) - -// enable debug printing -const debug = false - -// enable encoding debug printing -const debugEncoder = debug - -// enable decoding debug printing -const debugDecoder = debug - -// Enable extra assertions. -const debugAsserts = debug || false - -// print sequence details -const debugSequences = false - -// print detailed matching information -const debugMatches = false - -// force encoder to use predefined tables. -const forcePreDef = false - -// zstdMinMatch is the minimum zstd match length. -const zstdMinMatch = 3 - -// Reset the buffer offset when reaching this. -const bufferReset = math.MaxInt32 - MaxWindowSize - -var ( - // ErrReservedBlockType is returned when a reserved block type is found. - // Typically this indicates wrong or corrupted input. 
- ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") - - // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. - // Typically this indicates wrong or corrupted input. - ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") - - // ErrBlockTooSmall is returned when a block is too small to be decoded. - // Typically returned on invalid input. - ErrBlockTooSmall = errors.New("block too small") - - // ErrMagicMismatch is returned when a "magic" number isn't what is expected. - // Typically this indicates wrong or corrupted input. - ErrMagicMismatch = errors.New("invalid input: magic number mismatch") - - // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. - // Typically this indicates wrong or corrupted input. - ErrWindowSizeExceeded = errors.New("window size exceeded") - - // ErrWindowSizeTooSmall is returned when no window size is specified. - // Typically this indicates wrong or corrupted input. - ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") - - // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. - ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") - - // ErrUnknownDictionary is returned if the dictionary ID is unknown. - // For the time being dictionaries are not supported. - ErrUnknownDictionary = errors.New("unknown dictionary") - - // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. - // This is only returned if SingleSegment is specified on the frame. - ErrFrameSizeExceeded = errors.New("frame size exceeded") - - // ErrCRCMismatch is returned if CRC mismatches. - ErrCRCMismatch = errors.New("CRC check failed") - - // ErrDecoderClosed will be returned if the Decoder was used after - // Close has been called. - ErrDecoderClosed = errors.New("decoder used after Close") - - // ErrDecoderNilInput is returned when a nil Reader was provided - // and an operation other than Reset/DecodeAll/Close was attempted. - ErrDecoderNilInput = errors.New("nil input provided as reader") -) - -func println(a ...interface{}) { - if debug || debugDecoder || debugEncoder { - log.Println(a...) - } -} - -func printf(format string, a ...interface{}) { - if debug || debugDecoder || debugEncoder { - log.Printf(format, a...) - } -} - -// matchLenFast does matching, but will not match the last up to 7 bytes. -func matchLenFast(a, b []byte) int { - endI := len(a) & (math.MaxInt32 - 7) - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - return i + bits.TrailingZeros64(diff)>>3 - } - } - return endI -} - -// matchLen returns the maximum length. -// a must be the shortest of the two. -// The function also returns whether all bytes matched. 
-func matchLen(a, b []byte) int { - b = b[:len(a)] - for i := 0; i < len(a)-7; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - return i + (bits.TrailingZeros64(diff) >> 3) - } - } - - checked := (len(a) >> 3) << 3 - a = a[checked:] - b = b[checked:] - for i := range a { - if a[i] != b[i] { - return i + checked - } - } - return len(a) + checked -} - -func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[i:]) -} - -func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[i:]) -} - -func load64(b []byte, i int) uint64 { - return binary.LittleEndian.Uint64(b[i:]) -} - -type byter interface { - Bytes() []byte - Len() int -} - -var _ byter = &bytes.Buffer{} diff --git a/src/vendor/github.com/liggitt/tabwriter/.travis.yml b/src/vendor/github.com/liggitt/tabwriter/.travis.yml deleted file mode 100644 index 2768dc072..000000000 --- a/src/vendor/github.com/liggitt/tabwriter/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -language: go - -go: - - "1.8" - - "1.9" - - "1.10" - - "1.11" - - "1.12" - - master - -script: go test -v ./... diff --git a/src/vendor/github.com/liggitt/tabwriter/LICENSE b/src/vendor/github.com/liggitt/tabwriter/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/src/vendor/github.com/liggitt/tabwriter/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/vendor/github.com/liggitt/tabwriter/README.md b/src/vendor/github.com/liggitt/tabwriter/README.md deleted file mode 100644 index e75d35672..000000000 --- a/src/vendor/github.com/liggitt/tabwriter/README.md +++ /dev/null @@ -1,7 +0,0 @@ -This repo is a drop-in replacement for the golang [text/tabwriter](https://golang.org/pkg/text/tabwriter/) package. - -It is based on that package at [cf2c2ea8](https://github.com/golang/go/tree/cf2c2ea89d09d486bb018b1817c5874388038c3a/src/text/tabwriter) and inherits its license. - -The following additional features are supported: -* `RememberWidths` flag allows remembering maximum widths seen per column even after Flush() is called. 
-* `RememberedWidths() []int` and `SetRememberedWidths([]int) *Writer` allows obtaining and transferring remembered column width between writers. diff --git a/src/vendor/github.com/liggitt/tabwriter/tabwriter.go b/src/vendor/github.com/liggitt/tabwriter/tabwriter.go deleted file mode 100644 index fd3431fb0..000000000 --- a/src/vendor/github.com/liggitt/tabwriter/tabwriter.go +++ /dev/null @@ -1,637 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package tabwriter implements a write filter (tabwriter.Writer) that -// translates tabbed columns in input into properly aligned text. -// -// It is a drop-in replacement for the golang text/tabwriter package (https://golang.org/pkg/text/tabwriter), -// based on that package at https://github.com/golang/go/tree/cf2c2ea89d09d486bb018b1817c5874388038c3a -// with support for additional features. -// -// The package is using the Elastic Tabstops algorithm described at -// http://nickgravgaard.com/elastictabstops/index.html. -package tabwriter - -import ( - "io" - "unicode/utf8" -) - -// ---------------------------------------------------------------------------- -// Filter implementation - -// A cell represents a segment of text terminated by tabs or line breaks. -// The text itself is stored in a separate buffer; cell only describes the -// segment's size in bytes, its width in runes, and whether it's an htab -// ('\t') terminated cell. -// -type cell struct { - size int // cell size in bytes - width int // cell width in runes - htab bool // true if the cell is terminated by an htab ('\t') -} - -// A Writer is a filter that inserts padding around tab-delimited -// columns in its input to align them in the output. -// -// The Writer treats incoming bytes as UTF-8-encoded text consisting -// of cells terminated by horizontal ('\t') or vertical ('\v') tabs, -// and newline ('\n') or formfeed ('\f') characters; both newline and -// formfeed act as line breaks. -// -// Tab-terminated cells in contiguous lines constitute a column. The -// Writer inserts padding as needed to make all cells in a column have -// the same width, effectively aligning the columns. It assumes that -// all characters have the same width, except for tabs for which a -// tabwidth must be specified. Column cells must be tab-terminated, not -// tab-separated: non-tab terminated trailing text at the end of a line -// forms a cell but that cell is not part of an aligned column. -// For instance, in this example (where | stands for a horizontal tab): -// -// aaaa|bbb|d -// aa |b |dd -// a | -// aa |cccc|eee -// -// the b and c are in distinct columns (the b column is not contiguous -// all the way). The d and e are not in a column at all (there's no -// terminating tab, nor would the column be contiguous). -// -// The Writer assumes that all Unicode code points have the same width; -// this may not be true in some fonts or if the string contains combining -// characters. -// -// If DiscardEmptyColumns is set, empty columns that are terminated -// entirely by vertical (or "soft") tabs are discarded. Columns -// terminated by horizontal (or "hard") tabs are not affected by -// this flag. -// -// If a Writer is configured to filter HTML, HTML tags and entities -// are passed through. The widths of tags and entities are -// assumed to be zero (tags) and one (entities) for formatting purposes. 
-// -// A segment of text may be escaped by bracketing it with Escape -// characters. The tabwriter passes escaped text segments through -// unchanged. In particular, it does not interpret any tabs or line -// breaks within the segment. If the StripEscape flag is set, the -// Escape characters are stripped from the output; otherwise they -// are passed through as well. For the purpose of formatting, the -// width of the escaped text is always computed excluding the Escape -// characters. -// -// The formfeed character acts like a newline but it also terminates -// all columns in the current line (effectively calling Flush). Tab- -// terminated cells in the next line start new columns. Unless found -// inside an HTML tag or inside an escaped text segment, formfeed -// characters appear as newlines in the output. -// -// The Writer must buffer input internally, because proper spacing -// of one line may depend on the cells in future lines. Clients must -// call Flush when done calling Write. -// -type Writer struct { - // configuration - output io.Writer - minwidth int - tabwidth int - padding int - padbytes [8]byte - flags uint - - // current state - buf []byte // collected text excluding tabs or line breaks - pos int // buffer position up to which cell.width of incomplete cell has been computed - cell cell // current incomplete cell; cell.width is up to buf[pos] excluding ignored sections - endChar byte // terminating char of escaped sequence (Escape for escapes, '>', ';' for HTML tags/entities, or 0) - lines [][]cell // list of lines; each line is a list of cells - widths []int // list of column widths in runes - re-used during formatting - - maxwidths []int // list of max column widths in runes -} - -// addLine adds a new line. -// flushed is a hint indicating whether the underlying writer was just flushed. -// If so, the previous line is not likely to be a good indicator of the new line's cells. -func (b *Writer) addLine(flushed bool) { - // Grow slice instead of appending, - // as that gives us an opportunity - // to re-use an existing []cell. - if n := len(b.lines) + 1; n <= cap(b.lines) { - b.lines = b.lines[:n] - b.lines[n-1] = b.lines[n-1][:0] - } else { - b.lines = append(b.lines, nil) - } - - if !flushed { - // The previous line is probably a good indicator - // of how many cells the current line will have. - // If the current line's capacity is smaller than that, - // abandon it and make a new one. - if n := len(b.lines); n >= 2 { - if prev := len(b.lines[n-2]); prev > cap(b.lines[n-1]) { - b.lines[n-1] = make([]cell, 0, prev) - } - } - } -} - -// Reset the current state. 
-func (b *Writer) reset() { - b.buf = b.buf[:0] - b.pos = 0 - b.cell = cell{} - b.endChar = 0 - b.lines = b.lines[0:0] - b.widths = b.widths[0:0] - b.addLine(true) -} - -// Internal representation (current state): -// -// - all text written is appended to buf; tabs and line breaks are stripped away -// - at any given time there is a (possibly empty) incomplete cell at the end -// (the cell starts after a tab or line break) -// - cell.size is the number of bytes belonging to the cell so far -// - cell.width is text width in runes of that cell from the start of the cell to -// position pos; html tags and entities are excluded from this width if html -// filtering is enabled -// - the sizes and widths of processed text are kept in the lines list -// which contains a list of cells for each line -// - the widths list is a temporary list with current widths used during -// formatting; it is kept in Writer because it's re-used -// -// |<---------- size ---------->| -// | | -// |<- width ->|<- ignored ->| | -// | | | | -// [---processed---tab------------......] -// ^ ^ ^ -// | | | -// buf start of incomplete cell pos - -// Formatting can be controlled with these flags. -const ( - // Ignore html tags and treat entities (starting with '&' - // and ending in ';') as single characters (width = 1). - FilterHTML uint = 1 << iota - - // Strip Escape characters bracketing escaped text segments - // instead of passing them through unchanged with the text. - StripEscape - - // Force right-alignment of cell content. - // Default is left-alignment. - AlignRight - - // Handle empty columns as if they were not present in - // the input in the first place. - DiscardEmptyColumns - - // Always use tabs for indentation columns (i.e., padding of - // leading empty cells on the left) independent of padchar. - TabIndent - - // Print a vertical bar ('|') between columns (after formatting). - // Discarded columns appear as zero-width columns ("||"). - Debug - - // Remember maximum widths seen per column even after Flush() is called. - RememberWidths -) - -// A Writer must be initialized with a call to Init. The first parameter (output) -// specifies the filter output. 
The remaining parameters control the formatting: -// -// minwidth minimal cell width including any padding -// tabwidth width of tab characters (equivalent number of spaces) -// padding padding added to a cell before computing its width -// padchar ASCII char used for padding -// if padchar == '\t', the Writer will assume that the -// width of a '\t' in the formatted output is tabwidth, -// and cells are left-aligned independent of align_left -// (for correct-looking results, tabwidth must correspond -// to the tab width in the viewer displaying the result) -// flags formatting control -// -func (b *Writer) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer { - if minwidth < 0 || tabwidth < 0 || padding < 0 { - panic("negative minwidth, tabwidth, or padding") - } - b.output = output - b.minwidth = minwidth - b.tabwidth = tabwidth - b.padding = padding - for i := range b.padbytes { - b.padbytes[i] = padchar - } - if padchar == '\t' { - // tab padding enforces left-alignment - flags &^= AlignRight - } - b.flags = flags - - b.reset() - - return b -} - -// debugging support (keep code around) -func (b *Writer) dump() { - pos := 0 - for i, line := range b.lines { - print("(", i, ") ") - for _, c := range line { - print("[", string(b.buf[pos:pos+c.size]), "]") - pos += c.size - } - print("\n") - } - print("\n") -} - -// local error wrapper so we can distinguish errors we want to return -// as errors from genuine panics (which we don't want to return as errors) -type osError struct { - err error -} - -func (b *Writer) write0(buf []byte) { - n, err := b.output.Write(buf) - if n != len(buf) && err == nil { - err = io.ErrShortWrite - } - if err != nil { - panic(osError{err}) - } -} - -func (b *Writer) writeN(src []byte, n int) { - for n > len(src) { - b.write0(src) - n -= len(src) - } - b.write0(src[0:n]) -} - -var ( - newline = []byte{'\n'} - tabs = []byte("\t\t\t\t\t\t\t\t") -) - -func (b *Writer) writePadding(textw, cellw int, useTabs bool) { - if b.padbytes[0] == '\t' || useTabs { - // padding is done with tabs - if b.tabwidth == 0 { - return // tabs have no width - can't do any padding - } - // make cellw the smallest multiple of b.tabwidth - cellw = (cellw + b.tabwidth - 1) / b.tabwidth * b.tabwidth - n := cellw - textw // amount of padding - if n < 0 { - panic("internal error") - } - b.writeN(tabs, (n+b.tabwidth-1)/b.tabwidth) - return - } - - // padding is done with non-tab characters - b.writeN(b.padbytes[0:], cellw-textw) -} - -var vbar = []byte{'|'} - -func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) { - pos = pos0 - for i := line0; i < line1; i++ { - line := b.lines[i] - - // if TabIndent is set, use tabs to pad leading empty cells - useTabs := b.flags&TabIndent != 0 - - for j, c := range line { - if j > 0 && b.flags&Debug != 0 { - // indicate column break - b.write0(vbar) - } - - if c.size == 0 { - // empty cell - if j < len(b.widths) { - b.writePadding(c.width, b.widths[j], useTabs) - } - } else { - // non-empty cell - useTabs = false - if b.flags&AlignRight == 0 { // align left - b.write0(b.buf[pos : pos+c.size]) - pos += c.size - if j < len(b.widths) { - b.writePadding(c.width, b.widths[j], false) - } - } else { // align right - if j < len(b.widths) { - b.writePadding(c.width, b.widths[j], false) - } - b.write0(b.buf[pos : pos+c.size]) - pos += c.size - } - } - } - - if i+1 == len(b.lines) { - // last buffered line - we don't have a newline, so just write - // any outstanding buffered data - b.write0(b.buf[pos : 
pos+b.cell.size]) - pos += b.cell.size - } else { - // not the last line - write newline - b.write0(newline) - } - } - return -} - -// Format the text between line0 and line1 (excluding line1); pos -// is the buffer position corresponding to the beginning of line0. -// Returns the buffer position corresponding to the beginning of -// line1 and an error, if any. -// -func (b *Writer) format(pos0 int, line0, line1 int) (pos int) { - pos = pos0 - column := len(b.widths) - for this := line0; this < line1; this++ { - line := b.lines[this] - - if column >= len(line)-1 { - continue - } - // cell exists in this column => this line - // has more cells than the previous line - // (the last cell per line is ignored because cells are - // tab-terminated; the last cell per line describes the - // text before the newline/formfeed and does not belong - // to a column) - - // print unprinted lines until beginning of block - pos = b.writeLines(pos, line0, this) - line0 = this - - // column block begin - width := b.minwidth // minimal column width - discardable := true // true if all cells in this column are empty and "soft" - for ; this < line1; this++ { - line = b.lines[this] - if column >= len(line)-1 { - break - } - // cell exists in this column - c := line[column] - // update width - if w := c.width + b.padding; w > width { - width = w - } - // update discardable - if c.width > 0 || c.htab { - discardable = false - } - } - // column block end - - // discard empty columns if necessary - if discardable && b.flags&DiscardEmptyColumns != 0 { - width = 0 - } - - if b.flags&RememberWidths != 0 { - if len(b.maxwidths) < len(b.widths) { - b.maxwidths = append(b.maxwidths, b.widths[len(b.maxwidths):]...) - } - - switch { - case len(b.maxwidths) == len(b.widths): - b.maxwidths = append(b.maxwidths, width) - case b.maxwidths[len(b.widths)] > width: - width = b.maxwidths[len(b.widths)] - case b.maxwidths[len(b.widths)] < width: - b.maxwidths[len(b.widths)] = width - } - } - - // format and print all columns to the right of this column - // (we know the widths of this column and all columns to the left) - b.widths = append(b.widths, width) // push width - pos = b.format(pos, line0, this) - b.widths = b.widths[0 : len(b.widths)-1] // pop width - line0 = this - } - - // print unprinted lines until end - return b.writeLines(pos, line0, line1) -} - -// Append text to current cell. -func (b *Writer) append(text []byte) { - b.buf = append(b.buf, text...) - b.cell.size += len(text) -} - -// Update the cell width. -func (b *Writer) updateWidth() { - b.cell.width += utf8.RuneCount(b.buf[b.pos:]) - b.pos = len(b.buf) -} - -// To escape a text segment, bracket it with Escape characters. -// For instance, the tab in this string "Ignore this tab: \xff\t\xff" -// does not terminate a cell and constitutes a single character of -// width one for formatting purposes. -// -// The value 0xff was chosen because it cannot appear in a valid UTF-8 sequence. -// -const Escape = '\xff' - -// Start escaped mode. -func (b *Writer) startEscape(ch byte) { - switch ch { - case Escape: - b.endChar = Escape - case '<': - b.endChar = '>' - case '&': - b.endChar = ';' - } -} - -// Terminate escaped mode. If the escaped text was an HTML tag, its width -// is assumed to be zero for formatting purposes; if it was an HTML entity, -// its width is assumed to be one. In all other cases, the width is the -// unicode width of the text. 
-// -func (b *Writer) endEscape() { - switch b.endChar { - case Escape: - b.updateWidth() - if b.flags&StripEscape == 0 { - b.cell.width -= 2 // don't count the Escape chars - } - case '>': // tag of zero width - case ';': - b.cell.width++ // entity, count as one rune - } - b.pos = len(b.buf) - b.endChar = 0 -} - -// Terminate the current cell by adding it to the list of cells of the -// current line. Returns the number of cells in that line. -// -func (b *Writer) terminateCell(htab bool) int { - b.cell.htab = htab - line := &b.lines[len(b.lines)-1] - *line = append(*line, b.cell) - b.cell = cell{} - return len(*line) -} - -func handlePanic(err *error, op string) { - if e := recover(); e != nil { - if nerr, ok := e.(osError); ok { - *err = nerr.err - return - } - panic("tabwriter: panic during " + op) - } -} - -// RememberedWidths returns a copy of the remembered per-column maximum widths. -// Requires use of the RememberWidths flag, and is not threadsafe. -func (b *Writer) RememberedWidths() []int { - retval := make([]int, len(b.maxwidths)) - copy(retval, b.maxwidths) - return retval -} - -// SetRememberedWidths sets the remembered per-column maximum widths. -// Requires use of the RememberWidths flag, and is not threadsafe. -func (b *Writer) SetRememberedWidths(widths []int) *Writer { - b.maxwidths = make([]int, len(widths)) - copy(b.maxwidths, widths) - return b -} - -// Flush should be called after the last call to Write to ensure -// that any data buffered in the Writer is written to output. Any -// incomplete escape sequence at the end is considered -// complete for formatting purposes. -func (b *Writer) Flush() error { - return b.flush() -} - -func (b *Writer) flush() (err error) { - defer b.reset() // even in the presence of errors - defer handlePanic(&err, "Flush") - - // add current cell if not empty - if b.cell.size > 0 { - if b.endChar != 0 { - // inside escape - terminate it even if incomplete - b.endEscape() - } - b.terminateCell(false) - } - - // format contents of buffer - b.format(0, 0, len(b.lines)) - return nil -} - -var hbar = []byte("---\n") - -// Write writes buf to the writer b. -// The only errors returned are ones encountered -// while writing to the underlying output stream. -// -func (b *Writer) Write(buf []byte) (n int, err error) { - defer handlePanic(&err, "Write") - - // split text into cells - n = 0 - for i, ch := range buf { - if b.endChar == 0 { - // outside escape - switch ch { - case '\t', '\v', '\n', '\f': - // end of cell - b.append(buf[n:i]) - b.updateWidth() - n = i + 1 // ch consumed - ncells := b.terminateCell(ch == '\t') - if ch == '\n' || ch == '\f' { - // terminate line - b.addLine(ch == '\f') - if ch == '\f' || ncells == 1 { - // A '\f' always forces a flush. Otherwise, if the previous - // line has only one cell which does not have an impact on - // the formatting of the following lines (the last cell per - // line is ignored by format()), thus we can flush the - // Writer contents. 
- if err = b.Flush(); err != nil { - return - } - if ch == '\f' && b.flags&Debug != 0 { - // indicate section break - b.write0(hbar) - } - } - } - - case Escape: - // start of escaped sequence - b.append(buf[n:i]) - b.updateWidth() - n = i - if b.flags&StripEscape != 0 { - n++ // strip Escape - } - b.startEscape(Escape) - - case '<', '&': - // possibly an html tag/entity - if b.flags&FilterHTML != 0 { - // begin of tag/entity - b.append(buf[n:i]) - b.updateWidth() - n = i - b.startEscape(ch) - } - } - - } else { - // inside escape - if ch == b.endChar { - // end of tag/entity - j := i + 1 - if ch == Escape && b.flags&StripEscape != 0 { - j = i // strip Escape - } - b.append(buf[n:j]) - n = i + 1 // ch consumed - b.endEscape() - } - } - } - - // append leftover text - b.append(buf[n:]) - n = len(buf) - return -} - -// NewWriter allocates and initializes a new tabwriter.Writer. -// The parameters are the same as for the Init function. -// -func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer { - return new(Writer).Init(output, minwidth, tabwidth, padding, padchar, flags) -} diff --git a/src/vendor/github.com/mitchellh/copystructure/LICENSE b/src/vendor/github.com/mitchellh/copystructure/LICENSE deleted file mode 100644 index 229851590..000000000 --- a/src/vendor/github.com/mitchellh/copystructure/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/src/vendor/github.com/mitchellh/copystructure/README.md b/src/vendor/github.com/mitchellh/copystructure/README.md deleted file mode 100644 index f0fbd2e5c..000000000 --- a/src/vendor/github.com/mitchellh/copystructure/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# copystructure - -copystructure is a Go library for deep copying values in Go. - -This allows you to copy Go values that may contain reference values -such as maps, slices, or pointers, and copy their data as well instead -of just their references. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/copystructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). - -The `Copy` function has examples associated with it there. 
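The copystructure README removed above defers to the Godoc for usage examples. For orientation while reviewing this vendor removal, here is a minimal sketch of the Copy API and the `copy` struct tags documented in copystructure.go further down in this diff; the Person type and its fields are hypothetical and exist only to illustrate the deep/shallow/ignore behavior described there, not code from this repository.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/mitchellh/copystructure"
)

// Person is a hypothetical type used only to illustrate the API.
// The `copy` struct tags mirror the behavior documented in copystructure.go:
// "shallow" assigns the reference as-is, "ignore" leaves the zero value.
type Person struct {
	Name    string
	Tags    []string
	Scratch *bytes.Buffer  `copy:"shallow"`
	Cache   map[string]int `copy:"ignore"`
}

func main() {
	orig := &Person{
		Name:    "alice",
		Tags:    []string{"admin"},
		Scratch: bytes.NewBufferString("tmp"),
		Cache:   map[string]int{"hits": 1},
	}

	// Copy returns an interface{}; the result has the same dynamic type as the input.
	dup, err := copystructure.Copy(orig)
	if err != nil {
		panic(err)
	}
	copied := dup.(*Person)

	// The slice is deep-copied, so mutating the copy does not touch the original.
	copied.Tags[0] = "viewer"
	fmt.Println(orig.Tags[0], copied.Tags[0]) // admin viewer

	// The shallow-copied buffer is shared; the ignored map is left as its zero value.
	fmt.Println(copied.Scratch == orig.Scratch, copied.Cache == nil) // true true
}
```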
diff --git a/src/vendor/github.com/mitchellh/copystructure/copier_time.go b/src/vendor/github.com/mitchellh/copystructure/copier_time.go deleted file mode 100644 index db6a6aa1a..000000000 --- a/src/vendor/github.com/mitchellh/copystructure/copier_time.go +++ /dev/null @@ -1,15 +0,0 @@ -package copystructure - -import ( - "reflect" - "time" -) - -func init() { - Copiers[reflect.TypeOf(time.Time{})] = timeCopier -} - -func timeCopier(v interface{}) (interface{}, error) { - // Just... copy it. - return v.(time.Time), nil -} diff --git a/src/vendor/github.com/mitchellh/copystructure/copystructure.go b/src/vendor/github.com/mitchellh/copystructure/copystructure.go deleted file mode 100644 index 8089e6670..000000000 --- a/src/vendor/github.com/mitchellh/copystructure/copystructure.go +++ /dev/null @@ -1,631 +0,0 @@ -package copystructure - -import ( - "errors" - "reflect" - "sync" - - "github.com/mitchellh/reflectwalk" -) - -const tagKey = "copy" - -// Copy returns a deep copy of v. -// -// Copy is unable to copy unexported fields in a struct (lowercase field names). -// Unexported fields can't be reflected by the Go runtime and therefore -// copystructure can't perform any data copies. -// -// For structs, copy behavior can be controlled with struct tags. For example: -// -// struct { -// Name string -// Data *bytes.Buffer `copy:"shallow"` -// } -// -// The available tag values are: -// -// * "ignore" - The field will be ignored, effectively resulting in it being -// assigned the zero value in the copy. -// -// * "shallow" - The field will be be shallow copied. This means that references -// values such as pointers, maps, slices, etc. will be directly assigned -// versus deep copied. -// -func Copy(v interface{}) (interface{}, error) { - return Config{}.Copy(v) -} - -// CopierFunc is a function that knows how to deep copy a specific type. -// Register these globally with the Copiers variable. -type CopierFunc func(interface{}) (interface{}, error) - -// Copiers is a map of types that behave specially when they are copied. -// If a type is found in this map while deep copying, this function -// will be called to copy it instead of attempting to copy all fields. -// -// The key should be the type, obtained using: reflect.TypeOf(value with type). -// -// It is unsafe to write to this map after Copies have started. If you -// are writing to this map while also copying, wrap all modifications to -// this map as well as to Copy in a mutex. -var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc) - -// ShallowCopiers is a map of pointer types that behave specially -// when they are copied. If a type is found in this map while deep -// copying, the pointer value will be shallow copied and not walked -// into. -// -// The key should be the type, obtained using: reflect.TypeOf(value -// with type). -// -// It is unsafe to write to this map after Copies have started. If you -// are writing to this map while also copying, wrap all modifications to -// this map as well as to Copy in a mutex. -var ShallowCopiers map[reflect.Type]struct{} = make(map[reflect.Type]struct{}) - -// Must is a helper that wraps a call to a function returning -// (interface{}, error) and panics if the error is non-nil. It is intended -// for use in variable initializations and should only be used when a copy -// error should be a crashing case. 
-func Must(v interface{}, err error) interface{} { - if err != nil { - panic("copy error: " + err.Error()) - } - - return v -} - -var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true") - -type Config struct { - // Lock any types that are a sync.Locker and are not a mutex while copying. - // If there is an RLocker method, use that to get the sync.Locker. - Lock bool - - // Copiers is a map of types associated with a CopierFunc. Use the global - // Copiers map if this is nil. - Copiers map[reflect.Type]CopierFunc - - // ShallowCopiers is a map of pointer types that when they are - // shallow copied no matter where they are encountered. Use the - // global ShallowCopiers if this is nil. - ShallowCopiers map[reflect.Type]struct{} -} - -func (c Config) Copy(v interface{}) (interface{}, error) { - if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr { - return nil, errPointerRequired - } - - w := new(walker) - if c.Lock { - w.useLocks = true - } - - if c.Copiers == nil { - c.Copiers = Copiers - } - w.copiers = c.Copiers - - if c.ShallowCopiers == nil { - c.ShallowCopiers = ShallowCopiers - } - w.shallowCopiers = c.ShallowCopiers - - err := reflectwalk.Walk(v, w) - if err != nil { - return nil, err - } - - // Get the result. If the result is nil, then we want to turn it - // into a typed nil if we can. - result := w.Result - if result == nil { - val := reflect.ValueOf(v) - result = reflect.Indirect(reflect.New(val.Type())).Interface() - } - - return result, nil -} - -// Return the key used to index interfaces types we've seen. Store the number -// of pointers in the upper 32bits, and the depth in the lower 32bits. This is -// easy to calculate, easy to match a key with our current depth, and we don't -// need to deal with initializing and cleaning up nested maps or slices. -func ifaceKey(pointers, depth int) uint64 { - return uint64(pointers)<<32 | uint64(depth) -} - -type walker struct { - Result interface{} - - copiers map[reflect.Type]CopierFunc - shallowCopiers map[reflect.Type]struct{} - depth int - ignoreDepth int - vals []reflect.Value - cs []reflect.Value - - // This stores the number of pointers we've walked over, indexed by depth. - ps []int - - // If an interface is indirected by a pointer, we need to know the type of - // interface to create when creating the new value. Store the interface - // types here, indexed by both the walk depth and the number of pointers - // already seen at that depth. Use ifaceKey to calculate the proper uint64 - // value. 
- ifaceTypes map[uint64]reflect.Type - - // any locks we've taken, indexed by depth - locks []sync.Locker - // take locks while walking the structure - useLocks bool -} - -func (w *walker) Enter(l reflectwalk.Location) error { - w.depth++ - - // ensure we have enough elements to index via w.depth - for w.depth >= len(w.locks) { - w.locks = append(w.locks, nil) - } - - for len(w.ps) < w.depth+1 { - w.ps = append(w.ps, 0) - } - - return nil -} - -func (w *walker) Exit(l reflectwalk.Location) error { - locker := w.locks[w.depth] - w.locks[w.depth] = nil - if locker != nil { - defer locker.Unlock() - } - - // clear out pointers and interfaces as we exit the stack - w.ps[w.depth] = 0 - - for k := range w.ifaceTypes { - mask := uint64(^uint32(0)) - if k&mask == uint64(w.depth) { - delete(w.ifaceTypes, k) - } - } - - w.depth-- - if w.ignoreDepth > w.depth { - w.ignoreDepth = 0 - } - - if w.ignoring() { - return nil - } - - switch l { - case reflectwalk.Array: - fallthrough - case reflectwalk.Map: - fallthrough - case reflectwalk.Slice: - w.replacePointerMaybe() - - // Pop map off our container - w.cs = w.cs[:len(w.cs)-1] - case reflectwalk.MapValue: - // Pop off the key and value - mv := w.valPop() - mk := w.valPop() - m := w.cs[len(w.cs)-1] - - // If mv is the zero value, SetMapIndex deletes the key form the map, - // or in this case never adds it. We need to create a properly typed - // zero value so that this key can be set. - if !mv.IsValid() { - mv = reflect.Zero(m.Elem().Type().Elem()) - } - m.Elem().SetMapIndex(mk, mv) - case reflectwalk.ArrayElem: - // Pop off the value and the index and set it on the array - v := w.valPop() - i := w.valPop().Interface().(int) - if v.IsValid() { - a := w.cs[len(w.cs)-1] - ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call - if ae.CanSet() { - ae.Set(v) - } - } - case reflectwalk.SliceElem: - // Pop off the value and the index and set it on the slice - v := w.valPop() - i := w.valPop().Interface().(int) - if v.IsValid() { - s := w.cs[len(w.cs)-1] - se := s.Elem().Index(i) - if se.CanSet() { - se.Set(v) - } - } - case reflectwalk.Struct: - w.replacePointerMaybe() - - // Remove the struct from the container stack - w.cs = w.cs[:len(w.cs)-1] - case reflectwalk.StructField: - // Pop off the value and the field - v := w.valPop() - f := w.valPop().Interface().(reflect.StructField) - if v.IsValid() { - s := w.cs[len(w.cs)-1] - sf := reflect.Indirect(s).FieldByName(f.Name) - - if sf.CanSet() { - sf.Set(v) - } - } - case reflectwalk.WalkLoc: - // Clear out the slices for GC - w.cs = nil - w.vals = nil - } - - return nil -} - -func (w *walker) Map(m reflect.Value) error { - if w.ignoring() { - return nil - } - w.lock(m) - - // Create the map. If the map itself is nil, then just make a nil map - var newMap reflect.Value - if m.IsNil() { - newMap = reflect.New(m.Type()) - } else { - newMap = wrapPtr(reflect.MakeMap(m.Type())) - } - - w.cs = append(w.cs, newMap) - w.valPush(newMap) - return nil -} - -func (w *walker) MapElem(m, k, v reflect.Value) error { - return nil -} - -func (w *walker) PointerEnter(v bool) error { - if v { - w.ps[w.depth]++ - } - return nil -} - -func (w *walker) PointerExit(v bool) error { - if v { - w.ps[w.depth]-- - } - return nil -} - -func (w *walker) Pointer(v reflect.Value) error { - if _, ok := w.shallowCopiers[v.Type()]; ok { - // Shallow copy this value. Use the same logic as primitive, then - // return skip. 
- if err := w.Primitive(v); err != nil { - return err - } - - return reflectwalk.SkipEntry - } - - return nil -} - -func (w *walker) Interface(v reflect.Value) error { - if !v.IsValid() { - return nil - } - if w.ifaceTypes == nil { - w.ifaceTypes = make(map[uint64]reflect.Type) - } - - w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type() - return nil -} - -func (w *walker) Primitive(v reflect.Value) error { - if w.ignoring() { - return nil - } - w.lock(v) - - // IsValid verifies the v is non-zero and CanInterface verifies - // that we're allowed to read this value (unexported fields). - var newV reflect.Value - if v.IsValid() && v.CanInterface() { - newV = reflect.New(v.Type()) - newV.Elem().Set(v) - } - - w.valPush(newV) - w.replacePointerMaybe() - return nil -} - -func (w *walker) Slice(s reflect.Value) error { - if w.ignoring() { - return nil - } - w.lock(s) - - var newS reflect.Value - if s.IsNil() { - newS = reflect.New(s.Type()) - } else { - newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap())) - } - - w.cs = append(w.cs, newS) - w.valPush(newS) - return nil -} - -func (w *walker) SliceElem(i int, elem reflect.Value) error { - if w.ignoring() { - return nil - } - - // We don't write the slice here because elem might still be - // arbitrarily complex. Just record the index and continue on. - w.valPush(reflect.ValueOf(i)) - - return nil -} - -func (w *walker) Array(a reflect.Value) error { - if w.ignoring() { - return nil - } - w.lock(a) - - newA := reflect.New(a.Type()) - - w.cs = append(w.cs, newA) - w.valPush(newA) - return nil -} - -func (w *walker) ArrayElem(i int, elem reflect.Value) error { - if w.ignoring() { - return nil - } - - // We don't write the array here because elem might still be - // arbitrarily complex. Just record the index and continue on. - w.valPush(reflect.ValueOf(i)) - - return nil -} - -func (w *walker) Struct(s reflect.Value) error { - if w.ignoring() { - return nil - } - w.lock(s) - - var v reflect.Value - if c, ok := w.copiers[s.Type()]; ok { - // We have a Copier for this struct, so we use that copier to - // get the copy, and we ignore anything deeper than this. - w.ignoreDepth = w.depth - - dup, err := c(s.Interface()) - if err != nil { - return err - } - - // We need to put a pointer to the value on the value stack, - // so allocate a new pointer and set it. - v = reflect.New(s.Type()) - reflect.Indirect(v).Set(reflect.ValueOf(dup)) - } else { - // No copier, we copy ourselves and allow reflectwalk to guide - // us deeper into the structure for copying. - v = reflect.New(s.Type()) - } - - // Push the value onto the value stack for setting the struct field, - // and add the struct itself to the containers stack in case we walk - // deeper so that its own fields can be modified. - w.valPush(v) - w.cs = append(w.cs, v) - - return nil -} - -func (w *walker) StructField(f reflect.StructField, v reflect.Value) error { - if w.ignoring() { - return nil - } - - // If PkgPath is non-empty, this is a private (unexported) field. - // We do not set this unexported since the Go runtime doesn't allow us. - if f.PkgPath != "" { - return reflectwalk.SkipEntry - } - - switch f.Tag.Get(tagKey) { - case "shallow": - // If we're shallow copying then assign the value directly to the - // struct and skip the entry. 
- if v.IsValid() { - s := w.cs[len(w.cs)-1] - sf := reflect.Indirect(s).FieldByName(f.Name) - if sf.CanSet() { - sf.Set(v) - } - } - - return reflectwalk.SkipEntry - - case "ignore": - // Do nothing - return reflectwalk.SkipEntry - } - - // Push the field onto the stack, we'll handle it when we exit - // the struct field in Exit... - w.valPush(reflect.ValueOf(f)) - - return nil -} - -// ignore causes the walker to ignore any more values until we exit this on -func (w *walker) ignore() { - w.ignoreDepth = w.depth -} - -func (w *walker) ignoring() bool { - return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth -} - -func (w *walker) pointerPeek() bool { - return w.ps[w.depth] > 0 -} - -func (w *walker) valPop() reflect.Value { - result := w.vals[len(w.vals)-1] - w.vals = w.vals[:len(w.vals)-1] - - // If we're out of values, that means we popped everything off. In - // this case, we reset the result so the next pushed value becomes - // the result. - if len(w.vals) == 0 { - w.Result = nil - } - - return result -} - -func (w *walker) valPush(v reflect.Value) { - w.vals = append(w.vals, v) - - // If we haven't set the result yet, then this is the result since - // it is the first (outermost) value we're seeing. - if w.Result == nil && v.IsValid() { - w.Result = v.Interface() - } -} - -func (w *walker) replacePointerMaybe() { - // Determine the last pointer value. If it is NOT a pointer, then - // we need to push that onto the stack. - if !w.pointerPeek() { - w.valPush(reflect.Indirect(w.valPop())) - return - } - - v := w.valPop() - - // If the expected type is a pointer to an interface of any depth, - // such as *interface{}, **interface{}, etc., then we need to convert - // the value "v" from *CONCRETE to *interface{} so types match for - // Set. - // - // Example if v is type *Foo where Foo is a struct, v would become - // *interface{} instead. This only happens if we have an interface expectation - // at this depth. - // - // For more info, see GH-16 - if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface { - y := reflect.New(iType) // Create *interface{} - y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced) - v = y // v is now typed *interface{} (where *v = Foo) - } - - for i := 1; i < w.ps[w.depth]; i++ { - if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok { - iface := reflect.New(iType).Elem() - iface.Set(v) - v = iface - } - - p := reflect.New(v.Type()) - p.Elem().Set(v) - v = p - } - - w.valPush(v) -} - -// if this value is a Locker, lock it and add it to the locks slice -func (w *walker) lock(v reflect.Value) { - if !w.useLocks { - return - } - - if !v.IsValid() || !v.CanInterface() { - return - } - - type rlocker interface { - RLocker() sync.Locker - } - - var locker sync.Locker - - // We can't call Interface() on a value directly, since that requires - // a copy. This is OK, since the pointer to a value which is a sync.Locker - // is also a sync.Locker. 
- if v.Kind() == reflect.Ptr { - switch l := v.Interface().(type) { - case rlocker: - // don't lock a mutex directly - if _, ok := l.(*sync.RWMutex); !ok { - locker = l.RLocker() - } - case sync.Locker: - locker = l - } - } else if v.CanAddr() { - switch l := v.Addr().Interface().(type) { - case rlocker: - // don't lock a mutex directly - if _, ok := l.(*sync.RWMutex); !ok { - locker = l.RLocker() - } - case sync.Locker: - locker = l - } - } - - // still no callable locker - if locker == nil { - return - } - - // don't lock a mutex directly - switch locker.(type) { - case *sync.Mutex, *sync.RWMutex: - return - } - - locker.Lock() - w.locks[w.depth] = locker -} - -// wrapPtr is a helper that takes v and always make it *v. copystructure -// stores things internally as pointers until the last moment before unwrapping -func wrapPtr(v reflect.Value) reflect.Value { - if !v.IsValid() { - return v - } - vPtr := reflect.New(v.Type()) - vPtr.Elem().Set(v) - return vPtr -} diff --git a/src/vendor/github.com/mitchellh/reflectwalk/.travis.yml b/src/vendor/github.com/mitchellh/reflectwalk/.travis.yml deleted file mode 100644 index 4f2ee4d97..000000000 --- a/src/vendor/github.com/mitchellh/reflectwalk/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/src/vendor/github.com/mitchellh/reflectwalk/LICENSE b/src/vendor/github.com/mitchellh/reflectwalk/LICENSE deleted file mode 100644 index f9c841a51..000000000 --- a/src/vendor/github.com/mitchellh/reflectwalk/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/src/vendor/github.com/mitchellh/reflectwalk/README.md b/src/vendor/github.com/mitchellh/reflectwalk/README.md deleted file mode 100644 index ac82cd2e1..000000000 --- a/src/vendor/github.com/mitchellh/reflectwalk/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# reflectwalk - -reflectwalk is a Go library for "walking" a value in Go using reflection, -in the same way a directory tree can be "walked" on the filesystem. Walking -a complex structure can allow you to do manipulations on unknown structures -such as those decoded from JSON. 
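The reflectwalk README removed above describes walking arbitrary values through callback interfaces, and reflectwalk.go below defines those interfaces (PrimitiveWalker, StructWalker, Walk, and so on). As a rough sketch under those definitions, a walker implementing only PrimitiveWalker could collect strings from a decoded-JSON-style structure; the stringCollector type and the sample map are illustrative assumptions, not code from this repository.

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/reflectwalk"
)

// stringCollector is a hypothetical walker that records every string
// primitive it encounters; it satisfies reflectwalk.PrimitiveWalker.
type stringCollector struct {
	found []string
}

func (c *stringCollector) Primitive(v reflect.Value) error {
	// Values reached through interface{} arrive with Kind Interface; unwrap them first.
	if v.Kind() == reflect.Interface {
		v = v.Elem()
	}
	if v.Kind() == reflect.String {
		c.found = append(c.found, v.String())
	}
	return nil
}

func main() {
	// Arbitrary nested data, e.g. something decoded from JSON.
	data := map[string]interface{}{
		"name": "harbor",
		"refs": []interface{}{"sbom", "signature"},
	}

	c := &stringCollector{}
	if err := reflectwalk.Walk(data, c); err != nil {
		panic(err)
	}
	fmt.Println(c.found) // every string visited anywhere in the structure (order not guaranteed)
}
```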
diff --git a/src/vendor/github.com/mitchellh/reflectwalk/location.go b/src/vendor/github.com/mitchellh/reflectwalk/location.go deleted file mode 100644 index 6a7f17611..000000000 --- a/src/vendor/github.com/mitchellh/reflectwalk/location.go +++ /dev/null @@ -1,19 +0,0 @@ -package reflectwalk - -//go:generate stringer -type=Location location.go - -type Location uint - -const ( - None Location = iota - Map - MapKey - MapValue - Slice - SliceElem - Array - ArrayElem - Struct - StructField - WalkLoc -) diff --git a/src/vendor/github.com/mitchellh/reflectwalk/location_string.go b/src/vendor/github.com/mitchellh/reflectwalk/location_string.go deleted file mode 100644 index 70760cf4c..000000000 --- a/src/vendor/github.com/mitchellh/reflectwalk/location_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type=Location location.go"; DO NOT EDIT. - -package reflectwalk - -import "fmt" - -const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc" - -var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73} - -func (i Location) String() string { - if i >= Location(len(_Location_index)-1) { - return fmt.Sprintf("Location(%d)", i) - } - return _Location_name[_Location_index[i]:_Location_index[i+1]] -} diff --git a/src/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/src/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go deleted file mode 100644 index 7fee7b050..000000000 --- a/src/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go +++ /dev/null @@ -1,420 +0,0 @@ -// reflectwalk is a package that allows you to "walk" complex structures -// similar to how you may "walk" a filesystem: visiting every element one -// by one and calling callback functions allowing you to handle and manipulate -// those elements. -package reflectwalk - -import ( - "errors" - "reflect" -) - -// PrimitiveWalker implementations are able to handle primitive values -// within complex structures. Primitive values are numbers, strings, -// booleans, funcs, chans. -// -// These primitive values are often members of more complex -// structures (slices, maps, etc.) that are walkable by other interfaces. -type PrimitiveWalker interface { - Primitive(reflect.Value) error -} - -// InterfaceWalker implementations are able to handle interface values as they -// are encountered during the walk. -type InterfaceWalker interface { - Interface(reflect.Value) error -} - -// MapWalker implementations are able to handle individual elements -// found within a map structure. -type MapWalker interface { - Map(m reflect.Value) error - MapElem(m, k, v reflect.Value) error -} - -// SliceWalker implementations are able to handle slice elements found -// within complex structures. -type SliceWalker interface { - Slice(reflect.Value) error - SliceElem(int, reflect.Value) error -} - -// ArrayWalker implementations are able to handle array elements found -// within complex structures. -type ArrayWalker interface { - Array(reflect.Value) error - ArrayElem(int, reflect.Value) error -} - -// StructWalker is an interface that has methods that are called for -// structs when a Walk is done. -type StructWalker interface { - Struct(reflect.Value) error - StructField(reflect.StructField, reflect.Value) error -} - -// EnterExitWalker implementations are notified before and after -// they walk deeper into complex structures (into struct fields, -// into slice elements, etc.) 
-type EnterExitWalker interface { - Enter(Location) error - Exit(Location) error -} - -// PointerWalker implementations are notified when the value they're -// walking is a pointer or not. Pointer is called for _every_ value whether -// it is a pointer or not. -type PointerWalker interface { - PointerEnter(bool) error - PointerExit(bool) error -} - -// PointerValueWalker implementations are notified with the value of -// a particular pointer when a pointer is walked. Pointer is called -// right before PointerEnter. -type PointerValueWalker interface { - Pointer(reflect.Value) error -} - -// SkipEntry can be returned from walk functions to skip walking -// the value of this field. This is only valid in the following functions: -// -// - Struct: skips all fields from being walked -// - StructField: skips walking the struct value -// -var SkipEntry = errors.New("skip this entry") - -// Walk takes an arbitrary value and an interface and traverses the -// value, calling callbacks on the interface if they are supported. -// The interface should implement one or more of the walker interfaces -// in this package, such as PrimitiveWalker, StructWalker, etc. -func Walk(data, walker interface{}) (err error) { - v := reflect.ValueOf(data) - ew, ok := walker.(EnterExitWalker) - if ok { - err = ew.Enter(WalkLoc) - } - - if err == nil { - err = walk(v, walker) - } - - if ok && err == nil { - err = ew.Exit(WalkLoc) - } - - return -} - -func walk(v reflect.Value, w interface{}) (err error) { - // Determine if we're receiving a pointer and if so notify the walker. - // The logic here is convoluted but very important (tests will fail if - // almost any part is changed). I will try to explain here. - // - // First, we check if the value is an interface, if so, we really need - // to check the interface's VALUE to see whether it is a pointer. - // - // Check whether the value is then a pointer. If so, then set pointer - // to true to notify the user. - // - // If we still have a pointer or an interface after the indirections, then - // we unwrap another level - // - // At this time, we also set "v" to be the dereferenced value. This is - // because once we've unwrapped the pointer we want to use that value. - pointer := false - pointerV := v - - for { - if pointerV.Kind() == reflect.Interface { - if iw, ok := w.(InterfaceWalker); ok { - if err = iw.Interface(pointerV); err != nil { - return - } - } - - pointerV = pointerV.Elem() - } - - if pointerV.Kind() == reflect.Ptr { - if pw, ok := w.(PointerValueWalker); ok { - if err = pw.Pointer(pointerV); err != nil { - if err == SkipEntry { - // Skip the rest of this entry but clear the error - return nil - } - - return - } - } - - pointer = true - v = reflect.Indirect(pointerV) - } - if pw, ok := w.(PointerWalker); ok { - if err = pw.PointerEnter(pointer); err != nil { - return - } - - defer func(pointer bool) { - if err != nil { - return - } - - err = pw.PointerExit(pointer) - }(pointer) - } - - if pointer { - pointerV = v - } - pointer = false - - // If we still have a pointer or interface we have to indirect another level. - switch pointerV.Kind() { - case reflect.Ptr, reflect.Interface: - continue - } - break - } - - // We preserve the original value here because if it is an interface - // type, we want to pass that directly into the walkPrimitive, so that - // we can set it. 
- originalV := v - if v.Kind() == reflect.Interface { - v = v.Elem() - } - - k := v.Kind() - if k >= reflect.Int && k <= reflect.Complex128 { - k = reflect.Int - } - - switch k { - // Primitives - case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid: - err = walkPrimitive(originalV, w) - return - case reflect.Map: - err = walkMap(v, w) - return - case reflect.Slice: - err = walkSlice(v, w) - return - case reflect.Struct: - err = walkStruct(v, w) - return - case reflect.Array: - err = walkArray(v, w) - return - default: - panic("unsupported type: " + k.String()) - } -} - -func walkMap(v reflect.Value, w interface{}) error { - ew, ewok := w.(EnterExitWalker) - if ewok { - ew.Enter(Map) - } - - if mw, ok := w.(MapWalker); ok { - if err := mw.Map(v); err != nil { - return err - } - } - - for _, k := range v.MapKeys() { - kv := v.MapIndex(k) - - if mw, ok := w.(MapWalker); ok { - if err := mw.MapElem(v, k, kv); err != nil { - return err - } - } - - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(MapKey) - } - - if err := walk(k, w); err != nil { - return err - } - - if ok { - ew.Exit(MapKey) - ew.Enter(MapValue) - } - - // get the map value again as it may have changed in the MapElem call - if err := walk(v.MapIndex(k), w); err != nil { - return err - } - - if ok { - ew.Exit(MapValue) - } - } - - if ewok { - ew.Exit(Map) - } - - return nil -} - -func walkPrimitive(v reflect.Value, w interface{}) error { - if pw, ok := w.(PrimitiveWalker); ok { - return pw.Primitive(v) - } - - return nil -} - -func walkSlice(v reflect.Value, w interface{}) (err error) { - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(Slice) - } - - if sw, ok := w.(SliceWalker); ok { - if err := sw.Slice(v); err != nil { - return err - } - } - - for i := 0; i < v.Len(); i++ { - elem := v.Index(i) - - if sw, ok := w.(SliceWalker); ok { - if err := sw.SliceElem(i, elem); err != nil { - return err - } - } - - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(SliceElem) - } - - if err := walk(elem, w); err != nil { - return err - } - - if ok { - ew.Exit(SliceElem) - } - } - - ew, ok = w.(EnterExitWalker) - if ok { - ew.Exit(Slice) - } - - return nil -} - -func walkArray(v reflect.Value, w interface{}) (err error) { - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(Array) - } - - if aw, ok := w.(ArrayWalker); ok { - if err := aw.Array(v); err != nil { - return err - } - } - - for i := 0; i < v.Len(); i++ { - elem := v.Index(i) - - if aw, ok := w.(ArrayWalker); ok { - if err := aw.ArrayElem(i, elem); err != nil { - return err - } - } - - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(ArrayElem) - } - - if err := walk(elem, w); err != nil { - return err - } - - if ok { - ew.Exit(ArrayElem) - } - } - - ew, ok = w.(EnterExitWalker) - if ok { - ew.Exit(Array) - } - - return nil -} - -func walkStruct(v reflect.Value, w interface{}) (err error) { - ew, ewok := w.(EnterExitWalker) - if ewok { - ew.Enter(Struct) - } - - skip := false - if sw, ok := w.(StructWalker); ok { - err = sw.Struct(v) - if err == SkipEntry { - skip = true - err = nil - } - if err != nil { - return - } - } - - if !skip { - vt := v.Type() - for i := 0; i < vt.NumField(); i++ { - sf := vt.Field(i) - f := v.FieldByIndex([]int{i}) - - if sw, ok := w.(StructWalker); ok { - err = sw.StructField(sf, f) - - // SkipEntry just pretends this field doesn't even exist - if err == SkipEntry { - continue - } - - if err != nil { - return - } - } - - ew, ok := w.(EnterExitWalker) - if ok { - ew.Enter(StructField) - } - - err = 
walk(f, w) - if err != nil { - return - } - - if ok { - ew.Exit(StructField) - } - } - } - - if ewok { - ew.Exit(Struct) - } - - return nil -} diff --git a/src/vendor/github.com/moby/locker/LICENSE b/src/vendor/github.com/moby/locker/LICENSE deleted file mode 100644 index 2e0ec1dcf..000000000 --- a/src/vendor/github.com/moby/locker/LICENSE +++ /dev/null @@ -1,190 +0,0 @@ - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2018 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/src/vendor/github.com/moby/locker/README.md b/src/vendor/github.com/moby/locker/README.md deleted file mode 100644 index a0852f0f8..000000000 --- a/src/vendor/github.com/moby/locker/README.md +++ /dev/null @@ -1,65 +0,0 @@ -Locker -===== - -locker provides a mechanism for creating finer-grained locking to help -free up more global locks to handle other tasks. - -The implementation looks close to a sync.Mutex, however, the user must provide a -reference to use to refer to the underlying lock when locking and unlocking, -and unlock may generate an error. - -If a lock with a given name does not exist when `Lock` is called, one is -created. -Lock references are automatically cleaned up on `Unlock` if nothing else is -waiting for the lock. - - -## Usage - -```go -package important - -import ( - "sync" - "time" - - "github.com/moby/locker" -) - -type important struct { - locks *locker.Locker - data map[string]interface{} - mu sync.Mutex -} - -func (i *important) Get(name string) interface{} { - i.locks.Lock(name) - defer i.locks.Unlock(name) - return i.data[name] -} - -func (i *important) Create(name string, data interface{}) { - i.locks.Lock(name) - defer i.locks.Unlock(name) - - i.createImportant(data) - - i.mu.Lock() - i.data[name] = data - i.mu.Unlock() -} - -func (i *important) createImportant(data interface{}) { - time.Sleep(10 * time.Second) -} -``` - -For functions dealing with a given name, always lock at the beginning of the -function (or before doing anything with the underlying state), this ensures any -other function that is dealing with the same name will block. - -When needing to modify the underlying data, use the global lock to ensure nothing -else is modifying it at the same time. -Since name lock is already in place, no reads will occur while the modification -is being performed. - diff --git a/src/vendor/github.com/moby/locker/locker.go b/src/vendor/github.com/moby/locker/locker.go deleted file mode 100644 index 0b22ddfab..000000000 --- a/src/vendor/github.com/moby/locker/locker.go +++ /dev/null @@ -1,112 +0,0 @@ -/* -Package locker provides a mechanism for creating finer-grained locking to help -free up more global locks to handle other tasks. - -The implementation looks close to a sync.Mutex, however the user must provide a -reference to use to refer to the underlying lock when locking and unlocking, -and unlock may generate an error. - -If a lock with a given name does not exist when `Lock` is called, one is -created. -Lock references are automatically cleaned up on `Unlock` if nothing else is -waiting for the lock. -*/ -package locker - -import ( - "errors" - "sync" - "sync/atomic" -) - -// ErrNoSuchLock is returned when the requested lock does not exist -var ErrNoSuchLock = errors.New("no such lock") - -// Locker provides a locking mechanism based on the passed in reference name -type Locker struct { - mu sync.Mutex - locks map[string]*lockCtr -} - -// lockCtr is used by Locker to represent a lock with a given name. 
-type lockCtr struct { - mu sync.Mutex - // waiters is the number of waiters waiting to acquire the lock - // this is int32 instead of uint32 so we can add `-1` in `dec()` - waiters int32 -} - -// inc increments the number of waiters waiting for the lock -func (l *lockCtr) inc() { - atomic.AddInt32(&l.waiters, 1) -} - -// dec decrements the number of waiters waiting on the lock -func (l *lockCtr) dec() { - atomic.AddInt32(&l.waiters, -1) -} - -// count gets the current number of waiters -func (l *lockCtr) count() int32 { - return atomic.LoadInt32(&l.waiters) -} - -// Lock locks the mutex -func (l *lockCtr) Lock() { - l.mu.Lock() -} - -// Unlock unlocks the mutex -func (l *lockCtr) Unlock() { - l.mu.Unlock() -} - -// New creates a new Locker -func New() *Locker { - return &Locker{ - locks: make(map[string]*lockCtr), - } -} - -// Lock locks a mutex with the given name. If it doesn't exist, one is created -func (l *Locker) Lock(name string) { - l.mu.Lock() - if l.locks == nil { - l.locks = make(map[string]*lockCtr) - } - - nameLock, exists := l.locks[name] - if !exists { - nameLock = &lockCtr{} - l.locks[name] = nameLock - } - - // increment the nameLock waiters while inside the main mutex - // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently - nameLock.inc() - l.mu.Unlock() - - // Lock the nameLock outside the main mutex so we don't block other operations - // once locked then we can decrement the number of waiters for this lock - nameLock.Lock() - nameLock.dec() -} - -// Unlock unlocks the mutex with the given name -// If the given lock is not being waited on by any other callers, it is deleted -func (l *Locker) Unlock(name string) error { - l.mu.Lock() - nameLock, exists := l.locks[name] - if !exists { - l.mu.Unlock() - return ErrNoSuchLock - } - - if nameLock.count() == 0 { - delete(l.locks, name) - } - nameLock.Unlock() - - l.mu.Unlock() - return nil -} diff --git a/src/vendor/github.com/moby/term/.gitignore b/src/vendor/github.com/moby/term/.gitignore deleted file mode 100644 index b0747ff01..000000000 --- a/src/vendor/github.com/moby/term/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -# if you want to ignore files created by your editor/tools, consider using a -# global .gitignore or .git/info/exclude see https://help.github.com/articles/ignoring-files -.* -!.github -!.gitignore -profile.out -# support running go modules in vendor mode for local development -vendor/ diff --git a/src/vendor/github.com/moby/term/LICENSE b/src/vendor/github.com/moby/term/LICENSE deleted file mode 100644 index 6d8d58fb6..000000000 --- a/src/vendor/github.com/moby/term/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2018 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/src/vendor/github.com/moby/term/README.md b/src/vendor/github.com/moby/term/README.md deleted file mode 100644 index 0ce92cc33..000000000 --- a/src/vendor/github.com/moby/term/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# term - utilities for dealing with terminals - -![Test](https://github.com/moby/term/workflows/Test/badge.svg) [![GoDoc](https://godoc.org/github.com/moby/term?status.svg)](https://godoc.org/github.com/moby/term) [![Go Report Card](https://goreportcard.com/badge/github.com/moby/term)](https://goreportcard.com/report/github.com/moby/term) - -term provides structures and helper functions to work with terminal (state, sizes). 
- -#### Using term - -```go -package main - -import ( - "log" - "os" - - "github.com/moby/term" -) - -func main() { - fd := os.Stdin.Fd() - if term.IsTerminal(fd) { - ws, err := term.GetWinsize(fd) - if err != nil { - log.Fatalf("term.GetWinsize: %s", err) - } - log.Printf("%d:%d\n", ws.Height, ws.Width) - } -} -``` - -## Contributing - -Want to hack on term? [Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md) apply. - -## Copyright and license -Code and documentation copyright 2015 Docker, inc. Code released under the Apache 2.0 license. Docs released under Creative commons. diff --git a/src/vendor/github.com/moby/term/ascii.go b/src/vendor/github.com/moby/term/ascii.go deleted file mode 100644 index 55873c055..000000000 --- a/src/vendor/github.com/moby/term/ascii.go +++ /dev/null @@ -1,66 +0,0 @@ -package term - -import ( - "fmt" - "strings" -) - -// ASCII list the possible supported ASCII key sequence -var ASCII = []string{ - "ctrl-@", - "ctrl-a", - "ctrl-b", - "ctrl-c", - "ctrl-d", - "ctrl-e", - "ctrl-f", - "ctrl-g", - "ctrl-h", - "ctrl-i", - "ctrl-j", - "ctrl-k", - "ctrl-l", - "ctrl-m", - "ctrl-n", - "ctrl-o", - "ctrl-p", - "ctrl-q", - "ctrl-r", - "ctrl-s", - "ctrl-t", - "ctrl-u", - "ctrl-v", - "ctrl-w", - "ctrl-x", - "ctrl-y", - "ctrl-z", - "ctrl-[", - "ctrl-\\", - "ctrl-]", - "ctrl-^", - "ctrl-_", -} - -// ToBytes converts a string representing a suite of key-sequence to the corresponding ASCII code. -func ToBytes(keys string) ([]byte, error) { - codes := []byte{} -next: - for _, key := range strings.Split(keys, ",") { - if len(key) != 1 { - for code, ctrl := range ASCII { - if ctrl == key { - codes = append(codes, byte(code)) - continue next - } - } - if key == "DEL" { - codes = append(codes, 127) - } else { - return nil, fmt.Errorf("Unknown character: '%s'", key) - } - } else { - codes = append(codes, key[0]) - } - } - return codes, nil -} diff --git a/src/vendor/github.com/moby/term/proxy.go b/src/vendor/github.com/moby/term/proxy.go deleted file mode 100644 index c47756b89..000000000 --- a/src/vendor/github.com/moby/term/proxy.go +++ /dev/null @@ -1,88 +0,0 @@ -package term - -import ( - "io" -) - -// EscapeError is special error which returned by a TTY proxy reader's Read() -// method in case its detach escape sequence is read. -type EscapeError struct{} - -func (EscapeError) Error() string { - return "read escape sequence" -} - -// escapeProxy is used only for attaches with a TTY. It is used to proxy -// stdin keypresses from the underlying reader and look for the passed in -// escape key sequence to signal a detach. -type escapeProxy struct { - escapeKeys []byte - escapeKeyPos int - r io.Reader - buf []byte -} - -// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader -// and detects when the specified escape keys are read, in which case the Read -// method will return an error of type EscapeError. -func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader { - return &escapeProxy{ - escapeKeys: escapeKeys, - r: r, - } -} - -func (r *escapeProxy) Read(buf []byte) (n int, err error) { - if len(r.escapeKeys) > 0 && r.escapeKeyPos == len(r.escapeKeys) { - return 0, EscapeError{} - } - - if len(r.buf) > 0 { - n = copy(buf, r.buf) - r.buf = r.buf[n:] - } - - nr, err := r.r.Read(buf[n:]) - n += nr - if len(r.escapeKeys) == 0 { - return n, err - } - - for i := 0; i < n; i++ { - if buf[i] == r.escapeKeys[r.escapeKeyPos] { - r.escapeKeyPos++ - - // Check if the full escape sequence is matched. 
- if r.escapeKeyPos == len(r.escapeKeys) { - n = i + 1 - r.escapeKeyPos - if n < 0 { - n = 0 - } - return n, EscapeError{} - } - continue - } - - // If we need to prepend a partial escape sequence from the previous - // read, make sure the new buffer size doesn't exceed len(buf). - // Otherwise, preserve any extra data in a buffer for the next read. - if i < r.escapeKeyPos { - preserve := make([]byte, 0, r.escapeKeyPos+n) - preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...) - preserve = append(preserve, buf[:n]...) - n = copy(buf, preserve) - i += r.escapeKeyPos - r.buf = append(r.buf, preserve[n:]...) - } - r.escapeKeyPos = 0 - } - - // If we're in the middle of reading an escape sequence, make sure we don't - // let the caller read it. If later on we find that this is not the escape - // sequence, we'll prepend it back to buf. - n -= r.escapeKeyPos - if n < 0 { - n = 0 - } - return n, err -} diff --git a/src/vendor/github.com/moby/term/tc.go b/src/vendor/github.com/moby/term/tc.go deleted file mode 100644 index 65556027a..000000000 --- a/src/vendor/github.com/moby/term/tc.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package term - -import ( - "golang.org/x/sys/unix" -) - -func tcget(fd uintptr) (*Termios, error) { - p, err := unix.IoctlGetTermios(int(fd), getTermios) - if err != nil { - return nil, err - } - return p, nil -} - -func tcset(fd uintptr, p *Termios) error { - return unix.IoctlSetTermios(int(fd), setTermios, p) -} diff --git a/src/vendor/github.com/moby/term/term.go b/src/vendor/github.com/moby/term/term.go deleted file mode 100644 index 29c6acf1c..000000000 --- a/src/vendor/github.com/moby/term/term.go +++ /dev/null @@ -1,120 +0,0 @@ -// +build !windows - -// Package term provides structures and helper functions to work with -// terminal (state, sizes). -package term - -import ( - "errors" - "fmt" - "io" - "os" - "os/signal" - - "golang.org/x/sys/unix" -) - -var ( - // ErrInvalidState is returned if the state of the terminal is invalid. - ErrInvalidState = errors.New("Invalid terminal state") -) - -// State represents the state of the terminal. -type State struct { - termios Termios -} - -// Winsize represents the size of the terminal window. -type Winsize struct { - Height uint16 - Width uint16 - x uint16 - y uint16 -} - -// StdStreams returns the standard streams (stdin, stdout, stderr). -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - return os.Stdin, os.Stdout, os.Stderr -} - -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { - var inFd uintptr - var isTerminalIn bool - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminalIn = IsTerminal(inFd) - } - return inFd, isTerminalIn -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - _, err := tcget(fd) - return err == nil -} - -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. -func RestoreTerminal(fd uintptr, state *State) error { - if state == nil { - return ErrInvalidState - } - return tcset(fd, &state.termios) -} - -// SaveState saves the state of the terminal connected to the given file descriptor. 
-func SaveState(fd uintptr) (*State, error) { - termios, err := tcget(fd) - if err != nil { - return nil, err - } - return &State{termios: *termios}, nil -} - -// DisableEcho applies the specified state to the terminal connected to the file -// descriptor, with echo disabled. -func DisableEcho(fd uintptr, state *State) error { - newState := state.termios - newState.Lflag &^= unix.ECHO - - if err := tcset(fd, &newState); err != nil { - return err - } - handleInterrupt(fd, state) - return nil -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into -// raw mode and returns the previous state. On UNIX, this puts both the input -// and output into raw mode. On Windows, it only puts the input into raw mode. -func SetRawTerminal(fd uintptr) (*State, error) { - oldState, err := MakeRaw(fd) - if err != nil { - return nil, err - } - handleInterrupt(fd, oldState) - return oldState, err -} - -// SetRawTerminalOutput puts the output of terminal connected to the given file -// descriptor into raw mode. On UNIX, this does nothing and returns nil for the -// state. On Windows, it disables LF -> CRLF translation. -func SetRawTerminalOutput(fd uintptr) (*State, error) { - return nil, nil -} - -func handleInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - go func() { - for range sigchan { - // quit cleanly and the new terminal item is on a new line - fmt.Println() - signal.Stop(sigchan) - close(sigchan) - RestoreTerminal(fd, state) - os.Exit(1) - } - }() -} diff --git a/src/vendor/github.com/moby/term/term_windows.go b/src/vendor/github.com/moby/term/term_windows.go deleted file mode 100644 index ba82960d4..000000000 --- a/src/vendor/github.com/moby/term/term_windows.go +++ /dev/null @@ -1,231 +0,0 @@ -package term - -import ( - "io" - "os" - "os/signal" - - windowsconsole "github.com/moby/term/windows" - "golang.org/x/sys/windows" -) - -// State holds the console mode for the terminal. -type State struct { - mode uint32 -} - -// Winsize is used for window size. -type Winsize struct { - Height uint16 - Width uint16 -} - -// vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console -var vtInputSupported bool - -// StdStreams returns the standard streams (stdin, stdout, stderr). -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - // Turn on VT handling on all std handles, if possible. This might - // fail, in which case we will fall back to terminal emulation. - var ( - emulateStdin, emulateStdout, emulateStderr bool - - mode uint32 - ) - - fd := windows.Handle(os.Stdin.Fd()) - if err := windows.GetConsoleMode(fd, &mode); err == nil { - // Validate that winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. - if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil { - emulateStdin = true - } else { - vtInputSupported = true - } - // Unconditionally set the console mode back even on failure because SetConsoleMode - // remembers invalid bits on input handles. - _ = windows.SetConsoleMode(fd, mode) - } - - fd = windows.Handle(os.Stdout.Fd()) - if err := windows.GetConsoleMode(fd, &mode); err == nil { - // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. 
- if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING|windows.DISABLE_NEWLINE_AUTO_RETURN); err != nil { - emulateStdout = true - } else { - _ = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING) - } - } - - fd = windows.Handle(os.Stderr.Fd()) - if err := windows.GetConsoleMode(fd, &mode); err == nil { - // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. - if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING|windows.DISABLE_NEWLINE_AUTO_RETURN); err != nil { - emulateStderr = true - } else { - _ = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING) - } - } - - // Temporarily use STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and - // STD_ERROR_HANDLE from syscall rather than x/sys/windows as long as - // go-ansiterm hasn't switch to x/sys/windows. - // TODO: switch back to x/sys/windows once go-ansiterm has switched - if emulateStdin { - h := uint32(windows.STD_INPUT_HANDLE) - stdIn = windowsconsole.NewAnsiReader(int(h)) - } else { - stdIn = os.Stdin - } - - if emulateStdout { - h := uint32(windows.STD_OUTPUT_HANDLE) - stdOut = windowsconsole.NewAnsiWriter(int(h)) - } else { - stdOut = os.Stdout - } - - if emulateStderr { - h := uint32(windows.STD_ERROR_HANDLE) - stdErr = windowsconsole.NewAnsiWriter(int(h)) - } else { - stdErr = os.Stderr - } - - return -} - -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { - return windowsconsole.GetHandleInfo(in) -} - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - var info windows.ConsoleScreenBufferInfo - if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil { - return nil, err - } - - winsize := &Winsize{ - Width: uint16(info.Window.Right - info.Window.Left + 1), - Height: uint16(info.Window.Bottom - info.Window.Top + 1), - } - - return winsize, nil -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - var mode uint32 - err := windows.GetConsoleMode(windows.Handle(fd), &mode) - return err == nil -} - -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. -func RestoreTerminal(fd uintptr, state *State) error { - return windows.SetConsoleMode(windows.Handle(fd), state.mode) -} - -// SaveState saves the state of the terminal connected to the given file descriptor. -func SaveState(fd uintptr) (*State, error) { - var mode uint32 - - if err := windows.GetConsoleMode(windows.Handle(fd), &mode); err != nil { - return nil, err - } - - return &State{mode: mode}, nil -} - -// DisableEcho disables echo for the terminal connected to the given file descriptor. -// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx -func DisableEcho(fd uintptr, state *State) error { - mode := state.mode - mode &^= windows.ENABLE_ECHO_INPUT - mode |= windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT - err := windows.SetConsoleMode(windows.Handle(fd), mode) - if err != nil { - return err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return nil -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into -// raw mode and returns the previous state. 
On UNIX, this puts both the input -// and output into raw mode. On Windows, it only puts the input into raw mode. -func SetRawTerminal(fd uintptr) (*State, error) { - state, err := MakeRaw(fd) - if err != nil { - return nil, err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return state, err -} - -// SetRawTerminalOutput puts the output of terminal connected to the given file -// descriptor into raw mode. On UNIX, this does nothing and returns nil for the -// state. On Windows, it disables LF -> CRLF translation. -func SetRawTerminalOutput(fd uintptr) (*State, error) { - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - // Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this - // version of Windows. - _ = windows.SetConsoleMode(windows.Handle(fd), state.mode|windows.DISABLE_NEWLINE_AUTO_RETURN) - return state, err -} - -// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be restored. -func MakeRaw(fd uintptr) (*State, error) { - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - mode := state.mode - - // See - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx - - // Disable these modes - mode &^= windows.ENABLE_ECHO_INPUT - mode &^= windows.ENABLE_LINE_INPUT - mode &^= windows.ENABLE_MOUSE_INPUT - mode &^= windows.ENABLE_WINDOW_INPUT - mode &^= windows.ENABLE_PROCESSED_INPUT - - // Enable these modes - mode |= windows.ENABLE_EXTENDED_FLAGS - mode |= windows.ENABLE_INSERT_MODE - mode |= windows.ENABLE_QUICK_EDIT_MODE - if vtInputSupported { - mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT - } - - err = windows.SetConsoleMode(windows.Handle(fd), mode) - if err != nil { - return nil, err - } - return state, nil -} - -func restoreAtInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - - go func() { - _ = <-sigchan - _ = RestoreTerminal(fd, state) - os.Exit(0) - }() -} diff --git a/src/vendor/github.com/moby/term/termios.go b/src/vendor/github.com/moby/term/termios.go deleted file mode 100644 index 0f028e227..000000000 --- a/src/vendor/github.com/moby/term/termios.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build !windows - -package term - -import ( - "golang.org/x/sys/unix" -) - -// Termios is the Unix API for terminal I/O. -type Termios = unix.Termios - -// MakeRaw puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. 
-func MakeRaw(fd uintptr) (*State, error) { - termios, err := tcget(fd) - if err != nil { - return nil, err - } - - oldState := State{termios: *termios} - - termios.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) - termios.Oflag &^= unix.OPOST - termios.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) - termios.Cflag &^= (unix.CSIZE | unix.PARENB) - termios.Cflag |= unix.CS8 - termios.Cc[unix.VMIN] = 1 - termios.Cc[unix.VTIME] = 0 - - if err := tcset(fd, termios); err != nil { - return nil, err - } - return &oldState, nil -} diff --git a/src/vendor/github.com/moby/term/termios_bsd.go b/src/vendor/github.com/moby/term/termios_bsd.go deleted file mode 100644 index 922dd4baa..000000000 --- a/src/vendor/github.com/moby/term/termios_bsd.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build darwin freebsd openbsd netbsd - -package term - -import ( - "golang.org/x/sys/unix" -) - -const ( - getTermios = unix.TIOCGETA - setTermios = unix.TIOCSETA -) diff --git a/src/vendor/github.com/moby/term/termios_nonbsd.go b/src/vendor/github.com/moby/term/termios_nonbsd.go deleted file mode 100644 index 038fd61ba..000000000 --- a/src/vendor/github.com/moby/term/termios_nonbsd.go +++ /dev/null @@ -1,12 +0,0 @@ -//+build !darwin,!freebsd,!netbsd,!openbsd,!windows - -package term - -import ( - "golang.org/x/sys/unix" -) - -const ( - getTermios = unix.TCGETS - setTermios = unix.TCSETS -) diff --git a/src/vendor/github.com/moby/term/windows/ansi_reader.go b/src/vendor/github.com/moby/term/windows/ansi_reader.go deleted file mode 100644 index 155251521..000000000 --- a/src/vendor/github.com/moby/term/windows/ansi_reader.go +++ /dev/null @@ -1,252 +0,0 @@ -// +build windows - -package windowsconsole - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "strings" - "unsafe" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -const ( - escapeSequence = ansiterm.KEY_ESC_CSI -) - -// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. -type ansiReader struct { - file *os.File - fd uintptr - buffer []byte - cbBuffer int - command []byte -} - -// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a -// Windows console input handle. -func NewAnsiReader(nFile int) io.ReadCloser { - file, fd := winterm.GetStdFile(nFile) - return &ansiReader{ - file: file, - fd: fd, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - buffer: make([]byte, 0), - } -} - -// Close closes the wrapped file. -func (ar *ansiReader) Close() (err error) { - return ar.file.Close() -} - -// Fd returns the file descriptor of the wrapped file. -func (ar *ansiReader) Fd() uintptr { - return ar.fd -} - -// Read reads up to len(p) bytes of translated input events into p. 
-func (ar *ansiReader) Read(p []byte) (int, error) { - if len(p) == 0 { - return 0, nil - } - - // Previously read bytes exist, read as much as we can and return - if len(ar.buffer) > 0 { - originalLength := len(ar.buffer) - copiedLength := copy(p, ar.buffer) - - if copiedLength == originalLength { - ar.buffer = make([]byte, 0, len(p)) - } else { - ar.buffer = ar.buffer[copiedLength:] - } - - return copiedLength, nil - } - - // Read and translate key events - events, err := readInputEvents(ar, len(p)) - if err != nil { - return 0, err - } else if len(events) == 0 { - return 0, nil - } - - keyBytes := translateKeyEvents(events, []byte(escapeSequence)) - - // Save excess bytes and right-size keyBytes - if len(keyBytes) > len(p) { - ar.buffer = keyBytes[len(p):] - keyBytes = keyBytes[:len(p)] - } else if len(keyBytes) == 0 { - return 0, nil - } - - copiedLength := copy(p, keyBytes) - if copiedLength != len(keyBytes) { - return 0, errors.New("unexpected copy length encountered") - } - - return copiedLength, nil -} - -// readInputEvents polls until at least one event is available. -func readInputEvents(ar *ansiReader, maxBytes int) ([]winterm.INPUT_RECORD, error) { - // Determine the maximum number of records to retrieve - // -- Cast around the type system to obtain the size of a single INPUT_RECORD. - // unsafe.Sizeof requires an expression vs. a type-reference; the casting - // tricks the type system into believing it has such an expression. - recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) - countRecords := maxBytes / recordSize - if countRecords > ansiterm.MAX_INPUT_EVENTS { - countRecords = ansiterm.MAX_INPUT_EVENTS - } else if countRecords == 0 { - countRecords = 1 - } - - // Wait for and read input events - events := make([]winterm.INPUT_RECORD, countRecords) - nEvents := uint32(0) - eventsExist, err := winterm.WaitForSingleObject(ar.fd, winterm.WAIT_INFINITE) - if err != nil { - return nil, err - } - - if eventsExist { - err = winterm.ReadConsoleInput(ar.fd, events, &nEvents) - if err != nil { - return nil, err - } - } - - // Return a slice restricted to the number of returned records - return events[:nEvents], nil -} - -// KeyEvent Translation Helpers - -var arrowKeyMapPrefix = map[uint16]string{ - winterm.VK_UP: "%s%sA", - winterm.VK_DOWN: "%s%sB", - winterm.VK_RIGHT: "%s%sC", - winterm.VK_LEFT: "%s%sD", -} - -var keyMapPrefix = map[uint16]string{ - winterm.VK_UP: "\x1B[%sA", - winterm.VK_DOWN: "\x1B[%sB", - winterm.VK_RIGHT: "\x1B[%sC", - winterm.VK_LEFT: "\x1B[%sD", - winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 - winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 - winterm.VK_INSERT: "\x1B[2%s~", - winterm.VK_DELETE: "\x1B[3%s~", - winterm.VK_PRIOR: "\x1B[5%s~", - winterm.VK_NEXT: "\x1B[6%s~", - winterm.VK_F1: "", - winterm.VK_F2: "", - winterm.VK_F3: "\x1B[13%s~", - winterm.VK_F4: "\x1B[14%s~", - winterm.VK_F5: "\x1B[15%s~", - winterm.VK_F6: "\x1B[17%s~", - winterm.VK_F7: "\x1B[18%s~", - winterm.VK_F8: "\x1B[19%s~", - winterm.VK_F9: "\x1B[20%s~", - winterm.VK_F10: "\x1B[21%s~", - winterm.VK_F11: "\x1B[23%s~", - winterm.VK_F12: "\x1B[24%s~", -} - -// translateKeyEvents converts the input events into the appropriate ANSI string. 
-func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { - var buffer bytes.Buffer - for _, event := range events { - if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { - buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) - } - } - - return buffer.Bytes() -} - -// keyToString maps the given input event record to the corresponding string. -func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { - if keyEvent.UnicodeChar == 0 { - return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) - } - - _, alt, control := getControlKeys(keyEvent.ControlKeyState) - if control { - // TODO(azlinux): Implement following control sequences - // -D Signals the end of input from the keyboard; also exits current shell. - // -H Deletes the first character to the left of the cursor. Also called the ERASE key. - // -Q Restarts printing after it has been stopped with -s. - // -S Suspends printing on the screen (does not stop the program). - // -U Deletes all characters on the current line. Also called the KILL key. - // -E Quits current command and creates a core - - } - - // +Key generates ESC N Key - if !control && alt { - return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) - } - - return string(keyEvent.UnicodeChar) -} - -// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string. -func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string { - shift, alt, control := getControlKeys(controlState) - modifier := getControlKeysModifier(shift, alt, control) - - if format, ok := arrowKeyMapPrefix[key]; ok { - return fmt.Sprintf(format, escapeSequence, modifier) - } - - if format, ok := keyMapPrefix[key]; ok { - return fmt.Sprintf(format, modifier) - } - - return "" -} - -// getControlKeys extracts the shift, alt, and ctrl key states. -func getControlKeys(controlState uint32) (shift, alt, control bool) { - shift = 0 != (controlState & winterm.SHIFT_PRESSED) - alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) - control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) - return shift, alt, control -} - -// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. -func getControlKeysModifier(shift, alt, control bool) string { - if shift && alt && control { - return ansiterm.KEY_CONTROL_PARAM_8 - } - if alt && control { - return ansiterm.KEY_CONTROL_PARAM_7 - } - if shift && control { - return ansiterm.KEY_CONTROL_PARAM_6 - } - if control { - return ansiterm.KEY_CONTROL_PARAM_5 - } - if shift && alt { - return ansiterm.KEY_CONTROL_PARAM_4 - } - if alt { - return ansiterm.KEY_CONTROL_PARAM_3 - } - if shift { - return ansiterm.KEY_CONTROL_PARAM_2 - } - return "" -} diff --git a/src/vendor/github.com/moby/term/windows/ansi_writer.go b/src/vendor/github.com/moby/term/windows/ansi_writer.go deleted file mode 100644 index ccb5ef077..000000000 --- a/src/vendor/github.com/moby/term/windows/ansi_writer.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build windows - -package windowsconsole - -import ( - "io" - "os" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. 
-type ansiWriter struct { - file *os.File - fd uintptr - infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO - command []byte - escapeSequence []byte - inAnsiSequence bool - parser *ansiterm.AnsiParser -} - -// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a -// Windows console output handle. -func NewAnsiWriter(nFile int) io.Writer { - file, fd := winterm.GetStdFile(nFile) - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil - } - - parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) - - return &ansiWriter{ - file: file, - fd: fd, - infoReset: info, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - escapeSequence: []byte(ansiterm.KEY_ESC_CSI), - parser: parser, - } -} - -func (aw *ansiWriter) Fd() uintptr { - return aw.fd -} - -// Write writes len(p) bytes from p to the underlying data stream. -func (aw *ansiWriter) Write(p []byte) (total int, err error) { - if len(p) == 0 { - return 0, nil - } - - return aw.parser.Parse(p) -} diff --git a/src/vendor/github.com/moby/term/windows/console.go b/src/vendor/github.com/moby/term/windows/console.go deleted file mode 100644 index 993694ddc..000000000 --- a/src/vendor/github.com/moby/term/windows/console.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build windows - -package windowsconsole - -import ( - "os" - - "golang.org/x/sys/windows" -) - -// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. -func GetHandleInfo(in interface{}) (uintptr, bool) { - switch t := in.(type) { - case *ansiReader: - return t.Fd(), true - case *ansiWriter: - return t.Fd(), true - } - - var inFd uintptr - var isTerminal bool - - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminal = isConsole(inFd) - } - return inFd, isTerminal -} - -// IsConsole returns true if the given file descriptor is a Windows Console. -// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. -// Deprecated: use golang.org/x/sys/windows.GetConsoleMode() or golang.org/x/term.IsTerminal() -var IsConsole = isConsole - -func isConsole(fd uintptr) bool { - var mode uint32 - err := windows.GetConsoleMode(windows.Handle(fd), &mode) - return err == nil -} diff --git a/src/vendor/github.com/moby/term/windows/doc.go b/src/vendor/github.com/moby/term/windows/doc.go deleted file mode 100644 index 54265fffa..000000000 --- a/src/vendor/github.com/moby/term/windows/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// These files implement ANSI-aware input and output streams for use by the Docker Windows client. -// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create -// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. - -package windowsconsole diff --git a/src/vendor/github.com/moby/term/winsize.go b/src/vendor/github.com/moby/term/winsize.go deleted file mode 100644 index 1ef98d599..000000000 --- a/src/vendor/github.com/moby/term/winsize.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !windows - -package term - -import ( - "golang.org/x/sys/unix" -) - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) - ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel} - return ws, err -} - -// SetWinsize tries to set the specified window size for the specified file descriptor. 
-func SetWinsize(fd uintptr, ws *Winsize) error { - uws := &unix.Winsize{Row: ws.Height, Col: ws.Width, Xpixel: ws.x, Ypixel: ws.y} - return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, uws) -} diff --git a/src/vendor/github.com/monochromegane/go-gitignore/.travis.yml b/src/vendor/github.com/monochromegane/go-gitignore/.travis.yml deleted file mode 100644 index b06a36a46..000000000 --- a/src/vendor/github.com/monochromegane/go-gitignore/.travis.yml +++ /dev/null @@ -1,6 +0,0 @@ -language: go -go: - - 1.14.x - - master -script: - - go test -v ./... diff --git a/src/vendor/github.com/monochromegane/go-gitignore/LICENSE b/src/vendor/github.com/monochromegane/go-gitignore/LICENSE deleted file mode 100644 index 91b84e927..000000000 --- a/src/vendor/github.com/monochromegane/go-gitignore/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) [2015] [go-gitignore] - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/src/vendor/github.com/monochromegane/go-gitignore/README.md b/src/vendor/github.com/monochromegane/go-gitignore/README.md deleted file mode 100644 index 51a480747..000000000 --- a/src/vendor/github.com/monochromegane/go-gitignore/README.md +++ /dev/null @@ -1,95 +0,0 @@ -# go-gitignore [![Build Status](https://travis-ci.org/monochromegane/go-gitignore.svg)](https://travis-ci.org/monochromegane/go-gitignore) - -A fast gitignore matching library for Go. - -This library use simple tree index for matching, so keep fast if gitignore file has many pattern. - -## Usage - -```go -gitignore, _ := gitignore.NewGitIgnore("/path/to/gitignore") - -path := "/path/to/file" -isDir := false -gitignore.Match(path, isDir) -``` - -### Specify base directory - -go-gitignore treat `path` as a base directory. -If you want to specify other base (e.g. current directory and Global gitignore), you can like the following. - -```go -gitignore, _ := gitignore.NewGitIgnore("/home/you/.gitignore", ".") -``` - -### From io.Reader - -go-gitignore can initialize from io.Reader. - -```go -gitignore, _ := gitignore.NewGitIgnoreFromReader(base, reader) -``` - -## Simple tree index - -go-gitignore parse gitignore file, and generate a simple tree index for matching like the following. - -``` -. 
-├── accept -│   ├── absolute -│   │   └── depth -│   │   ├── initial -│   │   └── other -│   └── relative -│   └── depth -│   ├── initial -│   └── other -└── ignore - ├── absolute - │   └── depth - │   ├── initial - │   └── other - └── relative - └── depth - ├── initial - └── other -``` - -## Features - -- Support absolute path (/path/to/ignore) -- Support relative path (path/to/ignore) -- Support accept pattern (!path/to/accept) -- Support directory pattern (path/to/directory/) -- Support glob pattern (path/to/\*.txt) - -*note: glob pattern* - -go-gitignore use [filepath.Match](https://golang.org/pkg/path/filepath/#Match) for matching meta char pattern, so not support recursive pattern (path/`**`/file). - -## Installation - -```sh -$ go get github.com/monochromegane/go-gitignore -``` - -## Contribution - -1. Fork it -2. Create a feature branch -3. Commit your changes -4. Rebase your local changes against the master branch -5. Run test suite with the `go test ./...` command and confirm that it passes -6. Run `gofmt -s` -7. Create new Pull Request - -## License - -[MIT](https://github.com/monochromegane/go-gitignore/blob/master/LICENSE) - -## Author - -[monochromegane](https://github.com/monochromegane) - diff --git a/src/vendor/github.com/monochromegane/go-gitignore/depth_holder.go b/src/vendor/github.com/monochromegane/go-gitignore/depth_holder.go deleted file mode 100644 index 9805b325d..000000000 --- a/src/vendor/github.com/monochromegane/go-gitignore/depth_holder.go +++ /dev/null @@ -1,79 +0,0 @@ -package gitignore - -import "strings" - -const ( - asc = iota - desc -) - -type depthPatternHolder struct { - patterns depthPatterns - order int -} - -func newDepthPatternHolder(order int) depthPatternHolder { - return depthPatternHolder{ - patterns: depthPatterns{m: map[int]initialPatternHolder{}}, - order: order, - } -} - -func (h *depthPatternHolder) add(pattern string) { - count := strings.Count(strings.Trim(pattern, "/"), "/") - h.patterns.set(count+1, pattern) -} - -func (h depthPatternHolder) match(path string, isDir bool) bool { - if h.patterns.size() == 0 { - return false - } - - for depth := 1; ; depth++ { - var part string - var isLast, isDirCurrent bool - if h.order == asc { - part, isLast = cutN(path, depth) - if isLast { - isDirCurrent = isDir - } else { - isDirCurrent = false - } - } else { - part, isLast = cutLastN(path, depth) - isDirCurrent = isDir - } - if patterns, ok := h.patterns.get(depth); ok { - if patterns.match(part, isDirCurrent) { - return true - } - } - if isLast { - break - } - } - return false -} - -type depthPatterns struct { - m map[int]initialPatternHolder -} - -func (p *depthPatterns) set(depth int, pattern string) { - if ps, ok := p.m[depth]; ok { - ps.add(pattern) - } else { - holder := newInitialPatternHolder() - holder.add(pattern) - p.m[depth] = holder - } -} - -func (p depthPatterns) get(depth int) (initialPatternHolder, bool) { - patterns, ok := p.m[depth] - return patterns, ok -} - -func (p depthPatterns) size() int { - return len(p.m) -} diff --git a/src/vendor/github.com/monochromegane/go-gitignore/full_scan_patterns.go b/src/vendor/github.com/monochromegane/go-gitignore/full_scan_patterns.go deleted file mode 100644 index 8c04ef3a7..000000000 --- a/src/vendor/github.com/monochromegane/go-gitignore/full_scan_patterns.go +++ /dev/null @@ -1,31 +0,0 @@ -package gitignore - -import "strings" - -// Only benchmark use -type fullScanPatterns struct { - absolute patterns - relative patterns -} - -func newFullScanPatterns() *fullScanPatterns { - return 
&fullScanPatterns{ - absolute: patterns{}, - relative: patterns{}, - } -} - -func (ps *fullScanPatterns) add(pattern string) { - if strings.HasPrefix(pattern, "/") { - ps.absolute.add(newPattern(pattern)) - } else { - ps.relative.add(newPattern(pattern)) - } -} - -func (ps fullScanPatterns) match(path string, isDir bool) bool { - if ps.absolute.match(path, isDir) { - return true - } - return ps.relative.match(path, isDir) -} diff --git a/src/vendor/github.com/monochromegane/go-gitignore/gitignore.go b/src/vendor/github.com/monochromegane/go-gitignore/gitignore.go deleted file mode 100644 index 9c719a6ca..000000000 --- a/src/vendor/github.com/monochromegane/go-gitignore/gitignore.go +++ /dev/null @@ -1,80 +0,0 @@ -package gitignore - -import ( - "bufio" - "io" - "os" - "path/filepath" - "strings" -) - -type IgnoreMatcher interface { - Match(path string, isDir bool) bool -} - -type DummyIgnoreMatcher bool - -func (d DummyIgnoreMatcher) Match(path string, isDir bool) bool { - return bool(d) -} - -type gitIgnore struct { - ignorePatterns scanStrategy - acceptPatterns scanStrategy - path string -} - -func NewGitIgnore(gitignore string, base ...string) (IgnoreMatcher, error) { - var path string - if len(base) > 0 { - path = base[0] - } else { - path = filepath.Dir(gitignore) - } - - file, err := os.Open(gitignore) - if err != nil { - return nil, err - } - defer file.Close() - - return NewGitIgnoreFromReader(path, file), nil -} - -func NewGitIgnoreFromReader(path string, r io.Reader) IgnoreMatcher { - g := gitIgnore{ - ignorePatterns: newIndexScanPatterns(), - acceptPatterns: newIndexScanPatterns(), - path: path, - } - scanner := bufio.NewScanner(r) - for scanner.Scan() { - line := strings.Trim(scanner.Text(), " ") - if len(line) == 0 || strings.HasPrefix(line, "#") { - continue - } - if strings.HasPrefix(line, `\#`) { - line = strings.TrimPrefix(line, `\`) - } - - if strings.HasPrefix(line, "!") { - g.acceptPatterns.add(strings.TrimPrefix(line, "!")) - } else { - g.ignorePatterns.add(line) - } - } - return g -} - -func (g gitIgnore) Match(path string, isDir bool) bool { - relativePath, err := filepath.Rel(g.path, path) - if err != nil { - return false - } - relativePath = filepath.ToSlash(relativePath) - - if g.acceptPatterns.match(relativePath, isDir) { - return false - } - return g.ignorePatterns.match(relativePath, isDir) -} diff --git a/src/vendor/github.com/monochromegane/go-gitignore/index_scan_patterns.go b/src/vendor/github.com/monochromegane/go-gitignore/index_scan_patterns.go deleted file mode 100644 index 882280e95..000000000 --- a/src/vendor/github.com/monochromegane/go-gitignore/index_scan_patterns.go +++ /dev/null @@ -1,35 +0,0 @@ -package gitignore - -import "strings" - -type indexScanPatterns struct { - absolute depthPatternHolder - relative depthPatternHolder -} - -func newIndexScanPatterns() *indexScanPatterns { - return &indexScanPatterns{ - absolute: newDepthPatternHolder(asc), - relative: newDepthPatternHolder(desc), - } -} - -func (ps *indexScanPatterns) add(pattern string) { - if strings.HasPrefix(pattern, "/") { - ps.absolute.add(pattern) - } else { - ps.relative.add(pattern) - } -} - -func (ps indexScanPatterns) match(path string, isDir bool) bool { - if ps.absolute.match(path, isDir) { - return true - } - return ps.relative.match(path, isDir) -} - -type scanStrategy interface { - add(pattern string) - match(path string, isDir bool) bool -} diff --git a/src/vendor/github.com/monochromegane/go-gitignore/initial_holder.go 
b/src/vendor/github.com/monochromegane/go-gitignore/initial_holder.go deleted file mode 100644 index 86f0bfee2..000000000 --- a/src/vendor/github.com/monochromegane/go-gitignore/initial_holder.go +++ /dev/null @@ -1,62 +0,0 @@ -package gitignore - -import "strings" - -const initials = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ." - -type initialPatternHolder struct { - patterns initialPatterns - otherPatterns *patterns -} - -func newInitialPatternHolder() initialPatternHolder { - return initialPatternHolder{ - patterns: initialPatterns{m: map[byte]*patterns{}}, - otherPatterns: &patterns{}, - } -} - -func (h *initialPatternHolder) add(pattern string) { - trimedPattern := strings.TrimPrefix(pattern, "/") - if strings.IndexAny(trimedPattern[0:1], initials) != -1 { - h.patterns.set(trimedPattern[0], newPatternForEqualizedPath(pattern)) - } else { - h.otherPatterns.add(newPatternForEqualizedPath(pattern)) - } -} - -func (h initialPatternHolder) match(path string, isDir bool) bool { - if h.patterns.size() == 0 && h.otherPatterns.size() == 0 { - return false - } - if patterns, ok := h.patterns.get(path[0]); ok { - if patterns.match(path, isDir) { - return true - } - } - return h.otherPatterns.match(path, isDir) -} - -type initialPatterns struct { - m map[byte]*patterns -} - -func (p *initialPatterns) set(initial byte, pattern pattern) { - if ps, ok := p.m[initial]; ok { - ps.add(pattern) - } else { - patterns := &patterns{} - patterns.add(pattern) - p.m[initial] = patterns - - } -} - -func (p initialPatterns) get(initial byte) (*patterns, bool) { - patterns, ok := p.m[initial] - return patterns, ok -} - -func (p initialPatterns) size() int { - return len(p.m) -} diff --git a/src/vendor/github.com/monochromegane/go-gitignore/match.go b/src/vendor/github.com/monochromegane/go-gitignore/match.go deleted file mode 100644 index 4140a9bdc..000000000 --- a/src/vendor/github.com/monochromegane/go-gitignore/match.go +++ /dev/null @@ -1,24 +0,0 @@ -package gitignore - -import "path/filepath" - -type pathMatcher interface { - match(path string) bool -} - -type simpleMatcher struct { - path string -} - -func (m simpleMatcher) match(path string) bool { - return m.path == path -} - -type filepathMatcher struct { - path string -} - -func (m filepathMatcher) match(path string) bool { - match, _ := filepath.Match(m.path, path) - return match -} diff --git a/src/vendor/github.com/monochromegane/go-gitignore/pattern.go b/src/vendor/github.com/monochromegane/go-gitignore/pattern.go deleted file mode 100644 index 93adbf763..000000000 --- a/src/vendor/github.com/monochromegane/go-gitignore/pattern.go +++ /dev/null @@ -1,69 +0,0 @@ -package gitignore - -import ( - "path/filepath" - "strings" -) - -var Separator = string(filepath.Separator) - -type pattern struct { - hasRootPrefix bool - hasDirSuffix bool - pathDepth int - matcher pathMatcher - onlyEqualizedPath bool -} - -func newPattern(path string) pattern { - hasRootPrefix := path[0] == '/' - hasDirSuffix := path[len(path)-1] == '/' - - var pathDepth int - if !hasRootPrefix { - pathDepth = strings.Count(path, "/") - } - - var matcher pathMatcher - matchingPath := strings.Trim(path, "/") - if hasMeta(path) { - matcher = filepathMatcher{path: matchingPath} - } else { - matcher = simpleMatcher{path: matchingPath} - } - - return pattern{ - hasRootPrefix: hasRootPrefix, - hasDirSuffix: hasDirSuffix, - pathDepth: pathDepth, - matcher: matcher, - } -} - -func newPatternForEqualizedPath(path string) pattern { - pattern := newPattern(path) - 
pattern.onlyEqualizedPath = true - return pattern -} - -func (p pattern) match(path string, isDir bool) bool { - - if p.hasDirSuffix && !isDir { - return false - } - - var targetPath string - if p.hasRootPrefix || p.onlyEqualizedPath { - // absolute pattern or only equalized path mode - targetPath = path - } else { - // relative pattern - targetPath = p.equalizeDepth(path) - } - return p.matcher.match(targetPath) -} - -func (p pattern) equalizeDepth(path string) string { - equalizedPath, _ := cutLastN(path, p.pathDepth+1) - return equalizedPath -} diff --git a/src/vendor/github.com/monochromegane/go-gitignore/patterns.go b/src/vendor/github.com/monochromegane/go-gitignore/patterns.go deleted file mode 100644 index 6770fb465..000000000 --- a/src/vendor/github.com/monochromegane/go-gitignore/patterns.go +++ /dev/null @@ -1,22 +0,0 @@ -package gitignore - -type patterns struct { - patterns []pattern -} - -func (ps *patterns) add(pattern pattern) { - ps.patterns = append(ps.patterns, pattern) -} - -func (ps *patterns) size() int { - return len(ps.patterns) -} - -func (ps patterns) match(path string, isDir bool) bool { - for _, p := range ps.patterns { - if match := p.match(path, isDir); match { - return true - } - } - return false -} diff --git a/src/vendor/github.com/monochromegane/go-gitignore/util.go b/src/vendor/github.com/monochromegane/go-gitignore/util.go deleted file mode 100644 index b5ab9bbfd..000000000 --- a/src/vendor/github.com/monochromegane/go-gitignore/util.go +++ /dev/null @@ -1,45 +0,0 @@ -package gitignore - -import ( - "os" - "strings" -) - -func cutN(path string, n int) (string, bool) { - isLast := true - - var i, count int - for i < len(path)-1 { - if os.IsPathSeparator(path[i]) { - count++ - if count >= n { - isLast = false - break - } - } - i++ - } - return path[:i+1], isLast -} - -func cutLastN(path string, n int) (string, bool) { - isLast := true - i := len(path) - 1 - - var count int - for i >= 0 { - if os.IsPathSeparator(path[i]) { - count++ - if count >= n { - isLast = false - break - } - } - i-- - } - return path[i+1:], isLast -} - -func hasMeta(path string) bool { - return strings.IndexAny(path, "*?[") >= 0 -} diff --git a/src/vendor/github.com/morikuni/aec/LICENSE b/src/vendor/github.com/morikuni/aec/LICENSE deleted file mode 100644 index 1c2640164..000000000 --- a/src/vendor/github.com/morikuni/aec/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Taihei Morikuni - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/src/vendor/github.com/morikuni/aec/README.md b/src/vendor/github.com/morikuni/aec/README.md deleted file mode 100644 index 3cbc4343e..000000000 --- a/src/vendor/github.com/morikuni/aec/README.md +++ /dev/null @@ -1,178 +0,0 @@ -# aec - -[![GoDoc](https://godoc.org/github.com/morikuni/aec?status.svg)](https://godoc.org/github.com/morikuni/aec) - -Go wrapper for ANSI escape code. - -## Install - -```bash -go get github.com/morikuni/aec -``` - -## Features - -ANSI escape codes depend on terminal environment. -Some of these features may not work. -Check supported Font-Style/Font-Color features with [checkansi](./checkansi). - -[Wikipedia](https://en.wikipedia.org/wiki/ANSI_escape_code) for more detail. - -### Cursor - -- `Up(n)` -- `Down(n)` -- `Right(n)` -- `Left(n)` -- `NextLine(n)` -- `PreviousLine(n)` -- `Column(col)` -- `Position(row, col)` -- `Save` -- `Restore` -- `Hide` -- `Show` -- `Report` - -### Erase - -- `EraseDisplay(mode)` -- `EraseLine(mode)` - -### Scroll - -- `ScrollUp(n)` -- `ScrollDown(n)` - -### Font Style - -- `Bold` -- `Faint` -- `Italic` -- `Underline` -- `BlinkSlow` -- `BlinkRapid` -- `Inverse` -- `Conceal` -- `CrossOut` -- `Frame` -- `Encircle` -- `Overline` - -### Font Color - -Foreground color. - -- `DefaultF` -- `BlackF` -- `RedF` -- `GreenF` -- `YellowF` -- `BlueF` -- `MagentaF` -- `CyanF` -- `WhiteF` -- `LightBlackF` -- `LightRedF` -- `LightGreenF` -- `LightYellowF` -- `LightBlueF` -- `LightMagentaF` -- `LightCyanF` -- `LightWhiteF` -- `Color3BitF(color)` -- `Color8BitF(color)` -- `FullColorF(r, g, b)` - -Background color. - -- `DefaultB` -- `BlackB` -- `RedB` -- `GreenB` -- `YellowB` -- `BlueB` -- `MagentaB` -- `CyanB` -- `WhiteB` -- `LightBlackB` -- `LightRedB` -- `LightGreenB` -- `LightYellowB` -- `LightBlueB` -- `LightMagentaB` -- `LightCyanB` -- `LightWhiteB` -- `Color3BitB(color)` -- `Color8BitB(color)` -- `FullColorB(r, g, b)` - -### Color Converter - -24bit RGB color to ANSI color. - -- `NewRGB3Bit(r, g, b)` -- `NewRGB8Bit(r, g, b)` - -### Builder - -To mix these features. - -```go -custom := aec.EmptyBuilder.Right(2).RGB8BitF(128, 255, 64).RedB().ANSI -custom.Apply("Hello World") -``` - -## Usage - -1. Create ANSI by `aec.XXX().With(aec.YYY())` or `aec.EmptyBuilder.XXX().YYY().ANSI` -2. Print ANSI by `fmt.Print(ansi, "some string", aec.Reset)` or `fmt.Print(ansi.Apply("some string"))` - -`aec.Reset` should be added when using font style or font color features. - -## Example - -Simple progressbar. - -![sample](./sample.gif) - -```go -package main - -import ( - "fmt" - "strings" - "time" - - "github.com/morikuni/aec" -) - -func main() { - const n = 20 - builder := aec.EmptyBuilder - - up2 := aec.Up(2) - col := aec.Column(n + 2) - bar := aec.Color8BitF(aec.NewRGB8Bit(64, 255, 64)) - label := builder.LightRedF().Underline().With(col).Right(1).ANSI - - // for up2 - fmt.Println() - fmt.Println() - - for i := 0; i <= n; i++ { - fmt.Print(up2) - fmt.Println(label.Apply(fmt.Sprint(i, "/", n))) - fmt.Print("[") - fmt.Print(bar.Apply(strings.Repeat("=", i))) - fmt.Println(col.Apply("]")) - time.Sleep(100 * time.Millisecond) - } -} -``` - -## License - -[MIT](./LICENSE) - - diff --git a/src/vendor/github.com/morikuni/aec/aec.go b/src/vendor/github.com/morikuni/aec/aec.go deleted file mode 100644 index 566be6eb1..000000000 --- a/src/vendor/github.com/morikuni/aec/aec.go +++ /dev/null @@ -1,137 +0,0 @@ -package aec - -import "fmt" - -// EraseMode is listed in a variable EraseModes. -type EraseMode uint - -var ( - // EraseModes is a list of EraseMode. 
- EraseModes struct { - // All erase all. - All EraseMode - - // Head erase to head. - Head EraseMode - - // Tail erase to tail. - Tail EraseMode - } - - // Save saves the cursor position. - Save ANSI - - // Restore restores the cursor position. - Restore ANSI - - // Hide hides the cursor. - Hide ANSI - - // Show shows the cursor. - Show ANSI - - // Report reports the cursor position. - Report ANSI -) - -// Up moves up the cursor. -func Up(n uint) ANSI { - if n == 0 { - return empty - } - return newAnsi(fmt.Sprintf(esc+"%dA", n)) -} - -// Down moves down the cursor. -func Down(n uint) ANSI { - if n == 0 { - return empty - } - return newAnsi(fmt.Sprintf(esc+"%dB", n)) -} - -// Right moves right the cursor. -func Right(n uint) ANSI { - if n == 0 { - return empty - } - return newAnsi(fmt.Sprintf(esc+"%dC", n)) -} - -// Left moves left the cursor. -func Left(n uint) ANSI { - if n == 0 { - return empty - } - return newAnsi(fmt.Sprintf(esc+"%dD", n)) -} - -// NextLine moves down the cursor to head of a line. -func NextLine(n uint) ANSI { - if n == 0 { - return empty - } - return newAnsi(fmt.Sprintf(esc+"%dE", n)) -} - -// PreviousLine moves up the cursor to head of a line. -func PreviousLine(n uint) ANSI { - if n == 0 { - return empty - } - return newAnsi(fmt.Sprintf(esc+"%dF", n)) -} - -// Column set the cursor position to a given column. -func Column(col uint) ANSI { - return newAnsi(fmt.Sprintf(esc+"%dG", col)) -} - -// Position set the cursor position to a given absolute position. -func Position(row, col uint) ANSI { - return newAnsi(fmt.Sprintf(esc+"%d;%dH", row, col)) -} - -// EraseDisplay erases display by given EraseMode. -func EraseDisplay(m EraseMode) ANSI { - return newAnsi(fmt.Sprintf(esc+"%dJ", m)) -} - -// EraseLine erases lines by given EraseMode. -func EraseLine(m EraseMode) ANSI { - return newAnsi(fmt.Sprintf(esc+"%dK", m)) -} - -// ScrollUp scrolls up the page. -func ScrollUp(n int) ANSI { - if n == 0 { - return empty - } - return newAnsi(fmt.Sprintf(esc+"%dS", n)) -} - -// ScrollDown scrolls down the page. -func ScrollDown(n int) ANSI { - if n == 0 { - return empty - } - return newAnsi(fmt.Sprintf(esc+"%dT", n)) -} - -func init() { - EraseModes = struct { - All EraseMode - Head EraseMode - Tail EraseMode - }{ - Tail: 0, - Head: 1, - All: 2, - } - - Save = newAnsi(esc + "s") - Restore = newAnsi(esc + "u") - Hide = newAnsi(esc + "?25l") - Show = newAnsi(esc + "?25h") - Report = newAnsi(esc + "6n") -} diff --git a/src/vendor/github.com/morikuni/aec/ansi.go b/src/vendor/github.com/morikuni/aec/ansi.go deleted file mode 100644 index e60722e6e..000000000 --- a/src/vendor/github.com/morikuni/aec/ansi.go +++ /dev/null @@ -1,59 +0,0 @@ -package aec - -import ( - "fmt" - "strings" -) - -const esc = "\x1b[" - -// Reset resets SGR effect. -const Reset string = "\x1b[0m" - -var empty = newAnsi("") - -// ANSI represents ANSI escape code. -type ANSI interface { - fmt.Stringer - - // With adapts given ANSIs. - With(...ANSI) ANSI - - // Apply wraps given string in ANSI. - Apply(string) string -} - -type ansiImpl string - -func newAnsi(s string) *ansiImpl { - r := ansiImpl(s) - return &r -} - -func (a *ansiImpl) With(ansi ...ANSI) ANSI { - return concat(append([]ANSI{a}, ansi...)) -} - -func (a *ansiImpl) Apply(s string) string { - return a.String() + s + Reset -} - -func (a *ansiImpl) String() string { - return string(*a) -} - -// Apply wraps given string in ANSIs. 
-func Apply(s string, ansi ...ANSI) string { - if len(ansi) == 0 { - return s - } - return concat(ansi).Apply(s) -} - -func concat(ansi []ANSI) ANSI { - strs := make([]string, 0, len(ansi)) - for _, p := range ansi { - strs = append(strs, p.String()) - } - return newAnsi(strings.Join(strs, "")) -} diff --git a/src/vendor/github.com/morikuni/aec/builder.go b/src/vendor/github.com/morikuni/aec/builder.go deleted file mode 100644 index 13bd002d4..000000000 --- a/src/vendor/github.com/morikuni/aec/builder.go +++ /dev/null @@ -1,388 +0,0 @@ -package aec - -// Builder is a lightweight syntax to construct customized ANSI. -type Builder struct { - ANSI ANSI -} - -// EmptyBuilder is an initialized Builder. -var EmptyBuilder *Builder - -// NewBuilder creates a Builder from existing ANSI. -func NewBuilder(a ...ANSI) *Builder { - return &Builder{concat(a)} -} - -// With is a syntax for With. -func (builder *Builder) With(a ...ANSI) *Builder { - return NewBuilder(builder.ANSI.With(a...)) -} - -// Up is a syntax for Up. -func (builder *Builder) Up(n uint) *Builder { - return builder.With(Up(n)) -} - -// Down is a syntax for Down. -func (builder *Builder) Down(n uint) *Builder { - return builder.With(Down(n)) -} - -// Right is a syntax for Right. -func (builder *Builder) Right(n uint) *Builder { - return builder.With(Right(n)) -} - -// Left is a syntax for Left. -func (builder *Builder) Left(n uint) *Builder { - return builder.With(Left(n)) -} - -// NextLine is a syntax for NextLine. -func (builder *Builder) NextLine(n uint) *Builder { - return builder.With(NextLine(n)) -} - -// PreviousLine is a syntax for PreviousLine. -func (builder *Builder) PreviousLine(n uint) *Builder { - return builder.With(PreviousLine(n)) -} - -// Column is a syntax for Column. -func (builder *Builder) Column(col uint) *Builder { - return builder.With(Column(col)) -} - -// Position is a syntax for Position. -func (builder *Builder) Position(row, col uint) *Builder { - return builder.With(Position(row, col)) -} - -// EraseDisplay is a syntax for EraseDisplay. -func (builder *Builder) EraseDisplay(m EraseMode) *Builder { - return builder.With(EraseDisplay(m)) -} - -// EraseLine is a syntax for EraseLine. -func (builder *Builder) EraseLine(m EraseMode) *Builder { - return builder.With(EraseLine(m)) -} - -// ScrollUp is a syntax for ScrollUp. -func (builder *Builder) ScrollUp(n int) *Builder { - return builder.With(ScrollUp(n)) -} - -// ScrollDown is a syntax for ScrollDown. -func (builder *Builder) ScrollDown(n int) *Builder { - return builder.With(ScrollDown(n)) -} - -// Save is a syntax for Save. -func (builder *Builder) Save() *Builder { - return builder.With(Save) -} - -// Restore is a syntax for Restore. -func (builder *Builder) Restore() *Builder { - return builder.With(Restore) -} - -// Hide is a syntax for Hide. -func (builder *Builder) Hide() *Builder { - return builder.With(Hide) -} - -// Show is a syntax for Show. -func (builder *Builder) Show() *Builder { - return builder.With(Show) -} - -// Report is a syntax for Report. -func (builder *Builder) Report() *Builder { - return builder.With(Report) -} - -// Bold is a syntax for Bold. -func (builder *Builder) Bold() *Builder { - return builder.With(Bold) -} - -// Faint is a syntax for Faint. -func (builder *Builder) Faint() *Builder { - return builder.With(Faint) -} - -// Italic is a syntax for Italic. -func (builder *Builder) Italic() *Builder { - return builder.With(Italic) -} - -// Underline is a syntax for Underline. 
-func (builder *Builder) Underline() *Builder { - return builder.With(Underline) -} - -// BlinkSlow is a syntax for BlinkSlow. -func (builder *Builder) BlinkSlow() *Builder { - return builder.With(BlinkSlow) -} - -// BlinkRapid is a syntax for BlinkRapid. -func (builder *Builder) BlinkRapid() *Builder { - return builder.With(BlinkRapid) -} - -// Inverse is a syntax for Inverse. -func (builder *Builder) Inverse() *Builder { - return builder.With(Inverse) -} - -// Conceal is a syntax for Conceal. -func (builder *Builder) Conceal() *Builder { - return builder.With(Conceal) -} - -// CrossOut is a syntax for CrossOut. -func (builder *Builder) CrossOut() *Builder { - return builder.With(CrossOut) -} - -// BlackF is a syntax for BlackF. -func (builder *Builder) BlackF() *Builder { - return builder.With(BlackF) -} - -// RedF is a syntax for RedF. -func (builder *Builder) RedF() *Builder { - return builder.With(RedF) -} - -// GreenF is a syntax for GreenF. -func (builder *Builder) GreenF() *Builder { - return builder.With(GreenF) -} - -// YellowF is a syntax for YellowF. -func (builder *Builder) YellowF() *Builder { - return builder.With(YellowF) -} - -// BlueF is a syntax for BlueF. -func (builder *Builder) BlueF() *Builder { - return builder.With(BlueF) -} - -// MagentaF is a syntax for MagentaF. -func (builder *Builder) MagentaF() *Builder { - return builder.With(MagentaF) -} - -// CyanF is a syntax for CyanF. -func (builder *Builder) CyanF() *Builder { - return builder.With(CyanF) -} - -// WhiteF is a syntax for WhiteF. -func (builder *Builder) WhiteF() *Builder { - return builder.With(WhiteF) -} - -// DefaultF is a syntax for DefaultF. -func (builder *Builder) DefaultF() *Builder { - return builder.With(DefaultF) -} - -// BlackB is a syntax for BlackB. -func (builder *Builder) BlackB() *Builder { - return builder.With(BlackB) -} - -// RedB is a syntax for RedB. -func (builder *Builder) RedB() *Builder { - return builder.With(RedB) -} - -// GreenB is a syntax for GreenB. -func (builder *Builder) GreenB() *Builder { - return builder.With(GreenB) -} - -// YellowB is a syntax for YellowB. -func (builder *Builder) YellowB() *Builder { - return builder.With(YellowB) -} - -// BlueB is a syntax for BlueB. -func (builder *Builder) BlueB() *Builder { - return builder.With(BlueB) -} - -// MagentaB is a syntax for MagentaB. -func (builder *Builder) MagentaB() *Builder { - return builder.With(MagentaB) -} - -// CyanB is a syntax for CyanB. -func (builder *Builder) CyanB() *Builder { - return builder.With(CyanB) -} - -// WhiteB is a syntax for WhiteB. -func (builder *Builder) WhiteB() *Builder { - return builder.With(WhiteB) -} - -// DefaultB is a syntax for DefaultB. -func (builder *Builder) DefaultB() *Builder { - return builder.With(DefaultB) -} - -// Frame is a syntax for Frame. -func (builder *Builder) Frame() *Builder { - return builder.With(Frame) -} - -// Encircle is a syntax for Encircle. -func (builder *Builder) Encircle() *Builder { - return builder.With(Encircle) -} - -// Overline is a syntax for Overline. -func (builder *Builder) Overline() *Builder { - return builder.With(Overline) -} - -// LightBlackF is a syntax for LightBlueF. -func (builder *Builder) LightBlackF() *Builder { - return builder.With(LightBlackF) -} - -// LightRedF is a syntax for LightRedF. -func (builder *Builder) LightRedF() *Builder { - return builder.With(LightRedF) -} - -// LightGreenF is a syntax for LightGreenF. 
-func (builder *Builder) LightGreenF() *Builder { - return builder.With(LightGreenF) -} - -// LightYellowF is a syntax for LightYellowF. -func (builder *Builder) LightYellowF() *Builder { - return builder.With(LightYellowF) -} - -// LightBlueF is a syntax for LightBlueF. -func (builder *Builder) LightBlueF() *Builder { - return builder.With(LightBlueF) -} - -// LightMagentaF is a syntax for LightMagentaF. -func (builder *Builder) LightMagentaF() *Builder { - return builder.With(LightMagentaF) -} - -// LightCyanF is a syntax for LightCyanF. -func (builder *Builder) LightCyanF() *Builder { - return builder.With(LightCyanF) -} - -// LightWhiteF is a syntax for LightWhiteF. -func (builder *Builder) LightWhiteF() *Builder { - return builder.With(LightWhiteF) -} - -// LightBlackB is a syntax for LightBlackB. -func (builder *Builder) LightBlackB() *Builder { - return builder.With(LightBlackB) -} - -// LightRedB is a syntax for LightRedB. -func (builder *Builder) LightRedB() *Builder { - return builder.With(LightRedB) -} - -// LightGreenB is a syntax for LightGreenB. -func (builder *Builder) LightGreenB() *Builder { - return builder.With(LightGreenB) -} - -// LightYellowB is a syntax for LightYellowB. -func (builder *Builder) LightYellowB() *Builder { - return builder.With(LightYellowB) -} - -// LightBlueB is a syntax for LightBlueB. -func (builder *Builder) LightBlueB() *Builder { - return builder.With(LightBlueB) -} - -// LightMagentaB is a syntax for LightMagentaB. -func (builder *Builder) LightMagentaB() *Builder { - return builder.With(LightMagentaB) -} - -// LightCyanB is a syntax for LightCyanB. -func (builder *Builder) LightCyanB() *Builder { - return builder.With(LightCyanB) -} - -// LightWhiteB is a syntax for LightWhiteB. -func (builder *Builder) LightWhiteB() *Builder { - return builder.With(LightWhiteB) -} - -// Color3BitF is a syntax for Color3BitF. -func (builder *Builder) Color3BitF(c RGB3Bit) *Builder { - return builder.With(Color3BitF(c)) -} - -// Color3BitB is a syntax for Color3BitB. -func (builder *Builder) Color3BitB(c RGB3Bit) *Builder { - return builder.With(Color3BitB(c)) -} - -// Color8BitF is a syntax for Color8BitF. -func (builder *Builder) Color8BitF(c RGB8Bit) *Builder { - return builder.With(Color8BitF(c)) -} - -// Color8BitB is a syntax for Color8BitB. -func (builder *Builder) Color8BitB(c RGB8Bit) *Builder { - return builder.With(Color8BitB(c)) -} - -// FullColorF is a syntax for FullColorF. -func (builder *Builder) FullColorF(r, g, b uint8) *Builder { - return builder.With(FullColorF(r, g, b)) -} - -// FullColorB is a syntax for FullColorB. -func (builder *Builder) FullColorB(r, g, b uint8) *Builder { - return builder.With(FullColorB(r, g, b)) -} - -// RGB3BitF is a syntax for Color3BitF with NewRGB3Bit. -func (builder *Builder) RGB3BitF(r, g, b uint8) *Builder { - return builder.Color3BitF(NewRGB3Bit(r, g, b)) -} - -// RGB3BitB is a syntax for Color3BitB with NewRGB3Bit. -func (builder *Builder) RGB3BitB(r, g, b uint8) *Builder { - return builder.Color3BitB(NewRGB3Bit(r, g, b)) -} - -// RGB8BitF is a syntax for Color8BitF with NewRGB8Bit. -func (builder *Builder) RGB8BitF(r, g, b uint8) *Builder { - return builder.Color8BitF(NewRGB8Bit(r, g, b)) -} - -// RGB8BitB is a syntax for Color8BitB with NewRGB8Bit. 
-func (builder *Builder) RGB8BitB(r, g, b uint8) *Builder { - return builder.Color8BitB(NewRGB8Bit(r, g, b)) -} - -func init() { - EmptyBuilder = &Builder{empty} -} diff --git a/src/vendor/github.com/morikuni/aec/sample.gif b/src/vendor/github.com/morikuni/aec/sample.gif deleted file mode 100644 index c6c613bb70645efa4e184726060ea1ff981eeceb..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12548 zcmeHtXHe7ox^?KiNed7{8X;7XCP*=%29P2G0wRPS6cCY(5D}@N_aaD@UX-rVRGLyn ziUk#vP5`li6zlsB?&3b@?0xn-bLW1#I^&ms!?1qqS><^Q4E0qHIxs9l8jiG4D5=G zkc%wJQx+903onrs@|6>>krR&H55KY>eoh`;asZm7fND|@8CDR!sE7_%MAs^cwkwL% zD8UkxCDWAAxhk0ZDx%L+gi8*hnhqkK92Ch^mCRSgl&MNwQ$;^kh1^y{4XcSwtBI7U zOIE2%HL9a~)WsLnMc33J&6<)AG*RQ4$Z0M3oEDOz1$(b0YH(Pw?XYC`VaYpKsV=P4 z7FJXVhd0D24dNsp;UpHd#UJTNZt5VXbS0+rBajp4Phz6_~W&Tz}DWZtdlR_FHdX zbw;p>nRc|ipYKnXb{y$w-B=ziF)lQ{-S%;9vg!1zk=yOtZ)bYq9mUM%UMfA9n94_HLw{y+a(h(q-H)YoF^N>j>XMNzW=%^r(B zo(sGdWe;Xm(&m&J4NpP=hLMdqme=}W3IoesvgnH&z-y5>+3elk1COcaV|B0R4H$9Y zV*2>z@q}O<@LD9v%r8`##56I&r^e>X2z1>hQ07gn4Oe)0x)57)f?6Z+T3DWpO+>(w z>MQt9F%RMD^U($GTVz$+w>n>xyGT1aY;RqzrjI&k+M=7B6Ttxi%Q#QpELdhJQ~WMw zrd9O}%uo?|yP&=kswoRaNCawq{q$jH<8$z(&I>QTF$-N~5~0(e?+vAYzwBrOT3w!r zIIv#MgcGuVLgREB?jH?3m{G>GkIoQ(H43Z^ycVgIdj(kkQGrfjQfKIB7>WB`iZeTV zDFP?^gs~(6!zhWus|6oSv9)GywvA@u-yUUr%Yidbb%azIq|wNzEo9j{_eP}B_#TW- zr16?9#`Q1WA7vD=qshlQS__p!U07Yq&z@YW^3YK)59q?W-}e|55j$k54LvS^H!pPh z>M0R?L0_s0XR*@INk(%=>L+B7Gm$#t_D_0q>8waq&>YTY$&~b)Z5}jE4d2Dfyh!xc zqvg6veBEv^D18mIao_Qpn%-4O0~-fstT}^Is@kX{Nw;&9USun7v>{JYox4d{U~N5X zYUTY3c1q1@)ZJD|mncx`v+Rjm43R#Q!eOs+67*;_)hVXq0E*zp`ncvfcK40?+b&LW zuF##nVk`4s{20GLStcc=Qo3nPM&-L9hw*yXoa~pPnrK@qnC`Jz-I{b}<=Z?^4?m~x z*QbxFck?DmxZQ}6BRz~$@PFN3$guW!E!ZHi<@ zMhcql2!%1dB+rs1iuusFx^h7aQ7BGTc)D4^zJY zH=l4Jr6WOWbx-BYUssFVYg@lLuciL>#^*bq(9Lv*+upVDp6OWY7rgQLL)JmFM;oI| za$n6K9sj=lQQNKV;;Sj9#jo2JqPZUJEG&C}Gow^o+Ww?<<88?FyKU6>FXx^-9Zx!hv)_14rhhu5DCU4NsxrVV%*@?Lx>=@Sb;o*5Cyd;CimDp(W!r0-= z{HeGa4(&)phc3(U{%(P`cTw^QPHYUg9^vQj$l)nhIoZN{pig|GvFxE-DHp-LqMQxU zdXHv#&SvWh$-O6=B+T-9gZ03s@1w2!XZby|^<;e-;uXIjz{gweL6RCG-87KG_bT-n z%Nr7XT#=B^+xOMRk0u59JBn^p8fbiYf8oLxBWTrb%qQ{ji!nMIg+vEJKl+S9s284(QkX=Nod_q1+A_ULu0)}7Y7rh6OE zx-99*MnLOiNdZ)?GrXBd)wr#t6AL46eeUF%4*DhZ9J z9~ShtozIU{i47IouQjo~A^$?v?%}sF^af>qXmi8i&ezMEv#(y6DIEU(9kI(a6DrCA zK8ngm0b0jmp*?evQJarN0?;~mRGJ3SF;X}H!xIf9Pjbf~vSPSn(dDY#acnf>{qgem zcbs8T7JbFq%nFq*;>xDyU9`zZ5DO`~tU>tX@vkxF*grJyFVR8(Xqo;4v=}id*gu?! zVFE*B?^uste|Y+D&LmRkb$wn>T3K}No-^5Wr^}}bLD4a;@ZvpZ@{urtW)PVfQ!K5f zI+HZ7nN9+arh6~dnaJuwWTpXUa$JU6(`yw#OArIr#D8Li9@!~j%VK8*oJrs;S<_y` zxS%7jM^w|A)f{jpKCv3vO#V<4J1-V3(iNiIgEr+y+?90g1tu?+B8M7Juf06^A+bQF zCWuWeX^eTOqBmVSdJ?1%8^NKwzfkL<;ls73{N}F;g4wp-K5vb`^Jixg|7yPPqFG_> zmz|IQ!I{K8Vnnj5^b-&)`pmrWTuY);R1UrpZs@H#HyfSol{QO8U0H~Z5ieIAk408f ziuGmgV37I;Mu_ve%KChU5*k|+WOBw-AR&ocuX`XBj}9tDX$#|B)6Ef^t{Ik`N^Y5! 
zdMQd#n9+E}6+hMmkK*{jXO!Gz2PQl+!}^qDSr}xs{8GT`il_LMZel;GH0zX9Rdi&W Y(?6j)4M_L*quqaULH)mVw5$IA0FB)@mjD0& diff --git a/src/vendor/github.com/morikuni/aec/sgr.go b/src/vendor/github.com/morikuni/aec/sgr.go deleted file mode 100644 index 0ba3464e6..000000000 --- a/src/vendor/github.com/morikuni/aec/sgr.go +++ /dev/null @@ -1,202 +0,0 @@ -package aec - -import ( - "fmt" -) - -// RGB3Bit is a 3bit RGB color. -type RGB3Bit uint8 - -// RGB8Bit is a 8bit RGB color. -type RGB8Bit uint8 - -func newSGR(n uint) ANSI { - return newAnsi(fmt.Sprintf(esc+"%dm", n)) -} - -// NewRGB3Bit create a RGB3Bit from given RGB. -func NewRGB3Bit(r, g, b uint8) RGB3Bit { - return RGB3Bit((r >> 7) | ((g >> 6) & 0x2) | ((b >> 5) & 0x4)) -} - -// NewRGB8Bit create a RGB8Bit from given RGB. -func NewRGB8Bit(r, g, b uint8) RGB8Bit { - return RGB8Bit(16 + 36*(r/43) + 6*(g/43) + b/43) -} - -// Color3BitF set the foreground color of text. -func Color3BitF(c RGB3Bit) ANSI { - return newAnsi(fmt.Sprintf(esc+"%dm", c+30)) -} - -// Color3BitB set the background color of text. -func Color3BitB(c RGB3Bit) ANSI { - return newAnsi(fmt.Sprintf(esc+"%dm", c+40)) -} - -// Color8BitF set the foreground color of text. -func Color8BitF(c RGB8Bit) ANSI { - return newAnsi(fmt.Sprintf(esc+"38;5;%dm", c)) -} - -// Color8BitB set the background color of text. -func Color8BitB(c RGB8Bit) ANSI { - return newAnsi(fmt.Sprintf(esc+"48;5;%dm", c)) -} - -// FullColorF set the foreground color of text. -func FullColorF(r, g, b uint8) ANSI { - return newAnsi(fmt.Sprintf(esc+"38;2;%d;%d;%dm", r, g, b)) -} - -// FullColorB set the foreground color of text. -func FullColorB(r, g, b uint8) ANSI { - return newAnsi(fmt.Sprintf(esc+"48;2;%d;%d;%dm", r, g, b)) -} - -// Style -var ( - // Bold set the text style to bold or increased intensity. - Bold ANSI - - // Faint set the text style to faint. - Faint ANSI - - // Italic set the text style to italic. - Italic ANSI - - // Underline set the text style to underline. - Underline ANSI - - // BlinkSlow set the text style to slow blink. - BlinkSlow ANSI - - // BlinkRapid set the text style to rapid blink. - BlinkRapid ANSI - - // Inverse swap the foreground color and background color. - Inverse ANSI - - // Conceal set the text style to conceal. - Conceal ANSI - - // CrossOut set the text style to crossed out. - CrossOut ANSI - - // Frame set the text style to framed. - Frame ANSI - - // Encircle set the text style to encircled. - Encircle ANSI - - // Overline set the text style to overlined. - Overline ANSI -) - -// Foreground color of text. -var ( - // DefaultF is the default color of foreground. - DefaultF ANSI - - // Normal color - BlackF ANSI - RedF ANSI - GreenF ANSI - YellowF ANSI - BlueF ANSI - MagentaF ANSI - CyanF ANSI - WhiteF ANSI - - // Light color - LightBlackF ANSI - LightRedF ANSI - LightGreenF ANSI - LightYellowF ANSI - LightBlueF ANSI - LightMagentaF ANSI - LightCyanF ANSI - LightWhiteF ANSI -) - -// Background color of text. -var ( - // DefaultB is the default color of background. 
- DefaultB ANSI - - // Normal color - BlackB ANSI - RedB ANSI - GreenB ANSI - YellowB ANSI - BlueB ANSI - MagentaB ANSI - CyanB ANSI - WhiteB ANSI - - // Light color - LightBlackB ANSI - LightRedB ANSI - LightGreenB ANSI - LightYellowB ANSI - LightBlueB ANSI - LightMagentaB ANSI - LightCyanB ANSI - LightWhiteB ANSI -) - -func init() { - Bold = newSGR(1) - Faint = newSGR(2) - Italic = newSGR(3) - Underline = newSGR(4) - BlinkSlow = newSGR(5) - BlinkRapid = newSGR(6) - Inverse = newSGR(7) - Conceal = newSGR(8) - CrossOut = newSGR(9) - - BlackF = newSGR(30) - RedF = newSGR(31) - GreenF = newSGR(32) - YellowF = newSGR(33) - BlueF = newSGR(34) - MagentaF = newSGR(35) - CyanF = newSGR(36) - WhiteF = newSGR(37) - - DefaultF = newSGR(39) - - BlackB = newSGR(40) - RedB = newSGR(41) - GreenB = newSGR(42) - YellowB = newSGR(43) - BlueB = newSGR(44) - MagentaB = newSGR(45) - CyanB = newSGR(46) - WhiteB = newSGR(47) - - DefaultB = newSGR(49) - - Frame = newSGR(51) - Encircle = newSGR(52) - Overline = newSGR(53) - - LightBlackF = newSGR(90) - LightRedF = newSGR(91) - LightGreenF = newSGR(92) - LightYellowF = newSGR(93) - LightBlueF = newSGR(94) - LightMagentaF = newSGR(95) - LightCyanF = newSGR(96) - LightWhiteF = newSGR(97) - - LightBlackB = newSGR(100) - LightRedB = newSGR(101) - LightGreenB = newSGR(102) - LightYellowB = newSGR(103) - LightBlueB = newSGR(104) - LightMagentaB = newSGR(105) - LightCyanB = newSGR(106) - LightWhiteB = newSGR(107) -} diff --git a/src/vendor/github.com/munnerz/goautoneg/LICENSE b/src/vendor/github.com/munnerz/goautoneg/LICENSE deleted file mode 100644 index bbc7b897c..000000000 --- a/src/vendor/github.com/munnerz/goautoneg/LICENSE +++ /dev/null @@ -1,31 +0,0 @@ -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/src/vendor/github.com/munnerz/goautoneg/Makefile b/src/vendor/github.com/munnerz/goautoneg/Makefile deleted file mode 100644 index e33ee1730..000000000 --- a/src/vendor/github.com/munnerz/goautoneg/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -include $(GOROOT)/src/Make.inc - -TARG=bitbucket.org/ww/goautoneg -GOFILES=autoneg.go - -include $(GOROOT)/src/Make.pkg - -format: - gofmt -w *.go - -docs: - gomake clean - godoc ${TARG} > README.txt diff --git a/src/vendor/github.com/munnerz/goautoneg/README.txt b/src/vendor/github.com/munnerz/goautoneg/README.txt deleted file mode 100644 index 7723656d5..000000000 --- a/src/vendor/github.com/munnerz/goautoneg/README.txt +++ /dev/null @@ -1,67 +0,0 @@ -PACKAGE - -package goautoneg -import "bitbucket.org/ww/goautoneg" - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -FUNCTIONS - -func Negotiate(header string, alternatives []string) (content_type string) -Negotiate the most appropriate content_type given the accept header -and a list of alternatives. - -func ParseAccept(header string) (accept []Accept) -Parse an Accept Header string returning a sorted list -of clauses - - -TYPES - -type Accept struct { - Type, SubType string - Q float32 - Params map[string]string -} -Structure to represent a clause in an HTTP Accept Header - - -SUBDIRECTORIES - - .hg diff --git a/src/vendor/github.com/munnerz/goautoneg/autoneg.go b/src/vendor/github.com/munnerz/goautoneg/autoneg.go deleted file mode 100644 index 1dd1cad64..000000000 --- a/src/vendor/github.com/munnerz/goautoneg/autoneg.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -package goautoneg - -import ( - "sort" - "strconv" - "strings" -) - -// Structure to represent a clause in an HTTP Accept Header -type Accept struct { - Type, SubType string - Q float64 - Params map[string]string -} - -// acceptSlice is defined to implement sort interface. -type acceptSlice []Accept - -func (slice acceptSlice) Len() int { - return len(slice) -} - -func (slice acceptSlice) Less(i, j int) bool { - ai, aj := slice[i], slice[j] - if ai.Q > aj.Q { - return true - } - if ai.Type != "*" && aj.Type == "*" { - return true - } - if ai.SubType != "*" && aj.SubType == "*" { - return true - } - return false -} - -func (slice acceptSlice) Swap(i, j int) { - slice[i], slice[j] = slice[j], slice[i] -} - -func stringTrimSpaceCutset(r rune) bool { - return r == ' ' -} - -func nextSplitElement(s, sep string) (item string, remaining string) { - if index := strings.Index(s, sep); index != -1 { - return s[:index], s[index+1:] - } - return s, "" -} - -// Parse an Accept Header string returning a sorted list -// of clauses -func ParseAccept(header string) acceptSlice { - partsCount := 0 - remaining := header - for len(remaining) > 0 { - partsCount++ - _, remaining = nextSplitElement(remaining, ",") - } - accept := make(acceptSlice, 0, partsCount) - - remaining = header - var part string - for len(remaining) > 0 { - part, remaining = nextSplitElement(remaining, ",") - part = strings.TrimFunc(part, stringTrimSpaceCutset) - - a := Accept{ - Q: 1.0, - } - - sp, remainingPart := nextSplitElement(part, ";") - - sp0, spRemaining := nextSplitElement(sp, "/") - a.Type = strings.TrimFunc(sp0, stringTrimSpaceCutset) - - switch { - case len(spRemaining) == 0: - if a.Type == "*" { - a.SubType = "*" - } else { - continue - } - default: - var sp1 string - sp1, spRemaining = nextSplitElement(spRemaining, "/") - if len(spRemaining) > 0 { - continue - } - a.SubType = strings.TrimFunc(sp1, stringTrimSpaceCutset) - } - - if len(remainingPart) == 0 { - accept = append(accept, a) - continue - } - - a.Params = make(map[string]string) - for len(remainingPart) > 0 { - 
sp, remainingPart = nextSplitElement(remainingPart, ";") - sp0, spRemaining = nextSplitElement(sp, "=") - if len(spRemaining) == 0 { - continue - } - var sp1 string - sp1, spRemaining = nextSplitElement(spRemaining, "=") - if len(spRemaining) != 0 { - continue - } - token := strings.TrimFunc(sp0, stringTrimSpaceCutset) - if token == "q" { - a.Q, _ = strconv.ParseFloat(sp1, 32) - } else { - a.Params[token] = strings.TrimFunc(sp1, stringTrimSpaceCutset) - } - } - - accept = append(accept, a) - } - - sort.Sort(accept) - return accept -} - -// Negotiate the most appropriate content_type given the accept header -// and a list of alternatives. -func Negotiate(header string, alternatives []string) (content_type string) { - asp := make([][]string, 0, len(alternatives)) - for _, ctype := range alternatives { - asp = append(asp, strings.SplitN(ctype, "/", 2)) - } - for _, clause := range ParseAccept(header) { - for i, ctsp := range asp { - if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { - content_type = alternatives[i] - return - } - if clause.Type == ctsp[0] && clause.SubType == "*" { - content_type = alternatives[i] - return - } - if clause.Type == "*" && clause.SubType == "*" { - content_type = alternatives[i] - return - } - } - } - return -} diff --git a/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go index 581cf7cdf..6f9e6fd3a 100644 --- a/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go +++ b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go @@ -59,4 +59,13 @@ const ( // AnnotationBaseImageName is the annotation key for the image reference of the image's base image. AnnotationBaseImageName = "org.opencontainers.image.base.name" + + // AnnotationArtifactCreated is the annotation key for the date and time on which the artifact was built, conforming to RFC 3339. + AnnotationArtifactCreated = "org.opencontainers.artifact.created" + + // AnnotationArtifactDescription is the annotation key for the human readable description for the artifact. + AnnotationArtifactDescription = "org.opencontainers.artifact.description" + + // AnnotationReferrersFiltersApplied is the annotation key for the comma separated list of filters applied by the registry in the referrers listing. + AnnotationReferrersFiltersApplied = "org.opencontainers.referrers.filtersApplied" ) diff --git a/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go new file mode 100644 index 000000000..03d76ce43 --- /dev/null +++ b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go @@ -0,0 +1,34 @@ +// Copyright 2022 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +// Artifact describes an artifact manifest. +// This structure provides `application/vnd.oci.artifact.manifest.v1+json` mediatype when marshalled to JSON. 
+type Artifact struct { + // MediaType is the media type of the object this schema refers to. + MediaType string `json:"mediaType"` + + // ArtifactType is the IANA media type of the artifact this schema refers to. + ArtifactType string `json:"artifactType"` + + // Blobs is a collection of blobs referenced by this manifest. + Blobs []Descriptor `json:"blobs,omitempty"` + + // Subject (reference) is an optional link from the artifact to another manifest forming an association between the artifact and the other manifest. + Subject *Descriptor `json:"subject,omitempty"` + + // Annotations contains arbitrary metadata for the artifact manifest. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go index 6e442a085..9654aa5af 100644 --- a/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go +++ b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go @@ -1,4 +1,4 @@ -// Copyright 2016 The Linux Foundation +// Copyright 2016-2022 The Linux Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -35,10 +35,18 @@ type Descriptor struct { // Annotations contains arbitrary metadata relating to the targeted content. Annotations map[string]string `json:"annotations,omitempty"` + // Data is an embedding of the targeted content. This is encoded as a base64 + // string when marshalled to JSON (automatically, by encoding/json). If + // present, Data can be used directly to avoid fetching the targeted content. + Data []byte `json:"data,omitempty"` + // Platform describes the platform which the image in the manifest runs on. // // This should only be used when referring to a manifest. Platform *Platform `json:"platform,omitempty"` + + // ArtifactType is the IANA media type of this artifact. + ArtifactType string `json:"artifactType,omitempty"` } // Platform describes the platform which the image in the manifest runs on. diff --git a/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go index 82da6c6a8..ed4a56e59 100644 --- a/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go +++ b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go @@ -21,7 +21,7 @@ import "github.com/opencontainers/image-spec/specs-go" type Index struct { specs.Versioned - // MediaType specificies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json` + // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json` MediaType string `json:"mediaType,omitempty"` // Manifests references platform specific manifests. diff --git a/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go index d72d15ce4..730a09359 100644 --- a/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go +++ b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go @@ -1,4 +1,4 @@ -// Copyright 2016 The Linux Foundation +// Copyright 2016-2022 The Linux Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -20,7 +20,7 @@ import "github.com/opencontainers/image-spec/specs-go" type Manifest struct { specs.Versioned - // MediaType specificies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json` + // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json` MediaType string `json:"mediaType,omitempty"` // Config references a configuration object for a container, by digest. @@ -30,6 +30,9 @@ type Manifest struct { // Layers is an indexed list of layers referenced by the manifest. Layers []Descriptor `json:"layers"` + // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. + Subject *Descriptor `json:"subject,omitempty"` + // Annotations contains arbitrary metadata for the image manifest. Annotations map[string]string `json:"annotations,omitempty"` } diff --git a/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go index 4f35ac134..935b481e3 100644 --- a/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go +++ b/src/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -54,4 +54,7 @@ const ( // MediaTypeImageConfig specifies the media type for the image configuration. MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" + + // MediaTypeArtifactManifest specifies the media type for a content descriptor. + MediaTypeArtifactManifest = "application/vnd.oci.artifact.manifest.v1+json" ) diff --git a/src/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/src/vendor/github.com/opencontainers/image-spec/specs-go/version.go index 31f99cf64..d27903579 100644 --- a/src/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/src/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -20,12 +20,12 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 1 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 0 + VersionMinor = 1 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 2 + VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "-dev" + VersionDev = "-rc2" ) // Version is the specification version that the package types support. diff --git a/src/vendor/github.com/peterbourgon/diskv/LICENSE b/src/vendor/github.com/peterbourgon/diskv/LICENSE deleted file mode 100644 index 41ce7f16e..000000000 --- a/src/vendor/github.com/peterbourgon/diskv/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2011-2012 Peter Bourgon - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/src/vendor/github.com/peterbourgon/diskv/README.md b/src/vendor/github.com/peterbourgon/diskv/README.md deleted file mode 100644 index 3474739ed..000000000 --- a/src/vendor/github.com/peterbourgon/diskv/README.md +++ /dev/null @@ -1,141 +0,0 @@ -# What is diskv? - -Diskv (disk-vee) is a simple, persistent key-value store written in the Go -language. It starts with an incredibly simple API for storing arbitrary data on -a filesystem by key, and builds several layers of performance-enhancing -abstraction on top. The end result is a conceptually simple, but highly -performant, disk-backed storage system. - -[![Build Status][1]][2] - -[1]: https://drone.io/github.com/peterbourgon/diskv/status.png -[2]: https://drone.io/github.com/peterbourgon/diskv/latest - - -# Installing - -Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5]. -Then, - -```bash -$ go get github.com/peterbourgon/diskv -``` - -[3]: http://golang.org -[4]: http://golang.org/doc/install/source -[5]: http://golang.org/doc/install - - -# Usage - -```go -package main - -import ( - "fmt" - "github.com/peterbourgon/diskv" -) - -func main() { - // Simplest transform function: put all the data files into the base dir. - flatTransform := func(s string) []string { return []string{} } - - // Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache. - d := diskv.New(diskv.Options{ - BasePath: "my-data-dir", - Transform: flatTransform, - CacheSizeMax: 1024 * 1024, - }) - - // Write three bytes to the key "alpha". - key := "alpha" - d.Write(key, []byte{'1', '2', '3'}) - - // Read the value back out of the store. - value, _ := d.Read(key) - fmt.Printf("%v\n", value) - - // Erase the key+value from the store (and the disk). - d.Erase(key) -} -``` - -More complex examples can be found in the "examples" subdirectory. - - -# Theory - -## Basic idea - -At its core, diskv is a map of a key (`string`) to arbitrary data (`[]byte`). -The data is written to a single file on disk, with the same name as the key. -The key determines where that file will be stored, via a user-provided -`TransformFunc`, which takes a key and returns a slice (`[]string`) -corresponding to a path list where the key file will be stored. The simplest -TransformFunc, - -```go -func SimpleTransform (key string) []string { - return []string{} -} -``` - -will place all keys in the same, base directory. The design is inspired by -[Redis diskstore][6]; a TransformFunc which emulates the default diskstore -behavior is available in the content-addressable-storage example. - -[6]: http://groups.google.com/group/redis-db/browse_thread/thread/d444bc786689bde9?pli=1 - -**Note** that your TransformFunc should ensure that one valid key doesn't -transform to a subset of another valid key. That is, it shouldn't be possible -to construct valid keys that resolve to directory names. As a concrete example, -if your TransformFunc splits on every 3 characters, then - -```go -d.Write("abcabc", val) // OK: written to /abc/abc/abcabc -d.Write("abc", val) // Error: attempted write to /abc/abc, but it's a directory -``` - -This will be addressed in an upcoming version of diskv. - -Probably the most important design principle behind diskv is that your data is -always flatly available on the disk. 
diskv will never do anything that would -prevent you from accessing, copying, backing up, or otherwise interacting with -your data via common UNIX commandline tools. - -## Adding a cache - -An in-memory caching layer is provided by combining the BasicStore -functionality with a simple map structure, and keeping it up-to-date as -appropriate. Since the map structure in Go is not threadsafe, it's combined -with a RWMutex to provide safe concurrent access. - -## Adding order - -diskv is a key-value store and therefore inherently unordered. An ordering -system can be injected into the store by passing something which satisfies the -diskv.Index interface. (A default implementation, using Google's -[btree][7] package, is provided.) Basically, diskv keeps an ordered (by a -user-provided Less function) index of the keys, which can be queried. - -[7]: https://github.com/google/btree - -## Adding compression - -Something which implements the diskv.Compression interface may be passed -during store creation, so that all Writes and Reads are filtered through -a compression/decompression pipeline. Several default implementations, -using stdlib compression algorithms, are provided. Note that data is cached -compressed; the cost of decompression is borne with each Read. - -## Streaming - -diskv also now provides ReadStream and WriteStream methods, to allow very large -data to be handled efficiently. - - -# Future plans - - * Needs plenty of robust testing: huge datasets, etc... - * More thorough benchmarking - * Your suggestions for use-cases I haven't thought of diff --git a/src/vendor/github.com/peterbourgon/diskv/compression.go b/src/vendor/github.com/peterbourgon/diskv/compression.go deleted file mode 100644 index 5192b0273..000000000 --- a/src/vendor/github.com/peterbourgon/diskv/compression.go +++ /dev/null @@ -1,64 +0,0 @@ -package diskv - -import ( - "compress/flate" - "compress/gzip" - "compress/zlib" - "io" -) - -// Compression is an interface that Diskv uses to implement compression of -// data. Writer takes a destination io.Writer and returns a WriteCloser that -// compresses all data written through it. Reader takes a source io.Reader and -// returns a ReadCloser that decompresses all data read through it. You may -// define these methods on your own type, or use one of the NewCompression -// helpers. -type Compression interface { - Writer(dst io.Writer) (io.WriteCloser, error) - Reader(src io.Reader) (io.ReadCloser, error) -} - -// NewGzipCompression returns a Gzip-based Compression. -func NewGzipCompression() Compression { - return NewGzipCompressionLevel(flate.DefaultCompression) -} - -// NewGzipCompressionLevel returns a Gzip-based Compression with the given level. -func NewGzipCompressionLevel(level int) Compression { - return &genericCompression{ - wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) }, - rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) }, - } -} - -// NewZlibCompression returns a Zlib-based Compression. -func NewZlibCompression() Compression { - return NewZlibCompressionLevel(flate.DefaultCompression) -} - -// NewZlibCompressionLevel returns a Zlib-based Compression with the given level. -func NewZlibCompressionLevel(level int) Compression { - return NewZlibCompressionLevelDict(level, nil) -} - -// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given -// level, based on the given dictionary. 
-func NewZlibCompressionLevelDict(level int, dict []byte) Compression { - return &genericCompression{ - func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) }, - func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) }, - } -} - -type genericCompression struct { - wf func(w io.Writer) (io.WriteCloser, error) - rf func(r io.Reader) (io.ReadCloser, error) -} - -func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) { - return g.wf(dst) -} - -func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) { - return g.rf(src) -} diff --git a/src/vendor/github.com/peterbourgon/diskv/diskv.go b/src/vendor/github.com/peterbourgon/diskv/diskv.go deleted file mode 100644 index 524dc0a6e..000000000 --- a/src/vendor/github.com/peterbourgon/diskv/diskv.go +++ /dev/null @@ -1,624 +0,0 @@ -// Diskv (disk-vee) is a simple, persistent, key-value store. -// It stores all data flatly on the filesystem. - -package diskv - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "sync" - "syscall" -) - -const ( - defaultBasePath = "diskv" - defaultFilePerm os.FileMode = 0666 - defaultPathPerm os.FileMode = 0777 -) - -var ( - defaultTransform = func(s string) []string { return []string{} } - errCanceled = errors.New("canceled") - errEmptyKey = errors.New("empty key") - errBadKey = errors.New("bad key") - errImportDirectory = errors.New("can't import a directory") -) - -// TransformFunction transforms a key into a slice of strings, with each -// element in the slice representing a directory in the file path where the -// key's entry will eventually be stored. -// -// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"], -// the final location of the data file will be /ab/cde/f/abcdef -type TransformFunction func(s string) []string - -// Options define a set of properties that dictate Diskv behavior. -// All values are optional. -type Options struct { - BasePath string - Transform TransformFunction - CacheSizeMax uint64 // bytes - PathPerm os.FileMode - FilePerm os.FileMode - // If TempDir is set, it will enable filesystem atomic writes by - // writing temporary files to that location before being moved - // to BasePath. - // Note that TempDir MUST be on the same device/partition as - // BasePath. - TempDir string - - Index Index - IndexLess LessFunction - - Compression Compression -} - -// Diskv implements the Diskv interface. You shouldn't construct Diskv -// structures directly; instead, use the New constructor. -type Diskv struct { - Options - mu sync.RWMutex - cache map[string][]byte - cacheSize uint64 -} - -// New returns an initialized Diskv structure, ready to use. -// If the path identified by baseDir already contains data, -// it will be accessible, but not yet cached. -func New(o Options) *Diskv { - if o.BasePath == "" { - o.BasePath = defaultBasePath - } - if o.Transform == nil { - o.Transform = defaultTransform - } - if o.PathPerm == 0 { - o.PathPerm = defaultPathPerm - } - if o.FilePerm == 0 { - o.FilePerm = defaultFilePerm - } - - d := &Diskv{ - Options: o, - cache: map[string][]byte{}, - cacheSize: 0, - } - - if d.Index != nil && d.IndexLess != nil { - d.Index.Initialize(d.IndexLess, d.Keys(nil)) - } - - return d -} - -// Write synchronously writes the key-value pair to disk, making it immediately -// available for reads. Write relies on the filesystem to perform an eventual -// sync to physical media. 
If you need stronger guarantees, see WriteStream. -func (d *Diskv) Write(key string, val []byte) error { - return d.WriteStream(key, bytes.NewBuffer(val), false) -} - -// WriteStream writes the data represented by the io.Reader to the disk, under -// the provided key. If sync is true, WriteStream performs an explicit sync on -// the file as soon as it's written. -// -// bytes.Buffer provides io.Reader semantics for basic data types. -func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error { - if len(key) <= 0 { - return errEmptyKey - } - - d.mu.Lock() - defer d.mu.Unlock() - - return d.writeStreamWithLock(key, r, sync) -} - -// createKeyFileWithLock either creates the key file directly, or -// creates a temporary file in TempDir if it is set. -func (d *Diskv) createKeyFileWithLock(key string) (*os.File, error) { - if d.TempDir != "" { - if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil { - return nil, fmt.Errorf("temp mkdir: %s", err) - } - f, err := ioutil.TempFile(d.TempDir, "") - if err != nil { - return nil, fmt.Errorf("temp file: %s", err) - } - - if err := f.Chmod(d.FilePerm); err != nil { - f.Close() // error deliberately ignored - os.Remove(f.Name()) // error deliberately ignored - return nil, fmt.Errorf("chmod: %s", err) - } - return f, nil - } - - mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists - f, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm) - if err != nil { - return nil, fmt.Errorf("open file: %s", err) - } - return f, nil -} - -// writeStream does no input validation checking. -func (d *Diskv) writeStreamWithLock(key string, r io.Reader, sync bool) error { - if err := d.ensurePathWithLock(key); err != nil { - return fmt.Errorf("ensure path: %s", err) - } - - f, err := d.createKeyFileWithLock(key) - if err != nil { - return fmt.Errorf("create key file: %s", err) - } - - wc := io.WriteCloser(&nopWriteCloser{f}) - if d.Compression != nil { - wc, err = d.Compression.Writer(f) - if err != nil { - f.Close() // error deliberately ignored - os.Remove(f.Name()) // error deliberately ignored - return fmt.Errorf("compression writer: %s", err) - } - } - - if _, err := io.Copy(wc, r); err != nil { - f.Close() // error deliberately ignored - os.Remove(f.Name()) // error deliberately ignored - return fmt.Errorf("i/o copy: %s", err) - } - - if err := wc.Close(); err != nil { - f.Close() // error deliberately ignored - os.Remove(f.Name()) // error deliberately ignored - return fmt.Errorf("compression close: %s", err) - } - - if sync { - if err := f.Sync(); err != nil { - f.Close() // error deliberately ignored - os.Remove(f.Name()) // error deliberately ignored - return fmt.Errorf("file sync: %s", err) - } - } - - if err := f.Close(); err != nil { - return fmt.Errorf("file close: %s", err) - } - - if f.Name() != d.completeFilename(key) { - if err := os.Rename(f.Name(), d.completeFilename(key)); err != nil { - os.Remove(f.Name()) // error deliberately ignored - return fmt.Errorf("rename: %s", err) - } - } - - if d.Index != nil { - d.Index.Insert(key) - } - - d.bustCacheWithLock(key) // cache only on read - - return nil -} - -// Import imports the source file into diskv under the destination key. If the -// destination key already exists, it's overwritten. If move is true, the -// source file is removed after a successful import. 
-func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) { - if dstKey == "" { - return errEmptyKey - } - - if fi, err := os.Stat(srcFilename); err != nil { - return err - } else if fi.IsDir() { - return errImportDirectory - } - - d.mu.Lock() - defer d.mu.Unlock() - - if err := d.ensurePathWithLock(dstKey); err != nil { - return fmt.Errorf("ensure path: %s", err) - } - - if move { - if err := syscall.Rename(srcFilename, d.completeFilename(dstKey)); err == nil { - d.bustCacheWithLock(dstKey) - return nil - } else if err != syscall.EXDEV { - // If it failed due to being on a different device, fall back to copying - return err - } - } - - f, err := os.Open(srcFilename) - if err != nil { - return err - } - defer f.Close() - err = d.writeStreamWithLock(dstKey, f, false) - if err == nil && move { - err = os.Remove(srcFilename) - } - return err -} - -// Read reads the key and returns the value. -// If the key is available in the cache, Read won't touch the disk. -// If the key is not in the cache, Read will have the side-effect of -// lazily caching the value. -func (d *Diskv) Read(key string) ([]byte, error) { - rc, err := d.ReadStream(key, false) - if err != nil { - return []byte{}, err - } - defer rc.Close() - return ioutil.ReadAll(rc) -} - -// ReadStream reads the key and returns the value (data) as an io.ReadCloser. -// If the value is cached from a previous read, and direct is false, -// ReadStream will use the cached value. Otherwise, it will return a handle to -// the file on disk, and cache the data on read. -// -// If direct is true, ReadStream will lazily delete any cached value for the -// key, and return a direct handle to the file on disk. -// -// If compression is enabled, ReadStream taps into the io.Reader stream prior -// to decompression, and caches the compressed data. -func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) { - d.mu.RLock() - defer d.mu.RUnlock() - - if val, ok := d.cache[key]; ok { - if !direct { - buf := bytes.NewBuffer(val) - if d.Compression != nil { - return d.Compression.Reader(buf) - } - return ioutil.NopCloser(buf), nil - } - - go func() { - d.mu.Lock() - defer d.mu.Unlock() - d.uncacheWithLock(key, uint64(len(val))) - }() - } - - return d.readWithRLock(key) -} - -// read ignores the cache, and returns an io.ReadCloser representing the -// decompressed data for the given key, streamed from the disk. Clients should -// acquire a read lock on the Diskv and check the cache themselves before -// calling read. 
-func (d *Diskv) readWithRLock(key string) (io.ReadCloser, error) { - filename := d.completeFilename(key) - - fi, err := os.Stat(filename) - if err != nil { - return nil, err - } - if fi.IsDir() { - return nil, os.ErrNotExist - } - - f, err := os.Open(filename) - if err != nil { - return nil, err - } - - var r io.Reader - if d.CacheSizeMax > 0 { - r = newSiphon(f, d, key) - } else { - r = &closingReader{f} - } - - var rc = io.ReadCloser(ioutil.NopCloser(r)) - if d.Compression != nil { - rc, err = d.Compression.Reader(r) - if err != nil { - return nil, err - } - } - - return rc, nil -} - -// closingReader provides a Reader that automatically closes the -// embedded ReadCloser when it reaches EOF -type closingReader struct { - rc io.ReadCloser -} - -func (cr closingReader) Read(p []byte) (int, error) { - n, err := cr.rc.Read(p) - if err == io.EOF { - if closeErr := cr.rc.Close(); closeErr != nil { - return n, closeErr // close must succeed for Read to succeed - } - } - return n, err -} - -// siphon is like a TeeReader: it copies all data read through it to an -// internal buffer, and moves that buffer to the cache at EOF. -type siphon struct { - f *os.File - d *Diskv - key string - buf *bytes.Buffer -} - -// newSiphon constructs a siphoning reader that represents the passed file. -// When a successful series of reads ends in an EOF, the siphon will write -// the buffered data to Diskv's cache under the given key. -func newSiphon(f *os.File, d *Diskv, key string) io.Reader { - return &siphon{ - f: f, - d: d, - key: key, - buf: &bytes.Buffer{}, - } -} - -// Read implements the io.Reader interface for siphon. -func (s *siphon) Read(p []byte) (int, error) { - n, err := s.f.Read(p) - - if err == nil { - return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed - } - - if err == io.EOF { - s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail - if closeErr := s.f.Close(); closeErr != nil { - return n, closeErr // close must succeed for Read to succeed - } - return n, err - } - - return n, err -} - -// Erase synchronously erases the given key from the disk and the cache. -func (d *Diskv) Erase(key string) error { - d.mu.Lock() - defer d.mu.Unlock() - - d.bustCacheWithLock(key) - - // erase from index - if d.Index != nil { - d.Index.Delete(key) - } - - // erase from disk - filename := d.completeFilename(key) - if s, err := os.Stat(filename); err == nil { - if s.IsDir() { - return errBadKey - } - if err = os.Remove(filename); err != nil { - return err - } - } else { - // Return err as-is so caller can do os.IsNotExist(err). - return err - } - - // clean up and return - d.pruneDirsWithLock(key) - return nil -} - -// EraseAll will delete all of the data from the store, both in the cache and on -// the disk. Note that EraseAll doesn't distinguish diskv-related data from non- -// diskv-related data. Care should be taken to always specify a diskv base -// directory that is exclusively for diskv data. -func (d *Diskv) EraseAll() error { - d.mu.Lock() - defer d.mu.Unlock() - d.cache = make(map[string][]byte) - d.cacheSize = 0 - if d.TempDir != "" { - os.RemoveAll(d.TempDir) // errors ignored - } - return os.RemoveAll(d.BasePath) -} - -// Has returns true if the given key exists. 
-func (d *Diskv) Has(key string) bool { - d.mu.Lock() - defer d.mu.Unlock() - - if _, ok := d.cache[key]; ok { - return true - } - - filename := d.completeFilename(key) - s, err := os.Stat(filename) - if err != nil { - return false - } - if s.IsDir() { - return false - } - - return true -} - -// Keys returns a channel that will yield every key accessible by the store, -// in undefined order. If a cancel channel is provided, closing it will -// terminate and close the keys channel. -func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string { - return d.KeysPrefix("", cancel) -} - -// KeysPrefix returns a channel that will yield every key accessible by the -// store with the given prefix, in undefined order. If a cancel channel is -// provided, closing it will terminate and close the keys channel. If the -// provided prefix is the empty string, all keys will be yielded. -func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string { - var prepath string - if prefix == "" { - prepath = d.BasePath - } else { - prepath = d.pathFor(prefix) - } - c := make(chan string) - go func() { - filepath.Walk(prepath, walker(c, prefix, cancel)) - close(c) - }() - return c -} - -// walker returns a function which satisfies the filepath.WalkFunc interface. -// It sends every non-directory file entry down the channel c. -func walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc { - return func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if info.IsDir() || !strings.HasPrefix(info.Name(), prefix) { - return nil // "pass" - } - - select { - case c <- info.Name(): - case <-cancel: - return errCanceled - } - - return nil - } -} - -// pathFor returns the absolute path for location on the filesystem where the -// data for the given key will be stored. -func (d *Diskv) pathFor(key string) string { - return filepath.Join(d.BasePath, filepath.Join(d.Transform(key)...)) -} - -// ensurePathWithLock is a helper function that generates all necessary -// directories on the filesystem for the given key. -func (d *Diskv) ensurePathWithLock(key string) error { - return os.MkdirAll(d.pathFor(key), d.PathPerm) -} - -// completeFilename returns the absolute path to the file for the given key. -func (d *Diskv) completeFilename(key string) string { - return filepath.Join(d.pathFor(key), key) -} - -// cacheWithLock attempts to cache the given key-value pair in the store's -// cache. It can fail if the value is larger than the cache's maximum size. -func (d *Diskv) cacheWithLock(key string, val []byte) error { - valueSize := uint64(len(val)) - if err := d.ensureCacheSpaceWithLock(valueSize); err != nil { - return fmt.Errorf("%s; not caching", err) - } - - // be very strict about memory guarantees - if (d.cacheSize + valueSize) > d.CacheSizeMax { - panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax)) - } - - d.cache[key] = val - d.cacheSize += valueSize - return nil -} - -// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock. 
-func (d *Diskv) cacheWithoutLock(key string, val []byte) error { - d.mu.Lock() - defer d.mu.Unlock() - return d.cacheWithLock(key, val) -} - -func (d *Diskv) bustCacheWithLock(key string) { - if val, ok := d.cache[key]; ok { - d.uncacheWithLock(key, uint64(len(val))) - } -} - -func (d *Diskv) uncacheWithLock(key string, sz uint64) { - d.cacheSize -= sz - delete(d.cache, key) -} - -// pruneDirsWithLock deletes empty directories in the path walk leading to the -// key k. Typically this function is called after an Erase is made. -func (d *Diskv) pruneDirsWithLock(key string) error { - pathlist := d.Transform(key) - for i := range pathlist { - dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...)) - - // thanks to Steven Blenkinsop for this snippet - switch fi, err := os.Stat(dir); true { - case err != nil: - return err - case !fi.IsDir(): - panic(fmt.Sprintf("corrupt dirstate at %s", dir)) - } - - nlinks, err := filepath.Glob(filepath.Join(dir, "*")) - if err != nil { - return err - } else if len(nlinks) > 0 { - return nil // has subdirs -- do not prune - } - if err = os.Remove(dir); err != nil { - return err - } - } - - return nil -} - -// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order -// until the cache has at least valueSize bytes available. -func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error { - if valueSize > d.CacheSizeMax { - return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax) - } - - safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax } - - for key, val := range d.cache { - if safe() { - break - } - - d.uncacheWithLock(key, uint64(len(val))) - } - - if !safe() { - panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax)) - } - - return nil -} - -// nopWriteCloser wraps an io.Writer and provides a no-op Close method to -// satisfy the io.WriteCloser interface. -type nopWriteCloser struct { - io.Writer -} - -func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) } -func (wc *nopWriteCloser) Close() error { return nil } diff --git a/src/vendor/github.com/peterbourgon/diskv/index.go b/src/vendor/github.com/peterbourgon/diskv/index.go deleted file mode 100644 index 96fee5152..000000000 --- a/src/vendor/github.com/peterbourgon/diskv/index.go +++ /dev/null @@ -1,115 +0,0 @@ -package diskv - -import ( - "sync" - - "github.com/google/btree" -) - -// Index is a generic interface for things that can -// provide an ordered list of keys. -type Index interface { - Initialize(less LessFunction, keys <-chan string) - Insert(key string) - Delete(key string) - Keys(from string, n int) []string -} - -// LessFunction is used to initialize an Index of keys in a specific order. -type LessFunction func(string, string) bool - -// btreeString is a custom data type that satisfies the BTree Less interface, -// making the strings it wraps sortable by the BTree package. -type btreeString struct { - s string - l LessFunction -} - -// Less satisfies the BTree.Less interface using the btreeString's LessFunction. -func (s btreeString) Less(i btree.Item) bool { - return s.l(s.s, i.(btreeString).s) -} - -// BTreeIndex is an implementation of the Index interface using google/btree. -type BTreeIndex struct { - sync.RWMutex - LessFunction - *btree.BTree -} - -// Initialize populates the BTree tree with data from the keys channel, -// according to the passed less function. It's destructive to the BTreeIndex. 
-func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) { - i.Lock() - defer i.Unlock() - i.LessFunction = less - i.BTree = rebuild(less, keys) -} - -// Insert inserts the given key (only) into the BTree tree. -func (i *BTreeIndex) Insert(key string) { - i.Lock() - defer i.Unlock() - if i.BTree == nil || i.LessFunction == nil { - panic("uninitialized index") - } - i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction}) -} - -// Delete removes the given key (only) from the BTree tree. -func (i *BTreeIndex) Delete(key string) { - i.Lock() - defer i.Unlock() - if i.BTree == nil || i.LessFunction == nil { - panic("uninitialized index") - } - i.BTree.Delete(btreeString{s: key, l: i.LessFunction}) -} - -// Keys yields a maximum of n keys in order. If the passed 'from' key is empty, -// Keys will return the first n keys. If the passed 'from' key is non-empty, the -// first key in the returned slice will be the key that immediately follows the -// passed key, in key order. -func (i *BTreeIndex) Keys(from string, n int) []string { - i.RLock() - defer i.RUnlock() - - if i.BTree == nil || i.LessFunction == nil { - panic("uninitialized index") - } - - if i.BTree.Len() <= 0 { - return []string{} - } - - btreeFrom := btreeString{s: from, l: i.LessFunction} - skipFirst := true - if len(from) <= 0 || !i.BTree.Has(btreeFrom) { - // no such key, so fabricate an always-smallest item - btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }} - skipFirst = false - } - - keys := []string{} - iterator := func(i btree.Item) bool { - keys = append(keys, i.(btreeString).s) - return len(keys) < n - } - i.BTree.AscendGreaterOrEqual(btreeFrom, iterator) - - if skipFirst && len(keys) > 0 { - keys = keys[1:] - } - - return keys -} - -// rebuildIndex does the work of regenerating the index -// with the given keys. 
-func rebuild(less LessFunction, keys <-chan string) *btree.BTree { - tree := btree.New(2) - for key := range keys { - tree.ReplaceOrInsert(btreeString{s: key, l: less}) - } - return tree -} diff --git a/src/vendor/github.com/spf13/cobra/.gitignore b/src/vendor/github.com/spf13/cobra/.gitignore deleted file mode 100644 index c7b459e4d..000000000 --- a/src/vendor/github.com/spf13/cobra/.gitignore +++ /dev/null @@ -1,39 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore -# swap -[._]*.s[a-w][a-z] -[._]s[a-w][a-z] -# session -Session.vim -# temporary -.netrwhist -*~ -# auto-generated tag files -tags - -*.exe -cobra.test -bin - -.idea/ -*.iml diff --git a/src/vendor/github.com/spf13/cobra/.golangci.yml b/src/vendor/github.com/spf13/cobra/.golangci.yml deleted file mode 100644 index 0d6e61793..000000000 --- a/src/vendor/github.com/spf13/cobra/.golangci.yml +++ /dev/null @@ -1,48 +0,0 @@ -run: - deadline: 5m - -linters: - disable-all: true - enable: - #- bodyclose - - deadcode - #- depguard - #- dogsled - #- dupl - - errcheck - #- exhaustive - #- funlen - - gas - #- gochecknoinits - - goconst - #- gocritic - #- gocyclo - #- gofmt - - goimports - - golint - #- gomnd - #- goprintffuncname - #- gosec - #- gosimple - - govet - - ineffassign - - interfacer - #- lll - - maligned - - megacheck - #- misspell - #- nakedret - #- noctx - #- nolintlint - #- rowserrcheck - #- scopelint - #- staticcheck - - structcheck - #- stylecheck - #- typecheck - - unconvert - #- unparam - #- unused - - varcheck - #- whitespace - fast: false diff --git a/src/vendor/github.com/spf13/cobra/.mailmap b/src/vendor/github.com/spf13/cobra/.mailmap deleted file mode 100644 index 94ec53068..000000000 --- a/src/vendor/github.com/spf13/cobra/.mailmap +++ /dev/null @@ -1,3 +0,0 @@ -Steve Francia -Bjørn Erik Pedersen -Fabiano Franz diff --git a/src/vendor/github.com/spf13/cobra/CONDUCT.md b/src/vendor/github.com/spf13/cobra/CONDUCT.md deleted file mode 100644 index 9d16f88fd..000000000 --- a/src/vendor/github.com/spf13/cobra/CONDUCT.md +++ /dev/null @@ -1,37 +0,0 @@ -## Cobra User Contract - -### Versioning -Cobra will follow a steady release cadence. Non breaking changes will be released as minor versions quarterly. Patch bug releases are at the discretion of the maintainers. Users can expect security patch fixes to be released within relatively short order of a CVE becoming known. For more information on security patch fixes see the CVE section below. Releases will follow [Semantic Versioning](https://semver.org/). Users tracking the Master branch should expect unpredictable breaking changes as the project continues to move forward. For stability, it is highly recommended to use a release. - -### Backward Compatibility -We will maintain two major releases in a moving window. The N-1 release will only receive bug fixes and security updates and will be dropped once N+1 is released. - -### Deprecation -Deprecation of Go versions or dependent packages will only occur in major releases. To reduce the change of this taking users by surprise, any large deprecation will be preceded by an announcement in the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) and an Issue on Github. 
- -### CVE -Maintainers will make every effort to release security patches in the case of a medium to high severity CVE directly impacting the library. The speed in which these patches reach a release is up to the discretion of the maintainers. A low severity CVE may be a lower priority than a high severity one. - -### Communication -Cobra maintainers will use GitHub issues and the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) as the primary means of communication with the community. This is to foster open communication with all users and contributors. - -### Breaking Changes -Breaking changes are generally allowed in the master branch, as this is the branch used to develop the next release of Cobra. - -There may be times, however, when master is closed for breaking changes. This is likely to happen as we near the release of a new version. - -Breaking changes are not allowed in release branches, as these represent minor versions that have already been released. These version have consumers who expect the APIs, behaviors, etc, to remain stable during the lifetime of the patch stream for the minor release. - -Examples of breaking changes include: -- Removing or renaming exported constant, variable, type, or function. -- Updating the version of critical libraries such as `spf13/pflag`, `spf13/viper` etc... - - Some version updates may be acceptable for picking up bug fixes, but maintainers must exercise caution when reviewing. - -There may, at times, need to be exceptions where breaking changes are allowed in release branches. These are at the discretion of the project's maintainers, and must be carefully considered before merging. - -### CI Testing -Maintainers will ensure the Cobra test suite utilizes the current supported versions of Golang. - -### Disclaimer -Changes to this document and the contents therein are at the discretion of the maintainers. -None of the contents of this document are legally binding in any way to the maintainers or the users. diff --git a/src/vendor/github.com/spf13/cobra/CONTRIBUTING.md b/src/vendor/github.com/spf13/cobra/CONTRIBUTING.md deleted file mode 100644 index 6f356e6a8..000000000 --- a/src/vendor/github.com/spf13/cobra/CONTRIBUTING.md +++ /dev/null @@ -1,50 +0,0 @@ -# Contributing to Cobra - -Thank you so much for contributing to Cobra. We appreciate your time and help. -Here are some guidelines to help you get started. - -## Code of Conduct - -Be kind and respectful to the members of the community. Take time to educate -others who are seeking help. Harassment of any kind will not be tolerated. - -## Questions - -If you have questions regarding Cobra, feel free to ask it in the community -[#cobra Slack channel][cobra-slack] - -## Filing a bug or feature - -1. Before filing an issue, please check the existing issues to see if a - similar one was already opened. If there is one already opened, feel free - to comment on it. -1. If you believe you've found a bug, please provide detailed steps of - reproduction, the version of Cobra and anything else you believe will be - useful to help troubleshoot it (e.g. OS environment, environment variables, - etc...). Also state the current behavior vs. the expected behavior. -1. If you'd like to see a feature or an enhancement please open an issue with - a clear title and description of what the feature is and why it would be - beneficial to the project and its users. - -## Submitting changes - -1. CLA: Upon submitting a Pull Request (PR), contributors will be prompted to - sign a CLA. 
Please sign the CLA :slightly_smiling_face: -1. Tests: If you are submitting code, please ensure you have adequate tests - for the feature. Tests can be run via `go test ./...` or `make test`. -1. Since this is golang project, ensure the new code is properly formatted to - ensure code consistency. Run `make all`. - -### Quick steps to contribute - -1. Fork the project. -1. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`) -1. Create your feature branch (`git checkout -b my-new-feature`) -1. Make changes and run tests (`make test`) -1. Add them to staging (`git add .`) -1. Commit your changes (`git commit -m 'Add some feature'`) -1. Push to the branch (`git push origin my-new-feature`) -1. Create new pull request - - -[cobra-slack]: https://gophers.slack.com/archives/CD3LP1199 diff --git a/src/vendor/github.com/spf13/cobra/LICENSE.txt b/src/vendor/github.com/spf13/cobra/LICENSE.txt deleted file mode 100644 index 298f0e266..000000000 --- a/src/vendor/github.com/spf13/cobra/LICENSE.txt +++ /dev/null @@ -1,174 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/src/vendor/github.com/spf13/cobra/MAINTAINERS b/src/vendor/github.com/spf13/cobra/MAINTAINERS deleted file mode 100644 index 4c5ac3dd9..000000000 --- a/src/vendor/github.com/spf13/cobra/MAINTAINERS +++ /dev/null @@ -1,13 +0,0 @@ -maintainers: -- spf13 -- johnSchnake -- jpmcb -- marckhouzam -inactive: -- anthonyfok -- bep -- bogem -- broady -- eparis -- jharshman -- wfernandes diff --git a/src/vendor/github.com/spf13/cobra/Makefile b/src/vendor/github.com/spf13/cobra/Makefile deleted file mode 100644 index 443ef1a98..000000000 --- a/src/vendor/github.com/spf13/cobra/Makefile +++ /dev/null @@ -1,35 +0,0 @@ -BIN="./bin" -SRC=$(shell find . -name "*.go") - -ifeq (, $(shell which golangci-lint)) -$(warning "could not find golangci-lint in $(PATH), run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh") -endif - -ifeq (, $(shell which richgo)) -$(warning "could not find richgo in $(PATH), run: go get github.com/kyoh86/richgo") -endif - -.PHONY: fmt lint test install_deps clean - -default: all - -all: fmt test - -fmt: - $(info ******************** checking formatting ********************) - @test -z $(shell gofmt -l $(SRC)) || (gofmt -d $(SRC); exit 1) - -lint: - $(info ******************** running lint tools ********************) - golangci-lint run -v - -test: install_deps - $(info ******************** running tests ********************) - richgo test -v ./... - -install_deps: - $(info ******************** downloading dependencies ********************) - go get -v ./... - -clean: - rm -rf $(BIN) diff --git a/src/vendor/github.com/spf13/cobra/README.md b/src/vendor/github.com/spf13/cobra/README.md deleted file mode 100644 index 2bf152082..000000000 --- a/src/vendor/github.com/spf13/cobra/README.md +++ /dev/null @@ -1,111 +0,0 @@ -![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png) - -Cobra is a library for creating powerful modern CLI applications. - -Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/), -[Hugo](https://gohugo.io), and [Github CLI](https://github.com/cli/cli) to -name a few. 
[This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra. - -[![](https://img.shields.io/github/workflow/status/spf13/cobra/Test?longCache=tru&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest) -[![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) -[![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199) - -# Overview - -Cobra is a library providing a simple interface to create powerful modern CLI -interfaces similar to git & go tools. - -Cobra provides: -* Easy subcommand-based CLIs: `app server`, `app fetch`, etc. -* Fully POSIX-compliant flags (including short & long versions) -* Nested subcommands -* Global, local and cascading flags -* Intelligent suggestions (`app srver`... did you mean `app server`?) -* Automatic help generation for commands and flags -* Automatic help flag recognition of `-h`, `--help`, etc. -* Automatically generated shell autocomplete for your application (bash, zsh, fish, powershell) -* Automatically generated man pages for your application -* Command aliases so you can change things without breaking them -* The flexibility to define your own help, usage, etc. -* Optional seamless integration with [viper](https://github.com/spf13/viper) for 12-factor apps - -# Concepts - -Cobra is built on a structure of commands, arguments & flags. - -**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions. - -The best applications read like sentences when used, and as a result, users -intuitively know how to interact with them. - -The pattern to follow is -`APPNAME VERB NOUN --ADJECTIVE.` - or -`APPNAME COMMAND ARG --FLAG` - -A few good real world examples may better illustrate this point. - -In the following example, 'server' is a command, and 'port' is a flag: - - hugo server --port=1313 - -In this command we are telling Git to clone the url bare. - - git clone URL --bare - -## Commands - -Command is the central point of the application. Each interaction that -the application supports will be contained in a Command. A command can -have children commands and optionally run an action. - -In the example above, 'server' is the command. - -[More about cobra.Command](https://pkg.go.dev/github.com/spf13/cobra#Command) - -## Flags - -A flag is a way to modify the behavior of a command. Cobra supports -fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/). -A Cobra command can define flags that persist through to children commands -and flags that are only available to that command. - -In the example above, 'port' is the flag. - -Flag functionality is provided by the [pflag -library](https://github.com/spf13/pflag), a fork of the flag standard library -which maintains the same interface while adding POSIX compliance. - -# Installing -Using Cobra is easy. First, use `go get` to install the latest version -of the library. - -``` -go get -u github.com/spf13/cobra@latest -``` - -Next, include Cobra in your application: - -```go -import "github.com/spf13/cobra" -``` - -# Usage -`cobra-cli` is a command line program to generate cobra applications and command files. -It will bootstrap your application scaffolding to rapidly -develop a Cobra-based application. 
It is the easiest way to incorporate Cobra into your application. - -It can be installed by running: - -``` -go install github.com/spf13/cobra-cli@latest -``` - -For complete details on using the Cobra-CLI generator, please read [The Cobra Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md) - -For complete details on using the Cobra library, please read the [The Cobra User Guide](user_guide.md). - -# License - -Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt) diff --git a/src/vendor/github.com/spf13/cobra/active_help.go b/src/vendor/github.com/spf13/cobra/active_help.go deleted file mode 100644 index 0c631913d..000000000 --- a/src/vendor/github.com/spf13/cobra/active_help.go +++ /dev/null @@ -1,49 +0,0 @@ -package cobra - -import ( - "fmt" - "os" - "strings" -) - -const ( - activeHelpMarker = "_activeHelp_ " - // The below values should not be changed: programs will be using them explicitly - // in their user documentation, and users will be using them explicitly. - activeHelpEnvVarSuffix = "_ACTIVE_HELP" - activeHelpGlobalEnvVar = "COBRA_ACTIVE_HELP" - activeHelpGlobalDisable = "0" -) - -// AppendActiveHelp adds the specified string to the specified array to be used as ActiveHelp. -// Such strings will be processed by the completion script and will be shown as ActiveHelp -// to the user. -// The array parameter should be the array that will contain the completions. -// This function can be called multiple times before and/or after completions are added to -// the array. Each time this function is called with the same array, the new -// ActiveHelp line will be shown below the previous ones when completion is triggered. -func AppendActiveHelp(compArray []string, activeHelpStr string) []string { - return append(compArray, fmt.Sprintf("%s%s", activeHelpMarker, activeHelpStr)) -} - -// GetActiveHelpConfig returns the value of the ActiveHelp environment variable -// _ACTIVE_HELP where is the name of the root command in upper -// case, with all - replaced by _. -// It will always return "0" if the global environment variable COBRA_ACTIVE_HELP -// is set to "0". -func GetActiveHelpConfig(cmd *Command) string { - activeHelpCfg := os.Getenv(activeHelpGlobalEnvVar) - if activeHelpCfg != activeHelpGlobalDisable { - activeHelpCfg = os.Getenv(activeHelpEnvVar(cmd.Root().Name())) - } - return activeHelpCfg -} - -// activeHelpEnvVar returns the name of the program-specific ActiveHelp environment -// variable. It has the format _ACTIVE_HELP where is the name of the -// root command in upper case, with all - replaced by _. -func activeHelpEnvVar(name string) string { - // This format should not be changed: users will be using it explicitly. - activeHelpEnvVar := strings.ToUpper(fmt.Sprintf("%s%s", name, activeHelpEnvVarSuffix)) - return strings.ReplaceAll(activeHelpEnvVar, "-", "_") -} diff --git a/src/vendor/github.com/spf13/cobra/active_help.md b/src/vendor/github.com/spf13/cobra/active_help.md deleted file mode 100644 index 5e7f59af3..000000000 --- a/src/vendor/github.com/spf13/cobra/active_help.md +++ /dev/null @@ -1,157 +0,0 @@ -# Active Help - -Active Help is a framework provided by Cobra which allows a program to define messages (hints, warnings, etc) that will be printed during program usage. It aims to make it easier for your users to learn how to use your program. If configured by the program, Active Help is printed when the user triggers shell completion. 
- -For example, -``` -bash-5.1$ helm repo add [tab] -You must choose a name for the repo you are adding. - -bash-5.1$ bin/helm package [tab] -Please specify the path to the chart to package - -bash-5.1$ bin/helm package [tab][tab] -bin/ internal/ scripts/ pkg/ testdata/ -``` - -**Hint**: A good place to use Active Help messages is when the normal completion system does not provide any suggestions. In such cases, Active Help nicely supplements the normal shell completions to guide the user in knowing what is expected by the program. -## Supported shells - -Active Help is currently only supported for the following shells: -- Bash (using [bash completion V2](shell_completions.md#bash-completion-v2) only). Note that bash 4.4 or higher is required for the prompt to appear when an Active Help message is printed. -- Zsh - -## Adding Active Help messages - -As Active Help uses the shell completion system, the implementation of Active Help messages is done by enhancing custom dynamic completions. If you are not familiar with dynamic completions, please refer to [Shell Completions](shell_completions.md). - -Adding Active Help is done through the use of the `cobra.AppendActiveHelp(...)` function, where the program repeatedly adds Active Help messages to the list of completions. Keep reading for details. - -### Active Help for nouns - -Adding Active Help when completing a noun is done within the `ValidArgsFunction(...)` of a command. Please notice the use of `cobra.AppendActiveHelp(...)` in the following example: - -```go -cmd := &cobra.Command{ - Use: "add [NAME] [URL]", - Short: "add a chart repository", - Args: require.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - return addRepo(args) - }, - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - var comps []string - if len(args) == 0 { - comps = cobra.AppendActiveHelp(comps, "You must choose a name for the repo you are adding") - } else if len(args) == 1 { - comps = cobra.AppendActiveHelp(comps, "You must specify the URL for the repo you are adding") - } else { - comps = cobra.AppendActiveHelp(comps, "This command does not take any more arguments") - } - return comps, cobra.ShellCompDirectiveNoFileComp - }, -} -``` -The example above defines the completions (none, in this specific example) as well as the Active Help messages for the `helm repo add` command. It yields the following behavior: -``` -bash-5.1$ helm repo add [tab] -You must choose a name for the repo you are adding - -bash-5.1$ helm repo add grafana [tab] -You must specify the URL for the repo you are adding - -bash-5.1$ helm repo add grafana https://grafana.github.io/helm-charts [tab] -This command does not take any more arguments -``` -**Hint**: As can be seen in the above example, a good place to use Active Help messages is when the normal completion system does not provide any suggestions. In such cases, Active Help nicely supplements the normal shell completions. - -### Active Help for flags - -Providing Active Help for flags is done in the same fashion as for nouns, but using the completion function registered for the flag. 
For example: -```go -_ = cmd.RegisterFlagCompletionFunc("version", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - if len(args) != 2 { - return cobra.AppendActiveHelp(nil, "You must first specify the chart to install before the --version flag can be completed"), cobra.ShellCompDirectiveNoFileComp - } - return compVersionFlag(args[1], toComplete) - }) -``` -The example above prints an Active Help message when not enough information was given by the user to complete the `--version` flag. -``` -bash-5.1$ bin/helm install myrelease --version 2.0.[tab] -You must first specify the chart to install before the --version flag can be completed - -bash-5.1$ bin/helm install myrelease bitnami/solr --version 2.0.[tab][tab] -2.0.1 2.0.2 2.0.3 -``` - -## User control of Active Help - -You may want to allow your users to disable Active Help or choose between different levels of Active Help. It is entirely up to the program to define the type of configurability of Active Help that it wants to offer, if any. -Allowing to configure Active Help is entirely optional; you can use Active Help in your program without doing anything about Active Help configuration. - -The way to configure Active Help is to use the program's Active Help environment -variable. That variable is named `_ACTIVE_HELP` where `` is the name of your -program in uppercase with any `-` replaced by an `_`. The variable should be set by the user to whatever -Active Help configuration values are supported by the program. - -For example, say `helm` has chosen to support three levels for Active Help: `on`, `off`, `local`. Then a user -would set the desired behavior to `local` by doing `export HELM_ACTIVE_HELP=local` in their shell. - -For simplicity, when in `cmd.ValidArgsFunction(...)` or a flag's completion function, the program should read the -Active Help configuration using the `cobra.GetActiveHelpConfig(cmd)` function and select what Active Help messages -should or should not be added (instead of reading the environment variable directly). - -For example: -```go -ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - activeHelpLevel := cobra.GetActiveHelpConfig(cmd) - - var comps []string - if len(args) == 0 { - if activeHelpLevel != "off" { - comps = cobra.AppendActiveHelp(comps, "You must choose a name for the repo you are adding") - } - } else if len(args) == 1 { - if activeHelpLevel != "off" { - comps = cobra.AppendActiveHelp(comps, "You must specify the URL for the repo you are adding") - } - } else { - if activeHelpLevel == "local" { - comps = cobra.AppendActiveHelp(comps, "This command does not take any more arguments") - } - } - return comps, cobra.ShellCompDirectiveNoFileComp -}, -``` -**Note 1**: If the `_ACTIVE_HELP` environment variable is set to the string "0", Cobra will automatically disable all Active Help output (even if some output was specified by the program using the `cobra.AppendActiveHelp(...)` function). Using "0" can simplify your code in situations where you want to blindly disable Active Help without having to call `cobra.GetActiveHelpConfig(cmd)` explicitly. - -**Note 2**: If a user wants to disable Active Help for every single program based on Cobra, she can set the environment variable `COBRA_ACTIVE_HELP` to "0". In this case `cobra.GetActiveHelpConfig(cmd)` will return "0" no matter what the variable `_ACTIVE_HELP` is set to. 
- -**Note 3**: If the user does not set `_ACTIVE_HELP` or `COBRA_ACTIVE_HELP` (which will be a common case), the default value for the Active Help configuration returned by `cobra.GetActiveHelpConfig(cmd)` will be the empty string. -## Active Help with Cobra's default completion command - -Cobra provides a default `completion` command for programs that wish to use it. -When using the default `completion` command, Active Help is configurable in the same -fashion as described above using environment variables. You may wish to document this in more -details for your users. - -## Debugging Active Help - -Debugging your Active Help code is done in the same way as debugging your dynamic completion code, which is with Cobra's hidden `__complete` command. Please refer to [debugging shell completion](shell_completions.md#debugging) for details. - -When debugging with the `__complete` command, if you want to specify different Active Help configurations, you should use the active help environment variable. That variable is named `_ACTIVE_HELP` where any `-` is replaced by an `_`. For example, we can test deactivating some Active Help as shown below: -``` -$ HELM_ACTIVE_HELP=1 bin/helm __complete install wordpress bitnami/h -bitnami/haproxy -bitnami/harbor -_activeHelp_ WARNING: cannot re-use a name that is still in use -:0 -Completion ended with directive: ShellCompDirectiveDefault - -$ HELM_ACTIVE_HELP=0 bin/helm __complete install wordpress bitnami/h -bitnami/haproxy -bitnami/harbor -:0 -Completion ended with directive: ShellCompDirectiveDefault -``` diff --git a/src/vendor/github.com/spf13/cobra/args.go b/src/vendor/github.com/spf13/cobra/args.go deleted file mode 100644 index 20a022b30..000000000 --- a/src/vendor/github.com/spf13/cobra/args.go +++ /dev/null @@ -1,121 +0,0 @@ -package cobra - -import ( - "fmt" - "strings" -) - -type PositionalArgs func(cmd *Command, args []string) error - -// Legacy arg validation has the following behaviour: -// - root commands with no subcommands can take arbitrary arguments -// - root commands with subcommands will do subcommand validity checking -// - subcommands will always accept arbitrary arguments -func legacyArgs(cmd *Command, args []string) error { - // no subcommand, always take args - if !cmd.HasSubCommands() { - return nil - } - - // root command with subcommands, do subcommand checking. - if !cmd.HasParent() && len(args) > 0 { - return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0])) - } - return nil -} - -// NoArgs returns an error if any args are included. -func NoArgs(cmd *Command, args []string) error { - if len(args) > 0 { - return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath()) - } - return nil -} - -// OnlyValidArgs returns an error if any args are not in the list of ValidArgs. -func OnlyValidArgs(cmd *Command, args []string) error { - if len(cmd.ValidArgs) > 0 { - // Remove any description that may be included in ValidArgs. - // A description is following a tab character. - var validArgs []string - for _, v := range cmd.ValidArgs { - validArgs = append(validArgs, strings.Split(v, "\t")[0]) - } - - for _, v := range args { - if !stringInSlice(v, validArgs) { - return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0])) - } - } - } - return nil -} - -// ArbitraryArgs never returns an error. -func ArbitraryArgs(cmd *Command, args []string) error { - return nil -} - -// MinimumNArgs returns an error if there is not at least N args. 
-func MinimumNArgs(n int) PositionalArgs { - return func(cmd *Command, args []string) error { - if len(args) < n { - return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args)) - } - return nil - } -} - -// MaximumNArgs returns an error if there are more than N args. -func MaximumNArgs(n int) PositionalArgs { - return func(cmd *Command, args []string) error { - if len(args) > n { - return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args)) - } - return nil - } -} - -// ExactArgs returns an error if there are not exactly n args. -func ExactArgs(n int) PositionalArgs { - return func(cmd *Command, args []string) error { - if len(args) != n { - return fmt.Errorf("accepts %d arg(s), received %d", n, len(args)) - } - return nil - } -} - -// ExactValidArgs returns an error if -// there are not exactly N positional args OR -// there are any positional args that are not in the `ValidArgs` field of `Command` -func ExactValidArgs(n int) PositionalArgs { - return func(cmd *Command, args []string) error { - if err := ExactArgs(n)(cmd, args); err != nil { - return err - } - return OnlyValidArgs(cmd, args) - } -} - -// RangeArgs returns an error if the number of args is not within the expected range. -func RangeArgs(min int, max int) PositionalArgs { - return func(cmd *Command, args []string) error { - if len(args) < min || len(args) > max { - return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args)) - } - return nil - } -} - -// MatchAll allows combining several PositionalArgs to work in concert. -func MatchAll(pargs ...PositionalArgs) PositionalArgs { - return func(cmd *Command, args []string) error { - for _, parg := range pargs { - if err := parg(cmd, args); err != nil { - return err - } - } - return nil - } -} diff --git a/src/vendor/github.com/spf13/cobra/bash_completions.go b/src/vendor/github.com/spf13/cobra/bash_completions.go deleted file mode 100644 index cb7e19537..000000000 --- a/src/vendor/github.com/spf13/cobra/bash_completions.go +++ /dev/null @@ -1,698 +0,0 @@ -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" - "sort" - "strings" - - "github.com/spf13/pflag" -) - -// Annotations for Bash completion. -const ( - BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions" - BashCompCustom = "cobra_annotation_bash_completion_custom" - BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag" - BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir" -) - -func writePreamble(buf io.StringWriter, name string) { - WriteStringAndCheck(buf, fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name)) - WriteStringAndCheck(buf, fmt.Sprintf(` -__%[1]s_debug() -{ - if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then - echo "$*" >> "${BASH_COMP_DEBUG_FILE}" - fi -} - -# Homebrew on Macs have version 1.3 of bash-completion which doesn't include -# _init_completion. This is a very minimal version of that function. 
-__%[1]s_init_completion() -{ - COMPREPLY=() - _get_comp_words_by_ref "$@" cur prev words cword -} - -__%[1]s_index_of_word() -{ - local w word=$1 - shift - index=0 - for w in "$@"; do - [[ $w = "$word" ]] && return - index=$((index+1)) - done - index=-1 -} - -__%[1]s_contains_word() -{ - local w word=$1; shift - for w in "$@"; do - [[ $w = "$word" ]] && return - done - return 1 -} - -__%[1]s_handle_go_custom_completion() -{ - __%[1]s_debug "${FUNCNAME[0]}: cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}" - - local shellCompDirectiveError=%[3]d - local shellCompDirectiveNoSpace=%[4]d - local shellCompDirectiveNoFileComp=%[5]d - local shellCompDirectiveFilterFileExt=%[6]d - local shellCompDirectiveFilterDirs=%[7]d - - local out requestComp lastParam lastChar comp directive args - - # Prepare the command to request completions for the program. - # Calling ${words[0]} instead of directly %[1]s allows to handle aliases - args=("${words[@]:1}") - # Disable ActiveHelp which is not supported for bash completion v1 - requestComp="%[8]s=0 ${words[0]} %[2]s ${args[*]}" - - lastParam=${words[$((${#words[@]}-1))]} - lastChar=${lastParam:$((${#lastParam}-1)):1} - __%[1]s_debug "${FUNCNAME[0]}: lastParam ${lastParam}, lastChar ${lastChar}" - - if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then - # If the last parameter is complete (there is a space following it) - # We add an extra empty parameter so we can indicate this to the go method. - __%[1]s_debug "${FUNCNAME[0]}: Adding extra empty parameter" - requestComp="${requestComp} \"\"" - fi - - __%[1]s_debug "${FUNCNAME[0]}: calling ${requestComp}" - # Use eval to handle any environment variables and such - out=$(eval "${requestComp}" 2>/dev/null) - - # Extract the directive integer at the very end of the output following a colon (:) - directive=${out##*:} - # Remove the directive - out=${out%%:*} - if [ "${directive}" = "${out}" ]; then - # There is not directive specified - directive=0 - fi - __%[1]s_debug "${FUNCNAME[0]}: the completion directive is: ${directive}" - __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out}" - - if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then - # Error code. No completion. - __%[1]s_debug "${FUNCNAME[0]}: received error from custom completion go code" - return - else - if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then - if [[ $(type -t compopt) = "builtin" ]]; then - __%[1]s_debug "${FUNCNAME[0]}: activating no space" - compopt -o nospace - fi - fi - if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then - if [[ $(type -t compopt) = "builtin" ]]; then - __%[1]s_debug "${FUNCNAME[0]}: activating no file completion" - compopt +o default - fi - fi - fi - - if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then - # File extension filtering - local fullFilter filter filteringCmd - # Do not use quotes around the $out variable or else newline - # characters will be kept. - for filter in ${out}; do - fullFilter+="$filter|" - done - - filteringCmd="_filedir $fullFilter" - __%[1]s_debug "File filtering command: $filteringCmd" - $filteringCmd - elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then - # File completion for directories only - local subdir - # Use printf to strip any trailing newline - subdir=$(printf "%%s" "${out}") - if [ -n "$subdir" ]; then - __%[1]s_debug "Listing directories in $subdir" - __%[1]s_handle_subdirs_in_dir_flag "$subdir" - else - __%[1]s_debug "Listing directories in ." 
- _filedir -d - fi - else - while IFS='' read -r comp; do - COMPREPLY+=("$comp") - done < <(compgen -W "${out}" -- "$cur") - fi -} - -__%[1]s_handle_reply() -{ - __%[1]s_debug "${FUNCNAME[0]}" - local comp - case $cur in - -*) - if [[ $(type -t compopt) = "builtin" ]]; then - compopt -o nospace - fi - local allflags - if [ ${#must_have_one_flag[@]} -ne 0 ]; then - allflags=("${must_have_one_flag[@]}") - else - allflags=("${flags[*]} ${two_word_flags[*]}") - fi - while IFS='' read -r comp; do - COMPREPLY+=("$comp") - done < <(compgen -W "${allflags[*]}" -- "$cur") - if [[ $(type -t compopt) = "builtin" ]]; then - [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace - fi - - # complete after --flag=abc - if [[ $cur == *=* ]]; then - if [[ $(type -t compopt) = "builtin" ]]; then - compopt +o nospace - fi - - local index flag - flag="${cur%%=*}" - __%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}" - COMPREPLY=() - if [[ ${index} -ge 0 ]]; then - PREFIX="" - cur="${cur#*=}" - ${flags_completion[${index}]} - if [ -n "${ZSH_VERSION:-}" ]; then - # zsh completion needs --flag= prefix - eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )" - fi - fi - fi - - if [[ -z "${flag_parsing_disabled}" ]]; then - # If flag parsing is enabled, we have completed the flags and can return. - # If flag parsing is disabled, we may not know all (or any) of the flags, so we fallthrough - # to possibly call handle_go_custom_completion. - return 0; - fi - ;; - esac - - # check if we are handling a flag with special work handling - local index - __%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}" - if [[ ${index} -ge 0 ]]; then - ${flags_completion[${index}]} - return - fi - - # we are parsing a flag and don't have a special handler, no completion - if [[ ${cur} != "${words[cword]}" ]]; then - return - fi - - local completions - completions=("${commands[@]}") - if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then - completions+=("${must_have_one_noun[@]}") - elif [[ -n "${has_completion_function}" ]]; then - # if a go completion function is provided, defer to that function - __%[1]s_handle_go_custom_completion - fi - if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then - completions+=("${must_have_one_flag[@]}") - fi - while IFS='' read -r comp; do - COMPREPLY+=("$comp") - done < <(compgen -W "${completions[*]}" -- "$cur") - - if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then - while IFS='' read -r comp; do - COMPREPLY+=("$comp") - done < <(compgen -W "${noun_aliases[*]}" -- "$cur") - fi - - if [[ ${#COMPREPLY[@]} -eq 0 ]]; then - if declare -F __%[1]s_custom_func >/dev/null; then - # try command name qualified custom func - __%[1]s_custom_func - else - # otherwise fall back to unqualified for compatibility - declare -F __custom_func >/dev/null && __custom_func - fi - fi - - # available in bash-completion >= 2, not always present on macOS - if declare -F __ltrim_colon_completions >/dev/null; then - __ltrim_colon_completions "$cur" - fi - - # If there is only 1 completion and it is a flag with an = it will be completed - # but we don't want a space after the = - if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then - compopt -o nospace - fi -} - -# The arguments should be in the form "ext1|ext2|extn" -__%[1]s_handle_filename_extension_flag() -{ - local ext="$1" - _filedir "@(${ext})" -} - -__%[1]s_handle_subdirs_in_dir_flag() -{ - local dir="$1" - pushd "${dir}" >/dev/null 2>&1 && _filedir -d && 
popd >/dev/null 2>&1 || return -} - -__%[1]s_handle_flag() -{ - __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - # if a command required a flag, and we found it, unset must_have_one_flag() - local flagname=${words[c]} - local flagvalue="" - # if the word contained an = - if [[ ${words[c]} == *"="* ]]; then - flagvalue=${flagname#*=} # take in as flagvalue after the = - flagname=${flagname%%=*} # strip everything after the = - flagname="${flagname}=" # but put the = back - fi - __%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}" - if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then - must_have_one_flag=() - fi - - # if you set a flag which only applies to this command, don't show subcommands - if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then - commands=() - fi - - # keep flag value with flagname as flaghash - # flaghash variable is an associative array which is only supported in bash > 3. - if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then - if [ -n "${flagvalue}" ] ; then - flaghash[${flagname}]=${flagvalue} - elif [ -n "${words[ $((c+1)) ]}" ] ; then - flaghash[${flagname}]=${words[ $((c+1)) ]} - else - flaghash[${flagname}]="true" # pad "true" for bool flag - fi - fi - - # skip the argument to a two word flag - if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then - __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument" - c=$((c+1)) - # if we are looking for a flags value, don't show commands - if [[ $c -eq $cword ]]; then - commands=() - fi - fi - - c=$((c+1)) - -} - -__%[1]s_handle_noun() -{ - __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then - must_have_one_noun=() - elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then - must_have_one_noun=() - fi - - nouns+=("${words[c]}") - c=$((c+1)) -} - -__%[1]s_handle_command() -{ - __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - local next_command - if [[ -n ${last_command} ]]; then - next_command="_${last_command}_${words[c]//:/__}" - else - if [[ $c -eq 0 ]]; then - next_command="_%[1]s_root_command" - else - next_command="_${words[c]//:/__}" - fi - fi - c=$((c+1)) - __%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}" - declare -F "$next_command" >/dev/null && $next_command -} - -__%[1]s_handle_word() -{ - if [[ $c -ge $cword ]]; then - __%[1]s_handle_reply - return - fi - __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - if [[ "${words[c]}" == -* ]]; then - __%[1]s_handle_flag - elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then - __%[1]s_handle_command - elif [[ $c -eq 0 ]]; then - __%[1]s_handle_command - elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then - # aliashash variable is an associative array which is only supported in bash > 3. 
- if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then - words[c]=${aliashash[${words[c]}]} - __%[1]s_handle_command - else - __%[1]s_handle_noun - fi - else - __%[1]s_handle_noun - fi - __%[1]s_handle_word -} - -`, name, ShellCompNoDescRequestCmd, - ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name))) -} - -func writePostscript(buf io.StringWriter, name string) { - name = strings.ReplaceAll(name, ":", "__") - WriteStringAndCheck(buf, fmt.Sprintf("__start_%s()\n", name)) - WriteStringAndCheck(buf, fmt.Sprintf(`{ - local cur prev words cword split - declare -A flaghash 2>/dev/null || : - declare -A aliashash 2>/dev/null || : - if declare -F _init_completion >/dev/null 2>&1; then - _init_completion -s || return - else - __%[1]s_init_completion -n "=" || return - fi - - local c=0 - local flag_parsing_disabled= - local flags=() - local two_word_flags=() - local local_nonpersistent_flags=() - local flags_with_completion=() - local flags_completion=() - local commands=("%[1]s") - local command_aliases=() - local must_have_one_flag=() - local must_have_one_noun=() - local has_completion_function="" - local last_command="" - local nouns=() - local noun_aliases=() - - __%[1]s_handle_word -} - -`, name)) - WriteStringAndCheck(buf, fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then - complete -o default -F __start_%s %s -else - complete -o default -o nospace -F __start_%s %s -fi - -`, name, name, name, name)) - WriteStringAndCheck(buf, "# ex: ts=4 sw=4 et filetype=sh\n") -} - -func writeCommands(buf io.StringWriter, cmd *Command) { - WriteStringAndCheck(buf, " commands=()\n") - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() && c != cmd.helpCommand { - continue - } - WriteStringAndCheck(buf, fmt.Sprintf(" commands+=(%q)\n", c.Name())) - writeCmdAliases(buf, c) - } - WriteStringAndCheck(buf, "\n") -} - -func writeFlagHandler(buf io.StringWriter, name string, annotations map[string][]string, cmd *Command) { - for key, value := range annotations { - switch key { - case BashCompFilenameExt: - WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) - - var ext string - if len(value) > 0 { - ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|") - } else { - ext = "_filedir" - } - WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext)) - case BashCompCustom: - WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) - - if len(value) > 0 { - handlers := strings.Join(value, "; ") - WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", handlers)) - } else { - WriteStringAndCheck(buf, " flags_completion+=(:)\n") - } - case BashCompSubdirsInDir: - WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) - - var ext string - if len(value) == 1 { - ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0] - } else { - ext = "_filedir -d" - } - WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext)) - } - } -} - -const cbn = "\")\n" - -func writeShortFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) { - name := flag.Shorthand - format := " " - if len(flag.NoOptDefVal) == 0 { - format += "two_word_" - } - format += "flags+=(\"-%s" + cbn - WriteStringAndCheck(buf, fmt.Sprintf(format, name)) - writeFlagHandler(buf, "-"+name, flag.Annotations, cmd) -} - -func 
writeFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) { - name := flag.Name - format := " flags+=(\"--%s" - if len(flag.NoOptDefVal) == 0 { - format += "=" - } - format += cbn - WriteStringAndCheck(buf, fmt.Sprintf(format, name)) - if len(flag.NoOptDefVal) == 0 { - format = " two_word_flags+=(\"--%s" + cbn - WriteStringAndCheck(buf, fmt.Sprintf(format, name)) - } - writeFlagHandler(buf, "--"+name, flag.Annotations, cmd) -} - -func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) { - name := flag.Name - format := " local_nonpersistent_flags+=(\"--%[1]s" + cbn - if len(flag.NoOptDefVal) == 0 { - format += " local_nonpersistent_flags+=(\"--%[1]s=" + cbn - } - WriteStringAndCheck(buf, fmt.Sprintf(format, name)) - if len(flag.Shorthand) > 0 { - WriteStringAndCheck(buf, fmt.Sprintf(" local_nonpersistent_flags+=(\"-%s\")\n", flag.Shorthand)) - } -} - -// Setup annotations for go completions for registered flags -func prepareCustomAnnotationsForFlags(cmd *Command) { - flagCompletionMutex.RLock() - defer flagCompletionMutex.RUnlock() - for flag := range flagCompletionFunctions { - // Make sure the completion script calls the __*_go_custom_completion function for - // every registered flag. We need to do this here (and not when the flag was registered - // for completion) so that we can know the root command name for the prefix - // of ___go_custom_completion - if flag.Annotations == nil { - flag.Annotations = map[string][]string{} - } - flag.Annotations[BashCompCustom] = []string{fmt.Sprintf("__%[1]s_handle_go_custom_completion", cmd.Root().Name())} - } -} - -func writeFlags(buf io.StringWriter, cmd *Command) { - prepareCustomAnnotationsForFlags(cmd) - WriteStringAndCheck(buf, ` flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - -`) - - if cmd.DisableFlagParsing { - WriteStringAndCheck(buf, " flag_parsing_disabled=1\n") - } - - localNonPersistentFlags := cmd.LocalNonPersistentFlags() - cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - if nonCompletableFlag(flag) { - return - } - writeFlag(buf, flag, cmd) - if len(flag.Shorthand) > 0 { - writeShortFlag(buf, flag, cmd) - } - // localNonPersistentFlags are used to stop the completion of subcommands when one is set - // if TraverseChildren is true we should allow to complete subcommands - if localNonPersistentFlags.Lookup(flag.Name) != nil && !cmd.Root().TraverseChildren { - writeLocalNonPersistentFlag(buf, flag) - } - }) - cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { - if nonCompletableFlag(flag) { - return - } - writeFlag(buf, flag, cmd) - if len(flag.Shorthand) > 0 { - writeShortFlag(buf, flag, cmd) - } - }) - - WriteStringAndCheck(buf, "\n") -} - -func writeRequiredFlag(buf io.StringWriter, cmd *Command) { - WriteStringAndCheck(buf, " must_have_one_flag=()\n") - flags := cmd.NonInheritedFlags() - flags.VisitAll(func(flag *pflag.Flag) { - if nonCompletableFlag(flag) { - return - } - for key := range flag.Annotations { - switch key { - case BashCompOneRequiredFlag: - format := " must_have_one_flag+=(\"--%s" - if flag.Value.Type() != "bool" { - format += "=" - } - format += cbn - WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) - - if len(flag.Shorthand) > 0 { - WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) - } - } - } - }) -} - -func writeRequiredNouns(buf io.StringWriter, cmd *Command) { - WriteStringAndCheck(buf, " must_have_one_noun=()\n") - sort.Strings(cmd.ValidArgs) - for _, value := range 
cmd.ValidArgs { - // Remove any description that may be included following a tab character. - // Descriptions are not supported by bash completion. - value = strings.Split(value, "\t")[0] - WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) - } - if cmd.ValidArgsFunction != nil { - WriteStringAndCheck(buf, " has_completion_function=1\n") - } -} - -func writeCmdAliases(buf io.StringWriter, cmd *Command) { - if len(cmd.Aliases) == 0 { - return - } - - sort.Strings(cmd.Aliases) - - WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then`, "\n")) - for _, value := range cmd.Aliases { - WriteStringAndCheck(buf, fmt.Sprintf(" command_aliases+=(%q)\n", value)) - WriteStringAndCheck(buf, fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name())) - } - WriteStringAndCheck(buf, ` fi`) - WriteStringAndCheck(buf, "\n") -} -func writeArgAliases(buf io.StringWriter, cmd *Command) { - WriteStringAndCheck(buf, " noun_aliases=()\n") - sort.Strings(cmd.ArgAliases) - for _, value := range cmd.ArgAliases { - WriteStringAndCheck(buf, fmt.Sprintf(" noun_aliases+=(%q)\n", value)) - } -} - -func gen(buf io.StringWriter, cmd *Command) { - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() && c != cmd.helpCommand { - continue - } - gen(buf, c) - } - commandName := cmd.CommandPath() - commandName = strings.ReplaceAll(commandName, " ", "_") - commandName = strings.ReplaceAll(commandName, ":", "__") - - if cmd.Root() == cmd { - WriteStringAndCheck(buf, fmt.Sprintf("_%s_root_command()\n{\n", commandName)) - } else { - WriteStringAndCheck(buf, fmt.Sprintf("_%s()\n{\n", commandName)) - } - - WriteStringAndCheck(buf, fmt.Sprintf(" last_command=%q\n", commandName)) - WriteStringAndCheck(buf, "\n") - WriteStringAndCheck(buf, " command_aliases=()\n") - WriteStringAndCheck(buf, "\n") - - writeCommands(buf, cmd) - writeFlags(buf, cmd) - writeRequiredFlag(buf, cmd) - writeRequiredNouns(buf, cmd) - writeArgAliases(buf, cmd) - WriteStringAndCheck(buf, "}\n\n") -} - -// GenBashCompletion generates bash completion file and writes to the passed writer. -func (c *Command) GenBashCompletion(w io.Writer) error { - buf := new(bytes.Buffer) - writePreamble(buf, c.Name()) - if len(c.BashCompletionFunction) > 0 { - buf.WriteString(c.BashCompletionFunction + "\n") - } - gen(buf, c) - writePostscript(buf, c.Name()) - - _, err := buf.WriteTo(w) - return err -} - -func nonCompletableFlag(flag *pflag.Flag) bool { - return flag.Hidden || len(flag.Deprecated) > 0 -} - -// GenBashCompletionFile generates bash completion file. -func (c *Command) GenBashCompletionFile(filename string) error { - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return c.GenBashCompletion(outFile) -} diff --git a/src/vendor/github.com/spf13/cobra/bash_completions.md b/src/vendor/github.com/spf13/cobra/bash_completions.md deleted file mode 100644 index 52919b2fa..000000000 --- a/src/vendor/github.com/spf13/cobra/bash_completions.md +++ /dev/null @@ -1,93 +0,0 @@ -# Generating Bash Completions For Your cobra.Command - -Please refer to [Shell Completions](shell_completions.md) for details. - -## Bash legacy dynamic completions - -For backward compatibility, Cobra still supports its legacy dynamic completion solution (described below). Unlike the `ValidArgsFunction` solution, the legacy solution will only work for Bash shell-completion and not for other shells. 
This legacy solution can be used along-side `ValidArgsFunction` and `RegisterFlagCompletionFunc()`, as long as both solutions are not used for the same command. This provides a path to gradually migrate from the legacy solution to the new solution. - -**Note**: Cobra's default `completion` command uses bash completion V2. If you are currently using Cobra's legacy dynamic completion solution, you should not use the default `completion` command but continue using your own. - -The legacy solution allows you to inject bash functions into the bash completion script. Those bash functions are responsible for providing the completion choices for your own completions. - -Some code that works in kubernetes: - -```bash -const ( - bash_completion_func = `__kubectl_parse_get() -{ - local kubectl_output out - if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then - out=($(echo "${kubectl_output}" | awk '{print $1}')) - COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) ) - fi -} - -__kubectl_get_resource() -{ - if [[ ${#nouns[@]} -eq 0 ]]; then - return 1 - fi - __kubectl_parse_get ${nouns[${#nouns[@]} -1]} - if [[ $? -eq 0 ]]; then - return 0 - fi -} - -__kubectl_custom_func() { - case ${last_command} in - kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop) - __kubectl_get_resource - return - ;; - *) - ;; - esac -} -`) -``` - -And then I set that in my command definition: - -```go -cmds := &cobra.Command{ - Use: "kubectl", - Short: "kubectl controls the Kubernetes cluster manager", - Long: `kubectl controls the Kubernetes cluster manager. - -Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`, - Run: runHelp, - BashCompletionFunction: bash_completion_func, -} -``` - -The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__kubectl_custom_func()` (`___custom_func()`) to be called when the built in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__kubectl_customc_func()` will run because the cobra.Command only understood "kubectl" and "get." `__kubectl_custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods! 
- -Similarly, for flags: - -```go - annotation := make(map[string][]string) - annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"} - - flag := &pflag.Flag{ - Name: "namespace", - Usage: usage, - Annotations: annotation, - } - cmd.Flags().AddFlag(flag) -``` - -In addition add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction` -value, e.g.: - -```bash -__kubectl_get_namespaces() -{ - local template - template="{{ range .items }}{{ .metadata.name }} {{ end }}" - local kubectl_out - if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then - COMPREPLY=( $( compgen -W "${kubectl_out}[*]" -- "$cur" ) ) - fi -} -``` diff --git a/src/vendor/github.com/spf13/cobra/bash_completionsV2.go b/src/vendor/github.com/spf13/cobra/bash_completionsV2.go deleted file mode 100644 index 767bf0312..000000000 --- a/src/vendor/github.com/spf13/cobra/bash_completionsV2.go +++ /dev/null @@ -1,369 +0,0 @@ -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" -) - -func (c *Command) genBashCompletion(w io.Writer, includeDesc bool) error { - buf := new(bytes.Buffer) - genBashComp(buf, c.Name(), includeDesc) - _, err := buf.WriteTo(w) - return err -} - -func genBashComp(buf io.StringWriter, name string, includeDesc bool) { - compCmd := ShellCompRequestCmd - if !includeDesc { - compCmd = ShellCompNoDescRequestCmd - } - - WriteStringAndCheck(buf, fmt.Sprintf(`# bash completion V2 for %-36[1]s -*- shell-script -*- - -__%[1]s_debug() -{ - if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then - echo "$*" >> "${BASH_COMP_DEBUG_FILE}" - fi -} - -# Macs have bash3 for which the bash-completion package doesn't include -# _init_completion. This is a minimal version of that function. -__%[1]s_init_completion() -{ - COMPREPLY=() - _get_comp_words_by_ref "$@" cur prev words cword -} - -# This function calls the %[1]s program to obtain the completion -# results and the directive. It fills the 'out' and 'directive' vars. -__%[1]s_get_completion_results() { - local requestComp lastParam lastChar args - - # Prepare the command to request completions for the program. - # Calling ${words[0]} instead of directly %[1]s allows to handle aliases - args=("${words[@]:1}") - requestComp="${words[0]} %[2]s ${args[*]}" - - lastParam=${words[$((${#words[@]}-1))]} - lastChar=${lastParam:$((${#lastParam}-1)):1} - __%[1]s_debug "lastParam ${lastParam}, lastChar ${lastChar}" - - if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then - # If the last parameter is complete (there is a space following it) - # We add an extra empty parameter so we can indicate this to the go method. 
- __%[1]s_debug "Adding extra empty parameter" - requestComp="${requestComp} ''" - fi - - # When completing a flag with an = (e.g., %[1]s -n=) - # bash focuses on the part after the =, so we need to remove - # the flag part from $cur - if [[ "${cur}" == -*=* ]]; then - cur="${cur#*=}" - fi - - __%[1]s_debug "Calling ${requestComp}" - # Use eval to handle any environment variables and such - out=$(eval "${requestComp}" 2>/dev/null) - - # Extract the directive integer at the very end of the output following a colon (:) - directive=${out##*:} - # Remove the directive - out=${out%%:*} - if [ "${directive}" = "${out}" ]; then - # There is not directive specified - directive=0 - fi - __%[1]s_debug "The completion directive is: ${directive}" - __%[1]s_debug "The completions are: ${out}" -} - -__%[1]s_process_completion_results() { - local shellCompDirectiveError=%[3]d - local shellCompDirectiveNoSpace=%[4]d - local shellCompDirectiveNoFileComp=%[5]d - local shellCompDirectiveFilterFileExt=%[6]d - local shellCompDirectiveFilterDirs=%[7]d - - if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then - # Error code. No completion. - __%[1]s_debug "Received error from custom completion go code" - return - else - if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then - if [[ $(type -t compopt) = "builtin" ]]; then - __%[1]s_debug "Activating no space" - compopt -o nospace - else - __%[1]s_debug "No space directive not supported in this version of bash" - fi - fi - if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then - if [[ $(type -t compopt) = "builtin" ]]; then - __%[1]s_debug "Activating no file completion" - compopt +o default - else - __%[1]s_debug "No file completion directive not supported in this version of bash" - fi - fi - fi - - # Separate activeHelp from normal completions - local completions=() - local activeHelp=() - __%[1]s_extract_activeHelp - - if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then - # File extension filtering - local fullFilter filter filteringCmd - - # Do not use quotes around the $completions variable or else newline - # characters will be kept. - for filter in ${completions[*]}; do - fullFilter+="$filter|" - done - - filteringCmd="_filedir $fullFilter" - __%[1]s_debug "File filtering command: $filteringCmd" - $filteringCmd - elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then - # File completion for directories only - - # Use printf to strip any trailing newline - local subdir - subdir=$(printf "%%s" "${completions[0]}") - if [ -n "$subdir" ]; then - __%[1]s_debug "Listing directories in $subdir" - pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return - else - __%[1]s_debug "Listing directories in ." - _filedir -d - fi - else - __%[1]s_handle_completion_types - fi - - __%[1]s_handle_special_char "$cur" : - __%[1]s_handle_special_char "$cur" = - - # Print the activeHelp statements before we finish - if [ ${#activeHelp} -ne 0 ]; then - printf "\n"; - printf "%%s\n" "${activeHelp[@]}" - printf "\n" - - # The prompt format is only available from bash 4.4. - # We test if it is available before using it. - if (x=${PS1@P}) 2> /dev/null; then - printf "%%s" "${PS1@P}${COMP_LINE[@]}" - else - # Can't print the prompt. Just print the - # text the user had typed, it is workable enough. - printf "%%s" "${COMP_LINE[@]}" - fi - fi -} - -# Separate activeHelp lines from real completions. -# Fills the $activeHelp and $completions arrays. 
-__%[1]s_extract_activeHelp() { - local activeHelpMarker="%[8]s" - local endIndex=${#activeHelpMarker} - - while IFS='' read -r comp; do - if [ "${comp:0:endIndex}" = "$activeHelpMarker" ]; then - comp=${comp:endIndex} - __%[1]s_debug "ActiveHelp found: $comp" - if [ -n "$comp" ]; then - activeHelp+=("$comp") - fi - else - # Not an activeHelp line but a normal completion - completions+=("$comp") - fi - done < <(printf "%%s\n" "${out}") -} - -__%[1]s_handle_completion_types() { - __%[1]s_debug "__%[1]s_handle_completion_types: COMP_TYPE is $COMP_TYPE" - - case $COMP_TYPE in - 37|42) - # Type: menu-complete/menu-complete-backward and insert-completions - # If the user requested inserting one completion at a time, or all - # completions at once on the command-line we must remove the descriptions. - # https://github.com/spf13/cobra/issues/1508 - local tab=$'\t' comp - while IFS='' read -r comp; do - [[ -z $comp ]] && continue - # Strip any description - comp=${comp%%%%$tab*} - # Only consider the completions that match - if [[ $comp == "$cur"* ]]; then - COMPREPLY+=("$comp") - fi - done < <(printf "%%s\n" "${completions[@]}") - ;; - - *) - # Type: complete (normal completion) - __%[1]s_handle_standard_completion_case - ;; - esac -} - -__%[1]s_handle_standard_completion_case() { - local tab=$'\t' comp - - # Short circuit to optimize if we don't have descriptions - if [[ "${completions[*]}" != *$tab* ]]; then - IFS=$'\n' read -ra COMPREPLY -d '' < <(compgen -W "${completions[*]}" -- "$cur") - return 0 - fi - - local longest=0 - local compline - # Look for the longest completion so that we can format things nicely - while IFS='' read -r compline; do - [[ -z $compline ]] && continue - # Strip any description before checking the length - comp=${compline%%%%$tab*} - # Only consider the completions that match - [[ $comp == "$cur"* ]] || continue - COMPREPLY+=("$compline") - if ((${#comp}>longest)); then - longest=${#comp} - fi - done < <(printf "%%s\n" "${completions[@]}") - - # If there is a single completion left, remove the description text - if [ ${#COMPREPLY[*]} -eq 1 ]; then - __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}" - comp="${COMPREPLY[0]%%%%$tab*}" - __%[1]s_debug "Removed description from single completion, which is now: ${comp}" - COMPREPLY[0]=$comp - else # Format the descriptions - __%[1]s_format_comp_descriptions $longest - fi -} - -__%[1]s_handle_special_char() -{ - local comp="$1" - local char=$2 - if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then - local word=${comp%%"${comp##*${char}}"} - local idx=${#COMPREPLY[*]} - while [[ $((--idx)) -ge 0 ]]; do - COMPREPLY[$idx]=${COMPREPLY[$idx]#"$word"} - done - fi -} - -__%[1]s_format_comp_descriptions() -{ - local tab=$'\t' - local comp desc maxdesclength - local longest=$1 - - local i ci - for ci in ${!COMPREPLY[*]}; do - comp=${COMPREPLY[ci]} - # Properly format the description string which follows a tab character if there is one - if [[ "$comp" == *$tab* ]]; then - __%[1]s_debug "Original comp: $comp" - desc=${comp#*$tab} - comp=${comp%%%%$tab*} - - # $COLUMNS stores the current shell width. - # Remove an extra 4 because we add 2 spaces and 2 parentheses. - maxdesclength=$(( COLUMNS - longest - 4 )) - - # Make sure we can fit a description of at least 8 characters - # if we are to align the descriptions. 
- if [[ $maxdesclength -gt 8 ]]; then - # Add the proper number of spaces to align the descriptions - for ((i = ${#comp} ; i < longest ; i++)); do - comp+=" " - done - else - # Don't pad the descriptions so we can fit more text after the completion - maxdesclength=$(( COLUMNS - ${#comp} - 4 )) - fi - - # If there is enough space for any description text, - # truncate the descriptions that are too long for the shell width - if [ $maxdesclength -gt 0 ]; then - if [ ${#desc} -gt $maxdesclength ]; then - desc=${desc:0:$(( maxdesclength - 1 ))} - desc+="…" - fi - comp+=" ($desc)" - fi - COMPREPLY[ci]=$comp - __%[1]s_debug "Final comp: $comp" - fi - done -} - -__start_%[1]s() -{ - local cur prev words cword split - - COMPREPLY=() - - # Call _init_completion from the bash-completion package - # to prepare the arguments properly - if declare -F _init_completion >/dev/null 2>&1; then - _init_completion -n "=:" || return - else - __%[1]s_init_completion -n "=:" || return - fi - - __%[1]s_debug - __%[1]s_debug "========= starting completion logic ==========" - __%[1]s_debug "cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}, cword is $cword" - - # The user could have moved the cursor backwards on the command-line. - # We need to trigger completion from the $cword location, so we need - # to truncate the command-line ($words) up to the $cword location. - words=("${words[@]:0:$cword+1}") - __%[1]s_debug "Truncated words[*]: ${words[*]}," - - local out directive - __%[1]s_get_completion_results - __%[1]s_process_completion_results -} - -if [[ $(type -t compopt) = "builtin" ]]; then - complete -o default -F __start_%[1]s %[1]s -else - complete -o default -o nospace -F __start_%[1]s %[1]s -fi - -# ex: ts=4 sw=4 et filetype=sh -`, name, compCmd, - ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, - activeHelpMarker)) -} - -// GenBashCompletionFileV2 generates Bash completion version 2. -func (c *Command) GenBashCompletionFileV2(filename string, includeDesc bool) error { - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return c.GenBashCompletionV2(outFile, includeDesc) -} - -// GenBashCompletionV2 generates Bash completion file version 2 -// and writes it to the passed writer. -func (c *Command) GenBashCompletionV2(w io.Writer, includeDesc bool) error { - return c.genBashCompletion(w, includeDesc) -} diff --git a/src/vendor/github.com/spf13/cobra/cobra.go b/src/vendor/github.com/spf13/cobra/cobra.go deleted file mode 100644 index d6cbfd719..000000000 --- a/src/vendor/github.com/spf13/cobra/cobra.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright © 2013 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Commands similar to git, go tools and other modern CLI tools -// inspired by go, go-Commander, gh and subcommand - -package cobra - -import ( - "fmt" - "io" - "os" - "reflect" - "strconv" - "strings" - "text/template" - "time" - "unicode" -) - -var templateFuncs = template.FuncMap{ - "trim": strings.TrimSpace, - "trimRightSpace": trimRightSpace, - "trimTrailingWhitespaces": trimRightSpace, - "appendIfNotPresent": appendIfNotPresent, - "rpad": rpad, - "gt": Gt, - "eq": Eq, -} - -var initializers []func() - -// EnablePrefixMatching allows to set automatic prefix matching. Automatic prefix matching can be a dangerous thing -// to automatically enable in CLI tools. -// Set this to true to enable it. -var EnablePrefixMatching = false - -// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default. -// To disable sorting, set it to false. -var EnableCommandSorting = true - -// MousetrapHelpText enables an information splash screen on Windows -// if the CLI is started from explorer.exe. -// To disable the mousetrap, just set this variable to blank string (""). -// Works only on Microsoft Windows. -var MousetrapHelpText = `This is a command line tool. - -You need to open cmd.exe and run it from there. -` - -// MousetrapDisplayDuration controls how long the MousetrapHelpText message is displayed on Windows -// if the CLI is started from explorer.exe. Set to 0 to wait for the return key to be pressed. -// To disable the mousetrap, just set MousetrapHelpText to blank string (""). -// Works only on Microsoft Windows. -var MousetrapDisplayDuration = 5 * time.Second - -// AddTemplateFunc adds a template function that's available to Usage and Help -// template generation. -func AddTemplateFunc(name string, tmplFunc interface{}) { - templateFuncs[name] = tmplFunc -} - -// AddTemplateFuncs adds multiple template functions that are available to Usage and -// Help template generation. -func AddTemplateFuncs(tmplFuncs template.FuncMap) { - for k, v := range tmplFuncs { - templateFuncs[k] = v - } -} - -// OnInitialize sets the passed functions to be run when each command's -// Execute method is called. -func OnInitialize(y ...func()) { - initializers = append(initializers, y...) -} - -// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. - -// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans, -// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as -// ints and then compared. -func Gt(a interface{}, b interface{}) bool { - var left, right int64 - av := reflect.ValueOf(a) - - switch av.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - left = int64(av.Len()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - left = av.Int() - case reflect.String: - left, _ = strconv.ParseInt(av.String(), 10, 64) - } - - bv := reflect.ValueOf(b) - - switch bv.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - right = int64(bv.Len()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - right = bv.Int() - case reflect.String: - right, _ = strconv.ParseInt(bv.String(), 10, 64) - } - - return left > right -} - -// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. - -// Eq takes two types and checks whether they are equal. 
Supported types are int and string. Unsupported types will panic. -func Eq(a interface{}, b interface{}) bool { - av := reflect.ValueOf(a) - bv := reflect.ValueOf(b) - - switch av.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - panic("Eq called on unsupported type") - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return av.Int() == bv.Int() - case reflect.String: - return av.String() == bv.String() - } - return false -} - -func trimRightSpace(s string) string { - return strings.TrimRightFunc(s, unicode.IsSpace) -} - -// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. - -// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s. -func appendIfNotPresent(s, stringToAppend string) string { - if strings.Contains(s, stringToAppend) { - return s - } - return s + " " + stringToAppend -} - -// rpad adds padding to the right of a string. -func rpad(s string, padding int) string { - template := fmt.Sprintf("%%-%ds", padding) - return fmt.Sprintf(template, s) -} - -// tmpl executes the given template text on data, writing the result to w. -func tmpl(w io.Writer, text string, data interface{}) error { - t := template.New("top") - t.Funcs(templateFuncs) - template.Must(t.Parse(text)) - return t.Execute(w, data) -} - -// ld compares two strings and returns the levenshtein distance between them. -func ld(s, t string, ignoreCase bool) int { - if ignoreCase { - s = strings.ToLower(s) - t = strings.ToLower(t) - } - d := make([][]int, len(s)+1) - for i := range d { - d[i] = make([]int, len(t)+1) - } - for i := range d { - d[i][0] = i - } - for j := range d[0] { - d[0][j] = j - } - for j := 1; j <= len(t); j++ { - for i := 1; i <= len(s); i++ { - if s[i-1] == t[j-1] { - d[i][j] = d[i-1][j-1] - } else { - min := d[i-1][j] - if d[i][j-1] < min { - min = d[i][j-1] - } - if d[i-1][j-1] < min { - min = d[i-1][j-1] - } - d[i][j] = min + 1 - } - } - - } - return d[len(s)][len(t)] -} - -func stringInSlice(a string, list []string) bool { - for _, b := range list { - if b == a { - return true - } - } - return false -} - -// CheckErr prints the msg with the prefix 'Error:' and exits with error code 1. If the msg is nil, it does nothing. -func CheckErr(msg interface{}) { - if msg != nil { - fmt.Fprintln(os.Stderr, "Error:", msg) - os.Exit(1) - } -} - -// WriteStringAndCheck writes a string into a buffer, and checks if the error is not nil. -func WriteStringAndCheck(b io.StringWriter, s string) { - _, err := b.WriteString(s) - CheckErr(err) -} diff --git a/src/vendor/github.com/spf13/cobra/command.go b/src/vendor/github.com/spf13/cobra/command.go deleted file mode 100644 index 675bb1340..000000000 --- a/src/vendor/github.com/spf13/cobra/command.go +++ /dev/null @@ -1,1696 +0,0 @@ -// Copyright © 2013 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. -// In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. -package cobra - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strings" - - flag "github.com/spf13/pflag" -) - -// FParseErrWhitelist configures Flag parse errors to be ignored -type FParseErrWhitelist flag.ParseErrorsWhitelist - -// Command is just that, a command for your application. -// E.g. 'go run ...' - 'run' is the command. Cobra requires -// you to define the usage and description as part of your command -// definition to ensure usability. -type Command struct { - // Use is the one-line usage message. - // Recommended syntax is as follow: - // [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required. - // ... indicates that you can specify multiple values for the previous argument. - // | indicates mutually exclusive information. You can use the argument to the left of the separator or the - // argument to the right of the separator. You cannot use both arguments in a single use of the command. - // { } delimits a set of mutually exclusive arguments when one of the arguments is required. If the arguments are - // optional, they are enclosed in brackets ([ ]). - // Example: add [-F file | -D dir]... [-f format] profile - Use string - - // Aliases is an array of aliases that can be used instead of the first word in Use. - Aliases []string - - // SuggestFor is an array of command names for which this command will be suggested - - // similar to aliases but only suggests. - SuggestFor []string - - // Short is the short description shown in the 'help' output. - Short string - - // Long is the long message shown in the 'help ' output. - Long string - - // Example is examples of how to use the command. - Example string - - // ValidArgs is list of all valid non-flag arguments that are accepted in shell completions - ValidArgs []string - // ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion. - // It is a dynamic version of using ValidArgs. - // Only one of ValidArgs and ValidArgsFunction can be used for a command. - ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) - - // Expected arguments - Args PositionalArgs - - // ArgAliases is List of aliases for ValidArgs. - // These are not suggested to the user in the shell completion, - // but accepted if entered manually. - ArgAliases []string - - // BashCompletionFunction is custom bash functions used by the legacy bash autocompletion generator. - // For portability with other shells, it is recommended to instead use ValidArgsFunction - BashCompletionFunction string - - // Deprecated defines, if this command is deprecated and should print this string when used. - Deprecated string - - // Annotations are key/value pairs that can be used by applications to identify or - // group commands. - Annotations map[string]string - - // Version defines the version for this command. If this value is non-empty and the command does not - // define a "version" flag, a "version" boolean flag will be added to the command and, if specified, - // will print content of the "Version" variable. A shorthand "v" flag will also be added if the - // command does not define one. 
- Version string - - // The *Run functions are executed in the following order: - // * PersistentPreRun() - // * PreRun() - // * Run() - // * PostRun() - // * PersistentPostRun() - // All functions get the same args, the arguments after the command name. - // - // PersistentPreRun: children of this command will inherit and execute. - PersistentPreRun func(cmd *Command, args []string) - // PersistentPreRunE: PersistentPreRun but returns an error. - PersistentPreRunE func(cmd *Command, args []string) error - // PreRun: children of this command will not inherit. - PreRun func(cmd *Command, args []string) - // PreRunE: PreRun but returns an error. - PreRunE func(cmd *Command, args []string) error - // Run: Typically the actual work function. Most commands will only implement this. - Run func(cmd *Command, args []string) - // RunE: Run but returns an error. - RunE func(cmd *Command, args []string) error - // PostRun: run after the Run command. - PostRun func(cmd *Command, args []string) - // PostRunE: PostRun but returns an error. - PostRunE func(cmd *Command, args []string) error - // PersistentPostRun: children of this command will inherit and execute after PostRun. - PersistentPostRun func(cmd *Command, args []string) - // PersistentPostRunE: PersistentPostRun but returns an error. - PersistentPostRunE func(cmd *Command, args []string) error - - // args is actual args parsed from flags. - args []string - // flagErrorBuf contains all error messages from pflag. - flagErrorBuf *bytes.Buffer - // flags is full set of flags. - flags *flag.FlagSet - // pflags contains persistent flags. - pflags *flag.FlagSet - // lflags contains local flags. - lflags *flag.FlagSet - // iflags contains inherited flags. - iflags *flag.FlagSet - // parentsPflags is all persistent flags of cmd's parents. - parentsPflags *flag.FlagSet - // globNormFunc is the global normalization function - // that we can use on every pflag set and children commands - globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName - - // usageFunc is usage func defined by user. - usageFunc func(*Command) error - // usageTemplate is usage template defined by user. - usageTemplate string - // flagErrorFunc is func defined by user and it's called when the parsing of - // flags returns an error. - flagErrorFunc func(*Command, error) error - // helpTemplate is help template defined by user. - helpTemplate string - // helpFunc is help func defined by user. - helpFunc func(*Command, []string) - // helpCommand is command with usage 'help'. If it's not defined by user, - // cobra uses default help command. - helpCommand *Command - // versionTemplate is the version template defined by user. - versionTemplate string - - // inReader is a reader defined by the user that replaces stdin - inReader io.Reader - // outWriter is a writer defined by the user that replaces stdout - outWriter io.Writer - // errWriter is a writer defined by the user that replaces stderr - errWriter io.Writer - - // FParseErrWhitelist flag parse errors to be ignored - FParseErrWhitelist FParseErrWhitelist - - // CompletionOptions is a set of options to control the handling of shell completion - CompletionOptions CompletionOptions - - // commandsAreSorted defines, if command slice are sorted or not. - commandsAreSorted bool - // commandCalledAs is the name or alias value used to call this command. - commandCalledAs struct { - name string - called bool - } - - ctx context.Context - - // commands is the list of commands supported by this program. 
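// Illustrative sketch (editor-added, not part of this patch or of the vendored file):
// wiring together the Command fields and *Run hooks documented above. The "greet"
// command and its output are hypothetical.
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{
		Use:   "greet [name]",
		Short: "Print a greeting",
		Args:  cobra.MaximumNArgs(1),
		// Hook order for a single invocation: PersistentPreRun, PreRun, Run(E),
		// PostRun, PersistentPostRun, as listed in the struct documentation above.
		PersistentPreRun: func(cmd *cobra.Command, args []string) {
			fmt.Fprintln(cmd.OutOrStdout(), "setting up")
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			name := "world"
			if len(args) == 1 {
				name = args[0]
			}
			fmt.Fprintf(cmd.OutOrStdout(), "hello %s\n", name)
			return nil
		},
	}
	if err := root.Execute(); err != nil {
		os.Exit(1)
	}
}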
- commands []*Command - // parent is a parent command for this command. - parent *Command - // Max lengths of commands' string lengths for use in padding. - commandsMaxUseLen int - commandsMaxCommandPathLen int - commandsMaxNameLen int - - // TraverseChildren parses flags on all parents before executing child command. - TraverseChildren bool - - // Hidden defines, if this command is hidden and should NOT show up in the list of available commands. - Hidden bool - - // SilenceErrors is an option to quiet errors down stream. - SilenceErrors bool - - // SilenceUsage is an option to silence usage when an error occurs. - SilenceUsage bool - - // DisableFlagParsing disables the flag parsing. - // If this is true all flags will be passed to the command as arguments. - DisableFlagParsing bool - - // DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...") - // will be printed by generating docs for this command. - DisableAutoGenTag bool - - // DisableFlagsInUseLine will disable the addition of [flags] to the usage - // line of a command when printing help or generating docs - DisableFlagsInUseLine bool - - // DisableSuggestions disables the suggestions based on Levenshtein distance - // that go along with 'unknown command' messages. - DisableSuggestions bool - - // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions. - // Must be > 0. - SuggestionsMinimumDistance int -} - -// Context returns underlying command context. If command was executed -// with ExecuteContext or the context was set with SetContext, the -// previously set context will be returned. Otherwise, nil is returned. -// -// Notice that a call to Execute and ExecuteC will replace a nil context of -// a command with a context.Background, so a background context will be -// returned by Context after one of these functions has been called. -func (c *Command) Context() context.Context { - return c.ctx -} - -// SetContext sets context for the command. It is set to context.Background by default and will be overwritten by -// Command.ExecuteContext or Command.ExecuteContextC -func (c *Command) SetContext(ctx context.Context) { - c.ctx = ctx -} - -// SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden -// particularly useful when testing. -func (c *Command) SetArgs(a []string) { - c.args = a -} - -// SetOutput sets the destination for usage and error messages. -// If output is nil, os.Stderr is used. -// Deprecated: Use SetOut and/or SetErr instead -func (c *Command) SetOutput(output io.Writer) { - c.outWriter = output - c.errWriter = output -} - -// SetOut sets the destination for usage messages. -// If newOut is nil, os.Stdout is used. -func (c *Command) SetOut(newOut io.Writer) { - c.outWriter = newOut -} - -// SetErr sets the destination for error messages. -// If newErr is nil, os.Stderr is used. -func (c *Command) SetErr(newErr io.Writer) { - c.errWriter = newErr -} - -// SetIn sets the source for input data -// If newIn is nil, os.Stdin is used. -func (c *Command) SetIn(newIn io.Reader) { - c.inReader = newIn -} - -// SetUsageFunc sets usage function. Usage can be defined by application. -func (c *Command) SetUsageFunc(f func(*Command) error) { - c.usageFunc = f -} - -// SetUsageTemplate sets usage template. Can be defined by Application. -func (c *Command) SetUsageTemplate(s string) { - c.usageTemplate = s -} - -// SetFlagErrorFunc sets a function to generate an error when flag parsing -// fails. 
-func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) { - c.flagErrorFunc = f -} - -// SetHelpFunc sets help function. Can be defined by Application. -func (c *Command) SetHelpFunc(f func(*Command, []string)) { - c.helpFunc = f -} - -// SetHelpCommand sets help command. -func (c *Command) SetHelpCommand(cmd *Command) { - c.helpCommand = cmd -} - -// SetHelpTemplate sets help template to be used. Application can use it to set custom template. -func (c *Command) SetHelpTemplate(s string) { - c.helpTemplate = s -} - -// SetVersionTemplate sets version template to be used. Application can use it to set custom template. -func (c *Command) SetVersionTemplate(s string) { - c.versionTemplate = s -} - -// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands. -// The user should not have a cyclic dependency on commands. -func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) { - c.Flags().SetNormalizeFunc(n) - c.PersistentFlags().SetNormalizeFunc(n) - c.globNormFunc = n - - for _, command := range c.commands { - command.SetGlobalNormalizationFunc(n) - } -} - -// OutOrStdout returns output to stdout. -func (c *Command) OutOrStdout() io.Writer { - return c.getOut(os.Stdout) -} - -// OutOrStderr returns output to stderr -func (c *Command) OutOrStderr() io.Writer { - return c.getOut(os.Stderr) -} - -// ErrOrStderr returns output to stderr -func (c *Command) ErrOrStderr() io.Writer { - return c.getErr(os.Stderr) -} - -// InOrStdin returns input to stdin -func (c *Command) InOrStdin() io.Reader { - return c.getIn(os.Stdin) -} - -func (c *Command) getOut(def io.Writer) io.Writer { - if c.outWriter != nil { - return c.outWriter - } - if c.HasParent() { - return c.parent.getOut(def) - } - return def -} - -func (c *Command) getErr(def io.Writer) io.Writer { - if c.errWriter != nil { - return c.errWriter - } - if c.HasParent() { - return c.parent.getErr(def) - } - return def -} - -func (c *Command) getIn(def io.Reader) io.Reader { - if c.inReader != nil { - return c.inReader - } - if c.HasParent() { - return c.parent.getIn(def) - } - return def -} - -// UsageFunc returns either the function set by SetUsageFunc for this command -// or a parent, or it returns a default usage function. -func (c *Command) UsageFunc() (f func(*Command) error) { - if c.usageFunc != nil { - return c.usageFunc - } - if c.HasParent() { - return c.Parent().UsageFunc() - } - return func(c *Command) error { - c.mergePersistentFlags() - err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c) - if err != nil { - c.PrintErrln(err) - } - return err - } -} - -// Usage puts out the usage for the command. -// Used when a user provides invalid input. -// Can be defined by user by overriding UsageFunc. -func (c *Command) Usage() error { - return c.UsageFunc()(c) -} - -// HelpFunc returns either the function set by SetHelpFunc for this command -// or a parent, or it returns a function with default help behavior. -func (c *Command) HelpFunc() func(*Command, []string) { - if c.helpFunc != nil { - return c.helpFunc - } - if c.HasParent() { - return c.Parent().HelpFunc() - } - return func(c *Command, a []string) { - c.mergePersistentFlags() - // The help should be sent to stdout - // See https://github.com/spf13/cobra/issues/1002 - err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c) - if err != nil { - c.PrintErrln(err) - } - } -} - -// Help puts out the help for the command. -// Used when a user calls help [command]. 
-// Can be defined by user by overriding HelpFunc. -func (c *Command) Help() error { - c.HelpFunc()(c, []string{}) - return nil -} - -// UsageString returns usage string. -func (c *Command) UsageString() string { - // Storing normal writers - tmpOutput := c.outWriter - tmpErr := c.errWriter - - bb := new(bytes.Buffer) - c.outWriter = bb - c.errWriter = bb - - CheckErr(c.Usage()) - - // Setting things back to normal - c.outWriter = tmpOutput - c.errWriter = tmpErr - - return bb.String() -} - -// FlagErrorFunc returns either the function set by SetFlagErrorFunc for this -// command or a parent, or it returns a function which returns the original -// error. -func (c *Command) FlagErrorFunc() (f func(*Command, error) error) { - if c.flagErrorFunc != nil { - return c.flagErrorFunc - } - - if c.HasParent() { - return c.parent.FlagErrorFunc() - } - return func(c *Command, err error) error { - return err - } -} - -var minUsagePadding = 25 - -// UsagePadding return padding for the usage. -func (c *Command) UsagePadding() int { - if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen { - return minUsagePadding - } - return c.parent.commandsMaxUseLen -} - -var minCommandPathPadding = 11 - -// CommandPathPadding return padding for the command path. -func (c *Command) CommandPathPadding() int { - if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen { - return minCommandPathPadding - } - return c.parent.commandsMaxCommandPathLen -} - -var minNamePadding = 11 - -// NamePadding returns padding for the name. -func (c *Command) NamePadding() int { - if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen { - return minNamePadding - } - return c.parent.commandsMaxNameLen -} - -// UsageTemplate returns usage template for the command. -func (c *Command) UsageTemplate() string { - if c.usageTemplate != "" { - return c.usageTemplate - } - - if c.HasParent() { - return c.parent.UsageTemplate() - } - return `Usage:{{if .Runnable}} - {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} - {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} - -Aliases: - {{.NameAndAliases}}{{end}}{{if .HasExample}} - -Examples: -{{.Example}}{{end}}{{if .HasAvailableSubCommands}} - -Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} - -Flags: -{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} - -Global Flags: -{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}} - -Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} - {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}} - -Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} -` -} - -// HelpTemplate return help template for the command. -func (c *Command) HelpTemplate() string { - if c.helpTemplate != "" { - return c.helpTemplate - } - - if c.HasParent() { - return c.parent.HelpTemplate() - } - return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} - -{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` -} - -// VersionTemplate return version template for the command. 
-func (c *Command) VersionTemplate() string { - if c.versionTemplate != "" { - return c.versionTemplate - } - - if c.HasParent() { - return c.parent.VersionTemplate() - } - return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} -` -} - -func hasNoOptDefVal(name string, fs *flag.FlagSet) bool { - flag := fs.Lookup(name) - if flag == nil { - return false - } - return flag.NoOptDefVal != "" -} - -func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool { - if len(name) == 0 { - return false - } - - flag := fs.ShorthandLookup(name[:1]) - if flag == nil { - return false - } - return flag.NoOptDefVal != "" -} - -func stripFlags(args []string, c *Command) []string { - if len(args) == 0 { - return args - } - c.mergePersistentFlags() - - commands := []string{} - flags := c.Flags() - -Loop: - for len(args) > 0 { - s := args[0] - args = args[1:] - switch { - case s == "--": - // "--" terminates the flags - break Loop - case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags): - // If '--flag arg' then - // delete arg from args. - fallthrough // (do the same as below) - case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags): - // If '-f arg' then - // delete 'arg' from args or break the loop if len(args) <= 1. - if len(args) <= 1 { - break Loop - } else { - args = args[1:] - continue - } - case s != "" && !strings.HasPrefix(s, "-"): - commands = append(commands, s) - } - } - - return commands -} - -// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like -// openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]). -func argsMinusFirstX(args []string, x string) []string { - for i, y := range args { - if x == y { - ret := []string{} - ret = append(ret, args[:i]...) - ret = append(ret, args[i+1:]...) - return ret - } - } - return args -} - -func isFlagArg(arg string) bool { - return ((len(arg) >= 3 && arg[1] == '-') || - (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-')) -} - -// Find the target command given the args and command tree -// Meant to be run on the highest node. Only searches down. 
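// Illustrative sketch (editor-added, not part of this patch or of the vendored file):
// overriding the default version template shown above and registering an extra
// template helper with AddTemplateFunc. The "myapp" name and version are hypothetical.
package main

import (
	"strings"

	"github.com/spf13/cobra"
)

func main() {
	cobra.AddTemplateFunc("upper", strings.ToUpper)

	root := &cobra.Command{
		Use:     "myapp",
		Version: "1.2.3",
		Run:     func(cmd *cobra.Command, args []string) {},
	}
	// Replaces the default version template; "myapp --version" would then print
	// "MYAPP version 1.2.3".
	root.SetVersionTemplate(`{{upper .Name}} version {{.Version}}` + "\n")

	cobra.CheckErr(root.Execute())
}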
-func (c *Command) Find(args []string) (*Command, []string, error) { - var innerfind func(*Command, []string) (*Command, []string) - - innerfind = func(c *Command, innerArgs []string) (*Command, []string) { - argsWOflags := stripFlags(innerArgs, c) - if len(argsWOflags) == 0 { - return c, innerArgs - } - nextSubCmd := argsWOflags[0] - - cmd := c.findNext(nextSubCmd) - if cmd != nil { - return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd)) - } - return c, innerArgs - } - - commandFound, a := innerfind(c, args) - if commandFound.Args == nil { - return commandFound, a, legacyArgs(commandFound, stripFlags(a, commandFound)) - } - return commandFound, a, nil -} - -func (c *Command) findSuggestions(arg string) string { - if c.DisableSuggestions { - return "" - } - if c.SuggestionsMinimumDistance <= 0 { - c.SuggestionsMinimumDistance = 2 - } - suggestionsString := "" - if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 { - suggestionsString += "\n\nDid you mean this?\n" - for _, s := range suggestions { - suggestionsString += fmt.Sprintf("\t%v\n", s) - } - } - return suggestionsString -} - -func (c *Command) findNext(next string) *Command { - matches := make([]*Command, 0) - for _, cmd := range c.commands { - if cmd.Name() == next || cmd.HasAlias(next) { - cmd.commandCalledAs.name = next - return cmd - } - if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) { - matches = append(matches, cmd) - } - } - - if len(matches) == 1 { - return matches[0] - } - - return nil -} - -// Traverse the command tree to find the command, and parse args for -// each parent. -func (c *Command) Traverse(args []string) (*Command, []string, error) { - flags := []string{} - inFlag := false - - for i, arg := range args { - switch { - // A long flag with a space separated value - case strings.HasPrefix(arg, "--") && !strings.Contains(arg, "="): - // TODO: this isn't quite right, we should really check ahead for 'true' or 'false' - inFlag = !hasNoOptDefVal(arg[2:], c.Flags()) - flags = append(flags, arg) - continue - // A short flag with a space separated value - case strings.HasPrefix(arg, "-") && !strings.Contains(arg, "=") && len(arg) == 2 && !shortHasNoOptDefVal(arg[1:], c.Flags()): - inFlag = true - flags = append(flags, arg) - continue - // The value for a flag - case inFlag: - inFlag = false - flags = append(flags, arg) - continue - // A flag without a value, or with an `=` separated value - case isFlagArg(arg): - flags = append(flags, arg) - continue - } - - cmd := c.findNext(arg) - if cmd == nil { - return c, args, nil - } - - if err := c.ParseFlags(flags); err != nil { - return nil, args, err - } - return cmd.Traverse(args[i+1:]) - } - return c, args, nil -} - -// SuggestionsFor provides suggestions for the typedName. -func (c *Command) SuggestionsFor(typedName string) []string { - suggestions := []string{} - for _, cmd := range c.commands { - if cmd.IsAvailableCommand() { - levenshteinDistance := ld(typedName, cmd.Name(), true) - suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance - suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName)) - if suggestByLevenshtein || suggestByPrefix { - suggestions = append(suggestions, cmd.Name()) - } - for _, explicitSuggestion := range cmd.SuggestFor { - if strings.EqualFold(typedName, explicitSuggestion) { - suggestions = append(suggestions, cmd.Name()) - } - } - } - } - return suggestions -} - -// VisitParents visits all parents of the command and invokes fn on each parent. 
-func (c *Command) VisitParents(fn func(*Command)) { - if c.HasParent() { - fn(c.Parent()) - c.Parent().VisitParents(fn) - } -} - -// Root finds root command. -func (c *Command) Root() *Command { - if c.HasParent() { - return c.Parent().Root() - } - return c -} - -// ArgsLenAtDash will return the length of c.Flags().Args at the moment -// when a -- was found during args parsing. -func (c *Command) ArgsLenAtDash() int { - return c.Flags().ArgsLenAtDash() -} - -func (c *Command) execute(a []string) (err error) { - if c == nil { - return fmt.Errorf("Called Execute() on a nil Command") - } - - if len(c.Deprecated) > 0 { - c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated) - } - - // initialize help and version flag at the last point possible to allow for user - // overriding - c.InitDefaultHelpFlag() - c.InitDefaultVersionFlag() - - err = c.ParseFlags(a) - if err != nil { - return c.FlagErrorFunc()(c, err) - } - - // If help is called, regardless of other flags, return we want help. - // Also say we need help if the command isn't runnable. - helpVal, err := c.Flags().GetBool("help") - if err != nil { - // should be impossible to get here as we always declare a help - // flag in InitDefaultHelpFlag() - c.Println("\"help\" flag declared as non-bool. Please correct your code") - return err - } - - if helpVal { - return flag.ErrHelp - } - - // for back-compat, only add version flag behavior if version is defined - if c.Version != "" { - versionVal, err := c.Flags().GetBool("version") - if err != nil { - c.Println("\"version\" flag declared as non-bool. Please correct your code") - return err - } - if versionVal { - err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c) - if err != nil { - c.Println(err) - } - return err - } - } - - if !c.Runnable() { - return flag.ErrHelp - } - - c.preRun() - - argWoFlags := c.Flags().Args() - if c.DisableFlagParsing { - argWoFlags = a - } - - if err := c.ValidateArgs(argWoFlags); err != nil { - return err - } - - for p := c; p != nil; p = p.Parent() { - if p.PersistentPreRunE != nil { - if err := p.PersistentPreRunE(c, argWoFlags); err != nil { - return err - } - break - } else if p.PersistentPreRun != nil { - p.PersistentPreRun(c, argWoFlags) - break - } - } - if c.PreRunE != nil { - if err := c.PreRunE(c, argWoFlags); err != nil { - return err - } - } else if c.PreRun != nil { - c.PreRun(c, argWoFlags) - } - - if err := c.validateRequiredFlags(); err != nil { - return err - } - if err := c.validateFlagGroups(); err != nil { - return err - } - - if c.RunE != nil { - if err := c.RunE(c, argWoFlags); err != nil { - return err - } - } else { - c.Run(c, argWoFlags) - } - if c.PostRunE != nil { - if err := c.PostRunE(c, argWoFlags); err != nil { - return err - } - } else if c.PostRun != nil { - c.PostRun(c, argWoFlags) - } - for p := c; p != nil; p = p.Parent() { - if p.PersistentPostRunE != nil { - if err := p.PersistentPostRunE(c, argWoFlags); err != nil { - return err - } - break - } else if p.PersistentPostRun != nil { - p.PersistentPostRun(c, argWoFlags) - break - } - } - - return nil -} - -func (c *Command) preRun() { - for _, x := range initializers { - x() - } -} - -// ExecuteContext is the same as Execute(), but sets the ctx on the command. -// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs -// functions. 
-func (c *Command) ExecuteContext(ctx context.Context) error { - c.ctx = ctx - return c.Execute() -} - -// Execute uses the args (os.Args[1:] by default) -// and run through the command tree finding appropriate matches -// for commands and then corresponding flags. -func (c *Command) Execute() error { - _, err := c.ExecuteC() - return err -} - -// ExecuteContextC is the same as ExecuteC(), but sets the ctx on the command. -// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs -// functions. -func (c *Command) ExecuteContextC(ctx context.Context) (*Command, error) { - c.ctx = ctx - return c.ExecuteC() -} - -// ExecuteC executes the command. -func (c *Command) ExecuteC() (cmd *Command, err error) { - if c.ctx == nil { - c.ctx = context.Background() - } - - // Regardless of what command execute is called on, run on Root only - if c.HasParent() { - return c.Root().ExecuteC() - } - - // windows hook - if preExecHookFn != nil { - preExecHookFn(c) - } - - // initialize help at the last point to allow for user overriding - c.InitDefaultHelpCmd() - // initialize completion at the last point to allow for user overriding - c.initDefaultCompletionCmd() - - args := c.args - - // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155 - if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" { - args = os.Args[1:] - } - - // initialize the hidden command to be used for shell completion - c.initCompleteCmd(args) - - var flags []string - if c.TraverseChildren { - cmd, flags, err = c.Traverse(args) - } else { - cmd, flags, err = c.Find(args) - } - if err != nil { - // If found parse to a subcommand and then failed, talk about the subcommand - if cmd != nil { - c = cmd - } - if !c.SilenceErrors { - c.PrintErrln("Error:", err.Error()) - c.PrintErrf("Run '%v --help' for usage.\n", c.CommandPath()) - } - return c, err - } - - cmd.commandCalledAs.called = true - if cmd.commandCalledAs.name == "" { - cmd.commandCalledAs.name = cmd.Name() - } - - // We have to pass global context to children command - // if context is present on the parent command. - if cmd.ctx == nil { - cmd.ctx = c.ctx - } - - err = cmd.execute(flags) - if err != nil { - // Always show help if requested, even if SilenceErrors is in - // effect - if errors.Is(err, flag.ErrHelp) { - cmd.HelpFunc()(cmd, args) - return cmd, nil - } - - // If root command has SilenceErrors flagged, - // all subcommands should respect it - if !cmd.SilenceErrors && !c.SilenceErrors { - c.PrintErrln("Error:", err.Error()) - } - - // If root command has SilenceUsage flagged, - // all subcommands should respect it - if !cmd.SilenceUsage && !c.SilenceUsage { - c.Println(cmd.UsageString()) - } - } - return cmd, err -} - -func (c *Command) ValidateArgs(args []string) error { - if c.Args == nil { - return ArbitraryArgs(c, args) - } - return c.Args(c, args) -} - -func (c *Command) validateRequiredFlags() error { - if c.DisableFlagParsing { - return nil - } - - flags := c.Flags() - missingFlagNames := []string{} - flags.VisitAll(func(pflag *flag.Flag) { - requiredAnnotation, found := pflag.Annotations[BashCompOneRequiredFlag] - if !found { - return - } - if (requiredAnnotation[0] == "true") && !pflag.Changed { - missingFlagNames = append(missingFlagNames, pflag.Name) - } - }) - - if len(missingFlagNames) > 0 { - return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missingFlagNames, `", "`)) - } - return nil -} - -// InitDefaultHelpFlag adds default help flag to c. 
-// It is called automatically by executing the c or by calling help and usage. -// If c already has help flag, it will do nothing. -func (c *Command) InitDefaultHelpFlag() { - c.mergePersistentFlags() - if c.Flags().Lookup("help") == nil { - usage := "help for " - if c.Name() == "" { - usage += "this command" - } else { - usage += c.Name() - } - c.Flags().BoolP("help", "h", false, usage) - } -} - -// InitDefaultVersionFlag adds default version flag to c. -// It is called automatically by executing the c. -// If c already has a version flag, it will do nothing. -// If c.Version is empty, it will do nothing. -func (c *Command) InitDefaultVersionFlag() { - if c.Version == "" { - return - } - - c.mergePersistentFlags() - if c.Flags().Lookup("version") == nil { - usage := "version for " - if c.Name() == "" { - usage += "this command" - } else { - usage += c.Name() - } - if c.Flags().ShorthandLookup("v") == nil { - c.Flags().BoolP("version", "v", false, usage) - } else { - c.Flags().Bool("version", false, usage) - } - } -} - -// InitDefaultHelpCmd adds default help command to c. -// It is called automatically by executing the c or by calling help and usage. -// If c already has help command or c has no subcommands, it will do nothing. -func (c *Command) InitDefaultHelpCmd() { - if !c.HasSubCommands() { - return - } - - if c.helpCommand == nil { - c.helpCommand = &Command{ - Use: "help [command]", - Short: "Help about any command", - Long: `Help provides help for any command in the application. -Simply type ` + c.Name() + ` help [path to command] for full details.`, - ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]string, ShellCompDirective) { - var completions []string - cmd, _, e := c.Root().Find(args) - if e != nil { - return nil, ShellCompDirectiveNoFileComp - } - if cmd == nil { - // Root help command. - cmd = c.Root() - } - for _, subCmd := range cmd.Commands() { - if subCmd.IsAvailableCommand() || subCmd == cmd.helpCommand { - if strings.HasPrefix(subCmd.Name(), toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) - } - } - } - return completions, ShellCompDirectiveNoFileComp - }, - Run: func(c *Command, args []string) { - cmd, _, e := c.Root().Find(args) - if cmd == nil || e != nil { - c.Printf("Unknown help topic %#q\n", args) - CheckErr(c.Root().Usage()) - } else { - cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown - CheckErr(cmd.Help()) - } - }, - } - } - c.RemoveCommand(c.helpCommand) - c.AddCommand(c.helpCommand) -} - -// ResetCommands delete parent, subcommand and help command from c. -func (c *Command) ResetCommands() { - c.parent = nil - c.commands = nil - c.helpCommand = nil - c.parentsPflags = nil -} - -// Sorts commands by their names. -type commandSorterByName []*Command - -func (c commandSorterByName) Len() int { return len(c) } -func (c commandSorterByName) Swap(i, j int) { c[i], c[j] = c[j], c[i] } -func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() } - -// Commands returns a sorted slice of child commands. -func (c *Command) Commands() []*Command { - // do not sort commands if it already sorted or sorting was disabled - if EnableCommandSorting && !c.commandsAreSorted { - sort.Sort(commandSorterByName(c.commands)) - c.commandsAreSorted = true - } - return c.commands -} - -// AddCommand adds one or more commands to this parent command. 
-func (c *Command) AddCommand(cmds ...*Command) { - for i, x := range cmds { - if cmds[i] == c { - panic("Command can't be a child of itself") - } - cmds[i].parent = c - // update max lengths - usageLen := len(x.Use) - if usageLen > c.commandsMaxUseLen { - c.commandsMaxUseLen = usageLen - } - commandPathLen := len(x.CommandPath()) - if commandPathLen > c.commandsMaxCommandPathLen { - c.commandsMaxCommandPathLen = commandPathLen - } - nameLen := len(x.Name()) - if nameLen > c.commandsMaxNameLen { - c.commandsMaxNameLen = nameLen - } - // If global normalization function exists, update all children - if c.globNormFunc != nil { - x.SetGlobalNormalizationFunc(c.globNormFunc) - } - c.commands = append(c.commands, x) - c.commandsAreSorted = false - } -} - -// RemoveCommand removes one or more commands from a parent command. -func (c *Command) RemoveCommand(cmds ...*Command) { - commands := []*Command{} -main: - for _, command := range c.commands { - for _, cmd := range cmds { - if command == cmd { - command.parent = nil - continue main - } - } - commands = append(commands, command) - } - c.commands = commands - // recompute all lengths - c.commandsMaxUseLen = 0 - c.commandsMaxCommandPathLen = 0 - c.commandsMaxNameLen = 0 - for _, command := range c.commands { - usageLen := len(command.Use) - if usageLen > c.commandsMaxUseLen { - c.commandsMaxUseLen = usageLen - } - commandPathLen := len(command.CommandPath()) - if commandPathLen > c.commandsMaxCommandPathLen { - c.commandsMaxCommandPathLen = commandPathLen - } - nameLen := len(command.Name()) - if nameLen > c.commandsMaxNameLen { - c.commandsMaxNameLen = nameLen - } - } -} - -// Print is a convenience method to Print to the defined output, fallback to Stderr if not set. -func (c *Command) Print(i ...interface{}) { - fmt.Fprint(c.OutOrStderr(), i...) -} - -// Println is a convenience method to Println to the defined output, fallback to Stderr if not set. -func (c *Command) Println(i ...interface{}) { - c.Print(fmt.Sprintln(i...)) -} - -// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set. -func (c *Command) Printf(format string, i ...interface{}) { - c.Print(fmt.Sprintf(format, i...)) -} - -// PrintErr is a convenience method to Print to the defined Err output, fallback to Stderr if not set. -func (c *Command) PrintErr(i ...interface{}) { - fmt.Fprint(c.ErrOrStderr(), i...) -} - -// PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set. -func (c *Command) PrintErrln(i ...interface{}) { - c.PrintErr(fmt.Sprintln(i...)) -} - -// PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set. -func (c *Command) PrintErrf(format string, i ...interface{}) { - c.PrintErr(fmt.Sprintf(format, i...)) -} - -// CommandPath returns the full path to this command. -func (c *Command) CommandPath() string { - if c.HasParent() { - return c.Parent().CommandPath() + " " + c.Name() - } - return c.Name() -} - -// UseLine puts out the full usage for a given command (including parents). -func (c *Command) UseLine() string { - var useline string - if c.HasParent() { - useline = c.parent.CommandPath() + " " + c.Use - } else { - useline = c.Use - } - if c.DisableFlagsInUseLine { - return useline - } - if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") { - useline += " [flags]" - } - return useline -} - -// DebugFlags used to determine which flags have been assigned to which commands -// and which persist. 
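// Illustrative sketch (editor-added, not part of this patch or of the vendored file):
// building a small command tree with AddCommand and inspecting CommandPath/UseLine
// as documented above. The command names are hypothetical.
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "myapp"}
	list := &cobra.Command{
		Use:   "list",
		Short: "List things",
		Run:   func(cmd *cobra.Command, args []string) {},
	}
	root.AddCommand(list)

	fmt.Println(list.CommandPath()) // "myapp list"
	fmt.Println(list.UseLine())     // "myapp list"; " [flags]" is appended once visible flags exist
}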
-func (c *Command) DebugFlags() { - c.Println("DebugFlags called on", c.Name()) - var debugflags func(*Command) - - debugflags = func(x *Command) { - if x.HasFlags() || x.HasPersistentFlags() { - c.Println(x.Name()) - } - if x.HasFlags() { - x.flags.VisitAll(func(f *flag.Flag) { - if x.HasPersistentFlags() && x.persistentFlag(f.Name) != nil { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]") - } else { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") - } - }) - } - if x.HasPersistentFlags() { - x.pflags.VisitAll(func(f *flag.Flag) { - if x.HasFlags() { - if x.flags.Lookup(f.Name) == nil { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") - } - } else { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") - } - }) - } - c.Println(x.flagErrorBuf) - if x.HasSubCommands() { - for _, y := range x.commands { - debugflags(y) - } - } - } - - debugflags(c) -} - -// Name returns the command's name: the first word in the use line. -func (c *Command) Name() string { - name := c.Use - i := strings.Index(name, " ") - if i >= 0 { - name = name[:i] - } - return name -} - -// HasAlias determines if a given string is an alias of the command. -func (c *Command) HasAlias(s string) bool { - for _, a := range c.Aliases { - if a == s { - return true - } - } - return false -} - -// CalledAs returns the command name or alias that was used to invoke -// this command or an empty string if the command has not been called. -func (c *Command) CalledAs() string { - if c.commandCalledAs.called { - return c.commandCalledAs.name - } - return "" -} - -// hasNameOrAliasPrefix returns true if the Name or any of aliases start -// with prefix -func (c *Command) hasNameOrAliasPrefix(prefix string) bool { - if strings.HasPrefix(c.Name(), prefix) { - c.commandCalledAs.name = c.Name() - return true - } - for _, alias := range c.Aliases { - if strings.HasPrefix(alias, prefix) { - c.commandCalledAs.name = alias - return true - } - } - return false -} - -// NameAndAliases returns a list of the command name and all aliases -func (c *Command) NameAndAliases() string { - return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ") -} - -// HasExample determines if the command has example. -func (c *Command) HasExample() bool { - return len(c.Example) > 0 -} - -// Runnable determines if the command is itself runnable. -func (c *Command) Runnable() bool { - return c.Run != nil || c.RunE != nil -} - -// HasSubCommands determines if the command has children commands. -func (c *Command) HasSubCommands() bool { - return len(c.commands) > 0 -} - -// IsAvailableCommand determines if a command is available as a non-help command -// (this includes all non deprecated/hidden commands). -func (c *Command) IsAvailableCommand() bool { - if len(c.Deprecated) != 0 || c.Hidden { - return false - } - - if c.HasParent() && c.Parent().helpCommand == c { - return false - } - - if c.Runnable() || c.HasAvailableSubCommands() { - return true - } - - return false -} - -// IsAdditionalHelpTopicCommand determines if a command is an additional -// help topic command; additional help topic command is determined by the -// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that -// are runnable/hidden/deprecated. -// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924. 
-func (c *Command) IsAdditionalHelpTopicCommand() bool { - // if a command is runnable, deprecated, or hidden it is not a 'help' command - if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden { - return false - } - - // if any non-help sub commands are found, the command is not a 'help' command - for _, sub := range c.commands { - if !sub.IsAdditionalHelpTopicCommand() { - return false - } - } - - // the command either has no sub commands, or no non-help sub commands - return true -} - -// HasHelpSubCommands determines if a command has any available 'help' sub commands -// that need to be shown in the usage/help default template under 'additional help -// topics'. -func (c *Command) HasHelpSubCommands() bool { - // return true on the first found available 'help' sub command - for _, sub := range c.commands { - if sub.IsAdditionalHelpTopicCommand() { - return true - } - } - - // the command either has no sub commands, or no available 'help' sub commands - return false -} - -// HasAvailableSubCommands determines if a command has available sub commands that -// need to be shown in the usage/help default template under 'available commands'. -func (c *Command) HasAvailableSubCommands() bool { - // return true on the first found available (non deprecated/help/hidden) - // sub command - for _, sub := range c.commands { - if sub.IsAvailableCommand() { - return true - } - } - - // the command either has no sub commands, or no available (non deprecated/help/hidden) - // sub commands - return false -} - -// HasParent determines if the command is a child command. -func (c *Command) HasParent() bool { - return c.parent != nil -} - -// GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist. -func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName { - return c.globNormFunc -} - -// Flags returns the complete FlagSet that applies -// to this command (local and persistent declared here and by all parents). -func (c *Command) Flags() *flag.FlagSet { - if c.flags == nil { - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.flags.SetOutput(c.flagErrorBuf) - } - - return c.flags -} - -// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands. -func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { - persistentFlags := c.PersistentFlags() - - out := flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.LocalFlags().VisitAll(func(f *flag.Flag) { - if persistentFlags.Lookup(f.Name) == nil { - out.AddFlag(f) - } - }) - return out -} - -// LocalFlags returns the local FlagSet specifically set in the current command. -func (c *Command) LocalFlags() *flag.FlagSet { - c.mergePersistentFlags() - - if c.lflags == nil { - c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.lflags.SetOutput(c.flagErrorBuf) - } - c.lflags.SortFlags = c.Flags().SortFlags - if c.globNormFunc != nil { - c.lflags.SetNormalizeFunc(c.globNormFunc) - } - - addToLocal := func(f *flag.Flag) { - if c.lflags.Lookup(f.Name) == nil && c.parentsPflags.Lookup(f.Name) == nil { - c.lflags.AddFlag(f) - } - } - c.Flags().VisitAll(addToLocal) - c.PersistentFlags().VisitAll(addToLocal) - return c.lflags -} - -// InheritedFlags returns all flags which were inherited from parent commands. 
-func (c *Command) InheritedFlags() *flag.FlagSet { - c.mergePersistentFlags() - - if c.iflags == nil { - c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.iflags.SetOutput(c.flagErrorBuf) - } - - local := c.LocalFlags() - if c.globNormFunc != nil { - c.iflags.SetNormalizeFunc(c.globNormFunc) - } - - c.parentsPflags.VisitAll(func(f *flag.Flag) { - if c.iflags.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil { - c.iflags.AddFlag(f) - } - }) - return c.iflags -} - -// NonInheritedFlags returns all flags which were not inherited from parent commands. -func (c *Command) NonInheritedFlags() *flag.FlagSet { - return c.LocalFlags() -} - -// PersistentFlags returns the persistent FlagSet specifically set in the current command. -func (c *Command) PersistentFlags() *flag.FlagSet { - if c.pflags == nil { - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.pflags.SetOutput(c.flagErrorBuf) - } - return c.pflags -} - -// ResetFlags deletes all flags from command. -func (c *Command) ResetFlags() { - c.flagErrorBuf = new(bytes.Buffer) - c.flagErrorBuf.Reset() - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.flags.SetOutput(c.flagErrorBuf) - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.pflags.SetOutput(c.flagErrorBuf) - - c.lflags = nil - c.iflags = nil - c.parentsPflags = nil -} - -// HasFlags checks if the command contains any flags (local plus persistent from the entire structure). -func (c *Command) HasFlags() bool { - return c.Flags().HasFlags() -} - -// HasPersistentFlags checks if the command contains persistent flags. -func (c *Command) HasPersistentFlags() bool { - return c.PersistentFlags().HasFlags() -} - -// HasLocalFlags checks if the command has flags specifically declared locally. -func (c *Command) HasLocalFlags() bool { - return c.LocalFlags().HasFlags() -} - -// HasInheritedFlags checks if the command has flags inherited from its parent command. -func (c *Command) HasInheritedFlags() bool { - return c.InheritedFlags().HasFlags() -} - -// HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire -// structure) which are not hidden or deprecated. -func (c *Command) HasAvailableFlags() bool { - return c.Flags().HasAvailableFlags() -} - -// HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated. -func (c *Command) HasAvailablePersistentFlags() bool { - return c.PersistentFlags().HasAvailableFlags() -} - -// HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden -// or deprecated. -func (c *Command) HasAvailableLocalFlags() bool { - return c.LocalFlags().HasAvailableFlags() -} - -// HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are -// not hidden or deprecated. -func (c *Command) HasAvailableInheritedFlags() bool { - return c.InheritedFlags().HasAvailableFlags() -} - -// Flag climbs up the command tree looking for matching flag. -func (c *Command) Flag(name string) (flag *flag.Flag) { - flag = c.Flags().Lookup(name) - - if flag == nil { - flag = c.persistentFlag(name) - } - - return -} - -// Recursively find matching persistent flag. 
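// Illustrative sketch (editor-added, not part of this patch or of the vendored file):
// the PersistentFlags/Flags distinction documented above. The flag names and
// commands are hypothetical.
package main

import (
	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "myapp"}
	get := &cobra.Command{
		Use: "get",
		Run: func(cmd *cobra.Command, args []string) {
			verbose, _ := cmd.Flags().GetBool("verbose") // inherited persistent flag
			output, _ := cmd.Flags().GetString("output") // local flag
			cmd.Println(verbose, output)
		},
	}
	root.AddCommand(get)

	// Persistent flags are inherited by child commands; local flags are not.
	root.PersistentFlags().Bool("verbose", false, "enable verbose output")
	get.Flags().StringP("output", "o", "table", "output format")

	cobra.CheckErr(root.Execute())
}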
-func (c *Command) persistentFlag(name string) (flag *flag.Flag) { - if c.HasPersistentFlags() { - flag = c.PersistentFlags().Lookup(name) - } - - if flag == nil { - c.updateParentsPflags() - flag = c.parentsPflags.Lookup(name) - } - return -} - -// ParseFlags parses persistent flag tree and local flags. -func (c *Command) ParseFlags(args []string) error { - if c.DisableFlagParsing { - return nil - } - - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - beforeErrorBufLen := c.flagErrorBuf.Len() - c.mergePersistentFlags() - - // do it here after merging all flags and just before parse - c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist) - - err := c.Flags().Parse(args) - // Print warnings if they occurred (e.g. deprecated flag messages). - if c.flagErrorBuf.Len()-beforeErrorBufLen > 0 && err == nil { - c.Print(c.flagErrorBuf.String()) - } - - return err -} - -// Parent returns a commands parent command. -func (c *Command) Parent() *Command { - return c.parent -} - -// mergePersistentFlags merges c.PersistentFlags() to c.Flags() -// and adds missing persistent flags of all parents. -func (c *Command) mergePersistentFlags() { - c.updateParentsPflags() - c.Flags().AddFlagSet(c.PersistentFlags()) - c.Flags().AddFlagSet(c.parentsPflags) -} - -// updateParentsPflags updates c.parentsPflags by adding -// new persistent flags of all parents. -// If c.parentsPflags == nil, it makes new. -func (c *Command) updateParentsPflags() { - if c.parentsPflags == nil { - c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.parentsPflags.SetOutput(c.flagErrorBuf) - c.parentsPflags.SortFlags = false - } - - if c.globNormFunc != nil { - c.parentsPflags.SetNormalizeFunc(c.globNormFunc) - } - - c.Root().PersistentFlags().AddFlagSet(flag.CommandLine) - - c.VisitParents(func(parent *Command) { - c.parentsPflags.AddFlagSet(parent.PersistentFlags()) - }) -} diff --git a/src/vendor/github.com/spf13/cobra/command_notwin.go b/src/vendor/github.com/spf13/cobra/command_notwin.go deleted file mode 100644 index bb5dad90b..000000000 --- a/src/vendor/github.com/spf13/cobra/command_notwin.go +++ /dev/null @@ -1,6 +0,0 @@ -//go:build !windows -// +build !windows - -package cobra - -var preExecHookFn func(*Command) diff --git a/src/vendor/github.com/spf13/cobra/command_win.go b/src/vendor/github.com/spf13/cobra/command_win.go deleted file mode 100644 index a84f5a82a..000000000 --- a/src/vendor/github.com/spf13/cobra/command_win.go +++ /dev/null @@ -1,27 +0,0 @@ -//go:build windows -// +build windows - -package cobra - -import ( - "fmt" - "os" - "time" - - "github.com/inconshreveable/mousetrap" -) - -var preExecHookFn = preExecHook - -func preExecHook(c *Command) { - if MousetrapHelpText != "" && mousetrap.StartedByExplorer() { - c.Print(MousetrapHelpText) - if MousetrapDisplayDuration > 0 { - time.Sleep(MousetrapDisplayDuration) - } else { - c.Println("Press return to continue...") - fmt.Scanln() - } - os.Exit(1) - } -} diff --git a/src/vendor/github.com/spf13/cobra/completions.go b/src/vendor/github.com/spf13/cobra/completions.go deleted file mode 100644 index 2c2483998..000000000 --- a/src/vendor/github.com/spf13/cobra/completions.go +++ /dev/null @@ -1,832 +0,0 @@ -package cobra - -import ( - "fmt" - "os" - "strings" - "sync" - - "github.com/spf13/pflag" -) - -const ( - // ShellCompRequestCmd is the name of the hidden command that is used to request - // completion results from the program. It is used by the shell completion scripts. 
- ShellCompRequestCmd = "__complete" - // ShellCompNoDescRequestCmd is the name of the hidden command that is used to request - // completion results without their description. It is used by the shell completion scripts. - ShellCompNoDescRequestCmd = "__completeNoDesc" -) - -// Global map of flag completion functions. Make sure to use flagCompletionMutex before you try to read and write from it. -var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){} - -// lock for reading and writing from flagCompletionFunctions -var flagCompletionMutex = &sync.RWMutex{} - -// ShellCompDirective is a bit map representing the different behaviors the shell -// can be instructed to have once completions have been provided. -type ShellCompDirective int - -type flagCompError struct { - subCommand string - flagName string -} - -func (e *flagCompError) Error() string { - return "Subcommand '" + e.subCommand + "' does not support flag '" + e.flagName + "'" -} - -const ( - // ShellCompDirectiveError indicates an error occurred and completions should be ignored. - ShellCompDirectiveError ShellCompDirective = 1 << iota - - // ShellCompDirectiveNoSpace indicates that the shell should not add a space - // after the completion even if there is a single completion provided. - ShellCompDirectiveNoSpace - - // ShellCompDirectiveNoFileComp indicates that the shell should not provide - // file completion even when no completion is provided. - ShellCompDirectiveNoFileComp - - // ShellCompDirectiveFilterFileExt indicates that the provided completions - // should be used as file extension filters. - // For flags, using Command.MarkFlagFilename() and Command.MarkPersistentFlagFilename() - // is a shortcut to using this directive explicitly. The BashCompFilenameExt - // annotation can also be used to obtain the same behavior for flags. - ShellCompDirectiveFilterFileExt - - // ShellCompDirectiveFilterDirs indicates that only directory names should - // be provided in file completion. To request directory names within another - // directory, the returned completions should specify the directory within - // which to search. The BashCompSubdirsInDir annotation can be used to - // obtain the same behavior but only for flags. - ShellCompDirectiveFilterDirs - - // =========================================================================== - - // All directives using iota should be above this one. - // For internal use. - shellCompDirectiveMaxValue - - // ShellCompDirectiveDefault indicates to let the shell perform its default - // behavior after completions have been provided. - // This one must be last to avoid messing up the iota count. 
- ShellCompDirectiveDefault ShellCompDirective = 0 -) - -const ( - // Constants for the completion command - compCmdName = "completion" - compCmdNoDescFlagName = "no-descriptions" - compCmdNoDescFlagDesc = "disable completion descriptions" - compCmdNoDescFlagDefault = false -) - -// CompletionOptions are the options to control shell completion -type CompletionOptions struct { - // DisableDefaultCmd prevents Cobra from creating a default 'completion' command - DisableDefaultCmd bool - // DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag - // for shells that support completion descriptions - DisableNoDescFlag bool - // DisableDescriptions turns off all completion descriptions for shells - // that support them - DisableDescriptions bool - // HiddenDefaultCmd makes the default 'completion' command hidden - HiddenDefaultCmd bool -} - -// NoFileCompletions can be used to disable file completion for commands that should -// not trigger file completions. -func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { - return nil, ShellCompDirectiveNoFileComp -} - -// FixedCompletions can be used to create a completion function which always -// returns the same results. -func FixedCompletions(choices []string, directive ShellCompDirective) func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { - return func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { - return choices, directive - } -} - -// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag. -func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error { - flag := c.Flag(flagName) - if flag == nil { - return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName) - } - flagCompletionMutex.Lock() - defer flagCompletionMutex.Unlock() - - if _, exists := flagCompletionFunctions[flag]; exists { - return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName) - } - flagCompletionFunctions[flag] = f - return nil -} - -// Returns a string listing the different directive enabled in the specified parameter -func (d ShellCompDirective) string() string { - var directives []string - if d&ShellCompDirectiveError != 0 { - directives = append(directives, "ShellCompDirectiveError") - } - if d&ShellCompDirectiveNoSpace != 0 { - directives = append(directives, "ShellCompDirectiveNoSpace") - } - if d&ShellCompDirectiveNoFileComp != 0 { - directives = append(directives, "ShellCompDirectiveNoFileComp") - } - if d&ShellCompDirectiveFilterFileExt != 0 { - directives = append(directives, "ShellCompDirectiveFilterFileExt") - } - if d&ShellCompDirectiveFilterDirs != 0 { - directives = append(directives, "ShellCompDirectiveFilterDirs") - } - if len(directives) == 0 { - directives = append(directives, "ShellCompDirectiveDefault") - } - - if d >= shellCompDirectiveMaxValue { - return fmt.Sprintf("ERROR: unexpected ShellCompDirective value: %d", d) - } - return strings.Join(directives, ", ") -} - -// Adds a special hidden command that can be used to request custom completions. 
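// Illustrative sketch (editor-added, not part of this patch or of the vendored file):
// registering a dynamic completion function for a flag with RegisterFlagCompletionFunc
// and the ShellCompDirective values defined above. The flag and choices are hypothetical.
package main

import (
	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{
		Use: "myapp",
		Run: func(cmd *cobra.Command, args []string) {},
	}
	root.Flags().String("format", "json", "output format")

	// Suggest the valid formats and tell the shell not to fall back to file completion.
	cobra.CheckErr(root.RegisterFlagCompletionFunc("format",
		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return []string{"json", "yaml", "table"}, cobra.ShellCompDirectiveNoFileComp
		}))

	cobra.CheckErr(root.Execute())
}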
-func (c *Command) initCompleteCmd(args []string) { - completeCmd := &Command{ - Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd), - Aliases: []string{ShellCompNoDescRequestCmd}, - DisableFlagsInUseLine: true, - Hidden: true, - DisableFlagParsing: true, - Args: MinimumNArgs(1), - Short: "Request shell completion choices for the specified command-line", - Long: fmt.Sprintf("%[2]s is a special command that is used by the shell completion logic\n%[1]s", - "to request completion choices for the specified command-line.", ShellCompRequestCmd), - Run: func(cmd *Command, args []string) { - finalCmd, completions, directive, err := cmd.getCompletions(args) - if err != nil { - CompErrorln(err.Error()) - // Keep going for multiple reasons: - // 1- There could be some valid completions even though there was an error - // 2- Even without completions, we need to print the directive - } - - noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd) - for _, comp := range completions { - if GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable { - // Remove all activeHelp entries in this case - if strings.HasPrefix(comp, activeHelpMarker) { - continue - } - } - if noDescriptions { - // Remove any description that may be included following a tab character. - comp = strings.Split(comp, "\t")[0] - } - - // Make sure we only write the first line to the output. - // This is needed if a description contains a linebreak. - // Otherwise the shell scripts will interpret the other lines as new flags - // and could therefore provide a wrong completion. - comp = strings.Split(comp, "\n")[0] - - // Finally trim the completion. This is especially important to get rid - // of a trailing tab when there are no description following it. - // For example, a sub-command without a description should not be completed - // with a tab at the end (or else zsh will show a -- following it - // although there is no description). - comp = strings.TrimSpace(comp) - - // Print each possible completion to stdout for the completion script to consume. - fmt.Fprintln(finalCmd.OutOrStdout(), comp) - } - - // As the last printout, print the completion directive for the completion script to parse. - // The directive integer must be that last character following a single colon (:). - // The completion script expects : - fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive) - - // Print some helpful info to stderr for the user to understand. - // Output from stderr must be ignored by the completion script. - fmt.Fprintf(finalCmd.ErrOrStderr(), "Completion ended with directive: %s\n", directive.string()) - }, - } - c.AddCommand(completeCmd) - subCmd, _, err := c.Find(args) - if err != nil || subCmd.Name() != ShellCompRequestCmd { - // Only create this special command if it is actually being called. - // This reduces possible side-effects of creating such a command; - // for example, having this command would cause problems to a - // cobra program that only consists of the root command, since this - // command would cause the root command to suddenly have a subcommand. 
- c.RemoveCommand(completeCmd) - } -} - -func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) { - // The last argument, which is not completely typed by the user, - // should not be part of the list of arguments - toComplete := args[len(args)-1] - trimmedArgs := args[:len(args)-1] - - var finalCmd *Command - var finalArgs []string - var err error - // Find the real command for which completion must be performed - // check if we need to traverse here to parse local flags on parent commands - if c.Root().TraverseChildren { - finalCmd, finalArgs, err = c.Root().Traverse(trimmedArgs) - } else { - // For Root commands that don't specify any value for their Args fields, when we call - // Find(), if those Root commands don't have any sub-commands, they will accept arguments. - // However, because we have added the __complete sub-command in the current code path, the - // call to Find() -> legacyArgs() will return an error if there are any arguments. - // To avoid this, we first remove the __complete command to get back to having no sub-commands. - rootCmd := c.Root() - if len(rootCmd.Commands()) == 1 { - rootCmd.RemoveCommand(c) - } - - finalCmd, finalArgs, err = rootCmd.Find(trimmedArgs) - } - if err != nil { - // Unable to find the real command. E.g., someInvalidCmd - return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs) - } - finalCmd.ctx = c.ctx - - // Check if we are doing flag value completion before parsing the flags. - // This is important because if we are completing a flag value, we need to also - // remove the flag name argument from the list of finalArgs or else the parsing - // could fail due to an invalid value (incomplete) for the flag. - flag, finalArgs, toComplete, flagErr := checkIfFlagCompletion(finalCmd, finalArgs, toComplete) - - // Check if interspersed is false or -- was set on a previous arg. - // This works by counting the arguments. Normally -- is not counted as arg but - // if -- was already set or interspersed is false and there is already one arg then - // the extra added -- is counted as arg. - flagCompletion := true - _ = finalCmd.ParseFlags(append(finalArgs, "--")) - newArgCount := finalCmd.Flags().NArg() - - // Parse the flags early so we can check if required flags are set - if err = finalCmd.ParseFlags(finalArgs); err != nil { - return finalCmd, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error()) - } - - realArgCount := finalCmd.Flags().NArg() - if newArgCount > realArgCount { - // don't do flag completion (see above) - flagCompletion = false - } - // Error while attempting to parse flags - if flagErr != nil { - // If error type is flagCompError and we don't want flagCompletion we should ignore the error - if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) { - return finalCmd, []string{}, ShellCompDirectiveDefault, flagErr - } - } - - // We only remove the flags from the arguments if DisableFlagParsing is not set. - // This is important for commands which have requested to do their own flag completion. 
- if !finalCmd.DisableFlagParsing { - finalArgs = finalCmd.Flags().Args() - } - - if flag != nil && flagCompletion { - // Check if we are completing a flag value subject to annotations - if validExts, present := flag.Annotations[BashCompFilenameExt]; present { - if len(validExts) != 0 { - // File completion filtered by extensions - return finalCmd, validExts, ShellCompDirectiveFilterFileExt, nil - } - - // The annotation requests simple file completion. There is no reason to do - // that since it is the default behavior anyway. Let's ignore this annotation - // in case the program also registered a completion function for this flag. - // Even though it is a mistake on the program's side, let's be nice when we can. - } - - if subDir, present := flag.Annotations[BashCompSubdirsInDir]; present { - if len(subDir) == 1 { - // Directory completion from within a directory - return finalCmd, subDir, ShellCompDirectiveFilterDirs, nil - } - // Directory completion - return finalCmd, []string{}, ShellCompDirectiveFilterDirs, nil - } - } - - var completions []string - var directive ShellCompDirective - - // Enforce flag groups before doing flag completions - finalCmd.enforceFlagGroupsForCompletion() - - // Note that we want to perform flagname completion even if finalCmd.DisableFlagParsing==true; - // doing this allows for completion of persistent flag names even for commands that disable flag parsing. - // - // When doing completion of a flag name, as soon as an argument starts with - // a '-' we know it is a flag. We cannot use isFlagArg() here as it requires - // the flag name to be complete - if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") && flagCompletion { - // First check for required flags - completions = completeRequireFlags(finalCmd, toComplete) - - // If we have not found any required flags, only then can we show regular flags - if len(completions) == 0 { - doCompleteFlags := func(flag *pflag.Flag) { - if !flag.Changed || - strings.Contains(flag.Value.Type(), "Slice") || - strings.Contains(flag.Value.Type(), "Array") { - // If the flag is not already present, or if it can be specified multiple times (Array or Slice) - // we suggest it as a completion - completions = append(completions, getFlagNameCompletions(flag, toComplete)...) - } - } - - // We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands - // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and - // non-inherited flags. - finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { - doCompleteFlags(flag) - }) - finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - doCompleteFlags(flag) - }) - } - - directive = ShellCompDirectiveNoFileComp - if len(completions) == 1 && strings.HasSuffix(completions[0], "=") { - // If there is a single completion, the shell usually adds a space - // after the completion. We don't want that if the flag ends with an = - directive = ShellCompDirectiveNoSpace - } - - if !finalCmd.DisableFlagParsing { - // If DisableFlagParsing==false, we have completed the flags as known by Cobra; - // we can return what we found. - // If DisableFlagParsing==true, Cobra may not be aware of all flags, so we - // let the logic continue to see if ValidArgsFunction needs to be called. 
- return finalCmd, completions, directive, nil - } - } else { - directive = ShellCompDirectiveDefault - if flag == nil { - foundLocalNonPersistentFlag := false - // If TraverseChildren is true on the root command we don't check for - // local flags because we can use a local flag on a parent command - if !finalCmd.Root().TraverseChildren { - // Check if there are any local, non-persistent flags on the command-line - localNonPersistentFlags := finalCmd.LocalNonPersistentFlags() - finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed { - foundLocalNonPersistentFlag = true - } - }) - } - - // Complete subcommand names, including the help command - if len(finalArgs) == 0 && !foundLocalNonPersistentFlag { - // We only complete sub-commands if: - // - there are no arguments on the command-line and - // - there are no local, non-persistent flags on the command-line or TraverseChildren is true - for _, subCmd := range finalCmd.Commands() { - if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand { - if strings.HasPrefix(subCmd.Name(), toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) - } - directive = ShellCompDirectiveNoFileComp - } - } - } - - // Complete required flags even without the '-' prefix - completions = append(completions, completeRequireFlags(finalCmd, toComplete)...) - - // Always complete ValidArgs, even if we are completing a subcommand name. - // This is for commands that have both subcommands and ValidArgs. - if len(finalCmd.ValidArgs) > 0 { - if len(finalArgs) == 0 { - // ValidArgs are only for the first argument - for _, validArg := range finalCmd.ValidArgs { - if strings.HasPrefix(validArg, toComplete) { - completions = append(completions, validArg) - } - } - directive = ShellCompDirectiveNoFileComp - - // If no completions were found within commands or ValidArgs, - // see if there are any ArgAliases that should be completed. - if len(completions) == 0 { - for _, argAlias := range finalCmd.ArgAliases { - if strings.HasPrefix(argAlias, toComplete) { - completions = append(completions, argAlias) - } - } - } - } - - // If there are ValidArgs specified (even if they don't match), we stop completion. - // Only one of ValidArgs or ValidArgsFunction can be used for a single command. - return finalCmd, completions, directive, nil - } - - // Let the logic continue so as to add any ValidArgsFunction completions, - // even if we already found sub-commands. - // This is for commands that have subcommands but also specify a ValidArgsFunction. - } - } - - // Find the completion function for the flag or command - var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) - if flag != nil && flagCompletion { - flagCompletionMutex.RLock() - completionFn = flagCompletionFunctions[flag] - flagCompletionMutex.RUnlock() - } else { - completionFn = finalCmd.ValidArgsFunction - } - if completionFn != nil { - // Go custom completion defined for this flag or command. - // Call the registered completion function to get the completions. - var comps []string - comps, directive = completionFn(finalCmd, finalArgs, toComplete) - completions = append(completions, comps...) 
- } - - return finalCmd, completions, directive, nil -} - -func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string { - if nonCompletableFlag(flag) { - return []string{} - } - - var completions []string - flagName := "--" + flag.Name - if strings.HasPrefix(flagName, toComplete) { - // Flag without the = - completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) - - // Why suggest both long forms: --flag and --flag= ? - // This forces the user to *always* have to type either an = or a space after the flag name. - // Let's be nice and avoid making users have to do that. - // Since boolean flags and shortname flags don't show the = form, let's go that route and never show it. - // The = form will still work, we just won't suggest it. - // This also makes the list of suggested flags shorter as we avoid all the = forms. - // - // if len(flag.NoOptDefVal) == 0 { - // // Flag requires a value, so it can be suffixed with = - // flagName += "=" - // completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) - // } - } - - flagName = "-" + flag.Shorthand - if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) - } - - return completions -} - -func completeRequireFlags(finalCmd *Command, toComplete string) []string { - var completions []string - - doCompleteRequiredFlags := func(flag *pflag.Flag) { - if _, present := flag.Annotations[BashCompOneRequiredFlag]; present { - if !flag.Changed { - // If the flag is not already present, we suggest it as a completion - completions = append(completions, getFlagNameCompletions(flag, toComplete)...) - } - } - } - - // We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands - // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and - // non-inherited flags. - finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { - doCompleteRequiredFlags(flag) - }) - finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - doCompleteRequiredFlags(flag) - }) - - return completions -} - -func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*pflag.Flag, []string, string, error) { - if finalCmd.DisableFlagParsing { - // We only do flag completion if we are allowed to parse flags - // This is important for commands which have requested to do their own flag completion. - return nil, args, lastArg, nil - } - - var flagName string - trimmedArgs := args - flagWithEqual := false - orgLastArg := lastArg - - // When doing completion of a flag name, as soon as an argument starts with - // a '-' we know it is a flag. We cannot use isFlagArg() here as that function - // requires the flag name to be complete - if len(lastArg) > 0 && lastArg[0] == '-' { - if index := strings.Index(lastArg, "="); index >= 0 { - // Flag with an = - if strings.HasPrefix(lastArg[:index], "--") { - // Flag has full name - flagName = lastArg[2:index] - } else { - // Flag is shorthand - // We have to get the last shorthand flag name - // e.g. 
`-asd` => d to provide the correct completion - // https://github.com/spf13/cobra/issues/1257 - flagName = lastArg[index-1 : index] - } - lastArg = lastArg[index+1:] - flagWithEqual = true - } else { - // Normal flag completion - return nil, args, lastArg, nil - } - } - - if len(flagName) == 0 { - if len(args) > 0 { - prevArg := args[len(args)-1] - if isFlagArg(prevArg) { - // Only consider the case where the flag does not contain an =. - // If the flag contains an = it means it has already been fully processed, - // so we don't need to deal with it here. - if index := strings.Index(prevArg, "="); index < 0 { - if strings.HasPrefix(prevArg, "--") { - // Flag has full name - flagName = prevArg[2:] - } else { - // Flag is shorthand - // We have to get the last shorthand flag name - // e.g. `-asd` => d to provide the correct completion - // https://github.com/spf13/cobra/issues/1257 - flagName = prevArg[len(prevArg)-1:] - } - // Remove the uncompleted flag or else there could be an error created - // for an invalid value for that flag - trimmedArgs = args[:len(args)-1] - } - } - } - } - - if len(flagName) == 0 { - // Not doing flag completion - return nil, trimmedArgs, lastArg, nil - } - - flag := findFlag(finalCmd, flagName) - if flag == nil { - // Flag not supported by this command, the interspersed option might be set so return the original args - return nil, args, orgLastArg, &flagCompError{subCommand: finalCmd.Name(), flagName: flagName} - } - - if !flagWithEqual { - if len(flag.NoOptDefVal) != 0 { - // We had assumed dealing with a two-word flag but the flag is a boolean flag. - // In that case, there is no value following it, so we are not really doing flag completion. - // Reset everything to do noun completion. - trimmedArgs = args - flag = nil - } - } - - return flag, trimmedArgs, lastArg, nil -} - -// initDefaultCompletionCmd adds a default 'completion' command to c. -// This function will do nothing if any of the following is true: -// 1- the feature has been explicitly disabled by the program, -// 2- c has no subcommands (to avoid creating one), -// 3- c already has a 'completion' command provided by the program. -func (c *Command) initDefaultCompletionCmd() { - if c.CompletionOptions.DisableDefaultCmd || !c.HasSubCommands() { - return - } - - for _, cmd := range c.commands { - if cmd.Name() == compCmdName || cmd.HasAlias(compCmdName) { - // A completion command is already available - return - } - } - - haveNoDescFlag := !c.CompletionOptions.DisableNoDescFlag && !c.CompletionOptions.DisableDescriptions - - completionCmd := &Command{ - Use: compCmdName, - Short: "Generate the autocompletion script for the specified shell", - Long: fmt.Sprintf(`Generate the autocompletion script for %[1]s for the specified shell. -See each sub-command's help for details on how to use the generated script. -`, c.Root().Name()), - Args: NoArgs, - ValidArgsFunction: NoFileCompletions, - Hidden: c.CompletionOptions.HiddenDefaultCmd, - } - c.AddCommand(completionCmd) - - out := c.OutOrStdout() - noDesc := c.CompletionOptions.DisableDescriptions - shortDesc := "Generate the autocompletion script for %s" - bash := &Command{ - Use: "bash", - Short: fmt.Sprintf(shortDesc, "bash"), - Long: fmt.Sprintf(`Generate the autocompletion script for the bash shell. - -This script depends on the 'bash-completion' package. -If it is not installed already, you can install it via your OS's package manager. 
- -To load completions in your current shell session: - - source <(%[1]s completion bash) - -To load completions for every new session, execute once: - -#### Linux: - - %[1]s completion bash > /etc/bash_completion.d/%[1]s - -#### macOS: - - %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s - -You will need to start a new shell for this setup to take effect. -`, c.Root().Name()), - Args: NoArgs, - DisableFlagsInUseLine: true, - ValidArgsFunction: NoFileCompletions, - RunE: func(cmd *Command, args []string) error { - return cmd.Root().GenBashCompletionV2(out, !noDesc) - }, - } - if haveNoDescFlag { - bash.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) - } - - zsh := &Command{ - Use: "zsh", - Short: fmt.Sprintf(shortDesc, "zsh"), - Long: fmt.Sprintf(`Generate the autocompletion script for the zsh shell. - -If shell completion is not already enabled in your environment you will need -to enable it. You can execute the following once: - - echo "autoload -U compinit; compinit" >> ~/.zshrc - -To load completions in your current shell session: - - source <(%[1]s completion zsh); compdef _%[1]s %[1]s - -To load completions for every new session, execute once: - -#### Linux: - - %[1]s completion zsh > "${fpath[1]}/_%[1]s" - -#### macOS: - - %[1]s completion zsh > $(brew --prefix)/share/zsh/site-functions/_%[1]s - -You will need to start a new shell for this setup to take effect. -`, c.Root().Name()), - Args: NoArgs, - ValidArgsFunction: NoFileCompletions, - RunE: func(cmd *Command, args []string) error { - if noDesc { - return cmd.Root().GenZshCompletionNoDesc(out) - } - return cmd.Root().GenZshCompletion(out) - }, - } - if haveNoDescFlag { - zsh.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) - } - - fish := &Command{ - Use: "fish", - Short: fmt.Sprintf(shortDesc, "fish"), - Long: fmt.Sprintf(`Generate the autocompletion script for the fish shell. - -To load completions in your current shell session: - - %[1]s completion fish | source - -To load completions for every new session, execute once: - - %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish - -You will need to start a new shell for this setup to take effect. -`, c.Root().Name()), - Args: NoArgs, - ValidArgsFunction: NoFileCompletions, - RunE: func(cmd *Command, args []string) error { - return cmd.Root().GenFishCompletion(out, !noDesc) - }, - } - if haveNoDescFlag { - fish.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) - } - - powershell := &Command{ - Use: "powershell", - Short: fmt.Sprintf(shortDesc, "powershell"), - Long: fmt.Sprintf(`Generate the autocompletion script for powershell. - -To load completions in your current shell session: - - %[1]s completion powershell | Out-String | Invoke-Expression - -To load completions for every new session, add the output of the above command -to your powershell profile. 
-`, c.Root().Name()), - Args: NoArgs, - ValidArgsFunction: NoFileCompletions, - RunE: func(cmd *Command, args []string) error { - if noDesc { - return cmd.Root().GenPowerShellCompletion(out) - } - return cmd.Root().GenPowerShellCompletionWithDesc(out) - - }, - } - if haveNoDescFlag { - powershell.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) - } - - completionCmd.AddCommand(bash, zsh, fish, powershell) -} - -func findFlag(cmd *Command, name string) *pflag.Flag { - flagSet := cmd.Flags() - if len(name) == 1 { - // First convert the short flag into a long flag - // as the cmd.Flag() search only accepts long flags - if short := flagSet.ShorthandLookup(name); short != nil { - name = short.Name - } else { - set := cmd.InheritedFlags() - if short = set.ShorthandLookup(name); short != nil { - name = short.Name - } else { - return nil - } - } - } - return cmd.Flag(name) -} - -// CompDebug prints the specified string to the same file as where the -// completion script prints its logs. -// Note that completion printouts should never be on stdout as they would -// be wrongly interpreted as actual completion choices by the completion script. -func CompDebug(msg string, printToStdErr bool) { - msg = fmt.Sprintf("[Debug] %s", msg) - - // Such logs are only printed when the user has set the environment - // variable BASH_COMP_DEBUG_FILE to the path of some file to be used. - if path := os.Getenv("BASH_COMP_DEBUG_FILE"); path != "" { - f, err := os.OpenFile(path, - os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err == nil { - defer f.Close() - WriteStringAndCheck(f, msg) - } - } - - if printToStdErr { - // Must print to stderr for this not to be read by the completion script. - fmt.Fprint(os.Stderr, msg) - } -} - -// CompDebugln prints the specified string with a newline at the end -// to the same file as where the completion script prints its logs. -// Such logs are only printed when the user has set the environment -// variable BASH_COMP_DEBUG_FILE to the path of some file to be used. -func CompDebugln(msg string, printToStdErr bool) { - CompDebug(fmt.Sprintf("%s\n", msg), printToStdErr) -} - -// CompError prints the specified completion message to stderr. -func CompError(msg string) { - msg = fmt.Sprintf("[Error] %s", msg) - CompDebug(msg, true) -} - -// CompErrorln prints the specified completion message to stderr with a newline at the end. 
-func CompErrorln(msg string) { - CompError(fmt.Sprintf("%s\n", msg)) -} diff --git a/src/vendor/github.com/spf13/cobra/fish_completions.go b/src/vendor/github.com/spf13/cobra/fish_completions.go deleted file mode 100644 index 005ee6be7..000000000 --- a/src/vendor/github.com/spf13/cobra/fish_completions.go +++ /dev/null @@ -1,220 +0,0 @@ -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" - "strings" -) - -func genFishComp(buf io.StringWriter, name string, includeDesc bool) { - // Variables should not contain a '-' or ':' character - nameForVar := name - nameForVar = strings.ReplaceAll(nameForVar, "-", "_") - nameForVar = strings.ReplaceAll(nameForVar, ":", "_") - - compCmd := ShellCompRequestCmd - if !includeDesc { - compCmd = ShellCompNoDescRequestCmd - } - WriteStringAndCheck(buf, fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name)) - WriteStringAndCheck(buf, fmt.Sprintf(` -function __%[1]s_debug - set -l file "$BASH_COMP_DEBUG_FILE" - if test -n "$file" - echo "$argv" >> $file - end -end - -function __%[1]s_perform_completion - __%[1]s_debug "Starting __%[1]s_perform_completion" - - # Extract all args except the last one - set -l args (commandline -opc) - # Extract the last arg and escape it in case it is a space - set -l lastArg (string escape -- (commandline -ct)) - - __%[1]s_debug "args: $args" - __%[1]s_debug "last arg: $lastArg" - - # Disable ActiveHelp which is not supported for fish shell - set -l requestComp "%[9]s=0 $args[1] %[3]s $args[2..-1] $lastArg" - - __%[1]s_debug "Calling $requestComp" - set -l results (eval $requestComp 2> /dev/null) - - # Some programs may output extra empty lines after the directive. - # Let's ignore them or else it will break completion. - # Ref: https://github.com/spf13/cobra/issues/1279 - for line in $results[-1..1] - if test (string trim -- $line) = "" - # Found an empty line, remove it - set results $results[1..-2] - else - # Found non-empty line, we have our proper output - break - end - end - - set -l comps $results[1..-2] - set -l directiveLine $results[-1] - - # For Fish, when completing a flag with an = (e.g., -n=) - # completions must be prefixed with the flag - set -l flagPrefix (string match -r -- '-.*=' "$lastArg") - - __%[1]s_debug "Comps: $comps" - __%[1]s_debug "DirectiveLine: $directiveLine" - __%[1]s_debug "flagPrefix: $flagPrefix" - - for comp in $comps - printf "%%s%%s\n" "$flagPrefix" "$comp" - end - - printf "%%s\n" "$directiveLine" -end - -# This function does two things: -# - Obtain the completions and store them in the global __%[1]s_comp_results -# - Return false if file completion should be performed -function __%[1]s_prepare_completions - __%[1]s_debug "" - __%[1]s_debug "========= starting completion logic ==========" - - # Start fresh - set --erase __%[1]s_comp_results - - set -l results (__%[1]s_perform_completion) - __%[1]s_debug "Completion results: $results" - - if test -z "$results" - __%[1]s_debug "No completion, probably due to a failure" - # Might as well do file completion, in case it helps - return 1 - end - - set -l directive (string sub --start 2 $results[-1]) - set --global __%[1]s_comp_results $results[1..-2] - - __%[1]s_debug "Completions are: $__%[1]s_comp_results" - __%[1]s_debug "Directive is: $directive" - - set -l shellCompDirectiveError %[4]d - set -l shellCompDirectiveNoSpace %[5]d - set -l shellCompDirectiveNoFileComp %[6]d - set -l shellCompDirectiveFilterFileExt %[7]d - set -l shellCompDirectiveFilterDirs %[8]d - - if test -z "$directive" - set directive 0 - end - - 
set -l compErr (math (math --scale 0 $directive / $shellCompDirectiveError) %% 2) - if test $compErr -eq 1 - __%[1]s_debug "Received error directive: aborting." - # Might as well do file completion, in case it helps - return 1 - end - - set -l filefilter (math (math --scale 0 $directive / $shellCompDirectiveFilterFileExt) %% 2) - set -l dirfilter (math (math --scale 0 $directive / $shellCompDirectiveFilterDirs) %% 2) - if test $filefilter -eq 1; or test $dirfilter -eq 1 - __%[1]s_debug "File extension filtering or directory filtering not supported" - # Do full file completion instead - return 1 - end - - set -l nospace (math (math --scale 0 $directive / $shellCompDirectiveNoSpace) %% 2) - set -l nofiles (math (math --scale 0 $directive / $shellCompDirectiveNoFileComp) %% 2) - - __%[1]s_debug "nospace: $nospace, nofiles: $nofiles" - - # If we want to prevent a space, or if file completion is NOT disabled, - # we need to count the number of valid completions. - # To do so, we will filter on prefix as the completions we have received - # may not already be filtered so as to allow fish to match on different - # criteria than the prefix. - if test $nospace -ne 0; or test $nofiles -eq 0 - set -l prefix (commandline -t | string escape --style=regex) - __%[1]s_debug "prefix: $prefix" - - set -l completions (string match -r -- "^$prefix.*" $__%[1]s_comp_results) - set --global __%[1]s_comp_results $completions - __%[1]s_debug "Filtered completions are: $__%[1]s_comp_results" - - # Important not to quote the variable for count to work - set -l numComps (count $__%[1]s_comp_results) - __%[1]s_debug "numComps: $numComps" - - if test $numComps -eq 1; and test $nospace -ne 0 - # We must first split on \t to get rid of the descriptions to be - # able to check what the actual completion will be. - # We don't need descriptions anyway since there is only a single - # real completion which the shell will expand immediately. - set -l split (string split --max 1 \t $__%[1]s_comp_results[1]) - - # Fish won't add a space if the completion ends with any - # of the following characters: @=/:., - set -l lastChar (string sub -s -1 -- $split) - if not string match -r -q "[@=/:.,]" -- "$lastChar" - # In other cases, to support the "nospace" directive we trick the shell - # by outputting an extra, longer completion. - __%[1]s_debug "Adding second completion to perform nospace directive" - set --global __%[1]s_comp_results $split[1] $split[1]. - __%[1]s_debug "Completions are now: $__%[1]s_comp_results" - end - end - - if test $numComps -eq 0; and test $nofiles -eq 0 - # To be consistent with bash and zsh, we only trigger file - # completion when there are no other completions - __%[1]s_debug "Requesting file completion" - return 1 - end - end - - return 0 -end - -# Since Fish completions are only loaded once the user triggers them, we trigger them ourselves -# so we can properly delete any completions provided by another script. -# Only do this if the program can be found, or else fish may print some errors; besides, -# the existing completions will only be loaded if the program can be found. -if type -q "%[2]s" - # The space after the program name is essential to trigger completion for the program - # and not completion of the program name itself. - # Also, we use '> /dev/null 2>&1' since '&>' is not supported in older versions of fish. - complete --do-complete "%[2]s " > /dev/null 2>&1 -end - -# Remove any pre-existing completions for the program since we will be handling all of them. 
-complete -c %[2]s -e - -# The call to __%[1]s_prepare_completions will setup __%[1]s_comp_results -# which provides the program's completion choices. -complete -c %[2]s -n '__%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' - -`, nameForVar, name, compCmd, - ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name))) -} - -// GenFishCompletion generates fish completion file and writes to the passed writer. -func (c *Command) GenFishCompletion(w io.Writer, includeDesc bool) error { - buf := new(bytes.Buffer) - genFishComp(buf, c.Name(), includeDesc) - _, err := buf.WriteTo(w) - return err -} - -// GenFishCompletionFile generates fish completion file. -func (c *Command) GenFishCompletionFile(filename string, includeDesc bool) error { - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return c.GenFishCompletion(outFile, includeDesc) -} diff --git a/src/vendor/github.com/spf13/cobra/fish_completions.md b/src/vendor/github.com/spf13/cobra/fish_completions.md deleted file mode 100644 index 19b2ed129..000000000 --- a/src/vendor/github.com/spf13/cobra/fish_completions.md +++ /dev/null @@ -1,4 +0,0 @@ -## Generating Fish Completions For Your cobra.Command - -Please refer to [Shell Completions](shell_completions.md) for details. - diff --git a/src/vendor/github.com/spf13/cobra/flag_groups.go b/src/vendor/github.com/spf13/cobra/flag_groups.go deleted file mode 100644 index dc7843119..000000000 --- a/src/vendor/github.com/spf13/cobra/flag_groups.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright © 2022 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cobra - -import ( - "fmt" - "sort" - "strings" - - flag "github.com/spf13/pflag" -) - -const ( - requiredAsGroup = "cobra_annotation_required_if_others_set" - mutuallyExclusive = "cobra_annotation_mutually_exclusive" -) - -// MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors -// if the command is invoked with a subset (but not all) of the given flags. -func (c *Command) MarkFlagsRequiredTogether(flagNames ...string) { - c.mergePersistentFlags() - for _, v := range flagNames { - f := c.Flags().Lookup(v) - if f == nil { - panic(fmt.Sprintf("Failed to find flag %q and mark it as being required in a flag group", v)) - } - if err := c.Flags().SetAnnotation(v, requiredAsGroup, append(f.Annotations[requiredAsGroup], strings.Join(flagNames, " "))); err != nil { - // Only errs if the flag isn't found. - panic(err) - } - } -} - -// MarkFlagsMutuallyExclusive marks the given flags with annotations so that Cobra errors -// if the command is invoked with more than one flag from the given set of flags. 
-func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) { - c.mergePersistentFlags() - for _, v := range flagNames { - f := c.Flags().Lookup(v) - if f == nil { - panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a mutually exclusive flag group", v)) - } - // Each time this is called is a single new entry; this allows it to be a member of multiple groups if needed. - if err := c.Flags().SetAnnotation(v, mutuallyExclusive, append(f.Annotations[mutuallyExclusive], strings.Join(flagNames, " "))); err != nil { - panic(err) - } - } -} - -// validateFlagGroups validates the mutuallyExclusive/requiredAsGroup logic and returns the -// first error encountered. -func (c *Command) validateFlagGroups() error { - if c.DisableFlagParsing { - return nil - } - - flags := c.Flags() - - // groupStatus format is the list of flags as a unique ID, - // then a map of each flag name and whether it is set or not. - groupStatus := map[string]map[string]bool{} - mutuallyExclusiveGroupStatus := map[string]map[string]bool{} - flags.VisitAll(func(pflag *flag.Flag) { - processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) - processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) - }) - - if err := validateRequiredFlagGroups(groupStatus); err != nil { - return err - } - if err := validateExclusiveFlagGroups(mutuallyExclusiveGroupStatus); err != nil { - return err - } - return nil -} - -func hasAllFlags(fs *flag.FlagSet, flagnames ...string) bool { - for _, fname := range flagnames { - f := fs.Lookup(fname) - if f == nil { - return false - } - } - return true -} - -func processFlagForGroupAnnotation(flags *flag.FlagSet, pflag *flag.Flag, annotation string, groupStatus map[string]map[string]bool) { - groupInfo, found := pflag.Annotations[annotation] - if found { - for _, group := range groupInfo { - if groupStatus[group] == nil { - flagnames := strings.Split(group, " ") - - // Only consider this flag group at all if all the flags are defined. - if !hasAllFlags(flags, flagnames...) { - continue - } - - groupStatus[group] = map[string]bool{} - for _, name := range flagnames { - groupStatus[group][name] = false - } - } - - groupStatus[group][pflag.Name] = pflag.Changed - } - } -} - -func validateRequiredFlagGroups(data map[string]map[string]bool) error { - keys := sortedKeys(data) - for _, flagList := range keys { - flagnameAndStatus := data[flagList] - - unset := []string{} - for flagname, isSet := range flagnameAndStatus { - if !isSet { - unset = append(unset, flagname) - } - } - if len(unset) == len(flagnameAndStatus) || len(unset) == 0 { - continue - } - - // Sort values, so they can be tested/scripted against consistently. - sort.Strings(unset) - return fmt.Errorf("if any flags in the group [%v] are set they must all be set; missing %v", flagList, unset) - } - - return nil -} - -func validateExclusiveFlagGroups(data map[string]map[string]bool) error { - keys := sortedKeys(data) - for _, flagList := range keys { - flagnameAndStatus := data[flagList] - var set []string - for flagname, isSet := range flagnameAndStatus { - if isSet { - set = append(set, flagname) - } - } - if len(set) == 0 || len(set) == 1 { - continue - } - - // Sort values, so they can be tested/scripted against consistently. 
- sort.Strings(set) - return fmt.Errorf("if any flags in the group [%v] are set none of the others can be; %v were all set", flagList, set) - } - return nil -} - -func sortedKeys(m map[string]map[string]bool) []string { - keys := make([]string, len(m)) - i := 0 - for k := range m { - keys[i] = k - i++ - } - sort.Strings(keys) - return keys -} - -// enforceFlagGroupsForCompletion will do the following: -// - when a flag in a group is present, other flags in the group will be marked required -// - when a flag in a mutually exclusive group is present, other flags in the group will be marked as hidden -// This allows the standard completion logic to behave appropriately for flag groups -func (c *Command) enforceFlagGroupsForCompletion() { - if c.DisableFlagParsing { - return - } - - flags := c.Flags() - groupStatus := map[string]map[string]bool{} - mutuallyExclusiveGroupStatus := map[string]map[string]bool{} - c.Flags().VisitAll(func(pflag *flag.Flag) { - processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) - processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) - }) - - // If a flag that is part of a group is present, we make all the other flags - // of that group required so that the shell completion suggests them automatically - for flagList, flagnameAndStatus := range groupStatus { - for _, isSet := range flagnameAndStatus { - if isSet { - // One of the flags of the group is set, mark the other ones as required - for _, fName := range strings.Split(flagList, " ") { - _ = c.MarkFlagRequired(fName) - } - } - } - } - - // If a flag that is mutually exclusive to others is present, we hide the other - // flags of that group so the shell completion does not suggest them - for flagList, flagnameAndStatus := range mutuallyExclusiveGroupStatus { - for flagName, isSet := range flagnameAndStatus { - if isSet { - // One of the flags of the mutually exclusive group is set, mark the other ones as hidden - // Don't mark the flag that is already set as hidden because it may be an - // array or slice flag and therefore must continue being suggested - for _, fName := range strings.Split(flagList, " ") { - if fName != flagName { - flag := c.Flags().Lookup(fName) - flag.Hidden = true - } - } - } - } - } -} diff --git a/src/vendor/github.com/spf13/cobra/powershell_completions.go b/src/vendor/github.com/spf13/cobra/powershell_completions.go deleted file mode 100644 index 379e7c088..000000000 --- a/src/vendor/github.com/spf13/cobra/powershell_completions.go +++ /dev/null @@ -1,288 +0,0 @@ -// The generated scripts require PowerShell v5.0+ (which comes Windows 10, but -// can be downloaded separately for windows 7 or 8.1). 
- -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" -) - -func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) { - compCmd := ShellCompRequestCmd - if !includeDesc { - compCmd = ShellCompNoDescRequestCmd - } - WriteStringAndCheck(buf, fmt.Sprintf(`# powershell completion for %-36[1]s -*- shell-script -*- - -function __%[1]s_debug { - if ($env:BASH_COMP_DEBUG_FILE) { - "$args" | Out-File -Append -FilePath "$env:BASH_COMP_DEBUG_FILE" - } -} - -filter __%[1]s_escapeStringWithSpecialChars { -`+" $_ -replace '\\s|#|@|\\$|;|,|''|\\{|\\}|\\(|\\)|\"|`|\\||<|>|&','`$&'"+` -} - -Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { - param( - $WordToComplete, - $CommandAst, - $CursorPosition - ) - - # Get the current command line and convert into a string - $Command = $CommandAst.CommandElements - $Command = "$Command" - - __%[1]s_debug "" - __%[1]s_debug "========= starting completion logic ==========" - __%[1]s_debug "WordToComplete: $WordToComplete Command: $Command CursorPosition: $CursorPosition" - - # The user could have moved the cursor backwards on the command-line. - # We need to trigger completion from the $CursorPosition location, so we need - # to truncate the command-line ($Command) up to the $CursorPosition location. - # Make sure the $Command is longer then the $CursorPosition before we truncate. - # This happens because the $Command does not include the last space. - if ($Command.Length -gt $CursorPosition) { - $Command=$Command.Substring(0,$CursorPosition) - } - __%[1]s_debug "Truncated command: $Command" - - $ShellCompDirectiveError=%[3]d - $ShellCompDirectiveNoSpace=%[4]d - $ShellCompDirectiveNoFileComp=%[5]d - $ShellCompDirectiveFilterFileExt=%[6]d - $ShellCompDirectiveFilterDirs=%[7]d - - # Prepare the command to request completions for the program. - # Split the command at the first space to separate the program and arguments. - $Program,$Arguments = $Command.Split(" ",2) - - $RequestComp="$Program %[2]s $Arguments" - __%[1]s_debug "RequestComp: $RequestComp" - - # we cannot use $WordToComplete because it - # has the wrong values if the cursor was moved - # so use the last argument - if ($WordToComplete -ne "" ) { - $WordToComplete = $Arguments.Split(" ")[-1] - } - __%[1]s_debug "New WordToComplete: $WordToComplete" - - - # Check for flag with equal sign - $IsEqualFlag = ($WordToComplete -Like "--*=*" ) - if ( $IsEqualFlag ) { - __%[1]s_debug "Completing equal sign flag" - # Remove the flag part - $Flag,$WordToComplete = $WordToComplete.Split("=",2) - } - - if ( $WordToComplete -eq "" -And ( -Not $IsEqualFlag )) { - # If the last parameter is complete (there is a space following it) - # We add an extra empty parameter so we can indicate this to the go method. 
- __%[1]s_debug "Adding extra empty parameter" -`+" # We need to use `\"`\" to pass an empty argument a \"\" or '' does not work!!!"+` -`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+` - } - - __%[1]s_debug "Calling $RequestComp" - # First disable ActiveHelp which is not supported for Powershell - $env:%[8]s=0 - - #call the command store the output in $out and redirect stderr and stdout to null - # $Out is an array contains each line per element - Invoke-Expression -OutVariable out "$RequestComp" 2>&1 | Out-Null - - # get directive from last line - [int]$Directive = $Out[-1].TrimStart(':') - if ($Directive -eq "") { - # There is no directive specified - $Directive = 0 - } - __%[1]s_debug "The completion directive is: $Directive" - - # remove directive (last element) from out - $Out = $Out | Where-Object { $_ -ne $Out[-1] } - __%[1]s_debug "The completions are: $Out" - - if (($Directive -band $ShellCompDirectiveError) -ne 0 ) { - # Error code. No completion. - __%[1]s_debug "Received error from custom completion go code" - return - } - - $Longest = 0 - $Values = $Out | ForEach-Object { - #Split the output in name and description -`+" $Name, $Description = $_.Split(\"`t\",2)"+` - __%[1]s_debug "Name: $Name Description: $Description" - - # Look for the longest completion so that we can format things nicely - if ($Longest -lt $Name.Length) { - $Longest = $Name.Length - } - - # Set the description to a one space string if there is none set. - # This is needed because the CompletionResult does not accept an empty string as argument - if (-Not $Description) { - $Description = " " - } - @{Name="$Name";Description="$Description"} - } - - - $Space = " " - if (($Directive -band $ShellCompDirectiveNoSpace) -ne 0 ) { - # remove the space here - __%[1]s_debug "ShellCompDirectiveNoSpace is called" - $Space = "" - } - - if ((($Directive -band $ShellCompDirectiveFilterFileExt) -ne 0 ) -or - (($Directive -band $ShellCompDirectiveFilterDirs) -ne 0 )) { - __%[1]s_debug "ShellCompDirectiveFilterFileExt ShellCompDirectiveFilterDirs are not supported" - - # return here to prevent the completion of the extensions - return - } - - $Values = $Values | Where-Object { - # filter the result - $_.Name -like "$WordToComplete*" - - # Join the flag back if we have an equal sign flag - if ( $IsEqualFlag ) { - __%[1]s_debug "Join the equal sign flag back to the completion value" - $_.Name = $Flag + "=" + $_.Name - } - } - - if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) { - __%[1]s_debug "ShellCompDirectiveNoFileComp is called" - - if ($Values.Length -eq 0) { - # Just print an empty string here so the - # shell does not start to complete paths. - # We cannot use CompletionResult here because - # it does not accept an empty string as argument. 
- "" - return - } - } - - # Get the current mode - $Mode = (Get-PSReadLineKeyHandler | Where-Object {$_.Key -eq "Tab" }).Function - __%[1]s_debug "Mode: $Mode" - - $Values | ForEach-Object { - - # store temporary because switch will overwrite $_ - $comp = $_ - - # PowerShell supports three different completion modes - # - TabCompleteNext (default windows style - on each key press the next option is displayed) - # - Complete (works like bash) - # - MenuComplete (works like zsh) - # You set the mode with Set-PSReadLineKeyHandler -Key Tab -Function - - # CompletionResult Arguments: - # 1) CompletionText text to be used as the auto completion result - # 2) ListItemText text to be displayed in the suggestion list - # 3) ResultType type of completion result - # 4) ToolTip text for the tooltip with details about the object - - switch ($Mode) { - - # bash like - "Complete" { - - if ($Values.Length -eq 1) { - __%[1]s_debug "Only one completion left" - - # insert space after value - [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") - - } else { - # Add the proper number of spaces to align the descriptions - while($comp.Name.Length -lt $Longest) { - $comp.Name = $comp.Name + " " - } - - # Check for empty description and only add parentheses if needed - if ($($comp.Description) -eq " " ) { - $Description = "" - } else { - $Description = " ($($comp.Description))" - } - - [System.Management.Automation.CompletionResult]::new("$($comp.Name)$Description", "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)") - } - } - - # zsh like - "MenuComplete" { - # insert space after value - # MenuComplete will automatically show the ToolTip of - # the highlighted value at the bottom of the suggestions. - [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") - } - - # TabCompleteNext and in case we get something unknown - Default { - # Like MenuComplete but we don't want to add a space here because - # the user need to press space anyway to get the completion. - # Description will not be shown because that's not possible with TabCompleteNext - [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)") - } - } - - } -} -`, name, compCmd, - ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name))) -} - -func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error { - buf := new(bytes.Buffer) - genPowerShellComp(buf, c.Name(), includeDesc) - _, err := buf.WriteTo(w) - return err -} - -func (c *Command) genPowerShellCompletionFile(filename string, includeDesc bool) error { - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return c.genPowerShellCompletion(outFile, includeDesc) -} - -// GenPowerShellCompletionFile generates powershell completion file without descriptions. -func (c *Command) GenPowerShellCompletionFile(filename string) error { - return c.genPowerShellCompletionFile(filename, false) -} - -// GenPowerShellCompletion generates powershell completion file without descriptions -// and writes it to the passed writer. 
-func (c *Command) GenPowerShellCompletion(w io.Writer) error { - return c.genPowerShellCompletion(w, false) -} - -// GenPowerShellCompletionFileWithDesc generates powershell completion file with descriptions. -func (c *Command) GenPowerShellCompletionFileWithDesc(filename string) error { - return c.genPowerShellCompletionFile(filename, true) -} - -// GenPowerShellCompletionWithDesc generates powershell completion file with descriptions -// and writes it to the passed writer. -func (c *Command) GenPowerShellCompletionWithDesc(w io.Writer) error { - return c.genPowerShellCompletion(w, true) -} diff --git a/src/vendor/github.com/spf13/cobra/powershell_completions.md b/src/vendor/github.com/spf13/cobra/powershell_completions.md deleted file mode 100644 index c449f1e5c..000000000 --- a/src/vendor/github.com/spf13/cobra/powershell_completions.md +++ /dev/null @@ -1,3 +0,0 @@ -# Generating PowerShell Completions For Your Own cobra.Command - -Please refer to [Shell Completions](shell_completions.md#powershell-completions) for details. diff --git a/src/vendor/github.com/spf13/cobra/projects_using_cobra.md b/src/vendor/github.com/spf13/cobra/projects_using_cobra.md deleted file mode 100644 index ac680118e..000000000 --- a/src/vendor/github.com/spf13/cobra/projects_using_cobra.md +++ /dev/null @@ -1,54 +0,0 @@ -## Projects using Cobra - -- [Arduino CLI](https://github.com/arduino/arduino-cli) -- [Bleve](https://blevesearch.com/) -- [CockroachDB](https://www.cockroachlabs.com/) -- [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) -- [Datree](https://github.com/datreeio/datree) -- [Delve](https://github.com/derekparker/delve) -- [Docker (distribution)](https://github.com/docker/distribution) -- [Etcd](https://etcd.io/) -- [Gardener](https://github.com/gardener/gardenctl) -- [Giant Swarm's gsctl](https://github.com/giantswarm/gsctl) -- [Git Bump](https://github.com/erdaltsksn/git-bump) -- [Github CLI](https://github.com/cli/cli) -- [GitHub Labeler](https://github.com/erdaltsksn/gh-label) -- [Golangci-lint](https://golangci-lint.run) -- [GopherJS](https://github.com/gopherjs/gopherjs) -- [GoReleaser](https://goreleaser.com) -- [Helm](https://helm.sh) -- [Hugo](https://gohugo.io) -- [Infracost](https://github.com/infracost/infracost) -- [Istio](https://istio.io) -- [Kool](https://github.com/kool-dev/kool) -- [Kubernetes](https://kubernetes.io/) -- [Kubescape](https://github.com/armosec/kubescape) -- [Linkerd](https://linkerd.io/) -- [Mattermost-server](https://github.com/mattermost/mattermost-server) -- [Mercure](https://mercure.rocks/) -- [Meroxa CLI](https://github.com/meroxa/cli) -- [Metal Stack CLI](https://github.com/metal-stack/metalctl) -- [Moby (former Docker)](https://github.com/moby/moby) -- [Moldy](https://github.com/Moldy-Community/moldy) -- [Multi-gitter](https://github.com/lindell/multi-gitter) -- [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) -- [nFPM](https://nfpm.goreleaser.com) -- [OpenShift](https://www.openshift.com/) -- [Ory Hydra](https://github.com/ory/hydra) -- [Ory Kratos](https://github.com/ory/kratos) -- [Pixie](https://github.com/pixie-io/pixie) -- [Polygon Edge](https://github.com/0xPolygon/polygon-edge) -- [Pouch](https://github.com/alibaba/pouch) -- [ProjectAtomic (enterprise)](https://www.projectatomic.io/) -- [Prototool](https://github.com/uber/prototool) -- [Pulumi](https://www.pulumi.com) -- [QRcp](https://github.com/claudiodangelis/qrcp) -- [Random](https://github.com/erdaltsksn/random) -- [Rclone](https://rclone.org/) -- 
[Scaleway CLI](https://github.com/scaleway/scaleway-cli) -- [Skaffold](https://skaffold.dev/) -- [Tendermint](https://github.com/tendermint/tendermint) -- [Twitch CLI](https://github.com/twitchdev/twitch-cli) -- [UpCloud CLI (`upctl`)](https://github.com/UpCloudLtd/upcloud-cli) -- VMware's [Tanzu Community Edition](https://github.com/vmware-tanzu/community-edition) & [Tanzu Framework](https://github.com/vmware-tanzu/tanzu-framework) -- [Werf](https://werf.io/) diff --git a/src/vendor/github.com/spf13/cobra/shell_completions.go b/src/vendor/github.com/spf13/cobra/shell_completions.go deleted file mode 100644 index d99bf91e5..000000000 --- a/src/vendor/github.com/spf13/cobra/shell_completions.go +++ /dev/null @@ -1,84 +0,0 @@ -package cobra - -import ( - "github.com/spf13/pflag" -) - -// MarkFlagRequired instructs the various shell completion implementations to -// prioritize the named flag when performing completion, -// and causes your command to report an error if invoked without the flag. -func (c *Command) MarkFlagRequired(name string) error { - return MarkFlagRequired(c.Flags(), name) -} - -// MarkPersistentFlagRequired instructs the various shell completion implementations to -// prioritize the named persistent flag when performing completion, -// and causes your command to report an error if invoked without the flag. -func (c *Command) MarkPersistentFlagRequired(name string) error { - return MarkFlagRequired(c.PersistentFlags(), name) -} - -// MarkFlagRequired instructs the various shell completion implementations to -// prioritize the named flag when performing completion, -// and causes your command to report an error if invoked without the flag. -func MarkFlagRequired(flags *pflag.FlagSet, name string) error { - return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"}) -} - -// MarkFlagFilename instructs the various shell completion implementations to -// limit completions for the named flag to the specified file extensions. -func (c *Command) MarkFlagFilename(name string, extensions ...string) error { - return MarkFlagFilename(c.Flags(), name, extensions...) -} - -// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. -// The bash completion script will call the bash function f for the flag. -// -// This will only work for bash completion. -// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows -// to register a Go function which will work across all shells. -func (c *Command) MarkFlagCustom(name string, f string) error { - return MarkFlagCustom(c.Flags(), name, f) -} - -// MarkPersistentFlagFilename instructs the various shell completion -// implementations to limit completions for the named persistent flag to the -// specified file extensions. -func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error { - return MarkFlagFilename(c.PersistentFlags(), name, extensions...) -} - -// MarkFlagFilename instructs the various shell completion implementations to -// limit completions for the named flag to the specified file extensions. -func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error { - return flags.SetAnnotation(name, BashCompFilenameExt, extensions) -} - -// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. -// The bash completion script will call the bash function f for the flag. -// -// This will only work for bash completion. -// It is recommended to instead use c.RegisterFlagCompletionFunc(...) 
which allows -// to register a Go function which will work across all shells. -func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error { - return flags.SetAnnotation(name, BashCompCustom, []string{f}) -} - -// MarkFlagDirname instructs the various shell completion implementations to -// limit completions for the named flag to directory names. -func (c *Command) MarkFlagDirname(name string) error { - return MarkFlagDirname(c.Flags(), name) -} - -// MarkPersistentFlagDirname instructs the various shell completion -// implementations to limit completions for the named persistent flag to -// directory names. -func (c *Command) MarkPersistentFlagDirname(name string) error { - return MarkFlagDirname(c.PersistentFlags(), name) -} - -// MarkFlagDirname instructs the various shell completion implementations to -// limit completions for the named flag to directory names. -func MarkFlagDirname(flags *pflag.FlagSet, name string) error { - return flags.SetAnnotation(name, BashCompSubdirsInDir, []string{}) -} diff --git a/src/vendor/github.com/spf13/cobra/shell_completions.md b/src/vendor/github.com/spf13/cobra/shell_completions.md deleted file mode 100644 index 1e2058ed6..000000000 --- a/src/vendor/github.com/spf13/cobra/shell_completions.md +++ /dev/null @@ -1,548 +0,0 @@ -# Generating shell completions - -Cobra can generate shell completions for multiple shells. -The currently supported shells are: -- Bash -- Zsh -- fish -- PowerShell - -Cobra will automatically provide your program with a fully functional `completion` command, -similarly to how it provides the `help` command. - -## Creating your own completion command - -If you do not wish to use the default `completion` command, you can choose to -provide your own, which will take precedence over the default one. (This also provides -backwards-compatibility with programs that already have their own `completion` command.) - -If you are using the `cobra-cli` generator, -which can be found at [spf13/cobra-cli](https://github.com/spf13/cobra-cli), -you can create a completion command by running - -```bash -cobra-cli add completion -``` -and then modifying the generated `cmd/completion.go` file to look something like this -(writing the shell script to stdout allows the most flexible use): - -```go -var completionCmd = &cobra.Command{ - Use: "completion [bash|zsh|fish|powershell]", - Short: "Generate completion script", - Long: fmt.Sprintf(`To load completions: - -Bash: - - $ source <(%[1]s completion bash) - - # To load completions for each session, execute once: - # Linux: - $ %[1]s completion bash > /etc/bash_completion.d/%[1]s - # macOS: - $ %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s - -Zsh: - - # If shell completion is not already enabled in your environment, - # you will need to enable it. You can execute the following once: - - $ echo "autoload -U compinit; compinit" >> ~/.zshrc - - # To load completions for each session, execute once: - $ %[1]s completion zsh > "${fpath[1]}/_%[1]s" - - # You will need to start a new shell for this setup to take effect. - -fish: - - $ %[1]s completion fish | source - - # To load completions for each session, execute once: - $ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish - -PowerShell: - - PS> %[1]s completion powershell | Out-String | Invoke-Expression - - # To load completions for every new session, run: - PS> %[1]s completion powershell > %[1]s.ps1 - # and source this file from your PowerShell profile. 
-`,cmd.Root().Name()), - DisableFlagsInUseLine: true, - ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, - Args: cobra.ExactValidArgs(1), - Run: func(cmd *cobra.Command, args []string) { - switch args[0] { - case "bash": - cmd.Root().GenBashCompletion(os.Stdout) - case "zsh": - cmd.Root().GenZshCompletion(os.Stdout) - case "fish": - cmd.Root().GenFishCompletion(os.Stdout, true) - case "powershell": - cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout) - } - }, -} -``` - -**Note:** The cobra generator may include messages printed to stdout, for example, if the config file is loaded; this will break the auto-completion script so must be removed. - -## Adapting the default completion command - -Cobra provides a few options for the default `completion` command. To configure such options you must set -the `CompletionOptions` field on the *root* command. - -To tell Cobra *not* to provide the default `completion` command: -``` -rootCmd.CompletionOptions.DisableDefaultCmd = true -``` - -To tell Cobra *not* to provide the user with the `--no-descriptions` flag to the completion sub-commands: -``` -rootCmd.CompletionOptions.DisableNoDescFlag = true -``` - -To tell Cobra to completely disable descriptions for completions: -``` -rootCmd.CompletionOptions.DisableDescriptions = true -``` - -# Customizing completions - -The generated completion scripts will automatically handle completing commands and flags. However, you can make your completions much more powerful by providing information to complete your program's nouns and flag values. - -## Completion of nouns - -### Static completion of nouns - -Cobra allows you to provide a pre-defined list of completion choices for your nouns using the `ValidArgs` field. -For example, if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. -Some simplified code from `kubectl get` looks like: - -```go -validArgs = []string{ "pod", "node", "service", "replicationcontroller" } - -cmd := &cobra.Command{ - Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)", - Short: "Display one or many resources", - Long: get_long, - Example: get_example, - Run: func(cmd *cobra.Command, args []string) { - cobra.CheckErr(RunGet(f, out, cmd, args)) - }, - ValidArgs: validArgs, -} -``` - -Notice we put the `ValidArgs` field on the `get` sub-command. Doing so will give results like: - -```bash -$ kubectl get [tab][tab] -node pod replicationcontroller service -``` - -#### Aliases for nouns - -If your nouns have aliases, you can define them alongside `ValidArgs` using `ArgAliases`: - -```go -argAliases = []string { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" } - -cmd := &cobra.Command{ - ... - ValidArgs: validArgs, - ArgAliases: argAliases -} -``` - -The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by -the completion algorithm if entered manually, e.g. in: - -```bash -$ kubectl get rc [tab][tab] -backend frontend database -``` - -Note that without declaring `rc` as an alias, the completion algorithm would not know to show the list of -replication controllers following `rc`. - -### Dynamic completion of nouns - -In some cases it is not possible to provide a list of completions in advance. Instead, the list of completions must be determined at execution-time. 
In a similar fashion as for static completions, you can use the `ValidArgsFunction` field to provide a Go function that Cobra will execute when it needs the list of completion choices for the nouns of a command. Note that either `ValidArgs` or `ValidArgsFunction` can be used for a single cobra command, but not both. -Simplified code from `helm status` looks like: - -```go -cmd := &cobra.Command{ - Use: "status RELEASE_NAME", - Short: "Display the status of the named release", - Long: status_long, - RunE: func(cmd *cobra.Command, args []string) { - RunGet(args[0]) - }, - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - if len(args) != 0 { - return nil, cobra.ShellCompDirectiveNoFileComp - } - return getReleasesFromCluster(toComplete), cobra.ShellCompDirectiveNoFileComp - }, -} -``` -Where `getReleasesFromCluster()` is a Go function that obtains the list of current Helm releases running on the Kubernetes cluster. -Notice we put the `ValidArgsFunction` on the `status` sub-command. Let's assume the Helm releases on the cluster are: `harbor`, `notary`, `rook` and `thanos` then this dynamic completion will give results like: - -```bash -$ helm status [tab][tab] -harbor notary rook thanos -``` -You may have noticed the use of `cobra.ShellCompDirective`. These directives are bit fields allowing to control some shell completion behaviors for your particular completion. You can combine them with the bit-or operator such as `cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp` -```go -// Indicates that the shell will perform its default behavior after completions -// have been provided (this implies none of the other directives). -ShellCompDirectiveDefault - -// Indicates an error occurred and completions should be ignored. -ShellCompDirectiveError - -// Indicates that the shell should not add a space after the completion, -// even if there is a single completion provided. -ShellCompDirectiveNoSpace - -// Indicates that the shell should not provide file completion even when -// no completion is provided. -ShellCompDirectiveNoFileComp - -// Indicates that the returned completions should be used as file extension filters. -// For example, to complete only files of the form *.json or *.yaml: -// return []string{"yaml", "json"}, ShellCompDirectiveFilterFileExt -// For flags, using MarkFlagFilename() and MarkPersistentFlagFilename() -// is a shortcut to using this directive explicitly. -// -ShellCompDirectiveFilterFileExt - -// Indicates that only directory names should be provided in file completion. -// For example: -// return nil, ShellCompDirectiveFilterDirs -// For flags, using MarkFlagDirname() is a shortcut to using this directive explicitly. -// -// To request directory names within another directory, the returned completions -// should specify a single directory name within which to search. For example, -// to complete directories within "themes/": -// return []string{"themes"}, ShellCompDirectiveFilterDirs -// -ShellCompDirectiveFilterDirs -``` - -***Note***: When using the `ValidArgsFunction`, Cobra will call your registered function after having parsed all flags and arguments provided in the command-line. You therefore don't need to do this parsing yourself. For example, when a user calls `helm status --namespace my-rook-ns [tab][tab]`, Cobra will call your registered `ValidArgsFunction` after having parsed the `--namespace` flag, as it would have done when calling the `RunE` function. 
- -#### Debugging - -Cobra achieves dynamic completion through the use of a hidden command called by the completion script. To debug your Go completion code, you can call this hidden command directly: -```bash -$ helm __complete status har -harbor -:4 -Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr -``` -***Important:*** If the noun to complete is empty (when the user has not yet typed any letters of that noun), you must pass an empty parameter to the `__complete` command: -```bash -$ helm __complete status "" -harbor -notary -rook -thanos -:4 -Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr -``` -Calling the `__complete` command directly allows you to run the Go debugger to troubleshoot your code. You can also add printouts to your code; Cobra provides the following functions to use for printouts in Go completion code: -```go -// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE -// is set to a file path) and optionally prints to stderr. -cobra.CompDebug(msg string, printToStdErr bool) { -cobra.CompDebugln(msg string, printToStdErr bool) - -// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE -// is set to a file path) and to stderr. -cobra.CompError(msg string) -cobra.CompErrorln(msg string) -``` -***Important:*** You should **not** leave traces that print directly to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging traces functions mentioned above. - -## Completions for flags - -### Mark flags as required - -Most of the time completions will only show sub-commands. But if a flag is required to make a sub-command work, you probably want it to show up when the user types [tab][tab]. You can mark a flag as 'Required' like so: - -```go -cmd.MarkFlagRequired("pod") -cmd.MarkFlagRequired("container") -``` - -and you'll get something like - -```bash -$ kubectl exec [tab][tab] --c --container= -p --pod= -``` - -### Specify dynamic flag completion - -As for nouns, Cobra provides a way of defining dynamic completion of flags. To provide a Go function that Cobra will execute when it needs the list of completion choices for a flag, you must register the function using the `command.RegisterFlagCompletionFunc()` function. - -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"json", "table", "yaml"}, cobra.ShellCompDirectiveDefault -}) -``` -Notice that calling `RegisterFlagCompletionFunc()` is done through the `command` with which the flag is associated. In our example this dynamic completion will give results like so: - -```bash -$ helm status --output [tab][tab] -json table yaml -``` - -#### Debugging - -You can also easily debug your Go completion code for flags: -```bash -$ helm __complete status --output "" -json -table -yaml -:4 -Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr -``` -***Important:*** You should **not** leave traces that print to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging traces functions mentioned further above. 
- -### Specify valid filename extensions for flags that take a filename - -To limit completions of flag values to file names with certain extensions you can either use the different `MarkFlagFilename()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterFileExt`, like so: -```go -flagName := "output" -cmd.MarkFlagFilename(flagName, "yaml", "json") -``` -or -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"yaml", "json"}, ShellCompDirectiveFilterFileExt}) -``` - -### Limit flag completions to directory names - -To limit completions of flag values to directory names you can either use the `MarkFlagDirname()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs`, like so: -```go -flagName := "output" -cmd.MarkFlagDirname(flagName) -``` -or -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return nil, cobra.ShellCompDirectiveFilterDirs -}) -``` -To limit completions of flag values to directory names *within another directory* you can use a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs` like so: -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"themes"}, cobra.ShellCompDirectiveFilterDirs -}) -``` -### Descriptions for completions - -Cobra provides support for completion descriptions. Such descriptions are supported for each shell -(however, for bash, it is only available in the [completion V2 version](#bash-completion-v2)). -For commands and flags, Cobra will provide the descriptions automatically, based on usage information. -For example, using zsh: -``` -$ helm s[tab] -search -- search for a keyword in charts -show -- show information of a chart -status -- displays the status of the named release -``` -while using fish: -``` -$ helm s[tab] -search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) -``` - -Cobra allows you to add descriptions to your own completions. Simply add the description text after each completion, following a `\t` separator. This technique applies to completions returned by `ValidArgs`, `ValidArgsFunction` and `RegisterFlagCompletionFunc()`. For example: -```go -ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"harbor\tAn image registry", "thanos\tLong-term metrics"}, cobra.ShellCompDirectiveNoFileComp -}} -``` -or -```go -ValidArgs: []string{"bash\tCompletions for bash", "zsh\tCompletions for zsh"} -``` -## Bash completions - -### Dependencies - -The bash completion script generated by Cobra requires the `bash_completion` package. You should update the help text of your completion command to show how to install the `bash_completion` package ([Kubectl docs](https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion)) - -### Aliases - -You can also configure `bash` aliases for your program and they will also support completions. 
- -```bash -alias aliasname=origcommand -complete -o default -F __start_origcommand aliasname - -# and now when you run `aliasname` completion will make -# suggestions as it did for `origcommand`. - -$ aliasname -completion firstcommand secondcommand -``` -### Bash legacy dynamic completions - -For backward compatibility, Cobra still supports its bash legacy dynamic completion solution. -Please refer to [Bash Completions](bash_completions.md) for details. - -### Bash completion V2 - -Cobra provides two versions for bash completion. The original bash completion (which started it all!) can be used by calling -`GenBashCompletion()` or `GenBashCompletionFile()`. - -A new V2 bash completion version is also available. This version can be used by calling `GenBashCompletionV2()` or -`GenBashCompletionFileV2()`. The V2 version does **not** support the legacy dynamic completion -(see [Bash Completions](bash_completions.md)) but instead works only with the Go dynamic completion -solution described in this document. -Unless your program already uses the legacy dynamic completion solution, it is recommended that you use the bash -completion V2 solution which provides the following extra features: -- Supports completion descriptions (like the other shells) -- Small completion script of less than 300 lines (v1 generates scripts of thousands of lines; `kubectl` for example has a bash v1 completion script of over 13K lines) -- Streamlined user experience thanks to a completion behavior aligned with the other shells - -`Bash` completion V2 supports descriptions for completions. When calling `GenBashCompletionV2()` or `GenBashCompletionFileV2()` -you must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra -will provide the description automatically based on usage information. You can choose to make this option configurable by -your users. - -``` -# With descriptions -$ helm s[tab][tab] -search (search for a keyword in charts) status (display the status of the named release) -show (show information of a chart) - -# Without descriptions -$ helm s[tab][tab] -search show status -``` -**Note**: Cobra's default `completion` command uses bash completion V2. If for some reason you need to use bash completion V1, you will need to implement your own `completion` command. -## Zsh completions - -Cobra supports native zsh completion generated from the root `cobra.Command`. -The generated completion script should be put somewhere in your `$fpath` and be named -`_`. You will need to start a new shell for the completions to become available. - -Zsh supports descriptions for completions. Cobra will provide the description automatically, -based on usage information. Cobra provides a way to completely disable such descriptions by -using `GenZshCompletionNoDesc()` or `GenZshCompletionFileNoDesc()`. You can choose to make -this a configurable option to your users. -``` -# With descriptions -$ helm s[tab] -search -- search for a keyword in charts -show -- show information of a chart -status -- displays the status of the named release - -# Without descriptions -$ helm s[tab] -search show status -``` -*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`. - -### Limitations - -* Custom completions implemented in Bash scripting (legacy) are not supported and will be ignored for `zsh` (including the use of the `BashCompCustom` flag annotation). 
- * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`). -* The function `MarkFlagCustom()` is not supported and will be ignored for `zsh`. - * You should instead use `RegisterFlagCompletionFunc()`. - -### Zsh completions standardization - -Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backward-compatible, some small changes in behavior were introduced. -Please refer to [Zsh Completions](zsh_completions.md) for details. - -## fish completions - -Cobra supports native fish completions generated from the root `cobra.Command`. You can use the `command.GenFishCompletion()` or `command.GenFishCompletionFile()` functions. You must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users. -``` -# With descriptions -$ helm s[tab] -search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) - -# Without descriptions -$ helm s[tab] -search show status -``` -*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`. - -### Limitations - -* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `fish` (including the use of the `BashCompCustom` flag annotation). - * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`). -* The function `MarkFlagCustom()` is not supported and will be ignored for `fish`. - * You should instead use `RegisterFlagCompletionFunc()`. -* The following flag completion annotations are not supported and will be ignored for `fish`: - * `BashCompFilenameExt` (filtering by file extension) - * `BashCompSubdirsInDir` (filtering by directory) -* The functions corresponding to the above annotations are consequently not supported and will be ignored for `fish`: - * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension) - * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory) -* Similarly, the following completion directives are not supported and will be ignored for `fish`: - * `ShellCompDirectiveFilterFileExt` (filtering by file extension) - * `ShellCompDirectiveFilterDirs` (filtering by directory) - -## PowerShell completions - -Cobra supports native PowerShell completions generated from the root `cobra.Command`. You can use the `command.GenPowerShellCompletion()` or `command.GenPowerShellCompletionFile()` functions. To include descriptions use `command.GenPowerShellCompletionWithDesc()` and `command.GenPowerShellCompletionFileWithDesc()`. Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users. - -The script is designed to support all three PowerShell completion modes: - -* TabCompleteNext (default windows style - on each key press the next option is displayed) -* Complete (works like bash) -* MenuComplete (works like zsh) - -You set the mode with `Set-PSReadLineKeyHandler -Key Tab -Function `. 
Descriptions are only displayed when using the `Complete` or `MenuComplete` mode. - -Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$Profile` environment variable. See `Get-Help about_Profiles` for more info about PowerShell profiles. - -``` -# With descriptions and Mode 'Complete' -$ helm s[tab] -search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) - -# With descriptions and Mode 'MenuComplete' The description of the current selected value will be displayed below the suggestions. -$ helm s[tab] -search show status - -search for a keyword in charts - -# Without descriptions -$ helm s[tab] -search show status -``` - -### Limitations - -* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `powershell` (including the use of the `BashCompCustom` flag annotation). - * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`). -* The function `MarkFlagCustom()` is not supported and will be ignored for `powershell`. - * You should instead use `RegisterFlagCompletionFunc()`. -* The following flag completion annotations are not supported and will be ignored for `powershell`: - * `BashCompFilenameExt` (filtering by file extension) - * `BashCompSubdirsInDir` (filtering by directory) -* The functions corresponding to the above annotations are consequently not supported and will be ignored for `powershell`: - * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension) - * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory) -* Similarly, the following completion directives are not supported and will be ignored for `powershell`: - * `ShellCompDirectiveFilterFileExt` (filtering by file extension) - * `ShellCompDirectiveFilterDirs` (filtering by directory) diff --git a/src/vendor/github.com/spf13/cobra/user_guide.md b/src/vendor/github.com/spf13/cobra/user_guide.md deleted file mode 100644 index 5a7acf88e..000000000 --- a/src/vendor/github.com/spf13/cobra/user_guide.md +++ /dev/null @@ -1,666 +0,0 @@ -# User Guide - -While you are welcome to provide your own organization, typically a Cobra-based -application will follow the following organizational structure: - -``` - ▾ appName/ - ▾ cmd/ - add.go - your.go - commands.go - here.go - main.go -``` - -In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra. - -```go -package main - -import ( - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -## Using the Cobra Generator - -Cobra-CLI is its own program that will create your application and add any -commands you want. It's the easiest way to incorporate Cobra into your application. - -For complete details on using the Cobra generator, please refer to [The Cobra-CLI Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md) - -## Using the Cobra Library - -To manually implement Cobra you need to create a bare main.go file and a rootCmd file. -You will optionally provide additional commands as you see fit. - -### Create rootCmd - -Cobra doesn't require any special constructors. Simply create your commands. 
- -Ideally you place this in app/cmd/root.go: - -```go -var rootCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. - Complete documentation is available at https://gohugo.io/documentation/`, - Run: func(cmd *cobra.Command, args []string) { - // Do Stuff Here - }, -} - -func Execute() { - if err := rootCmd.Execute(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} -``` - -You will additionally define flags and handle configuration in your init() function. - -For example cmd/root.go: - -```go -package cmd - -import ( - "fmt" - "os" - - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var ( - // Used for flags. - cfgFile string - userLicense string - - rootCmd = &cobra.Command{ - Use: "cobra-cli", - Short: "A generator for Cobra based Applications", - Long: `Cobra is a CLI library for Go that empowers applications. -This application is a tool to generate the needed files -to quickly create a Cobra application.`, - } -) - -// Execute executes the root command. -func Execute() error { - return rootCmd.Execute() -} - -func init() { - cobra.OnInitialize(initConfig) - - rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") - rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution") - rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project") - rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) - viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper")) - viper.SetDefault("author", "NAME HERE ") - viper.SetDefault("license", "apache") - - rootCmd.AddCommand(addCmd) - rootCmd.AddCommand(initCmd) -} - -func initConfig() { - if cfgFile != "" { - // Use config file from the flag. - viper.SetConfigFile(cfgFile) - } else { - // Find home directory. - home, err := os.UserHomeDir() - cobra.CheckErr(err) - - // Search config in home directory with name ".cobra" (without extension). - viper.AddConfigPath(home) - viper.SetConfigType("yaml") - viper.SetConfigName(".cobra") - } - - viper.AutomaticEnv() - - if err := viper.ReadInConfig(); err == nil { - fmt.Println("Using config file:", viper.ConfigFileUsed()) - } -} -``` - -### Create your main.go - -With the root command you need to have your main function execute it. -Execute should be run on the root for clarity, though it can be called on any command. - -In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra. - -```go -package main - -import ( - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -### Create additional commands - -Additional commands can be defined and typically are each given their own file -inside of the cmd/ directory. - -If you wanted to create a version command you would create cmd/version.go and -populate it with the following: - -```go -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(versionCmd) -} - -var versionCmd = &cobra.Command{ - Use: "version", - Short: "Print the version number of Hugo", - Long: `All software has versions. 
This is Hugo's`, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") - }, -} -``` - -### Returning and handling errors - -If you wish to return an error to the caller of a command, `RunE` can be used. - -```go -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(tryCmd) -} - -var tryCmd = &cobra.Command{ - Use: "try", - Short: "Try and possibly fail at something", - RunE: func(cmd *cobra.Command, args []string) error { - if err := someFunc(); err != nil { - return err - } - return nil - }, -} -``` - -The error can then be caught at the execute function call. - -## Working with Flags - -Flags provide modifiers to control how the action command operates. - -### Assign flags to a command - -Since the flags are defined and used in different locations, we need to -define a variable outside with the correct scope to assign the flag to -work with. - -```go -var Verbose bool -var Source string -``` - -There are two different approaches to assign a flag. - -### Persistent Flags - -A flag can be 'persistent', meaning that this flag will be available to the -command it's assigned to as well as every command under that command. For -global flags, assign a flag as a persistent flag on the root. - -```go -rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") -``` - -### Local Flags - -A flag can also be assigned locally, which will only apply to that specific command. - -```go -localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") -``` - -### Local Flag on Parent Commands - -By default, Cobra only parses local flags on the target command, and any local flags on -parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will -parse local flags on each command before executing the target command. - -```go -command := cobra.Command{ - Use: "print [OPTIONS] [COMMANDS]", - TraverseChildren: true, -} -``` - -### Bind Flags with Config - -You can also bind your flags with [viper](https://github.com/spf13/viper): -```go -var author string - -func init() { - rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) -} -``` - -In this example, the persistent flag `author` is bound with `viper`. -**Note**: the variable `author` will not be set to the value from config, -when the `--author` flag is provided by user. - -More in [viper documentation](https://github.com/spf13/viper#working-with-flags). - -### Required flags - -Flags are optional by default. If instead you wish your command to report an error -when a flag has not been set, mark it as required: -```go -rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)") -rootCmd.MarkFlagRequired("region") -``` - -Or, for persistent flags: -```go -rootCmd.PersistentFlags().StringVarP(&Region, "region", "r", "", "AWS region (required)") -rootCmd.MarkPersistentFlagRequired("region") -``` - -### Flag Groups - -If you have different flags that must be provided together (e.g. 
if they provide the `--username` flag they MUST provide the `--password` flag as well) then -Cobra can enforce that requirement: -```go -rootCmd.Flags().StringVarP(&u, "username", "u", "", "Username (required if password is set)") -rootCmd.Flags().StringVarP(&pw, "password", "p", "", "Password (required if username is set)") -rootCmd.MarkFlagsRequiredTogether("username", "password") -``` - -You can also prevent different flags from being provided together if they represent mutually -exclusive options such as specifying an output format as either `--json` or `--yaml` but never both: -```go -rootCmd.Flags().BoolVar(&u, "json", false, "Output in JSON") -rootCmd.Flags().BoolVar(&pw, "yaml", false, "Output in YAML") -rootCmd.MarkFlagsMutuallyExclusive("json", "yaml") -``` - -In both of these cases: - - both local and persistent flags can be used - - **NOTE:** the group is only enforced on commands where every flag is defined - - a flag may appear in multiple groups - - a group may contain any number of flags - -## Positional and Custom Arguments - -Validation of positional arguments can be specified using the `Args` field of `Command`. -If `Args` is undefined or `nil`, it defaults to `ArbitraryArgs`. - -The following validators are built in: - -- `NoArgs` - the command will report an error if there are any positional args. -- `ArbitraryArgs` - the command will accept any args. -- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`. -- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args. -- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args. -- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args. -- `ExactValidArgs(int)` - the command will report an error if there are not exactly N positional args OR if there are any positional args that are not in the `ValidArgs` field of `Command` -- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args. -- `MatchAll(pargs ...PositionalArgs)` - enables combining existing checks with arbitrary other checks (e.g. you want to check the ExactArgs length along with other qualities). - -An example of setting the custom validator: - -```go -var cmd = &cobra.Command{ - Short: "hello", - Args: func(cmd *cobra.Command, args []string) error { - if len(args) < 1 { - return errors.New("requires a color argument") - } - if myapp.IsValidColor(args[0]) { - return nil - } - return fmt.Errorf("invalid color specified: %s", args[0]) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hello, World!") - }, -} -``` - -## Example - -In the example below, we have defined three commands. Two are at the top level -and one (cmdTimes) is a child of one of the top commands. In this case the root -is not executable, meaning that a subcommand is required. This is accomplished -by not providing a 'Run' for the 'rootCmd'. - -We have only defined one flag for a single command. - -More documentation about flags is available at https://github.com/spf13/pflag - -```go -package main - -import ( - "fmt" - "strings" - - "github.com/spf13/cobra" -) - -func main() { - var echoTimes int - - var cmdPrint = &cobra.Command{ - Use: "print [string to print]", - Short: "Print anything to the screen", - Long: `print is for printing anything back to the screen. 
-For many years people have printed back to the screen.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdEcho = &cobra.Command{ - Use: "echo [string to echo]", - Short: "Echo anything to the screen", - Long: `echo is for echoing anything back. -Echo works a lot like print, except it has a child command.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Echo: " + strings.Join(args, " ")) - }, - } - - var cmdTimes = &cobra.Command{ - Use: "times [string to echo]", - Short: "Echo anything to the screen more times", - Long: `echo things multiple times back to the user by providing -a count and a string.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - for i := 0; i < echoTimes; i++ { - fmt.Println("Echo: " + strings.Join(args, " ")) - } - }, - } - - cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") - - var rootCmd = &cobra.Command{Use: "app"} - rootCmd.AddCommand(cmdPrint, cmdEcho) - cmdEcho.AddCommand(cmdTimes) - rootCmd.Execute() -} -``` - -For a more complete example of a larger application, please checkout [Hugo](https://gohugo.io/). - -## Help Command - -Cobra automatically adds a help command to your application when you have subcommands. -This will be called when a user runs 'app help'. Additionally, help will also -support all other commands as input. Say, for instance, you have a command called -'create' without any additional configuration; Cobra will work when 'app help -create' is called. Every command will automatically have the '--help' flag added. - -### Example - -The following output is automatically generated by Cobra. Nothing beyond the -command and flag definitions are needed. - - $ cobra help - - Cobra is a CLI library for Go that empowers applications. - This application is a tool to generate the needed files - to quickly create a Cobra application. - - Usage: - cobra [command] - - Available Commands: - add Add a command to a Cobra Application - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra - -l, --license string name of license for the project - --viper use Viper for configuration (default true) - - Use "cobra [command] --help" for more information about a command. - - -Help is just a command like any other. There is no special logic or behavior -around it. In fact, you can provide your own if you want. - -### Defining your own help - -You can provide your own Help command or your own template for the default command to use -with following functions: - -```go -cmd.SetHelpCommand(cmd *Command) -cmd.SetHelpFunc(f func(*Command, []string)) -cmd.SetHelpTemplate(s string) -``` - -The latter two will also apply to any children commands. - -## Usage Message - -When the user provides an invalid flag or invalid command, Cobra responds by -showing the user the 'usage'. - -### Example -You may recognize this from the help above. That's because the default help -embeds the usage as part of its output. 
- - $ cobra --invalid - Error: unknown flag: --invalid - Usage: - cobra [command] - - Available Commands: - add Add a command to a Cobra Application - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra - -l, --license string name of license for the project - --viper use Viper for configuration (default true) - - Use "cobra [command] --help" for more information about a command. - -### Defining your own usage -You can provide your own usage function or template for Cobra to use. -Like help, the function and template are overridable through public methods: - -```go -cmd.SetUsageFunc(f func(*Command) error) -cmd.SetUsageTemplate(s string) -``` - -## Version Flag - -Cobra adds a top-level '--version' flag if the Version field is set on the root command. -Running an application with the '--version' flag will print the version to stdout using -the version template. The template can be customized using the -`cmd.SetVersionTemplate(s string)` function. - -## PreRun and PostRun Hooks - -It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: - -- `PersistentPreRun` -- `PreRun` -- `Run` -- `PostRun` -- `PersistentPostRun` - -An example of two commands which use all of these features is below. When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: - -```go -package main - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func main() { - - var rootCmd = &cobra.Command{ - Use: "root [sub]", - Short: "My root command", - PersistentPreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) - }, - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) - }, - } - - var subCmd = &cobra.Command{ - Use: "sub [no options!]", - Short: "My subcommand", - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) - }, - } - - rootCmd.AddCommand(subCmd) - - rootCmd.SetArgs([]string{""}) - rootCmd.Execute() - fmt.Println() - rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) - rootCmd.Execute() -} -``` - -Output: -``` -Inside rootCmd PersistentPreRun with args: [] -Inside rootCmd PreRun with args: [] -Inside rootCmd Run with args: [] -Inside rootCmd 
PostRun with args: [] -Inside rootCmd PersistentPostRun with args: [] - -Inside rootCmd PersistentPreRun with args: [arg1 arg2] -Inside subCmd PreRun with args: [arg1 arg2] -Inside subCmd Run with args: [arg1 arg2] -Inside subCmd PostRun with args: [arg1 arg2] -Inside subCmd PersistentPostRun with args: [arg1 arg2] -``` - -## Suggestions when "unknown command" happens - -Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example: - -``` -$ hugo srever -Error: unknown command "srever" for "hugo" - -Did you mean this? - server - -Run 'hugo --help' for usage. -``` - -Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](https://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. - -If you need to disable suggestions or tweak the string distance in your command, use: - -```go -command.DisableSuggestions = true -``` - -or - -```go -command.SuggestionsMinimumDistance = 1 -``` - -You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but makes sense in your set of commands and for some which you don't want aliases. Example: - -``` -$ kubectl remove -Error: unknown command "remove" for "kubectl" - -Did you mean this? - delete - -Run 'kubectl help' for usage. -``` - -## Generating documentation for your command - -Cobra can generate documentation based on subcommands, flags, etc. Read more about it in the [docs generation documentation](doc/README.md). - -## Generating shell completions - -Cobra can generate a shell-completion file for the following shells: bash, zsh, fish, PowerShell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md). - -## Providing Active Help - -Cobra makes use of the shell-completion system to define a framework allowing you to provide Active Help to your users. Active Help are messages (hints, warnings, etc) printed as the program is being used. Read more about it in [Active Help](active_help.md). diff --git a/src/vendor/github.com/spf13/cobra/zsh_completions.go b/src/vendor/github.com/spf13/cobra/zsh_completions.go deleted file mode 100644 index 65cd94c60..000000000 --- a/src/vendor/github.com/spf13/cobra/zsh_completions.go +++ /dev/null @@ -1,287 +0,0 @@ -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" -) - -// GenZshCompletionFile generates zsh completion file including descriptions. -func (c *Command) GenZshCompletionFile(filename string) error { - return c.genZshCompletionFile(filename, true) -} - -// GenZshCompletion generates zsh completion file including descriptions -// and writes it to the passed writer. -func (c *Command) GenZshCompletion(w io.Writer) error { - return c.genZshCompletion(w, true) -} - -// GenZshCompletionFileNoDesc generates zsh completion file without descriptions. -func (c *Command) GenZshCompletionFileNoDesc(filename string) error { - return c.genZshCompletionFile(filename, false) -} - -// GenZshCompletionNoDesc generates zsh completion file without descriptions -// and writes it to the passed writer. 
-func (c *Command) GenZshCompletionNoDesc(w io.Writer) error { - return c.genZshCompletion(w, false) -} - -// MarkZshCompPositionalArgumentFile only worked for zsh and its behavior was -// not consistent with Bash completion. It has therefore been disabled. -// Instead, when no other completion is specified, file completion is done by -// default for every argument. One can disable file completion on a per-argument -// basis by using ValidArgsFunction and ShellCompDirectiveNoFileComp. -// To achieve file extension filtering, one can use ValidArgsFunction and -// ShellCompDirectiveFilterFileExt. -// -// Deprecated -func (c *Command) MarkZshCompPositionalArgumentFile(argPosition int, patterns ...string) error { - return nil -} - -// MarkZshCompPositionalArgumentWords only worked for zsh. It has therefore -// been disabled. -// To achieve the same behavior across all shells, one can use -// ValidArgs (for the first argument only) or ValidArgsFunction for -// any argument (can include the first one also). -// -// Deprecated -func (c *Command) MarkZshCompPositionalArgumentWords(argPosition int, words ...string) error { - return nil -} - -func (c *Command) genZshCompletionFile(filename string, includeDesc bool) error { - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return c.genZshCompletion(outFile, includeDesc) -} - -func (c *Command) genZshCompletion(w io.Writer, includeDesc bool) error { - buf := new(bytes.Buffer) - genZshComp(buf, c.Name(), includeDesc) - _, err := buf.WriteTo(w) - return err -} - -func genZshComp(buf io.StringWriter, name string, includeDesc bool) { - compCmd := ShellCompRequestCmd - if !includeDesc { - compCmd = ShellCompNoDescRequestCmd - } - WriteStringAndCheck(buf, fmt.Sprintf(`#compdef %[1]s - -# zsh completion for %-36[1]s -*- shell-script -*- - -__%[1]s_debug() -{ - local file="$BASH_COMP_DEBUG_FILE" - if [[ -n ${file} ]]; then - echo "$*" >> "${file}" - fi -} - -_%[1]s() -{ - local shellCompDirectiveError=%[3]d - local shellCompDirectiveNoSpace=%[4]d - local shellCompDirectiveNoFileComp=%[5]d - local shellCompDirectiveFilterFileExt=%[6]d - local shellCompDirectiveFilterDirs=%[7]d - - local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace - local -a completions - - __%[1]s_debug "\n========= starting completion logic ==========" - __%[1]s_debug "CURRENT: ${CURRENT}, words[*]: ${words[*]}" - - # The user could have moved the cursor backwards on the command-line. - # We need to trigger completion from the $CURRENT location, so we need - # to truncate the command-line ($words) up to the $CURRENT location. - # (We cannot use $CURSOR as its value does not work when a command is an alias.) - words=("${=words[1,CURRENT]}") - __%[1]s_debug "Truncated words[*]: ${words[*]}," - - lastParam=${words[-1]} - lastChar=${lastParam[-1]} - __%[1]s_debug "lastParam: ${lastParam}, lastChar: ${lastChar}" - - # For zsh, when completing a flag with an = (e.g., %[1]s -n=) - # completions must be prefixed with the flag - setopt local_options BASH_REMATCH - if [[ "${lastParam}" =~ '-.*=' ]]; then - # We are dealing with a flag with an = - flagPrefix="-P ${BASH_REMATCH}" - fi - - # Prepare the command to obtain completions - requestComp="${words[1]} %[2]s ${words[2,-1]}" - if [ "${lastChar}" = "" ]; then - # If the last parameter is complete (there is a space following it) - # We add an extra empty parameter so we can indicate this to the go completion code. 
- __%[1]s_debug "Adding extra empty parameter" - requestComp="${requestComp} \"\"" - fi - - __%[1]s_debug "About to call: eval ${requestComp}" - - # Use eval to handle any environment variables and such - out=$(eval ${requestComp} 2>/dev/null) - __%[1]s_debug "completion output: ${out}" - - # Extract the directive integer following a : from the last line - local lastLine - while IFS='\n' read -r line; do - lastLine=${line} - done < <(printf "%%s\n" "${out[@]}") - __%[1]s_debug "last line: ${lastLine}" - - if [ "${lastLine[1]}" = : ]; then - directive=${lastLine[2,-1]} - # Remove the directive including the : and the newline - local suffix - (( suffix=${#lastLine}+2)) - out=${out[1,-$suffix]} - else - # There is no directive specified. Leave $out as is. - __%[1]s_debug "No directive found. Setting do default" - directive=0 - fi - - __%[1]s_debug "directive: ${directive}" - __%[1]s_debug "completions: ${out}" - __%[1]s_debug "flagPrefix: ${flagPrefix}" - - if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then - __%[1]s_debug "Completion received error. Ignoring completions." - return - fi - - local activeHelpMarker="%[8]s" - local endIndex=${#activeHelpMarker} - local startIndex=$((${#activeHelpMarker}+1)) - local hasActiveHelp=0 - while IFS='\n' read -r comp; do - # Check if this is an activeHelp statement (i.e., prefixed with $activeHelpMarker) - if [ "${comp[1,$endIndex]}" = "$activeHelpMarker" ];then - __%[1]s_debug "ActiveHelp found: $comp" - comp="${comp[$startIndex,-1]}" - if [ -n "$comp" ]; then - compadd -x "${comp}" - __%[1]s_debug "ActiveHelp will need delimiter" - hasActiveHelp=1 - fi - - continue - fi - - if [ -n "$comp" ]; then - # If requested, completions are returned with a description. - # The description is preceded by a TAB character. - # For zsh's _describe, we need to use a : instead of a TAB. - # We first need to escape any : as part of the completion itself. - comp=${comp//:/\\:} - - local tab="$(printf '\t')" - comp=${comp//$tab/:} - - __%[1]s_debug "Adding completion: ${comp}" - completions+=${comp} - lastComp=$comp - fi - done < <(printf "%%s\n" "${out[@]}") - - # Add a delimiter after the activeHelp statements, but only if: - # - there are completions following the activeHelp statements, or - # - file completion will be performed (so there will be choices after the activeHelp) - if [ $hasActiveHelp -eq 1 ]; then - if [ ${#completions} -ne 0 ] || [ $((directive & shellCompDirectiveNoFileComp)) -eq 0 ]; then - __%[1]s_debug "Adding activeHelp delimiter" - compadd -x "--" - hasActiveHelp=0 - fi - fi - - if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then - __%[1]s_debug "Activating nospace." - noSpace="-S ''" - fi - - if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then - # File extension filtering - local filteringCmd - filteringCmd='_files' - for filter in ${completions[@]}; do - if [ ${filter[1]} != '*' ]; then - # zsh requires a glob pattern to do file filtering - filter="\*.$filter" - fi - filteringCmd+=" -g $filter" - done - filteringCmd+=" ${flagPrefix}" - - __%[1]s_debug "File filtering command: $filteringCmd" - _arguments '*:filename:'"$filteringCmd" - elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then - # File completion for directories only - local subdir - subdir="${completions[1]}" - if [ -n "$subdir" ]; then - __%[1]s_debug "Listing directories in $subdir" - pushd "${subdir}" >/dev/null 2>&1 - else - __%[1]s_debug "Listing directories in ." 
- fi - - local result - _arguments '*:dirname:_files -/'" ${flagPrefix}" - result=$? - if [ -n "$subdir" ]; then - popd >/dev/null 2>&1 - fi - return $result - else - __%[1]s_debug "Calling _describe" - if eval _describe "completions" completions $flagPrefix $noSpace; then - __%[1]s_debug "_describe found some completions" - - # Return the success of having called _describe - return 0 - else - __%[1]s_debug "_describe did not find completions." - __%[1]s_debug "Checking if we should do file completion." - if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then - __%[1]s_debug "deactivating file completion" - - # We must return an error code here to let zsh know that there were no - # completions found by _describe; this is what will trigger other - # matching algorithms to attempt to find completions. - # For example zsh can match letters in the middle of words. - return 1 - else - # Perform file completion - __%[1]s_debug "Activating file completion" - - # We must return the result of this command, so it must be the - # last command, or else we must store its result to return it. - _arguments '*:filename:_files'" ${flagPrefix}" - fi - fi - fi -} - -# don't run the completion function when being source-ed or eval-ed -if [ "$funcstack[1]" = "_%[1]s" ]; then - _%[1]s -fi -`, name, compCmd, - ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, - activeHelpMarker)) -} diff --git a/src/vendor/github.com/spf13/cobra/zsh_completions.md b/src/vendor/github.com/spf13/cobra/zsh_completions.md deleted file mode 100644 index 7cff61787..000000000 --- a/src/vendor/github.com/spf13/cobra/zsh_completions.md +++ /dev/null @@ -1,48 +0,0 @@ -## Generating Zsh Completion For Your cobra.Command - -Please refer to [Shell Completions](shell_completions.md) for details. - -## Zsh completions standardization - -Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backwards-compatible, some small changes in behavior were introduced. - -### Deprecation summary - -See further below for more details on these deprecations. - -* `cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` is no longer needed. It is therefore **deprecated** and silently ignored. -* `cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` is **deprecated** and silently ignored. - * Instead use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt`. -* `cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored. - * Instead use `ValidArgsFunction`. 
- -### Behavioral changes - -**Noun completion** -|Old behavior|New behavior| -|---|---| -|No file completion by default (opposite of bash)|File completion by default; use `ValidArgsFunction` with `ShellCompDirectiveNoFileComp` to turn off file completion on a per-argument basis| -|Completion of flag names without the `-` prefix having been typed|Flag names are only completed if the user has typed the first `-`| -`cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` used to turn on file completion on a per-argument position basis|File completion for all arguments by default; `cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored| -|`cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` used to turn on file completion **with glob filtering** on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored; use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt` for file **extension** filtering (not full glob filtering)| -|`cmd.MarkZshCompPositionalArgumentWords(pos, words[])` used to provide completion choices on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored; use `ValidArgsFunction` to achieve the same behavior| - -**Flag-value completion** - -|Old behavior|New behavior| -|---|---| -|No file completion by default (opposite of bash)|File completion by default; use `RegisterFlagCompletionFunc()` with `ShellCompDirectiveNoFileComp` to turn off file completion| -|`cmd.MarkFlagFilename(flag, []string{})` and similar used to turn on file completion|File completion by default; `cmd.MarkFlagFilename(flag, []string{})` no longer needed in this context and silently ignored| -|`cmd.MarkFlagFilename(flag, glob[])` used to turn on file completion **with glob filtering** (syntax of `[]string{"*.yaml", "*.yml"}` incompatible with bash)|Will continue to work, however, support for bash syntax is added and should be used instead so as to work for all shells (`[]string{"yaml", "yml"}`)| -|`cmd.MarkFlagDirname(flag)` only completes directories (zsh-specific)|Has been added for all shells| -|Completion of a flag name does not repeat, unless flag is of type `*Array` or `*Slice` (not supported by bash)|Retained for `zsh` and added to `fish`| -|Completion of a flag name does not provide the `=` form (unlike bash)|Retained for `zsh` and added to `fish`| - -**Improvements** - -* Custom completion support (`ValidArgsFunction` and `RegisterFlagCompletionFunc()`) -* File completion by default if no other completions found -* Handling of required flags -* File extension filtering no longer mutually exclusive with bash usage -* Completion of directory names *within* another directory -* Support for `=` form of flags diff --git a/src/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt b/src/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt deleted file mode 100644 index 55ede8a42..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2015 xeipuuv - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/src/vendor/github.com/xeipuuv/gojsonpointer/README.md b/src/vendor/github.com/xeipuuv/gojsonpointer/README.md deleted file mode 100644 index 00059242c..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonpointer/README.md +++ /dev/null @@ -1,41 +0,0 @@ -# gojsonpointer -An implementation of JSON Pointer - Go language - -## Usage - jsonText := `{ - "name": "Bobby B", - "occupation": { - "title" : "King", - "years" : 15, - "heir" : "Joffrey B" - } - }` - - var jsonDocument map[string]interface{} - json.Unmarshal([]byte(jsonText), &jsonDocument) - - //create a JSON pointer - pointerString := "/occupation/title" - pointer, _ := NewJsonPointer(pointerString) - - //SET a new value for the "title" in the document - pointer.Set(jsonDocument, "Supreme Leader of Westeros") - - //GET the new "title" from the document - title, _, _ := pointer.Get(jsonDocument) - fmt.Println(title) //outputs "Supreme Leader of Westeros" - - //DELETE the "heir" from the document - deletePointer := NewJsonPointer("/occupation/heir") - deletePointer.Delete(jsonDocument) - - b, _ := json.Marshal(jsonDocument) - fmt.Println(string(b)) - //outputs `{"name":"Bobby B","occupation":{"title":"Supreme Leader of Westeros","years":15}}` - - -## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 - -### Note -The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. diff --git a/src/vendor/github.com/xeipuuv/gojsonpointer/pointer.go b/src/vendor/github.com/xeipuuv/gojsonpointer/pointer.go deleted file mode 100644 index 7faf5d7f9..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonpointer/pointer.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonpointer -// repository-desc An implementation of JSON Pointer - Go language -// -// description Main and unique file. 
-// -// created 25-02-2013 - -package gojsonpointer - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "strings" -) - -const ( - const_empty_pointer = `` - const_pointer_separator = `/` - - const_invalid_start = `JSON pointer must be empty or start with a "` + const_pointer_separator + `"` -) - -type implStruct struct { - mode string // "SET" or "GET" - - inDocument interface{} - - setInValue interface{} - - getOutNode interface{} - getOutKind reflect.Kind - outError error -} - -type JsonPointer struct { - referenceTokens []string -} - -// NewJsonPointer parses the given string JSON pointer and returns an object -func NewJsonPointer(jsonPointerString string) (p JsonPointer, err error) { - - // Pointer to the root of the document - if len(jsonPointerString) == 0 { - // Keep referenceTokens nil - return - } - if jsonPointerString[0] != '/' { - return p, errors.New(const_invalid_start) - } - - p.referenceTokens = strings.Split(jsonPointerString[1:], const_pointer_separator) - return -} - -// Uses the pointer to retrieve a value from a JSON document -func (p *JsonPointer) Get(document interface{}) (interface{}, reflect.Kind, error) { - - is := &implStruct{mode: "GET", inDocument: document} - p.implementation(is) - return is.getOutNode, is.getOutKind, is.outError - -} - -// Uses the pointer to update a value from a JSON document -func (p *JsonPointer) Set(document interface{}, value interface{}) (interface{}, error) { - - is := &implStruct{mode: "SET", inDocument: document, setInValue: value} - p.implementation(is) - return document, is.outError - -} - -// Uses the pointer to delete a value from a JSON document -func (p *JsonPointer) Delete(document interface{}) (interface{}, error) { - is := &implStruct{mode: "DEL", inDocument: document} - p.implementation(is) - return document, is.outError -} - -// Both Get and Set functions use the same implementation to avoid code duplication -func (p *JsonPointer) implementation(i *implStruct) { - - kind := reflect.Invalid - - // Full document when empty - if len(p.referenceTokens) == 0 { - i.getOutNode = i.inDocument - i.outError = nil - i.getOutKind = kind - i.outError = nil - return - } - - node := i.inDocument - - previousNodes := make([]interface{}, len(p.referenceTokens)) - previousTokens := make([]string, len(p.referenceTokens)) - - for ti, token := range p.referenceTokens { - - isLastToken := ti == len(p.referenceTokens)-1 - previousNodes[ti] = node - previousTokens[ti] = token - - switch v := node.(type) { - - case map[string]interface{}: - decodedToken := decodeReferenceToken(token) - if _, ok := v[decodedToken]; ok { - node = v[decodedToken] - if isLastToken && i.mode == "SET" { - v[decodedToken] = i.setInValue - } else if isLastToken && i.mode =="DEL" { - delete(v,decodedToken) - } - } else if (isLastToken && i.mode == "SET") { - v[decodedToken] = i.setInValue - } else { - i.outError = fmt.Errorf("Object has no key '%s'", decodedToken) - i.getOutKind = reflect.Map - i.getOutNode = nil - return - } - - case []interface{}: - tokenIndex, err := strconv.Atoi(token) - if err != nil { - i.outError = fmt.Errorf("Invalid array index '%s'", token) - i.getOutKind = reflect.Slice - i.getOutNode = nil - return - } - if tokenIndex < 0 || tokenIndex >= len(v) { - i.outError = fmt.Errorf("Out of bound array[0,%d] index '%d'", len(v), tokenIndex) - i.getOutKind = reflect.Slice - i.getOutNode = nil - return - } - - node = v[tokenIndex] - if isLastToken && i.mode == "SET" { - v[tokenIndex] = i.setInValue - } else if isLastToken && i.mode =="DEL" { - 
v[tokenIndex] = v[len(v)-1] - v[len(v)-1] = nil - v = v[:len(v)-1] - previousNodes[ti-1].(map[string]interface{})[previousTokens[ti-1]] = v - } - - default: - i.outError = fmt.Errorf("Invalid token reference '%s'", token) - i.getOutKind = reflect.ValueOf(node).Kind() - i.getOutNode = nil - return - } - - } - - i.getOutNode = node - i.getOutKind = reflect.ValueOf(node).Kind() - i.outError = nil -} - -// Pointer to string representation function -func (p *JsonPointer) String() string { - - if len(p.referenceTokens) == 0 { - return const_empty_pointer - } - - pointerString := const_pointer_separator + strings.Join(p.referenceTokens, const_pointer_separator) - - return pointerString -} - -// Specific JSON pointer encoding here -// ~0 => ~ -// ~1 => / -// ... and vice versa - -func decodeReferenceToken(token string) string { - step1 := strings.Replace(token, `~1`, `/`, -1) - step2 := strings.Replace(step1, `~0`, `~`, -1) - return step2 -} - -func encodeReferenceToken(token string) string { - step1 := strings.Replace(token, `~`, `~0`, -1) - step2 := strings.Replace(step1, `/`, `~1`, -1) - return step2 -} diff --git a/src/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt b/src/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt deleted file mode 100644 index 55ede8a42..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2015 xeipuuv - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/src/vendor/github.com/xeipuuv/gojsonreference/README.md b/src/vendor/github.com/xeipuuv/gojsonreference/README.md deleted file mode 100644 index 9ab6e1eb1..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonreference/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# gojsonreference -An implementation of JSON Reference - Go language - -## Dependencies -https://github.com/xeipuuv/gojsonpointer - -## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 - -http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/src/vendor/github.com/xeipuuv/gojsonreference/reference.go b/src/vendor/github.com/xeipuuv/gojsonreference/reference.go deleted file mode 100644 index 645729130..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonreference/reference.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonreference -// repository-desc An implementation of JSON Reference - Go language -// -// description Main and unique file. -// -// created 26-02-2013 - -package gojsonreference - -import ( - "errors" - "net/url" - "path/filepath" - "runtime" - "strings" - - "github.com/xeipuuv/gojsonpointer" -) - -const ( - const_fragment_char = `#` -) - -func NewJsonReference(jsonReferenceString string) (JsonReference, error) { - - var r JsonReference - err := r.parse(jsonReferenceString) - return r, err - -} - -type JsonReference struct { - referenceUrl *url.URL - referencePointer gojsonpointer.JsonPointer - - HasFullUrl bool - HasUrlPathOnly bool - HasFragmentOnly bool - HasFileScheme bool - HasFullFilePath bool -} - -func (r *JsonReference) GetUrl() *url.URL { - return r.referenceUrl -} - -func (r *JsonReference) GetPointer() *gojsonpointer.JsonPointer { - return &r.referencePointer -} - -func (r *JsonReference) String() string { - - if r.referenceUrl != nil { - return r.referenceUrl.String() - } - - if r.HasFragmentOnly { - return const_fragment_char + r.referencePointer.String() - } - - return r.referencePointer.String() -} - -func (r *JsonReference) IsCanonical() bool { - return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullUrl) -} - -// "Constructor", parses the given string JSON reference -func (r *JsonReference) parse(jsonReferenceString string) (err error) { - - r.referenceUrl, err = url.Parse(jsonReferenceString) - if err != nil { - return - } - refUrl := r.referenceUrl - - if refUrl.Scheme != "" && refUrl.Host != "" { - r.HasFullUrl = true - } else { - if refUrl.Path != "" { - r.HasUrlPathOnly = true - } else if refUrl.RawQuery == "" && refUrl.Fragment != "" { - r.HasFragmentOnly = true - } - } - - r.HasFileScheme = refUrl.Scheme == "file" - if runtime.GOOS == "windows" { - // on Windows, a file URL may have an extra leading slash, and if it - // doesn't then its first component will be treated as the host by the - // Go runtime - if refUrl.Host == "" && strings.HasPrefix(refUrl.Path, "/") { - r.HasFullFilePath = filepath.IsAbs(refUrl.Path[1:]) - } else { - r.HasFullFilePath = filepath.IsAbs(refUrl.Host + refUrl.Path) - } - } else { - r.HasFullFilePath = filepath.IsAbs(refUrl.Path) - } - - // invalid json-pointer error means url has no json-pointer fragment. simply ignore error - r.referencePointer, _ = gojsonpointer.NewJsonPointer(refUrl.Fragment) - - return -} - -// Creates a new reference from a parent and a child -// If the child cannot inherit from the parent, an error is returned -func (r *JsonReference) Inherits(child JsonReference) (*JsonReference, error) { - if child.GetUrl() == nil { - return nil, errors.New("childUrl is nil!") - } - - if r.GetUrl() == nil { - return nil, errors.New("parentUrl is nil!") - } - - // Get a copy of the parent url to make sure we do not modify the original. - // URL reference resolving fails if the fragment of the child is empty, but the parent's is not. 
- // The fragment of the child must be used, so the fragment of the parent is manually removed. - parentUrl := *r.GetUrl() - parentUrl.Fragment = "" - - ref, err := NewJsonReference(parentUrl.ResolveReference(child.GetUrl()).String()) - if err != nil { - return nil, err - } - return &ref, err -} diff --git a/src/vendor/github.com/xeipuuv/gojsonschema/.gitignore b/src/vendor/github.com/xeipuuv/gojsonschema/.gitignore deleted file mode 100644 index 68e993ce3..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonschema/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -*.sw[nop] -*.iml -.vscode/ diff --git a/src/vendor/github.com/xeipuuv/gojsonschema/.travis.yml b/src/vendor/github.com/xeipuuv/gojsonschema/.travis.yml deleted file mode 100644 index 3289001cd..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonschema/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - "1.11" - - "1.12" - - "1.13" -before_install: - - go get github.com/xeipuuv/gojsonreference - - go get github.com/xeipuuv/gojsonpointer - - go get github.com/stretchr/testify/assert diff --git a/src/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt b/src/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt deleted file mode 100644 index 55ede8a42..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2015 xeipuuv - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/src/vendor/github.com/xeipuuv/gojsonschema/README.md b/src/vendor/github.com/xeipuuv/gojsonschema/README.md deleted file mode 100644 index 758f26df0..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonschema/README.md +++ /dev/null @@ -1,466 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/xeipuuv/gojsonschema?status.svg)](https://godoc.org/github.com/xeipuuv/gojsonschema) -[![Build Status](https://travis-ci.org/xeipuuv/gojsonschema.svg)](https://travis-ci.org/xeipuuv/gojsonschema) -[![Go Report Card](https://goreportcard.com/badge/github.com/xeipuuv/gojsonschema)](https://goreportcard.com/report/github.com/xeipuuv/gojsonschema) - -# gojsonschema - -## Description - -An implementation of JSON Schema for the Go programming language. Supports draft-04, draft-06 and draft-07. 
- -References : - -* http://json-schema.org -* http://json-schema.org/latest/json-schema-core.html -* http://json-schema.org/latest/json-schema-validation.html - -## Installation - -``` -go get github.com/xeipuuv/gojsonschema -``` - -Dependencies : -* [github.com/xeipuuv/gojsonpointer](https://github.com/xeipuuv/gojsonpointer) -* [github.com/xeipuuv/gojsonreference](https://github.com/xeipuuv/gojsonreference) -* [github.com/stretchr/testify/assert](https://github.com/stretchr/testify#assert-package) - -## Usage - -### Example - -```go - -package main - -import ( - "fmt" - "github.com/xeipuuv/gojsonschema" -) - -func main() { - - schemaLoader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json") - documentLoader := gojsonschema.NewReferenceLoader("file:///home/me/document.json") - - result, err := gojsonschema.Validate(schemaLoader, documentLoader) - if err != nil { - panic(err.Error()) - } - - if result.Valid() { - fmt.Printf("The document is valid\n") - } else { - fmt.Printf("The document is not valid. see errors :\n") - for _, desc := range result.Errors() { - fmt.Printf("- %s\n", desc) - } - } -} - - -``` - -#### Loaders - -There are various ways to load your JSON data. -In order to load your schemas and documents, -first declare an appropriate loader : - -* Web / HTTP, using a reference : - -```go -loader := gojsonschema.NewReferenceLoader("http://www.some_host.com/schema.json") -``` - -* Local file, using a reference : - -```go -loader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json") -``` - -References use the URI scheme, the prefix (file://) and a full path to the file are required. - -* JSON strings : - -```go -loader := gojsonschema.NewStringLoader(`{"type": "string"}`) -``` - -* Custom Go types : - -```go -m := map[string]interface{}{"type": "string"} -loader := gojsonschema.NewGoLoader(m) -``` - -And - -```go -type Root struct { - Users []User `json:"users"` -} - -type User struct { - Name string `json:"name"` -} - -... - -data := Root{} -data.Users = append(data.Users, User{"John"}) -data.Users = append(data.Users, User{"Sophia"}) -data.Users = append(data.Users, User{"Bill"}) - -loader := gojsonschema.NewGoLoader(data) -``` - -#### Validation - -Once the loaders are set, validation is easy : - -```go -result, err := gojsonschema.Validate(schemaLoader, documentLoader) -``` - -Alternatively, you might want to load a schema only once and process to multiple validations : - -```go -schema, err := gojsonschema.NewSchema(schemaLoader) -... -result1, err := schema.Validate(documentLoader1) -... -result2, err := schema.Validate(documentLoader2) -... -// etc ... -``` - -To check the result : - -```go - if result.Valid() { - fmt.Printf("The document is valid\n") - } else { - fmt.Printf("The document is not valid. see errors :\n") - for _, err := range result.Errors() { - // Err implements the ResultError interface - fmt.Printf("- %s\n", err) - } - } -``` - - -## Loading local schemas - -By default `file` and `http(s)` references to external schemas are loaded automatically via the file system or via http(s). An external schema can also be loaded using a `SchemaLoader`. 
- -```go - sl := gojsonschema.NewSchemaLoader() - loader1 := gojsonschema.NewStringLoader(`{ "type" : "string" }`) - err := sl.AddSchema("http://some_host.com/string.json", loader1) -``` - -Alternatively if your schema already has an `$id` you can use the `AddSchemas` function -```go - loader2 := gojsonschema.NewStringLoader(`{ - "$id" : "http://some_host.com/maxlength.json", - "maxLength" : 5 - }`) - err = sl.AddSchemas(loader2) -``` - -The main schema should be passed to the `Compile` function. This main schema can then directly reference the added schemas without needing to download them. -```go - loader3 := gojsonschema.NewStringLoader(`{ - "$id" : "http://some_host.com/main.json", - "allOf" : [ - { "$ref" : "http://some_host.com/string.json" }, - { "$ref" : "http://some_host.com/maxlength.json" } - ] - }`) - - schema, err := sl.Compile(loader3) - - documentLoader := gojsonschema.NewStringLoader(`"hello world"`) - - result, err := schema.Validate(documentLoader) -``` - -It's also possible to pass a `ReferenceLoader` to the `Compile` function that references a loaded schema. - -```go -err = sl.AddSchemas(loader3) -schema, err := sl.Compile(gojsonschema.NewReferenceLoader("http://some_host.com/main.json")) -``` - -Schemas added by `AddSchema` and `AddSchemas` are only validated when the entire schema is compiled, unless meta-schema validation is used. - -## Using a specific draft -By default `gojsonschema` will try to detect the draft of a schema by using the `$schema` keyword and parse it in a strict draft-04, draft-06 or draft-07 mode. If `$schema` is missing, or the draft version is not explicitely set, a hybrid mode is used which merges together functionality of all drafts into one mode. - -Autodectection can be turned off with the `AutoDetect` property. Specific draft versions can be specified with the `Draft` property. - -```go -sl := gojsonschema.NewSchemaLoader() -sl.Draft = gojsonschema.Draft7 -sl.AutoDetect = false -``` - -If autodetection is on (default), a draft-07 schema can savely reference draft-04 schemas and vice-versa, as long as `$schema` is specified in all schemas. - -## Meta-schema validation -Schemas that are added using the `AddSchema`, `AddSchemas` and `Compile` can be validated against their meta-schema by setting the `Validate` property. - -The following example will produce an error as `multipleOf` must be a number. If `Validate` is off (default), this error is only returned at the `Compile` step. - -```go -sl := gojsonschema.NewSchemaLoader() -sl.Validate = true -err := sl.AddSchemas(gojsonschema.NewStringLoader(`{ - $id" : "http://some_host.com/invalid.json", - "$schema": "http://json-schema.org/draft-07/schema#", - "multipleOf" : true -}`)) - ``` -``` - ``` - -Errors returned by meta-schema validation are more readable and contain more information, which helps significantly if you are developing a schema. - -Meta-schema validation also works with a custom `$schema`. In case `$schema` is missing, or `AutoDetect` is set to `false`, the meta-schema of the used draft is used. - - -## Working with Errors - -The library handles string error codes which you can customize by creating your own gojsonschema.locale and setting it -```go -gojsonschema.Locale = YourCustomLocale{} -``` - -However, each error contains additional contextual information. - -Newer versions of `gojsonschema` may have new additional errors, so code that uses a custom locale will need to be updated when this happens. - -**err.Type()**: *string* Returns the "type" of error that occurred. 
Note you can also type check. See below - -Note: An error of RequiredType has an err.Type() return value of "required" - - "required": RequiredError - "invalid_type": InvalidTypeError - "number_any_of": NumberAnyOfError - "number_one_of": NumberOneOfError - "number_all_of": NumberAllOfError - "number_not": NumberNotError - "missing_dependency": MissingDependencyError - "internal": InternalError - "const": ConstEror - "enum": EnumError - "array_no_additional_items": ArrayNoAdditionalItemsError - "array_min_items": ArrayMinItemsError - "array_max_items": ArrayMaxItemsError - "unique": ItemsMustBeUniqueError - "contains" : ArrayContainsError - "array_min_properties": ArrayMinPropertiesError - "array_max_properties": ArrayMaxPropertiesError - "additional_property_not_allowed": AdditionalPropertyNotAllowedError - "invalid_property_pattern": InvalidPropertyPatternError - "invalid_property_name": InvalidPropertyNameError - "string_gte": StringLengthGTEError - "string_lte": StringLengthLTEError - "pattern": DoesNotMatchPatternError - "multiple_of": MultipleOfError - "number_gte": NumberGTEError - "number_gt": NumberGTError - "number_lte": NumberLTEError - "number_lt": NumberLTError - "condition_then" : ConditionThenError - "condition_else" : ConditionElseError - -**err.Value()**: *interface{}* Returns the value given - -**err.Context()**: *gojsonschema.JsonContext* Returns the context. This has a String() method that will print something like this: (root).firstName - -**err.Field()**: *string* Returns the fieldname in the format firstName, or for embedded properties, person.firstName. This returns the same as the String() method on *err.Context()* but removes the (root). prefix. - -**err.Description()**: *string* The error description. This is based on the locale you are using. See the beginning of this section for overwriting the locale with a custom implementation. - -**err.DescriptionFormat()**: *string* The error description format. This is relevant if you are adding custom validation errors afterwards to the result. - -**err.Details()**: *gojsonschema.ErrorDetails* Returns a map[string]interface{} of additional error details specific to the error. For example, GTE errors will have a "min" value, LTE will have a "max" value. See errors.go for a full description of all the error details. Every error always contains a "field" key that holds the value of *err.Field()* - -Note in most cases, the err.Details() will be used to generate replacement strings in your locales, and not used directly. These strings follow the text/template format i.e. -``` -{{.field}} must be greater than or equal to {{.min}} -``` - -The library allows you to specify custom template functions, should you require more complex error message handling. -```go -gojsonschema.ErrorTemplateFuncs = map[string]interface{}{ - "allcaps": func(s string) string { - return strings.ToUpper(s) - }, -} -``` - -Given the above definition, you can use the custom function `"allcaps"` in your localization templates: -``` -{{allcaps .field}} must be greater than or equal to {{.min}} -``` - -The above error message would then be rendered with the `field` value in capital letters. For example: -``` -"PASSWORD must be greater than or equal to 8" -``` - -Learn more about what types of template functions you can use in `ErrorTemplateFuncs` by referring to Go's [text/template FuncMap](https://golang.org/pkg/text/template/#FuncMap) type. - -## Formats -JSON Schema allows for optional "format" property to validate instances against well-known formats. 
gojsonschema ships with all of the formats defined in the spec that you can use like this: - -````json -{"type": "string", "format": "email"} -```` - -Not all formats defined in draft-07 are available. Implemented formats are: - -* `date` -* `time` -* `date-time` -* `hostname`. Subdomains that start with a number are also supported, but this means that it doesn't strictly follow [RFC1034](http://tools.ietf.org/html/rfc1034#section-3.5) and has the implication that ipv4 addresses are also recognized as valid hostnames. -* `email`. Go's email parser deviates slightly from [RFC5322](https://tools.ietf.org/html/rfc5322). Includes unicode support. -* `idn-email`. Same caveat as `email`. -* `ipv4` -* `ipv6` -* `uri`. Includes unicode support. -* `uri-reference`. Includes unicode support. -* `iri` -* `iri-reference` -* `uri-template` -* `uuid` -* `regex`. Go uses the [RE2](https://github.com/google/re2/wiki/Syntax) engine and is not [ECMA262](http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-262.pdf) compatible. -* `json-pointer` -* `relative-json-pointer` - -`email`, `uri` and `uri-reference` use the same validation code as their unicode counterparts `idn-email`, `iri` and `iri-reference`. If you rely on unicode support you should use the specific -unicode enabled formats for the sake of interoperability as other implementations might not support unicode in the regular formats. - -The validation code for `uri`, `idn-email` and their relatives use mostly standard library code. - -For repetitive or more complex formats, you can create custom format checkers and add them to gojsonschema like this: - -```go -// Define the format checker -type RoleFormatChecker struct {} - -// Ensure it meets the gojsonschema.FormatChecker interface -func (f RoleFormatChecker) IsFormat(input interface{}) bool { - - asString, ok := input.(string) - if ok == false { - return false - } - - return strings.HasPrefix("ROLE_", asString) -} - -// Add it to the library -gojsonschema.FormatCheckers.Add("role", RoleFormatChecker{}) -```` - -Now to use in your json schema: -````json -{"type": "string", "format": "role"} -```` - -Another example would be to check if the provided integer matches an id on database: - -JSON schema: -```json -{"type": "integer", "format": "ValidUserId"} -``` - -```go -// Define the format checker -type ValidUserIdFormatChecker struct {} - -// Ensure it meets the gojsonschema.FormatChecker interface -func (f ValidUserIdFormatChecker) IsFormat(input interface{}) bool { - - asFloat64, ok := input.(float64) // Numbers are always float64 here - if ok == false { - return false - } - - // XXX - // do the magic on the database looking for the int(asFloat64) - - return true -} - -// Add it to the library -gojsonschema.FormatCheckers.Add("ValidUserId", ValidUserIdFormatChecker{}) -```` - -Formats can also be removed, for example if you want to override one of the formats that is defined by default. - -```go -gojsonschema.FormatCheckers.Remove("hostname") -``` - - -## Additional custom validation -After the validation has run and you have the results, you may add additional -errors using `Result.AddError`. This is useful to maintain the same format within the resultset instead -of having to add special exceptions for your own errors. Below is an example. 
-
-```go
-type AnswerInvalidError struct {
-	gojsonschema.ResultErrorFields
-}
-
-func newAnswerInvalidError(context *gojsonschema.JsonContext, value interface{}, details gojsonschema.ErrorDetails) *AnswerInvalidError {
-	err := AnswerInvalidError{}
-	err.SetContext(context)
-	err.SetType("custom_invalid_error")
-	// it is important to use SetDescriptionFormat() here: it is parsed and then used to set the description,
-	// so any value set directly with SetDescription() would be overridden by it.
-	err.SetDescriptionFormat("Answer to the Ultimate Question of Life, the Universe, and Everything is {{.answer}}")
-	err.SetValue(value)
-	err.SetDetails(details)
-
-	return &err
-}
-
-func main() {
-	// ...
-	schema, err := gojsonschema.NewSchema(schemaLoader)
-	result, err := gojsonschema.Validate(schemaLoader, documentLoader)
-
-	if true { // some validation
-		jsonContext := gojsonschema.NewJsonContext("question", nil)
-		errDetail := gojsonschema.ErrorDetails{
-			"answer": 42,
-		}
-		result.AddError(
-			newAnswerInvalidError(
-				gojsonschema.NewJsonContext("answer", jsonContext),
-				52,
-				errDetail,
-			),
-			errDetail,
-		)
-	}
-
-	return result, err
-
-}
-```
-
-This is especially useful if you want to add validation beyond what the
-json schema drafts can provide, such as business-specific logic.
-
-## Uses
-
-gojsonschema uses the following test suite:
-
-https://github.com/json-schema/JSON-Schema-Test-Suite
diff --git a/src/vendor/github.com/xeipuuv/gojsonschema/draft.go b/src/vendor/github.com/xeipuuv/gojsonschema/draft.go
deleted file mode 100644
index 61298e7aa..000000000
--- a/src/vendor/github.com/xeipuuv/gojsonschema/draft.go
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2018 johandorland ( https://github.com/johandorland )
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -package gojsonschema - -import ( - "errors" - "math" - "reflect" - - "github.com/xeipuuv/gojsonreference" -) - -// Draft is a JSON-schema draft version -type Draft int - -// Supported Draft versions -const ( - Draft4 Draft = 4 - Draft6 Draft = 6 - Draft7 Draft = 7 - Hybrid Draft = math.MaxInt32 -) - -type draftConfig struct { - Version Draft - MetaSchemaURL string - MetaSchema string -} -type draftConfigs []draftConfig - -var drafts draftConfigs - -func init() { - drafts = []draftConfig{ - { - Version: Draft4, - MetaSchemaURL: "http://json-schema.org/draft-04/schema", - MetaSchema: `{"id":"http://json-schema.org/draft-04/schema#","$schema":"http://json-schema.org/draft-04/schema#","description":"Core schema meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"positiveInteger":{"type":"integer","minimum":0},"positiveIntegerDefault0":{"allOf":[{"$ref":"#/definitions/positiveInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"minItems":1,"uniqueItems":true}},"type":"object","properties":{"id":{"type":"string"},"$schema":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":{},"multipleOf":{"type":"number","minimum":0,"exclusiveMinimum":true},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"boolean","default":false},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"boolean","default":false},"maxLength":{"$ref":"#/definitions/positiveInteger"},"minLength":{"$ref":"#/definitions/positiveIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"anyOf":[{"type":"boolean"},{"$ref":"#"}],"default":{}},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":{}},"maxItems":{"$ref":"#/definitions/positiveInteger"},"minItems":{"$ref":"#/definitions/positiveIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"maxProperties":{"$ref":"#/definitions/positiveInteger"},"minProperties":{"$ref":"#/definitions/positiveIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"anyOf":[{"type":"boolean"},{"$ref":"#"}],"default":{}},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"enum":{"type":"array","minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"dependencies":{"exclusiveMaximum":["maximum"],"exclusiveMinimum":["minimum"]},"default":{}}`, - }, - { - Version: Draft6, - MetaSchemaURL: "http://json-schema.org/draft-06/schema", - MetaSchema: `{"$schema":"http://json-schema.org/draft-06/schema#","$id":"http://json-schema.org/draft-06/schema#","title":"Core schema 
meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"title":{"type":"string"},"description":{"type":"string"},"default":{},"examples":{"type":"array","items":{}},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":{}},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":{},"enum":{"type":"array","minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":{}}`, - }, - { - Version: Draft7, - MetaSchemaURL: "http://json-schema.org/draft-07/schema", - MetaSchema: `{"$schema":"http://json-schema.org/draft-07/schema#","$id":"http://json-schema.org/draft-07/schema#","title":"Core schema 
meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"$comment":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":true,"readOnly":{"type":"boolean","default":false},"examples":{"type":"array","items":true},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":true},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"propertyNames":{"format":"regex"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":true,"enum":{"type":"array","items":true,"minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"contentMediaType":{"type":"string"},"contentEncoding":{"type":"string"},"if":{"$ref":"#"},"then":{"$ref":"#"},"else":{"$ref":"#"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":true}`, - }, - } -} - -func (dc draftConfigs) GetMetaSchema(url string) string { - for _, config := range dc { - if config.MetaSchemaURL == url { - return config.MetaSchema - } - } - return "" -} -func (dc draftConfigs) GetDraftVersion(url string) *Draft { - for _, config := range dc { - if config.MetaSchemaURL == url { - return &config.Version - } - } - return nil -} -func (dc draftConfigs) GetSchemaURL(draft Draft) string { - for _, config := range dc { - if config.Version == draft { - return config.MetaSchemaURL - } - } - return "" -} - -func parseSchemaURL(documentNode interface{}) (string, *Draft, error) { - - if isKind(documentNode, reflect.Bool) { - return "", nil, nil - } - - if !isKind(documentNode, reflect.Map) { - return "", nil, errors.New("schema is invalid") - } - - m := documentNode.(map[string]interface{}) - - if existsMapKey(m, KEY_SCHEMA) { - if !isKind(m[KEY_SCHEMA], 
reflect.String) { - return "", nil, errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{ - "key": KEY_SCHEMA, - "type": TYPE_STRING, - }, - )) - } - - schemaReference, err := gojsonreference.NewJsonReference(m[KEY_SCHEMA].(string)) - - if err != nil { - return "", nil, err - } - - schema := schemaReference.String() - - return schema, drafts.GetDraftVersion(schema), nil - } - - return "", nil, nil -} diff --git a/src/vendor/github.com/xeipuuv/gojsonschema/errors.go b/src/vendor/github.com/xeipuuv/gojsonschema/errors.go deleted file mode 100644 index e4e9814f3..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonschema/errors.go +++ /dev/null @@ -1,364 +0,0 @@ -package gojsonschema - -import ( - "bytes" - "sync" - "text/template" -) - -var errorTemplates = errorTemplate{template.New("errors-new"), sync.RWMutex{}} - -// template.Template is not thread-safe for writing, so some locking is done -// sync.RWMutex is used for efficiently locking when new templates are created -type errorTemplate struct { - *template.Template - sync.RWMutex -} - -type ( - - // FalseError. ErrorDetails: - - FalseError struct { - ResultErrorFields - } - - // RequiredError indicates that a required field is missing - // ErrorDetails: property string - RequiredError struct { - ResultErrorFields - } - - // InvalidTypeError indicates that a field has the incorrect type - // ErrorDetails: expected, given - InvalidTypeError struct { - ResultErrorFields - } - - // NumberAnyOfError is produced in case of a failing "anyOf" validation - // ErrorDetails: - - NumberAnyOfError struct { - ResultErrorFields - } - - // NumberOneOfError is produced in case of a failing "oneOf" validation - // ErrorDetails: - - NumberOneOfError struct { - ResultErrorFields - } - - // NumberAllOfError is produced in case of a failing "allOf" validation - // ErrorDetails: - - NumberAllOfError struct { - ResultErrorFields - } - - // NumberNotError is produced if a "not" validation failed - // ErrorDetails: - - NumberNotError struct { - ResultErrorFields - } - - // MissingDependencyError is produced in case of a "missing dependency" problem - // ErrorDetails: dependency - MissingDependencyError struct { - ResultErrorFields - } - - // InternalError indicates an internal error - // ErrorDetails: error - InternalError struct { - ResultErrorFields - } - - // ConstError indicates a const error - // ErrorDetails: allowed - ConstError struct { - ResultErrorFields - } - - // EnumError indicates an enum error - // ErrorDetails: allowed - EnumError struct { - ResultErrorFields - } - - // ArrayNoAdditionalItemsError is produced if additional items were found, but not allowed - // ErrorDetails: - - ArrayNoAdditionalItemsError struct { - ResultErrorFields - } - - // ArrayMinItemsError is produced if an array contains less items than the allowed minimum - // ErrorDetails: min - ArrayMinItemsError struct { - ResultErrorFields - } - - // ArrayMaxItemsError is produced if an array contains more items than the allowed maximum - // ErrorDetails: max - ArrayMaxItemsError struct { - ResultErrorFields - } - - // ItemsMustBeUniqueError is produced if an array requires unique items, but contains non-unique items - // ErrorDetails: type, i, j - ItemsMustBeUniqueError struct { - ResultErrorFields - } - - // ArrayContainsError is produced if an array contains invalid items - // ErrorDetails: - ArrayContainsError struct { - ResultErrorFields - } - - // ArrayMinPropertiesError is produced if an object contains less properties than the allowed minimum - // 
ErrorDetails: min - ArrayMinPropertiesError struct { - ResultErrorFields - } - - // ArrayMaxPropertiesError is produced if an object contains more properties than the allowed maximum - // ErrorDetails: max - ArrayMaxPropertiesError struct { - ResultErrorFields - } - - // AdditionalPropertyNotAllowedError is produced if an object has additional properties, but not allowed - // ErrorDetails: property - AdditionalPropertyNotAllowedError struct { - ResultErrorFields - } - - // InvalidPropertyPatternError is produced if an pattern was found - // ErrorDetails: property, pattern - InvalidPropertyPatternError struct { - ResultErrorFields - } - - // InvalidPropertyNameError is produced if an invalid-named property was found - // ErrorDetails: property - InvalidPropertyNameError struct { - ResultErrorFields - } - - // StringLengthGTEError is produced if a string is shorter than the minimum required length - // ErrorDetails: min - StringLengthGTEError struct { - ResultErrorFields - } - - // StringLengthLTEError is produced if a string is longer than the maximum allowed length - // ErrorDetails: max - StringLengthLTEError struct { - ResultErrorFields - } - - // DoesNotMatchPatternError is produced if a string does not match the defined pattern - // ErrorDetails: pattern - DoesNotMatchPatternError struct { - ResultErrorFields - } - - // DoesNotMatchFormatError is produced if a string does not match the defined format - // ErrorDetails: format - DoesNotMatchFormatError struct { - ResultErrorFields - } - - // MultipleOfError is produced if a number is not a multiple of the defined multipleOf - // ErrorDetails: multiple - MultipleOfError struct { - ResultErrorFields - } - - // NumberGTEError is produced if a number is lower than the allowed minimum - // ErrorDetails: min - NumberGTEError struct { - ResultErrorFields - } - - // NumberGTError is produced if a number is lower than, or equal to the specified minimum, and exclusiveMinimum is set - // ErrorDetails: min - NumberGTError struct { - ResultErrorFields - } - - // NumberLTEError is produced if a number is higher than the allowed maximum - // ErrorDetails: max - NumberLTEError struct { - ResultErrorFields - } - - // NumberLTError is produced if a number is higher than, or equal to the specified maximum, and exclusiveMaximum is set - // ErrorDetails: max - NumberLTError struct { - ResultErrorFields - } - - // ConditionThenError is produced if a condition's "then" validation is invalid - // ErrorDetails: - - ConditionThenError struct { - ResultErrorFields - } - - // ConditionElseError is produced if a condition's "else" condition is invalid - // ErrorDetails: - - ConditionElseError struct { - ResultErrorFields - } -) - -// newError takes a ResultError type and sets the type, context, description, details, value, and field -func newError(err ResultError, context *JsonContext, value interface{}, locale locale, details ErrorDetails) { - var t string - var d string - switch err.(type) { - case *FalseError: - t = "false" - d = locale.False() - case *RequiredError: - t = "required" - d = locale.Required() - case *InvalidTypeError: - t = "invalid_type" - d = locale.InvalidType() - case *NumberAnyOfError: - t = "number_any_of" - d = locale.NumberAnyOf() - case *NumberOneOfError: - t = "number_one_of" - d = locale.NumberOneOf() - case *NumberAllOfError: - t = "number_all_of" - d = locale.NumberAllOf() - case *NumberNotError: - t = "number_not" - d = locale.NumberNot() - case *MissingDependencyError: - t = "missing_dependency" - d = locale.MissingDependency() - 
case *InternalError: - t = "internal" - d = locale.Internal() - case *ConstError: - t = "const" - d = locale.Const() - case *EnumError: - t = "enum" - d = locale.Enum() - case *ArrayNoAdditionalItemsError: - t = "array_no_additional_items" - d = locale.ArrayNoAdditionalItems() - case *ArrayMinItemsError: - t = "array_min_items" - d = locale.ArrayMinItems() - case *ArrayMaxItemsError: - t = "array_max_items" - d = locale.ArrayMaxItems() - case *ItemsMustBeUniqueError: - t = "unique" - d = locale.Unique() - case *ArrayContainsError: - t = "contains" - d = locale.ArrayContains() - case *ArrayMinPropertiesError: - t = "array_min_properties" - d = locale.ArrayMinProperties() - case *ArrayMaxPropertiesError: - t = "array_max_properties" - d = locale.ArrayMaxProperties() - case *AdditionalPropertyNotAllowedError: - t = "additional_property_not_allowed" - d = locale.AdditionalPropertyNotAllowed() - case *InvalidPropertyPatternError: - t = "invalid_property_pattern" - d = locale.InvalidPropertyPattern() - case *InvalidPropertyNameError: - t = "invalid_property_name" - d = locale.InvalidPropertyName() - case *StringLengthGTEError: - t = "string_gte" - d = locale.StringGTE() - case *StringLengthLTEError: - t = "string_lte" - d = locale.StringLTE() - case *DoesNotMatchPatternError: - t = "pattern" - d = locale.DoesNotMatchPattern() - case *DoesNotMatchFormatError: - t = "format" - d = locale.DoesNotMatchFormat() - case *MultipleOfError: - t = "multiple_of" - d = locale.MultipleOf() - case *NumberGTEError: - t = "number_gte" - d = locale.NumberGTE() - case *NumberGTError: - t = "number_gt" - d = locale.NumberGT() - case *NumberLTEError: - t = "number_lte" - d = locale.NumberLTE() - case *NumberLTError: - t = "number_lt" - d = locale.NumberLT() - case *ConditionThenError: - t = "condition_then" - d = locale.ConditionThen() - case *ConditionElseError: - t = "condition_else" - d = locale.ConditionElse() - } - - err.SetType(t) - err.SetContext(context) - err.SetValue(value) - err.SetDetails(details) - err.SetDescriptionFormat(d) - details["field"] = err.Field() - - if _, exists := details["context"]; !exists && context != nil { - details["context"] = context.String() - } - - err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details)) -} - -// formatErrorDescription takes a string in the default text/template -// format and converts it to a string with replacements. The fields come -// from the ErrorDetails struct and vary for each type of error. 
-func formatErrorDescription(s string, details ErrorDetails) string { - - var tpl *template.Template - var descrAsBuffer bytes.Buffer - var err error - - errorTemplates.RLock() - tpl = errorTemplates.Lookup(s) - errorTemplates.RUnlock() - - if tpl == nil { - errorTemplates.Lock() - tpl = errorTemplates.New(s) - - if ErrorTemplateFuncs != nil { - tpl.Funcs(ErrorTemplateFuncs) - } - - tpl, err = tpl.Parse(s) - errorTemplates.Unlock() - - if err != nil { - return err.Error() - } - } - - err = tpl.Execute(&descrAsBuffer, details) - if err != nil { - return err.Error() - } - - return descrAsBuffer.String() -} diff --git a/src/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go b/src/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go deleted file mode 100644 index 873ffc7d7..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go +++ /dev/null @@ -1,368 +0,0 @@ -package gojsonschema - -import ( - "net" - "net/mail" - "net/url" - "regexp" - "strings" - "sync" - "time" -) - -type ( - // FormatChecker is the interface all formatters added to FormatCheckerChain must implement - FormatChecker interface { - // IsFormat checks if input has the correct format and type - IsFormat(input interface{}) bool - } - - // FormatCheckerChain holds the formatters - FormatCheckerChain struct { - formatters map[string]FormatChecker - } - - // EmailFormatChecker verifies email address formats - EmailFormatChecker struct{} - - // IPV4FormatChecker verifies IP addresses in the IPv4 format - IPV4FormatChecker struct{} - - // IPV6FormatChecker verifies IP addresses in the IPv6 format - IPV6FormatChecker struct{} - - // DateTimeFormatChecker verifies date/time formats per RFC3339 5.6 - // - // Valid formats: - // Partial Time: HH:MM:SS - // Full Date: YYYY-MM-DD - // Full Time: HH:MM:SSZ-07:00 - // Date Time: YYYY-MM-DDTHH:MM:SSZ-0700 - // - // Where - // YYYY = 4DIGIT year - // MM = 2DIGIT month ; 01-12 - // DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year - // HH = 2DIGIT hour ; 00-23 - // MM = 2DIGIT ; 00-59 - // SS = 2DIGIT ; 00-58, 00-60 based on leap second rules - // T = Literal - // Z = Literal - // - // Note: Nanoseconds are also suported in all formats - // - // http://tools.ietf.org/html/rfc3339#section-5.6 - DateTimeFormatChecker struct{} - - // DateFormatChecker verifies date formats - // - // Valid format: - // Full Date: YYYY-MM-DD - // - // Where - // YYYY = 4DIGIT year - // MM = 2DIGIT month ; 01-12 - // DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year - DateFormatChecker struct{} - - // TimeFormatChecker verifies time formats - // - // Valid formats: - // Partial Time: HH:MM:SS - // Full Time: HH:MM:SSZ-07:00 - // - // Where - // HH = 2DIGIT hour ; 00-23 - // MM = 2DIGIT ; 00-59 - // SS = 2DIGIT ; 00-58, 00-60 based on leap second rules - // T = Literal - // Z = Literal - TimeFormatChecker struct{} - - // URIFormatChecker validates a URI with a valid Scheme per RFC3986 - URIFormatChecker struct{} - - // URIReferenceFormatChecker validates a URI or relative-reference per RFC3986 - URIReferenceFormatChecker struct{} - - // URITemplateFormatChecker validates a URI template per RFC6570 - URITemplateFormatChecker struct{} - - // HostnameFormatChecker validates a hostname is in the correct format - HostnameFormatChecker struct{} - - // UUIDFormatChecker validates a UUID is in the correct format - UUIDFormatChecker struct{} - - // RegexFormatChecker validates a regex is in the correct format - RegexFormatChecker struct{} - - // 
JSONPointerFormatChecker validates a JSON Pointer per RFC6901 - JSONPointerFormatChecker struct{} - - // RelativeJSONPointerFormatChecker validates a relative JSON Pointer is in the correct format - RelativeJSONPointerFormatChecker struct{} -) - -var ( - // FormatCheckers holds the valid formatters, and is a public variable - // so library users can add custom formatters - FormatCheckers = FormatCheckerChain{ - formatters: map[string]FormatChecker{ - "date": DateFormatChecker{}, - "time": TimeFormatChecker{}, - "date-time": DateTimeFormatChecker{}, - "hostname": HostnameFormatChecker{}, - "email": EmailFormatChecker{}, - "idn-email": EmailFormatChecker{}, - "ipv4": IPV4FormatChecker{}, - "ipv6": IPV6FormatChecker{}, - "uri": URIFormatChecker{}, - "uri-reference": URIReferenceFormatChecker{}, - "iri": URIFormatChecker{}, - "iri-reference": URIReferenceFormatChecker{}, - "uri-template": URITemplateFormatChecker{}, - "uuid": UUIDFormatChecker{}, - "regex": RegexFormatChecker{}, - "json-pointer": JSONPointerFormatChecker{}, - "relative-json-pointer": RelativeJSONPointerFormatChecker{}, - }, - } - - // Regex credit: https://www.socketloop.com/tutorials/golang-validate-hostname - rxHostname = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`) - - // Use a regex to make sure curly brackets are balanced properly after validating it as a AURI - rxURITemplate = regexp.MustCompile("^([^{]*({[^}]*})?)*$") - - rxUUID = regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") - - rxJSONPointer = regexp.MustCompile("^(?:/(?:[^~/]|~0|~1)*)*$") - - rxRelJSONPointer = regexp.MustCompile("^(?:0|[1-9][0-9]*)(?:#|(?:/(?:[^~/]|~0|~1)*)*)$") - - lock = new(sync.RWMutex) -) - -// Add adds a FormatChecker to the FormatCheckerChain -// The name used will be the value used for the format key in your json schema -func (c *FormatCheckerChain) Add(name string, f FormatChecker) *FormatCheckerChain { - lock.Lock() - c.formatters[name] = f - lock.Unlock() - - return c -} - -// Remove deletes a FormatChecker from the FormatCheckerChain (if it exists) -func (c *FormatCheckerChain) Remove(name string) *FormatCheckerChain { - lock.Lock() - delete(c.formatters, name) - lock.Unlock() - - return c -} - -// Has checks to see if the FormatCheckerChain holds a FormatChecker with the given name -func (c *FormatCheckerChain) Has(name string) bool { - lock.RLock() - _, ok := c.formatters[name] - lock.RUnlock() - - return ok -} - -// IsFormat will check an input against a FormatChecker with the given name -// to see if it is the correct format -func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool { - lock.RLock() - f, ok := c.formatters[name] - lock.RUnlock() - - // If a format is unrecognized it should always pass validation - if !ok { - return true - } - - return f.IsFormat(input) -} - -// IsFormat checks if input is a correctly formatted e-mail address -func (f EmailFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - _, err := mail.ParseAddress(asString) - return err == nil -} - -// IsFormat checks if input is a correctly formatted IPv4-address -func (f IPV4FormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - // Credit: https://github.com/asaskevich/govalidator - ip := net.ParseIP(asString) - return ip != nil && strings.Contains(asString, ".") -} - -// IsFormat checks 
if input is a correctly formatted IPv6=address -func (f IPV6FormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - // Credit: https://github.com/asaskevich/govalidator - ip := net.ParseIP(asString) - return ip != nil && strings.Contains(asString, ":") -} - -// IsFormat checks if input is a correctly formatted date/time per RFC3339 5.6 -func (f DateTimeFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - formats := []string{ - "15:04:05", - "15:04:05Z07:00", - "2006-01-02", - time.RFC3339, - time.RFC3339Nano, - } - - for _, format := range formats { - if _, err := time.Parse(format, asString); err == nil { - return true - } - } - - return false -} - -// IsFormat checks if input is a correctly formatted date (YYYY-MM-DD) -func (f DateFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - _, err := time.Parse("2006-01-02", asString) - return err == nil -} - -// IsFormat checks if input correctly formatted time (HH:MM:SS or HH:MM:SSZ-07:00) -func (f TimeFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - if _, err := time.Parse("15:04:05Z07:00", asString); err == nil { - return true - } - - _, err := time.Parse("15:04:05", asString) - return err == nil -} - -// IsFormat checks if input is correctly formatted URI with a valid Scheme per RFC3986 -func (f URIFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - u, err := url.Parse(asString) - - if err != nil || u.Scheme == "" { - return false - } - - return !strings.Contains(asString, `\`) -} - -// IsFormat checks if input is a correctly formatted URI or relative-reference per RFC3986 -func (f URIReferenceFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - _, err := url.Parse(asString) - return err == nil && !strings.Contains(asString, `\`) -} - -// IsFormat checks if input is a correctly formatted URI template per RFC6570 -func (f URITemplateFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - u, err := url.Parse(asString) - if err != nil || strings.Contains(asString, `\`) { - return false - } - - return rxURITemplate.MatchString(u.Path) -} - -// IsFormat checks if input is a correctly formatted hostname -func (f HostnameFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - return rxHostname.MatchString(asString) && len(asString) < 256 -} - -// IsFormat checks if input is a correctly formatted UUID -func (f UUIDFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - return rxUUID.MatchString(asString) -} - -// IsFormat checks if input is a correctly formatted regular expression -func (f RegexFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - if asString == "" { - return true - } - _, err := regexp.Compile(asString) - return err == nil -} - -// IsFormat checks if input is a correctly formatted JSON Pointer per RFC6901 -func (f JSONPointerFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - return rxJSONPointer.MatchString(asString) -} - -// IsFormat checks if 
input is a correctly formatted relative JSON Pointer -func (f RelativeJSONPointerFormatChecker) IsFormat(input interface{}) bool { - asString, ok := input.(string) - if !ok { - return false - } - - return rxRelJSONPointer.MatchString(asString) -} diff --git a/src/vendor/github.com/xeipuuv/gojsonschema/glide.yaml b/src/vendor/github.com/xeipuuv/gojsonschema/glide.yaml deleted file mode 100644 index ab6fb867c..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonschema/glide.yaml +++ /dev/null @@ -1,13 +0,0 @@ -package: github.com/xeipuuv/gojsonschema -license: Apache 2.0 -import: -- package: github.com/xeipuuv/gojsonschema - -- package: github.com/xeipuuv/gojsonpointer - -- package: github.com/xeipuuv/gojsonreference - -testImport: -- package: github.com/stretchr/testify - subpackages: - - assert diff --git a/src/vendor/github.com/xeipuuv/gojsonschema/internalLog.go b/src/vendor/github.com/xeipuuv/gojsonschema/internalLog.go deleted file mode 100644 index 4ef7a8d03..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonschema/internalLog.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Very simple log wrapper. -// Used for debugging/testing purposes. -// -// created 01-01-2015 - -package gojsonschema - -import ( - "log" -) - -const internalLogEnabled = false - -func internalLog(format string, v ...interface{}) { - log.Printf(format, v...) -} diff --git a/src/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go b/src/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go deleted file mode 100644 index 0e979707b..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2013 MongoDB, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author tolsen -// author-github https://github.com/tolsen -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. 
-// -// description Implements a persistent (immutable w/ shared structure) singly-linked list of strings for the purpose of storing a json context -// -// created 04-09-2013 - -package gojsonschema - -import "bytes" - -// JsonContext implements a persistent linked-list of strings -type JsonContext struct { - head string - tail *JsonContext -} - -// NewJsonContext creates a new JsonContext -func NewJsonContext(head string, tail *JsonContext) *JsonContext { - return &JsonContext{head, tail} -} - -// String displays the context in reverse. -// This plays well with the data structure's persistent nature with -// Cons and a json document's tree structure. -func (c *JsonContext) String(del ...string) string { - byteArr := make([]byte, 0, c.stringLen()) - buf := bytes.NewBuffer(byteArr) - c.writeStringToBuffer(buf, del) - - return buf.String() -} - -func (c *JsonContext) stringLen() int { - length := 0 - if c.tail != nil { - length = c.tail.stringLen() + 1 // add 1 for "." - } - - length += len(c.head) - return length -} - -func (c *JsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) { - if c.tail != nil { - c.tail.writeStringToBuffer(buf, del) - - if len(del) > 0 { - buf.WriteString(del[0]) - } else { - buf.WriteString(".") - } - } - - buf.WriteString(c.head) -} diff --git a/src/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go b/src/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go deleted file mode 100644 index 5d88af263..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go +++ /dev/null @@ -1,386 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Different strategies to load JSON files. -// Includes References (file and HTTP), JSON strings and Go types. 
-// -// created 01-02-2015 - -package gojsonschema - -import ( - "bytes" - "encoding/json" - "errors" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/xeipuuv/gojsonreference" -) - -var osFS = osFileSystem(os.Open) - -// JSONLoader defines the JSON loader interface -type JSONLoader interface { - JsonSource() interface{} - LoadJSON() (interface{}, error) - JsonReference() (gojsonreference.JsonReference, error) - LoaderFactory() JSONLoaderFactory -} - -// JSONLoaderFactory defines the JSON loader factory interface -type JSONLoaderFactory interface { - // New creates a new JSON loader for the given source - New(source string) JSONLoader -} - -// DefaultJSONLoaderFactory is the default JSON loader factory -type DefaultJSONLoaderFactory struct { -} - -// FileSystemJSONLoaderFactory is a JSON loader factory that uses http.FileSystem -type FileSystemJSONLoaderFactory struct { - fs http.FileSystem -} - -// New creates a new JSON loader for the given source -func (d DefaultJSONLoaderFactory) New(source string) JSONLoader { - return &jsonReferenceLoader{ - fs: osFS, - source: source, - } -} - -// New creates a new JSON loader for the given source -func (f FileSystemJSONLoaderFactory) New(source string) JSONLoader { - return &jsonReferenceLoader{ - fs: f.fs, - source: source, - } -} - -// osFileSystem is a functional wrapper for os.Open that implements http.FileSystem. -type osFileSystem func(string) (*os.File, error) - -// Opens a file with the given name -func (o osFileSystem) Open(name string) (http.File, error) { - return o(name) -} - -// JSON Reference loader -// references are used to load JSONs from files and HTTP - -type jsonReferenceLoader struct { - fs http.FileSystem - source string -} - -func (l *jsonReferenceLoader) JsonSource() interface{} { - return l.source -} - -func (l *jsonReferenceLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference(l.JsonSource().(string)) -} - -func (l *jsonReferenceLoader) LoaderFactory() JSONLoaderFactory { - return &FileSystemJSONLoaderFactory{ - fs: l.fs, - } -} - -// NewReferenceLoader returns a JSON reference loader using the given source and the local OS file system. -func NewReferenceLoader(source string) JSONLoader { - return &jsonReferenceLoader{ - fs: osFS, - source: source, - } -} - -// NewReferenceLoaderFileSystem returns a JSON reference loader using the given source and file system. 
-func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) JSONLoader { - return &jsonReferenceLoader{ - fs: fs, - source: source, - } -} - -func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) { - - var err error - - reference, err := gojsonreference.NewJsonReference(l.JsonSource().(string)) - if err != nil { - return nil, err - } - - refToURL := reference - refToURL.GetUrl().Fragment = "" - - var document interface{} - - if reference.HasFileScheme { - - filename := strings.TrimPrefix(refToURL.String(), "file://") - filename, err = url.QueryUnescape(filename) - - if err != nil { - return nil, err - } - - if runtime.GOOS == "windows" { - // on Windows, a file URL may have an extra leading slash, use slashes - // instead of backslashes, and have spaces escaped - filename = strings.TrimPrefix(filename, "/") - filename = filepath.FromSlash(filename) - } - - document, err = l.loadFromFile(filename) - if err != nil { - return nil, err - } - - } else { - - document, err = l.loadFromHTTP(refToURL.String()) - if err != nil { - return nil, err - } - - } - - return document, nil - -} - -func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) { - - // returned cached versions for metaschemas for drafts 4, 6 and 7 - // for performance and allow for easier offline use - if metaSchema := drafts.GetMetaSchema(address); metaSchema != "" { - return decodeJSONUsingNumber(strings.NewReader(metaSchema)) - } - - resp, err := http.Get(address) - if err != nil { - return nil, err - } - - // must return HTTP Status 200 OK - if resp.StatusCode != http.StatusOK { - return nil, errors.New(formatErrorDescription(Locale.HttpBadStatus(), ErrorDetails{"status": resp.Status})) - } - - bodyBuff, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - return decodeJSONUsingNumber(bytes.NewReader(bodyBuff)) -} - -func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) { - f, err := l.fs.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - bodyBuff, err := ioutil.ReadAll(f) - if err != nil { - return nil, err - } - - return decodeJSONUsingNumber(bytes.NewReader(bodyBuff)) - -} - -// JSON string loader - -type jsonStringLoader struct { - source string -} - -func (l *jsonStringLoader) JsonSource() interface{} { - return l.source -} - -func (l *jsonStringLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference("#") -} - -func (l *jsonStringLoader) LoaderFactory() JSONLoaderFactory { - return &DefaultJSONLoaderFactory{} -} - -// NewStringLoader creates a new JSONLoader, taking a string as source -func NewStringLoader(source string) JSONLoader { - return &jsonStringLoader{source: source} -} - -func (l *jsonStringLoader) LoadJSON() (interface{}, error) { - - return decodeJSONUsingNumber(strings.NewReader(l.JsonSource().(string))) - -} - -// JSON bytes loader - -type jsonBytesLoader struct { - source []byte -} - -func (l *jsonBytesLoader) JsonSource() interface{} { - return l.source -} - -func (l *jsonBytesLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference("#") -} - -func (l *jsonBytesLoader) LoaderFactory() JSONLoaderFactory { - return &DefaultJSONLoaderFactory{} -} - -// NewBytesLoader creates a new JSONLoader, taking a `[]byte` as source -func NewBytesLoader(source []byte) JSONLoader { - return &jsonBytesLoader{source: source} -} - -func (l *jsonBytesLoader) LoadJSON() (interface{}, error) { - return 
decodeJSONUsingNumber(bytes.NewReader(l.JsonSource().([]byte))) -} - -// JSON Go (types) loader -// used to load JSONs from the code as maps, interface{}, structs ... - -type jsonGoLoader struct { - source interface{} -} - -func (l *jsonGoLoader) JsonSource() interface{} { - return l.source -} - -func (l *jsonGoLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference("#") -} - -func (l *jsonGoLoader) LoaderFactory() JSONLoaderFactory { - return &DefaultJSONLoaderFactory{} -} - -// NewGoLoader creates a new JSONLoader from a given Go struct -func NewGoLoader(source interface{}) JSONLoader { - return &jsonGoLoader{source: source} -} - -func (l *jsonGoLoader) LoadJSON() (interface{}, error) { - - // convert it to a compliant JSON first to avoid types "mismatches" - - jsonBytes, err := json.Marshal(l.JsonSource()) - if err != nil { - return nil, err - } - - return decodeJSONUsingNumber(bytes.NewReader(jsonBytes)) - -} - -type jsonIOLoader struct { - buf *bytes.Buffer -} - -// NewReaderLoader creates a new JSON loader using the provided io.Reader -func NewReaderLoader(source io.Reader) (JSONLoader, io.Reader) { - buf := &bytes.Buffer{} - return &jsonIOLoader{buf: buf}, io.TeeReader(source, buf) -} - -// NewWriterLoader creates a new JSON loader using the provided io.Writer -func NewWriterLoader(source io.Writer) (JSONLoader, io.Writer) { - buf := &bytes.Buffer{} - return &jsonIOLoader{buf: buf}, io.MultiWriter(source, buf) -} - -func (l *jsonIOLoader) JsonSource() interface{} { - return l.buf.String() -} - -func (l *jsonIOLoader) LoadJSON() (interface{}, error) { - return decodeJSONUsingNumber(l.buf) -} - -func (l *jsonIOLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference("#") -} - -func (l *jsonIOLoader) LoaderFactory() JSONLoaderFactory { - return &DefaultJSONLoaderFactory{} -} - -// JSON raw loader -// In case the JSON is already marshalled to interface{} use this loader -// This is used for testing as otherwise there is no guarantee the JSON is marshalled -// "properly" by using https://golang.org/pkg/encoding/json/#Decoder.UseNumber -type jsonRawLoader struct { - source interface{} -} - -// NewRawLoader creates a new JSON raw loader for the given source -func NewRawLoader(source interface{}) JSONLoader { - return &jsonRawLoader{source: source} -} -func (l *jsonRawLoader) JsonSource() interface{} { - return l.source -} -func (l *jsonRawLoader) LoadJSON() (interface{}, error) { - return l.source, nil -} -func (l *jsonRawLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference("#") -} -func (l *jsonRawLoader) LoaderFactory() JSONLoaderFactory { - return &DefaultJSONLoaderFactory{} -} - -func decodeJSONUsingNumber(r io.Reader) (interface{}, error) { - - var document interface{} - - decoder := json.NewDecoder(r) - decoder.UseNumber() - - err := decoder.Decode(&document) - if err != nil { - return nil, err - } - - return document, nil - -} diff --git a/src/vendor/github.com/xeipuuv/gojsonschema/locales.go b/src/vendor/github.com/xeipuuv/gojsonschema/locales.go deleted file mode 100644 index a416225cd..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonschema/locales.go +++ /dev/null @@ -1,472 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Contains const string and messages. -// -// created 01-01-2015 - -package gojsonschema - -type ( - // locale is an interface for defining custom error strings - locale interface { - - // False returns a format-string for "false" schema validation errors - False() string - - // Required returns a format-string for "required" schema validation errors - Required() string - - // InvalidType returns a format-string for "invalid type" schema validation errors - InvalidType() string - - // NumberAnyOf returns a format-string for "anyOf" schema validation errors - NumberAnyOf() string - - // NumberOneOf returns a format-string for "oneOf" schema validation errors - NumberOneOf() string - - // NumberAllOf returns a format-string for "allOf" schema validation errors - NumberAllOf() string - - // NumberNot returns a format-string to format a NumberNotError - NumberNot() string - - // MissingDependency returns a format-string for "missing dependency" schema validation errors - MissingDependency() string - - // Internal returns a format-string for internal errors - Internal() string - - // Const returns a format-string to format a ConstError - Const() string - - // Enum returns a format-string to format an EnumError - Enum() string - - // ArrayNotEnoughItems returns a format-string to format an error for arrays having not enough items to match positional list of schema - ArrayNotEnoughItems() string - - // ArrayNoAdditionalItems returns a format-string to format an ArrayNoAdditionalItemsError - ArrayNoAdditionalItems() string - - // ArrayMinItems returns a format-string to format an ArrayMinItemsError - ArrayMinItems() string - - // ArrayMaxItems returns a format-string to format an ArrayMaxItemsError - ArrayMaxItems() string - - // Unique returns a format-string to format an ItemsMustBeUniqueError - Unique() string - - // ArrayContains returns a format-string to format an ArrayContainsError - ArrayContains() string - - // ArrayMinProperties returns a format-string to format an ArrayMinPropertiesError - ArrayMinProperties() string - - // ArrayMaxProperties returns a format-string to format an ArrayMaxPropertiesError - ArrayMaxProperties() string - - // AdditionalPropertyNotAllowed returns a format-string to format an AdditionalPropertyNotAllowedError - AdditionalPropertyNotAllowed() string - - // InvalidPropertyPattern returns a format-string to format an InvalidPropertyPatternError - InvalidPropertyPattern() string - - // InvalidPropertyName returns a format-string to format an InvalidPropertyNameError - InvalidPropertyName() string - - // StringGTE returns a format-string to format an StringLengthGTEError - StringGTE() string - - // StringLTE returns a format-string to format an StringLengthLTEError - StringLTE() string - - // DoesNotMatchPattern returns a format-string to format an DoesNotMatchPatternError - DoesNotMatchPattern() string 
- - // DoesNotMatchFormat returns a format-string to format an DoesNotMatchFormatError - DoesNotMatchFormat() string - - // MultipleOf returns a format-string to format an MultipleOfError - MultipleOf() string - - // NumberGTE returns a format-string to format an NumberGTEError - NumberGTE() string - - // NumberGT returns a format-string to format an NumberGTError - NumberGT() string - - // NumberLTE returns a format-string to format an NumberLTEError - NumberLTE() string - - // NumberLT returns a format-string to format an NumberLTError - NumberLT() string - - // Schema validations - - // RegexPattern returns a format-string to format a regex-pattern error - RegexPattern() string - - // GreaterThanZero returns a format-string to format an error where a number must be greater than zero - GreaterThanZero() string - - // MustBeOfA returns a format-string to format an error where a value is of the wrong type - MustBeOfA() string - - // MustBeOfAn returns a format-string to format an error where a value is of the wrong type - MustBeOfAn() string - - // CannotBeUsedWithout returns a format-string to format a "cannot be used without" error - CannotBeUsedWithout() string - - // CannotBeGT returns a format-string to format an error where a value are greater than allowed - CannotBeGT() string - - // MustBeOfType returns a format-string to format an error where a value does not match the required type - MustBeOfType() string - - // MustBeValidRegex returns a format-string to format an error where a regex is invalid - MustBeValidRegex() string - - // MustBeValidFormat returns a format-string to format an error where a value does not match the expected format - MustBeValidFormat() string - - // MustBeGTEZero returns a format-string to format an error where a value must be greater or equal than 0 - MustBeGTEZero() string - - // KeyCannotBeGreaterThan returns a format-string to format an error where a key is greater than the maximum allowed - KeyCannotBeGreaterThan() string - - // KeyItemsMustBeOfType returns a format-string to format an error where a key is of the wrong type - KeyItemsMustBeOfType() string - - // KeyItemsMustBeUnique returns a format-string to format an error where keys are not unique - KeyItemsMustBeUnique() string - - // ReferenceMustBeCanonical returns a format-string to format a "reference must be canonical" error - ReferenceMustBeCanonical() string - - // NotAValidType returns a format-string to format an invalid type error - NotAValidType() string - - // Duplicated returns a format-string to format an error where types are duplicated - Duplicated() string - - // HttpBadStatus returns a format-string for errors when loading a schema using HTTP - HttpBadStatus() string - - // ParseError returns a format-string for JSON parsing errors - ParseError() string - - // ConditionThen returns a format-string for ConditionThenError errors - ConditionThen() string - - // ConditionElse returns a format-string for ConditionElseError errors - ConditionElse() string - - // ErrorFormat returns a format string for errors - ErrorFormat() string - } - - // DefaultLocale is the default locale for this package - DefaultLocale struct{} -) - -// False returns a format-string for "false" schema validation errors -func (l DefaultLocale) False() string { - return "False always fails validation" -} - -// Required returns a format-string for "required" schema validation errors -func (l DefaultLocale) Required() string { - return `{{.property}} is required` -} - -// InvalidType returns a format-string for 
"invalid type" schema validation errors -func (l DefaultLocale) InvalidType() string { - return `Invalid type. Expected: {{.expected}}, given: {{.given}}` -} - -// NumberAnyOf returns a format-string for "anyOf" schema validation errors -func (l DefaultLocale) NumberAnyOf() string { - return `Must validate at least one schema (anyOf)` -} - -// NumberOneOf returns a format-string for "oneOf" schema validation errors -func (l DefaultLocale) NumberOneOf() string { - return `Must validate one and only one schema (oneOf)` -} - -// NumberAllOf returns a format-string for "allOf" schema validation errors -func (l DefaultLocale) NumberAllOf() string { - return `Must validate all the schemas (allOf)` -} - -// NumberNot returns a format-string to format a NumberNotError -func (l DefaultLocale) NumberNot() string { - return `Must not validate the schema (not)` -} - -// MissingDependency returns a format-string for "missing dependency" schema validation errors -func (l DefaultLocale) MissingDependency() string { - return `Has a dependency on {{.dependency}}` -} - -// Internal returns a format-string for internal errors -func (l DefaultLocale) Internal() string { - return `Internal Error {{.error}}` -} - -// Const returns a format-string to format a ConstError -func (l DefaultLocale) Const() string { - return `{{.field}} does not match: {{.allowed}}` -} - -// Enum returns a format-string to format an EnumError -func (l DefaultLocale) Enum() string { - return `{{.field}} must be one of the following: {{.allowed}}` -} - -// ArrayNoAdditionalItems returns a format-string to format an ArrayNoAdditionalItemsError -func (l DefaultLocale) ArrayNoAdditionalItems() string { - return `No additional items allowed on array` -} - -// ArrayNotEnoughItems returns a format-string to format an error for arrays having not enough items to match positional list of schema -func (l DefaultLocale) ArrayNotEnoughItems() string { - return `Not enough items on array to match positional list of schema` -} - -// ArrayMinItems returns a format-string to format an ArrayMinItemsError -func (l DefaultLocale) ArrayMinItems() string { - return `Array must have at least {{.min}} items` -} - -// ArrayMaxItems returns a format-string to format an ArrayMaxItemsError -func (l DefaultLocale) ArrayMaxItems() string { - return `Array must have at most {{.max}} items` -} - -// Unique returns a format-string to format an ItemsMustBeUniqueError -func (l DefaultLocale) Unique() string { - return `{{.type}} items[{{.i}},{{.j}}] must be unique` -} - -// ArrayContains returns a format-string to format an ArrayContainsError -func (l DefaultLocale) ArrayContains() string { - return `At least one of the items must match` -} - -// ArrayMinProperties returns a format-string to format an ArrayMinPropertiesError -func (l DefaultLocale) ArrayMinProperties() string { - return `Must have at least {{.min}} properties` -} - -// ArrayMaxProperties returns a format-string to format an ArrayMaxPropertiesError -func (l DefaultLocale) ArrayMaxProperties() string { - return `Must have at most {{.max}} properties` -} - -// AdditionalPropertyNotAllowed returns a format-string to format an AdditionalPropertyNotAllowedError -func (l DefaultLocale) AdditionalPropertyNotAllowed() string { - return `Additional property {{.property}} is not allowed` -} - -// InvalidPropertyPattern returns a format-string to format an InvalidPropertyPatternError -func (l DefaultLocale) InvalidPropertyPattern() string { - return `Property "{{.property}}" does not match pattern {{.pattern}}` -} 
- -// InvalidPropertyName returns a format-string to format an InvalidPropertyNameError -func (l DefaultLocale) InvalidPropertyName() string { - return `Property name of "{{.property}}" does not match` -} - -// StringGTE returns a format-string to format an StringLengthGTEError -func (l DefaultLocale) StringGTE() string { - return `String length must be greater than or equal to {{.min}}` -} - -// StringLTE returns a format-string to format an StringLengthLTEError -func (l DefaultLocale) StringLTE() string { - return `String length must be less than or equal to {{.max}}` -} - -// DoesNotMatchPattern returns a format-string to format an DoesNotMatchPatternError -func (l DefaultLocale) DoesNotMatchPattern() string { - return `Does not match pattern '{{.pattern}}'` -} - -// DoesNotMatchFormat returns a format-string to format an DoesNotMatchFormatError -func (l DefaultLocale) DoesNotMatchFormat() string { - return `Does not match format '{{.format}}'` -} - -// MultipleOf returns a format-string to format an MultipleOfError -func (l DefaultLocale) MultipleOf() string { - return `Must be a multiple of {{.multiple}}` -} - -// NumberGTE returns the format string to format a NumberGTEError -func (l DefaultLocale) NumberGTE() string { - return `Must be greater than or equal to {{.min}}` -} - -// NumberGT returns the format string to format a NumberGTError -func (l DefaultLocale) NumberGT() string { - return `Must be greater than {{.min}}` -} - -// NumberLTE returns the format string to format a NumberLTEError -func (l DefaultLocale) NumberLTE() string { - return `Must be less than or equal to {{.max}}` -} - -// NumberLT returns the format string to format a NumberLTError -func (l DefaultLocale) NumberLT() string { - return `Must be less than {{.max}}` -} - -// Schema validators - -// RegexPattern returns a format-string to format a regex-pattern error -func (l DefaultLocale) RegexPattern() string { - return `Invalid regex pattern '{{.pattern}}'` -} - -// GreaterThanZero returns a format-string to format an error where a number must be greater than zero -func (l DefaultLocale) GreaterThanZero() string { - return `{{.number}} must be strictly greater than 0` -} - -// MustBeOfA returns a format-string to format an error where a value is of the wrong type -func (l DefaultLocale) MustBeOfA() string { - return `{{.x}} must be of a {{.y}}` -} - -// MustBeOfAn returns a format-string to format an error where a value is of the wrong type -func (l DefaultLocale) MustBeOfAn() string { - return `{{.x}} must be of an {{.y}}` -} - -// CannotBeUsedWithout returns a format-string to format a "cannot be used without" error -func (l DefaultLocale) CannotBeUsedWithout() string { - return `{{.x}} cannot be used without {{.y}}` -} - -// CannotBeGT returns a format-string to format an error where a value are greater than allowed -func (l DefaultLocale) CannotBeGT() string { - return `{{.x}} cannot be greater than {{.y}}` -} - -// MustBeOfType returns a format-string to format an error where a value does not match the required type -func (l DefaultLocale) MustBeOfType() string { - return `{{.key}} must be of type {{.type}}` -} - -// MustBeValidRegex returns a format-string to format an error where a regex is invalid -func (l DefaultLocale) MustBeValidRegex() string { - return `{{.key}} must be a valid regex` -} - -// MustBeValidFormat returns a format-string to format an error where a value does not match the expected format -func (l DefaultLocale) MustBeValidFormat() string { - return `{{.key}} must be a valid format 
{{.given}}` -} - -// MustBeGTEZero returns a format-string to format an error where a value must be greater or equal than 0 -func (l DefaultLocale) MustBeGTEZero() string { - return `{{.key}} must be greater than or equal to 0` -} - -// KeyCannotBeGreaterThan returns a format-string to format an error where a value is greater than the maximum allowed -func (l DefaultLocale) KeyCannotBeGreaterThan() string { - return `{{.key}} cannot be greater than {{.y}}` -} - -// KeyItemsMustBeOfType returns a format-string to format an error where a key is of the wrong type -func (l DefaultLocale) KeyItemsMustBeOfType() string { - return `{{.key}} items must be {{.type}}` -} - -// KeyItemsMustBeUnique returns a format-string to format an error where keys are not unique -func (l DefaultLocale) KeyItemsMustBeUnique() string { - return `{{.key}} items must be unique` -} - -// ReferenceMustBeCanonical returns a format-string to format a "reference must be canonical" error -func (l DefaultLocale) ReferenceMustBeCanonical() string { - return `Reference {{.reference}} must be canonical` -} - -// NotAValidType returns a format-string to format an invalid type error -func (l DefaultLocale) NotAValidType() string { - return `has a primitive type that is NOT VALID -- given: {{.given}} Expected valid values are:{{.expected}}` -} - -// Duplicated returns a format-string to format an error where types are duplicated -func (l DefaultLocale) Duplicated() string { - return `{{.type}} type is duplicated` -} - -// HttpBadStatus returns a format-string for errors when loading a schema using HTTP -func (l DefaultLocale) HttpBadStatus() string { - return `Could not read schema from HTTP, response status is {{.status}}` -} - -// ErrorFormat returns a format string for errors -// Replacement options: field, description, context, value -func (l DefaultLocale) ErrorFormat() string { - return `{{.field}}: {{.description}}` -} - -// ParseError returns a format-string for JSON parsing errors -func (l DefaultLocale) ParseError() string { - return `Expected: {{.expected}}, given: Invalid JSON` -} - -// ConditionThen returns a format-string for ConditionThenError errors -// If/Else -func (l DefaultLocale) ConditionThen() string { - return `Must validate "then" as "if" was valid` -} - -// ConditionElse returns a format-string for ConditionElseError errors -func (l DefaultLocale) ConditionElse() string { - return `Must validate "else" as "if" was not valid` -} - -// constants -const ( - STRING_NUMBER = "number" - STRING_ARRAY_OF_STRINGS = "array of strings" - STRING_ARRAY_OF_SCHEMAS = "array of schemas" - STRING_SCHEMA = "valid schema" - STRING_SCHEMA_OR_ARRAY_OF_STRINGS = "schema or array of strings" - STRING_PROPERTIES = "properties" - STRING_DEPENDENCY = "dependency" - STRING_PROPERTY = "property" - STRING_UNDEFINED = "undefined" - STRING_CONTEXT_ROOT = "(root)" - STRING_ROOT_SCHEMA_PROPERTY = "(root)" -) diff --git a/src/vendor/github.com/xeipuuv/gojsonschema/result.go b/src/vendor/github.com/xeipuuv/gojsonschema/result.go deleted file mode 100644 index 0a0179148..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonschema/result.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Result and ResultError implementations. -// -// created 01-01-2015 - -package gojsonschema - -import ( - "fmt" - "strings" -) - -type ( - // ErrorDetails is a map of details specific to each error. - // While the values will vary, every error will contain a "field" value - ErrorDetails map[string]interface{} - - // ResultError is the interface that library errors must implement - ResultError interface { - // Field returns the field name without the root context - // i.e. firstName or person.firstName instead of (root).firstName or (root).person.firstName - Field() string - // SetType sets the error-type - SetType(string) - // Type returns the error-type - Type() string - // SetContext sets the JSON-context for the error - SetContext(*JsonContext) - // Context returns the JSON-context of the error - Context() *JsonContext - // SetDescription sets a description for the error - SetDescription(string) - // Description returns the description of the error - Description() string - // SetDescriptionFormat sets the format for the description in the default text/template format - SetDescriptionFormat(string) - // DescriptionFormat returns the format for the description in the default text/template format - DescriptionFormat() string - // SetValue sets the value related to the error - SetValue(interface{}) - // Value returns the value related to the error - Value() interface{} - // SetDetails sets the details specific to the error - SetDetails(ErrorDetails) - // Details returns details about the error - Details() ErrorDetails - // String returns a string representation of the error - String() string - } - - // ResultErrorFields holds the fields for each ResultError implementation. - // ResultErrorFields implements the ResultError interface, so custom errors - // can be defined by just embedding this type - ResultErrorFields struct { - errorType string // A string with the type of error (i.e. invalid_type) - context *JsonContext // Tree like notation of the part that failed the validation. ex (root).a.b ... - description string // A human readable error message - descriptionFormat string // A format for human readable error message - value interface{} // Value given by the JSON file that is the source of the error - details ErrorDetails - } - - // Result holds the result of a validation - Result struct { - errors []ResultError - // Scores how well the validation matched. Useful in generating - // better error messages for anyOf and oneOf. - score int - } -) - -// Field returns the field name without the root context -// i.e. 
firstName or person.firstName instead of (root).firstName or (root).person.firstName -func (v *ResultErrorFields) Field() string { - return strings.TrimPrefix(v.context.String(), STRING_ROOT_SCHEMA_PROPERTY+".") -} - -// SetType sets the error-type -func (v *ResultErrorFields) SetType(errorType string) { - v.errorType = errorType -} - -// Type returns the error-type -func (v *ResultErrorFields) Type() string { - return v.errorType -} - -// SetContext sets the JSON-context for the error -func (v *ResultErrorFields) SetContext(context *JsonContext) { - v.context = context -} - -// Context returns the JSON-context of the error -func (v *ResultErrorFields) Context() *JsonContext { - return v.context -} - -// SetDescription sets a description for the error -func (v *ResultErrorFields) SetDescription(description string) { - v.description = description -} - -// Description returns the description of the error -func (v *ResultErrorFields) Description() string { - return v.description -} - -// SetDescriptionFormat sets the format for the description in the default text/template format -func (v *ResultErrorFields) SetDescriptionFormat(descriptionFormat string) { - v.descriptionFormat = descriptionFormat -} - -// DescriptionFormat returns the format for the description in the default text/template format -func (v *ResultErrorFields) DescriptionFormat() string { - return v.descriptionFormat -} - -// SetValue sets the value related to the error -func (v *ResultErrorFields) SetValue(value interface{}) { - v.value = value -} - -// Value returns the value related to the error -func (v *ResultErrorFields) Value() interface{} { - return v.value -} - -// SetDetails sets the details specific to the error -func (v *ResultErrorFields) SetDetails(details ErrorDetails) { - v.details = details -} - -// Details returns details about the error -func (v *ResultErrorFields) Details() ErrorDetails { - return v.details -} - -// String returns a string representation of the error -func (v ResultErrorFields) String() string { - // as a fallback, the value is displayed go style - valueString := fmt.Sprintf("%v", v.value) - - // marshal the go value value to json - if v.value == nil { - valueString = TYPE_NULL - } else { - if vs, err := marshalToJSONString(v.value); err == nil { - if vs == nil { - valueString = TYPE_NULL - } else { - valueString = *vs - } - } - } - - return formatErrorDescription(Locale.ErrorFormat(), ErrorDetails{ - "context": v.context.String(), - "description": v.description, - "value": valueString, - "field": v.Field(), - }) -} - -// Valid indicates if no errors were found -func (v *Result) Valid() bool { - return len(v.errors) == 0 -} - -// Errors returns the errors that were found -func (v *Result) Errors() []ResultError { - return v.errors -} - -// AddError appends a fully filled error to the error set -// SetDescription() will be called with the result of the parsed err.DescriptionFormat() -func (v *Result) AddError(err ResultError, details ErrorDetails) { - if _, exists := details["context"]; !exists && err.Context() != nil { - details["context"] = err.Context().String() - } - - err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details)) - - v.errors = append(v.errors, err) -} - -func (v *Result) addInternalError(err ResultError, context *JsonContext, value interface{}, details ErrorDetails) { - newError(err, context, value, Locale, details) - v.errors = append(v.errors, err) - v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function -} 
- -// Used to copy errors from a sub-schema to the main one -func (v *Result) mergeErrors(otherResult *Result) { - v.errors = append(v.errors, otherResult.Errors()...) - v.score += otherResult.score -} - -func (v *Result) incrementScore() { - v.score++ -} diff --git a/src/vendor/github.com/xeipuuv/gojsonschema/schema.go b/src/vendor/github.com/xeipuuv/gojsonschema/schema.go deleted file mode 100644 index 9e93cd795..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonschema/schema.go +++ /dev/null @@ -1,1087 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Defines Schema, the main entry to every subSchema. -// Contains the parsing logic and error checking. -// -// created 26-02-2013 - -package gojsonschema - -import ( - "errors" - "math/big" - "reflect" - "regexp" - "text/template" - - "github.com/xeipuuv/gojsonreference" -) - -var ( - // Locale is the default locale to use - // Library users can overwrite with their own implementation - Locale locale = DefaultLocale{} - - // ErrorTemplateFuncs allows you to define custom template funcs for use in localization. - ErrorTemplateFuncs template.FuncMap -) - -// NewSchema instances a schema using the given JSONLoader -func NewSchema(l JSONLoader) (*Schema, error) { - return NewSchemaLoader().Compile(l) -} - -// Schema holds a schema -type Schema struct { - documentReference gojsonreference.JsonReference - rootSchema *subSchema - pool *schemaPool - referencePool *schemaReferencePool -} - -func (d *Schema) parse(document interface{}, draft Draft) error { - d.rootSchema = &subSchema{property: STRING_ROOT_SCHEMA_PROPERTY, draft: &draft} - return d.parseSchema(document, d.rootSchema) -} - -// SetRootSchemaName sets the root-schema name -func (d *Schema) SetRootSchemaName(name string) { - d.rootSchema.property = name -} - -// Parses a subSchema -// -// Pretty long function ( sorry :) )... 
but pretty straight forward, repetitive and boring -// Not much magic involved here, most of the job is to validate the key names and their values, -// then the values are copied into subSchema struct -// -func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema) error { - - if currentSchema.draft == nil { - if currentSchema.parent == nil { - return errors.New("Draft not set") - } - currentSchema.draft = currentSchema.parent.draft - } - - // As of draft 6 "true" is equivalent to an empty schema "{}" and false equals "{"not":{}}" - if *currentSchema.draft >= Draft6 && isKind(documentNode, reflect.Bool) { - b := documentNode.(bool) - currentSchema.pass = &b - return nil - } - - if !isKind(documentNode, reflect.Map) { - return errors.New(formatErrorDescription( - Locale.ParseError(), - ErrorDetails{ - "expected": STRING_SCHEMA, - }, - )) - } - - m := documentNode.(map[string]interface{}) - - if currentSchema.parent == nil { - currentSchema.ref = &d.documentReference - currentSchema.id = &d.documentReference - } - - if currentSchema.id == nil && currentSchema.parent != nil { - currentSchema.id = currentSchema.parent.id - } - - // In draft 6 the id keyword was renamed to $id - // Hybrid mode uses the old id by default - var keyID string - - switch *currentSchema.draft { - case Draft4: - keyID = KEY_ID - case Hybrid: - keyID = KEY_ID_NEW - if existsMapKey(m, KEY_ID) { - keyID = KEY_ID - } - default: - keyID = KEY_ID_NEW - } - if existsMapKey(m, keyID) && !isKind(m[keyID], reflect.String) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING, - "given": keyID, - }, - )) - } - if k, ok := m[keyID].(string); ok { - jsonReference, err := gojsonreference.NewJsonReference(k) - if err != nil { - return err - } - if currentSchema == d.rootSchema { - currentSchema.id = &jsonReference - } else { - ref, err := currentSchema.parent.id.Inherits(jsonReference) - if err != nil { - return err - } - currentSchema.id = ref - } - } - - // definitions - if existsMapKey(m, KEY_DEFINITIONS) { - if isKind(m[KEY_DEFINITIONS], reflect.Map, reflect.Bool) { - for _, dv := range m[KEY_DEFINITIONS].(map[string]interface{}) { - if isKind(dv, reflect.Map, reflect.Bool) { - - newSchema := &subSchema{property: KEY_DEFINITIONS, parent: currentSchema} - - err := d.parseSchema(dv, newSchema) - - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_ARRAY_OF_SCHEMAS, - "given": KEY_DEFINITIONS, - }, - )) - } - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_ARRAY_OF_SCHEMAS, - "given": KEY_DEFINITIONS, - }, - )) - } - - } - - // title - if existsMapKey(m, KEY_TITLE) && !isKind(m[KEY_TITLE], reflect.String) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING, - "given": KEY_TITLE, - }, - )) - } - if k, ok := m[KEY_TITLE].(string); ok { - currentSchema.title = &k - } - - // description - if existsMapKey(m, KEY_DESCRIPTION) && !isKind(m[KEY_DESCRIPTION], reflect.String) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING, - "given": KEY_DESCRIPTION, - }, - )) - } - if k, ok := m[KEY_DESCRIPTION].(string); ok { - currentSchema.description = &k - } - - // $ref - if existsMapKey(m, KEY_REF) && !isKind(m[KEY_REF], reflect.String) { - return errors.New(formatErrorDescription( - 
Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING, - "given": KEY_REF, - }, - )) - } - - if k, ok := m[KEY_REF].(string); ok { - - jsonReference, err := gojsonreference.NewJsonReference(k) - if err != nil { - return err - } - - currentSchema.ref = &jsonReference - - if sch, ok := d.referencePool.Get(currentSchema.ref.String()); ok { - currentSchema.refSchema = sch - } else { - err := d.parseReference(documentNode, currentSchema) - - if err != nil { - return err - } - - return nil - } - } - - // type - if existsMapKey(m, KEY_TYPE) { - if isKind(m[KEY_TYPE], reflect.String) { - if k, ok := m[KEY_TYPE].(string); ok { - err := currentSchema.types.Add(k) - if err != nil { - return err - } - } - } else { - if isKind(m[KEY_TYPE], reflect.Slice) { - arrayOfTypes := m[KEY_TYPE].([]interface{}) - for _, typeInArray := range arrayOfTypes { - if reflect.ValueOf(typeInArray).Kind() != reflect.String { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS, - "given": KEY_TYPE, - }, - )) - } - if err := currentSchema.types.Add(typeInArray.(string)); err != nil { - return err - } - } - - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS, - "given": KEY_TYPE, - }, - )) - } - } - } - - // properties - if existsMapKey(m, KEY_PROPERTIES) { - err := d.parseProperties(m[KEY_PROPERTIES], currentSchema) - if err != nil { - return err - } - } - - // additionalProperties - if existsMapKey(m, KEY_ADDITIONAL_PROPERTIES) { - if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Bool) { - currentSchema.additionalProperties = m[KEY_ADDITIONAL_PROPERTIES].(bool) - } else if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Map) { - newSchema := &subSchema{property: KEY_ADDITIONAL_PROPERTIES, parent: currentSchema, ref: currentSchema.ref} - currentSchema.additionalProperties = newSchema - err := d.parseSchema(m[KEY_ADDITIONAL_PROPERTIES], newSchema) - if err != nil { - return errors.New(err.Error()) - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA, - "given": KEY_ADDITIONAL_PROPERTIES, - }, - )) - } - } - - // patternProperties - if existsMapKey(m, KEY_PATTERN_PROPERTIES) { - if isKind(m[KEY_PATTERN_PROPERTIES], reflect.Map) { - patternPropertiesMap := m[KEY_PATTERN_PROPERTIES].(map[string]interface{}) - if len(patternPropertiesMap) > 0 { - currentSchema.patternProperties = make(map[string]*subSchema) - for k, v := range patternPropertiesMap { - _, err := regexp.MatchString(k, "") - if err != nil { - return errors.New(formatErrorDescription( - Locale.RegexPattern(), - ErrorDetails{"pattern": k}, - )) - } - newSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref} - err = d.parseSchema(v, newSchema) - if err != nil { - return errors.New(err.Error()) - } - currentSchema.patternProperties[k] = newSchema - } - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_SCHEMA, - "given": KEY_PATTERN_PROPERTIES, - }, - )) - } - } - - // propertyNames - if existsMapKey(m, KEY_PROPERTY_NAMES) && *currentSchema.draft >= Draft6 { - if isKind(m[KEY_PROPERTY_NAMES], reflect.Map, reflect.Bool) { - newSchema := &subSchema{property: KEY_PROPERTY_NAMES, parent: currentSchema, ref: currentSchema.ref} - currentSchema.propertyNames = newSchema - err := 
d.parseSchema(m[KEY_PROPERTY_NAMES], newSchema) - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_SCHEMA, - "given": KEY_PATTERN_PROPERTIES, - }, - )) - } - } - - // dependencies - if existsMapKey(m, KEY_DEPENDENCIES) { - err := d.parseDependencies(m[KEY_DEPENDENCIES], currentSchema) - if err != nil { - return err - } - } - - // items - if existsMapKey(m, KEY_ITEMS) { - if isKind(m[KEY_ITEMS], reflect.Slice) { - for _, itemElement := range m[KEY_ITEMS].([]interface{}) { - if isKind(itemElement, reflect.Map, reflect.Bool) { - newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS} - newSchema.ref = currentSchema.ref - currentSchema.itemsChildren = append(currentSchema.itemsChildren, newSchema) - err := d.parseSchema(itemElement, newSchema) - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS, - "given": KEY_ITEMS, - }, - )) - } - currentSchema.itemsChildrenIsSingleSchema = false - } - } else if isKind(m[KEY_ITEMS], reflect.Map, reflect.Bool) { - newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS} - newSchema.ref = currentSchema.ref - currentSchema.itemsChildren = append(currentSchema.itemsChildren, newSchema) - err := d.parseSchema(m[KEY_ITEMS], newSchema) - if err != nil { - return err - } - currentSchema.itemsChildrenIsSingleSchema = true - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS, - "given": KEY_ITEMS, - }, - )) - } - } - - // additionalItems - if existsMapKey(m, KEY_ADDITIONAL_ITEMS) { - if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Bool) { - currentSchema.additionalItems = m[KEY_ADDITIONAL_ITEMS].(bool) - } else if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Map) { - newSchema := &subSchema{property: KEY_ADDITIONAL_ITEMS, parent: currentSchema, ref: currentSchema.ref} - currentSchema.additionalItems = newSchema - err := d.parseSchema(m[KEY_ADDITIONAL_ITEMS], newSchema) - if err != nil { - return errors.New(err.Error()) - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA, - "given": KEY_ADDITIONAL_ITEMS, - }, - )) - } - } - - // validation : number / integer - - if existsMapKey(m, KEY_MULTIPLE_OF) { - multipleOfValue := mustBeNumber(m[KEY_MULTIPLE_OF]) - if multipleOfValue == nil { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_NUMBER, - "given": KEY_MULTIPLE_OF, - }, - )) - } - if multipleOfValue.Cmp(big.NewRat(0, 1)) <= 0 { - return errors.New(formatErrorDescription( - Locale.GreaterThanZero(), - ErrorDetails{"number": KEY_MULTIPLE_OF}, - )) - } - currentSchema.multipleOf = multipleOfValue - } - - if existsMapKey(m, KEY_MINIMUM) { - minimumValue := mustBeNumber(m[KEY_MINIMUM]) - if minimumValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_MINIMUM, "y": STRING_NUMBER}, - )) - } - currentSchema.minimum = minimumValue - } - - if existsMapKey(m, KEY_EXCLUSIVE_MINIMUM) { - switch *currentSchema.draft { - case Draft4: - if !isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_BOOLEAN, - "given": KEY_EXCLUSIVE_MINIMUM, - }, - 
)) - } - if currentSchema.minimum == nil { - return errors.New(formatErrorDescription( - Locale.CannotBeUsedWithout(), - ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM}, - )) - } - if m[KEY_EXCLUSIVE_MINIMUM].(bool) { - currentSchema.exclusiveMinimum = currentSchema.minimum - currentSchema.minimum = nil - } - case Hybrid: - if isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) { - if currentSchema.minimum == nil { - return errors.New(formatErrorDescription( - Locale.CannotBeUsedWithout(), - ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM}, - )) - } - if m[KEY_EXCLUSIVE_MINIMUM].(bool) { - currentSchema.exclusiveMinimum = currentSchema.minimum - currentSchema.minimum = nil - } - } else if isJSONNumber(m[KEY_EXCLUSIVE_MINIMUM]) { - currentSchema.exclusiveMinimum = mustBeNumber(m[KEY_EXCLUSIVE_MINIMUM]) - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_BOOLEAN + "/" + TYPE_NUMBER, - "given": KEY_EXCLUSIVE_MINIMUM, - }, - )) - } - default: - if isJSONNumber(m[KEY_EXCLUSIVE_MINIMUM]) { - currentSchema.exclusiveMinimum = mustBeNumber(m[KEY_EXCLUSIVE_MINIMUM]) - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_NUMBER, - "given": KEY_EXCLUSIVE_MINIMUM, - }, - )) - } - } - } - - if existsMapKey(m, KEY_MAXIMUM) { - maximumValue := mustBeNumber(m[KEY_MAXIMUM]) - if maximumValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_MAXIMUM, "y": STRING_NUMBER}, - )) - } - currentSchema.maximum = maximumValue - } - - if existsMapKey(m, KEY_EXCLUSIVE_MAXIMUM) { - switch *currentSchema.draft { - case Draft4: - if !isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_BOOLEAN, - "given": KEY_EXCLUSIVE_MAXIMUM, - }, - )) - } - if currentSchema.maximum == nil { - return errors.New(formatErrorDescription( - Locale.CannotBeUsedWithout(), - ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM}, - )) - } - if m[KEY_EXCLUSIVE_MAXIMUM].(bool) { - currentSchema.exclusiveMaximum = currentSchema.maximum - currentSchema.maximum = nil - } - case Hybrid: - if isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) { - if currentSchema.maximum == nil { - return errors.New(formatErrorDescription( - Locale.CannotBeUsedWithout(), - ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM}, - )) - } - if m[KEY_EXCLUSIVE_MAXIMUM].(bool) { - currentSchema.exclusiveMaximum = currentSchema.maximum - currentSchema.maximum = nil - } - } else if isJSONNumber(m[KEY_EXCLUSIVE_MAXIMUM]) { - currentSchema.exclusiveMaximum = mustBeNumber(m[KEY_EXCLUSIVE_MAXIMUM]) - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_BOOLEAN + "/" + TYPE_NUMBER, - "given": KEY_EXCLUSIVE_MAXIMUM, - }, - )) - } - default: - if isJSONNumber(m[KEY_EXCLUSIVE_MAXIMUM]) { - currentSchema.exclusiveMaximum = mustBeNumber(m[KEY_EXCLUSIVE_MAXIMUM]) - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_NUMBER, - "given": KEY_EXCLUSIVE_MAXIMUM, - }, - )) - } - } - } - - // validation : string - - if existsMapKey(m, KEY_MIN_LENGTH) { - minLengthIntegerValue := mustBeInteger(m[KEY_MIN_LENGTH]) - if minLengthIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MIN_LENGTH, "y": TYPE_INTEGER}, - )) - } - if 
*minLengthIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MIN_LENGTH}, - )) - } - currentSchema.minLength = minLengthIntegerValue - } - - if existsMapKey(m, KEY_MAX_LENGTH) { - maxLengthIntegerValue := mustBeInteger(m[KEY_MAX_LENGTH]) - if maxLengthIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MAX_LENGTH, "y": TYPE_INTEGER}, - )) - } - if *maxLengthIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MAX_LENGTH}, - )) - } - currentSchema.maxLength = maxLengthIntegerValue - } - - if currentSchema.minLength != nil && currentSchema.maxLength != nil { - if *currentSchema.minLength > *currentSchema.maxLength { - return errors.New(formatErrorDescription( - Locale.CannotBeGT(), - ErrorDetails{"x": KEY_MIN_LENGTH, "y": KEY_MAX_LENGTH}, - )) - } - } - - if existsMapKey(m, KEY_PATTERN) { - if isKind(m[KEY_PATTERN], reflect.String) { - regexpObject, err := regexp.Compile(m[KEY_PATTERN].(string)) - if err != nil { - return errors.New(formatErrorDescription( - Locale.MustBeValidRegex(), - ErrorDetails{"key": KEY_PATTERN}, - )) - } - currentSchema.pattern = regexpObject - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_PATTERN, "y": TYPE_STRING}, - )) - } - } - - if existsMapKey(m, KEY_FORMAT) { - formatString, ok := m[KEY_FORMAT].(string) - if !ok { - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{"key": KEY_FORMAT, "type": TYPE_STRING}, - )) - } - currentSchema.format = formatString - } - - // validation : object - - if existsMapKey(m, KEY_MIN_PROPERTIES) { - minPropertiesIntegerValue := mustBeInteger(m[KEY_MIN_PROPERTIES]) - if minPropertiesIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MIN_PROPERTIES, "y": TYPE_INTEGER}, - )) - } - if *minPropertiesIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MIN_PROPERTIES}, - )) - } - currentSchema.minProperties = minPropertiesIntegerValue - } - - if existsMapKey(m, KEY_MAX_PROPERTIES) { - maxPropertiesIntegerValue := mustBeInteger(m[KEY_MAX_PROPERTIES]) - if maxPropertiesIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MAX_PROPERTIES, "y": TYPE_INTEGER}, - )) - } - if *maxPropertiesIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MAX_PROPERTIES}, - )) - } - currentSchema.maxProperties = maxPropertiesIntegerValue - } - - if currentSchema.minProperties != nil && currentSchema.maxProperties != nil { - if *currentSchema.minProperties > *currentSchema.maxProperties { - return errors.New(formatErrorDescription( - Locale.KeyCannotBeGreaterThan(), - ErrorDetails{"key": KEY_MIN_PROPERTIES, "y": KEY_MAX_PROPERTIES}, - )) - } - } - - if existsMapKey(m, KEY_REQUIRED) { - if isKind(m[KEY_REQUIRED], reflect.Slice) { - requiredValues := m[KEY_REQUIRED].([]interface{}) - for _, requiredValue := range requiredValues { - if isKind(requiredValue, reflect.String) { - if isStringInSlice(currentSchema.required, requiredValue.(string)) { - return errors.New(formatErrorDescription( - Locale.KeyItemsMustBeUnique(), - ErrorDetails{"key": KEY_REQUIRED}, - )) - } - currentSchema.required = append(currentSchema.required, requiredValue.(string)) - } else 
{ - return errors.New(formatErrorDescription( - Locale.KeyItemsMustBeOfType(), - ErrorDetails{"key": KEY_REQUIRED, "type": TYPE_STRING}, - )) - } - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_REQUIRED, "y": TYPE_ARRAY}, - )) - } - } - - // validation : array - - if existsMapKey(m, KEY_MIN_ITEMS) { - minItemsIntegerValue := mustBeInteger(m[KEY_MIN_ITEMS]) - if minItemsIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MIN_ITEMS, "y": TYPE_INTEGER}, - )) - } - if *minItemsIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MIN_ITEMS}, - )) - } - currentSchema.minItems = minItemsIntegerValue - } - - if existsMapKey(m, KEY_MAX_ITEMS) { - maxItemsIntegerValue := mustBeInteger(m[KEY_MAX_ITEMS]) - if maxItemsIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MAX_ITEMS, "y": TYPE_INTEGER}, - )) - } - if *maxItemsIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MAX_ITEMS}, - )) - } - currentSchema.maxItems = maxItemsIntegerValue - } - - if existsMapKey(m, KEY_UNIQUE_ITEMS) { - if isKind(m[KEY_UNIQUE_ITEMS], reflect.Bool) { - currentSchema.uniqueItems = m[KEY_UNIQUE_ITEMS].(bool) - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_UNIQUE_ITEMS, "y": TYPE_BOOLEAN}, - )) - } - } - - if existsMapKey(m, KEY_CONTAINS) && *currentSchema.draft >= Draft6 { - newSchema := &subSchema{property: KEY_CONTAINS, parent: currentSchema, ref: currentSchema.ref} - currentSchema.contains = newSchema - err := d.parseSchema(m[KEY_CONTAINS], newSchema) - if err != nil { - return err - } - } - - // validation : all - - if existsMapKey(m, KEY_CONST) && *currentSchema.draft >= Draft6 { - is, err := marshalWithoutNumber(m[KEY_CONST]) - if err != nil { - return err - } - currentSchema._const = is - } - - if existsMapKey(m, KEY_ENUM) { - if isKind(m[KEY_ENUM], reflect.Slice) { - for _, v := range m[KEY_ENUM].([]interface{}) { - is, err := marshalWithoutNumber(v) - if err != nil { - return err - } - if isStringInSlice(currentSchema.enum, *is) { - return errors.New(formatErrorDescription( - Locale.KeyItemsMustBeUnique(), - ErrorDetails{"key": KEY_ENUM}, - )) - } - currentSchema.enum = append(currentSchema.enum, *is) - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_ENUM, "y": TYPE_ARRAY}, - )) - } - } - - // validation : subSchema - - if existsMapKey(m, KEY_ONE_OF) { - if isKind(m[KEY_ONE_OF], reflect.Slice) { - for _, v := range m[KEY_ONE_OF].([]interface{}) { - newSchema := &subSchema{property: KEY_ONE_OF, parent: currentSchema, ref: currentSchema.ref} - currentSchema.oneOf = append(currentSchema.oneOf, newSchema) - err := d.parseSchema(v, newSchema) - if err != nil { - return err - } - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_ONE_OF, "y": TYPE_ARRAY}, - )) - } - } - - if existsMapKey(m, KEY_ANY_OF) { - if isKind(m[KEY_ANY_OF], reflect.Slice) { - for _, v := range m[KEY_ANY_OF].([]interface{}) { - newSchema := &subSchema{property: KEY_ANY_OF, parent: currentSchema, ref: currentSchema.ref} - currentSchema.anyOf = append(currentSchema.anyOf, newSchema) - err := d.parseSchema(v, newSchema) - if err != nil { - return err - } - } - } else { - return 
errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY}, - )) - } - } - - if existsMapKey(m, KEY_ALL_OF) { - if isKind(m[KEY_ALL_OF], reflect.Slice) { - for _, v := range m[KEY_ALL_OF].([]interface{}) { - newSchema := &subSchema{property: KEY_ALL_OF, parent: currentSchema, ref: currentSchema.ref} - currentSchema.allOf = append(currentSchema.allOf, newSchema) - err := d.parseSchema(v, newSchema) - if err != nil { - return err - } - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY}, - )) - } - } - - if existsMapKey(m, KEY_NOT) { - if isKind(m[KEY_NOT], reflect.Map, reflect.Bool) { - newSchema := &subSchema{property: KEY_NOT, parent: currentSchema, ref: currentSchema.ref} - currentSchema.not = newSchema - err := d.parseSchema(m[KEY_NOT], newSchema) - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_NOT, "y": TYPE_OBJECT}, - )) - } - } - - if *currentSchema.draft >= Draft7 { - if existsMapKey(m, KEY_IF) { - if isKind(m[KEY_IF], reflect.Map, reflect.Bool) { - newSchema := &subSchema{property: KEY_IF, parent: currentSchema, ref: currentSchema.ref} - currentSchema._if = newSchema - err := d.parseSchema(m[KEY_IF], newSchema) - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_IF, "y": TYPE_OBJECT}, - )) - } - } - - if existsMapKey(m, KEY_THEN) { - if isKind(m[KEY_THEN], reflect.Map, reflect.Bool) { - newSchema := &subSchema{property: KEY_THEN, parent: currentSchema, ref: currentSchema.ref} - currentSchema._then = newSchema - err := d.parseSchema(m[KEY_THEN], newSchema) - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_THEN, "y": TYPE_OBJECT}, - )) - } - } - - if existsMapKey(m, KEY_ELSE) { - if isKind(m[KEY_ELSE], reflect.Map, reflect.Bool) { - newSchema := &subSchema{property: KEY_ELSE, parent: currentSchema, ref: currentSchema.ref} - currentSchema._else = newSchema - err := d.parseSchema(m[KEY_ELSE], newSchema) - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_ELSE, "y": TYPE_OBJECT}, - )) - } - } - } - - return nil -} - -func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSchema) error { - var ( - refdDocumentNode interface{} - dsp *schemaPoolDocument - err error - ) - - newSchema := &subSchema{property: KEY_REF, parent: currentSchema, ref: currentSchema.ref} - - d.referencePool.Add(currentSchema.ref.String(), newSchema) - - dsp, err = d.pool.GetDocument(*currentSchema.ref) - if err != nil { - return err - } - newSchema.id = currentSchema.ref - - refdDocumentNode = dsp.Document - newSchema.draft = dsp.Draft - - if err != nil { - return err - } - - if !isKind(refdDocumentNode, reflect.Map, reflect.Bool) { - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{"key": STRING_SCHEMA, "type": TYPE_OBJECT}, - )) - } - - err = d.parseSchema(refdDocumentNode, newSchema) - if err != nil { - return err - } - - currentSchema.refSchema = newSchema - - return nil - -} - -func (d *Schema) parseProperties(documentNode interface{}, currentSchema *subSchema) error { - - if !isKind(documentNode, reflect.Map) { - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - 
ErrorDetails{"key": STRING_PROPERTIES, "type": TYPE_OBJECT}, - )) - } - - m := documentNode.(map[string]interface{}) - for k := range m { - schemaProperty := k - newSchema := &subSchema{property: schemaProperty, parent: currentSchema, ref: currentSchema.ref} - currentSchema.propertiesChildren = append(currentSchema.propertiesChildren, newSchema) - err := d.parseSchema(m[k], newSchema) - if err != nil { - return err - } - } - - return nil -} - -func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *subSchema) error { - - if !isKind(documentNode, reflect.Map) { - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{"key": KEY_DEPENDENCIES, "type": TYPE_OBJECT}, - )) - } - - m := documentNode.(map[string]interface{}) - currentSchema.dependencies = make(map[string]interface{}) - - for k := range m { - switch reflect.ValueOf(m[k]).Kind() { - - case reflect.Slice: - values := m[k].([]interface{}) - var valuesToRegister []string - - for _, value := range values { - if !isKind(value, reflect.String) { - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{ - "key": STRING_DEPENDENCY, - "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS, - }, - )) - } - valuesToRegister = append(valuesToRegister, value.(string)) - currentSchema.dependencies[k] = valuesToRegister - } - - case reflect.Map, reflect.Bool: - depSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref} - err := d.parseSchema(m[k], depSchema) - if err != nil { - return err - } - currentSchema.dependencies[k] = depSchema - - default: - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{ - "key": STRING_DEPENDENCY, - "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS, - }, - )) - } - - } - - return nil -} diff --git a/src/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go b/src/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go deleted file mode 100644 index 20db0c1f9..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2018 johandorland ( https://github.com/johandorland ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package gojsonschema - -import ( - "bytes" - "errors" - - "github.com/xeipuuv/gojsonreference" -) - -// SchemaLoader is used to load schemas -type SchemaLoader struct { - pool *schemaPool - AutoDetect bool - Validate bool - Draft Draft -} - -// NewSchemaLoader creates a new NewSchemaLoader -func NewSchemaLoader() *SchemaLoader { - - ps := &SchemaLoader{ - pool: &schemaPool{ - schemaPoolDocuments: make(map[string]*schemaPoolDocument), - }, - AutoDetect: true, - Validate: false, - Draft: Hybrid, - } - ps.pool.autoDetect = &ps.AutoDetect - - return ps -} - -func (sl *SchemaLoader) validateMetaschema(documentNode interface{}) error { - - var ( - schema string - err error - ) - if sl.AutoDetect { - schema, _, err = parseSchemaURL(documentNode) - if err != nil { - return err - } - } - - // If no explicit "$schema" is used, use the default metaschema associated with the draft used - if schema == "" { - if sl.Draft == Hybrid { - return nil - } - schema = drafts.GetSchemaURL(sl.Draft) - } - - //Disable validation when loading the metaschema to prevent an infinite recursive loop - sl.Validate = false - - metaSchema, err := sl.Compile(NewReferenceLoader(schema)) - - if err != nil { - return err - } - - sl.Validate = true - - result := metaSchema.validateDocument(documentNode) - - if !result.Valid() { - var res bytes.Buffer - for _, err := range result.Errors() { - res.WriteString(err.String()) - res.WriteString("\n") - } - return errors.New(res.String()) - } - - return nil -} - -// AddSchemas adds an arbritrary amount of schemas to the schema cache. As this function does not require -// an explicit URL, every schema should contain an $id, so that it can be referenced by the main schema -func (sl *SchemaLoader) AddSchemas(loaders ...JSONLoader) error { - emptyRef, _ := gojsonreference.NewJsonReference("") - - for _, loader := range loaders { - doc, err := loader.LoadJSON() - - if err != nil { - return err - } - - if sl.Validate { - if err := sl.validateMetaschema(doc); err != nil { - return err - } - } - - // Directly use the Recursive function, so that it get only added to the schema pool by $id - // and not by the ref of the document as it's empty - if err = sl.pool.parseReferences(doc, emptyRef, false); err != nil { - return err - } - } - - return nil -} - -//AddSchema adds a schema under the provided URL to the schema cache -func (sl *SchemaLoader) AddSchema(url string, loader JSONLoader) error { - - ref, err := gojsonreference.NewJsonReference(url) - - if err != nil { - return err - } - - doc, err := loader.LoadJSON() - - if err != nil { - return err - } - - if sl.Validate { - if err := sl.validateMetaschema(doc); err != nil { - return err - } - } - - return sl.pool.parseReferences(doc, ref, true) -} - -// Compile loads and compiles a schema -func (sl *SchemaLoader) Compile(rootSchema JSONLoader) (*Schema, error) { - - ref, err := rootSchema.JsonReference() - - if err != nil { - return nil, err - } - - d := Schema{} - d.pool = sl.pool - d.pool.jsonLoaderFactory = rootSchema.LoaderFactory() - d.documentReference = ref - d.referencePool = newSchemaReferencePool() - - var doc interface{} - if ref.String() != "" { - // Get document from schema pool - spd, err := d.pool.GetDocument(d.documentReference) - if err != nil { - return nil, err - } - doc = spd.Document - } else { - // Load JSON directly - doc, err = rootSchema.LoadJSON() - if err != nil { - return nil, err - } - // References need only be parsed if loading JSON directly - // as pool.GetDocument already does this for us if loading by 
reference - err = sl.pool.parseReferences(doc, ref, true) - if err != nil { - return nil, err - } - } - - if sl.Validate { - if err := sl.validateMetaschema(doc); err != nil { - return nil, err - } - } - - draft := sl.Draft - if sl.AutoDetect { - _, detectedDraft, err := parseSchemaURL(doc) - if err != nil { - return nil, err - } - if detectedDraft != nil { - draft = *detectedDraft - } - } - - err = d.parse(doc, draft) - if err != nil { - return nil, err - } - - return &d, nil -} diff --git a/src/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go b/src/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go deleted file mode 100644 index 35b1cc630..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Defines resources pooling. -// Eases referencing and avoids downloading the same resource twice. -// -// created 26-02-2013 - -package gojsonschema - -import ( - "errors" - "fmt" - "reflect" - - "github.com/xeipuuv/gojsonreference" -) - -type schemaPoolDocument struct { - Document interface{} - Draft *Draft -} - -type schemaPool struct { - schemaPoolDocuments map[string]*schemaPoolDocument - jsonLoaderFactory JSONLoaderFactory - autoDetect *bool -} - -func (p *schemaPool) parseReferences(document interface{}, ref gojsonreference.JsonReference, pooled bool) error { - - var ( - draft *Draft - err error - reference = ref.String() - ) - // Only the root document should be added to the schema pool if pooled is true - if _, ok := p.schemaPoolDocuments[reference]; pooled && ok { - return fmt.Errorf("Reference already exists: \"%s\"", reference) - } - - if *p.autoDetect { - _, draft, err = parseSchemaURL(document) - if err != nil { - return err - } - } - - err = p.parseReferencesRecursive(document, ref, draft) - - if pooled { - p.schemaPoolDocuments[reference] = &schemaPoolDocument{Document: document, Draft: draft} - } - - return err -} - -func (p *schemaPool) parseReferencesRecursive(document interface{}, ref gojsonreference.JsonReference, draft *Draft) error { - // parseReferencesRecursive parses a JSON document and resolves all $id and $ref references. - // For $ref references it takes into account the $id scope it is in and replaces - // the reference by the absolute resolved reference - - // When encountering errors it fails silently. Error handling is done when the schema - // is syntactically parsed and any error encountered here should also come up there. 
- switch m := document.(type) { - case []interface{}: - for _, v := range m { - p.parseReferencesRecursive(v, ref, draft) - } - case map[string]interface{}: - localRef := &ref - - keyID := KEY_ID_NEW - if existsMapKey(m, KEY_ID) { - keyID = KEY_ID - } - if existsMapKey(m, keyID) && isKind(m[keyID], reflect.String) { - jsonReference, err := gojsonreference.NewJsonReference(m[keyID].(string)) - if err == nil { - localRef, err = ref.Inherits(jsonReference) - if err == nil { - if _, ok := p.schemaPoolDocuments[localRef.String()]; ok { - return fmt.Errorf("Reference already exists: \"%s\"", localRef.String()) - } - p.schemaPoolDocuments[localRef.String()] = &schemaPoolDocument{Document: document, Draft: draft} - } - } - } - - if existsMapKey(m, KEY_REF) && isKind(m[KEY_REF], reflect.String) { - jsonReference, err := gojsonreference.NewJsonReference(m[KEY_REF].(string)) - if err == nil { - absoluteRef, err := localRef.Inherits(jsonReference) - if err == nil { - m[KEY_REF] = absoluteRef.String() - } - } - } - - for k, v := range m { - // const and enums should be interpreted literally, so ignore them - if k == KEY_CONST || k == KEY_ENUM { - continue - } - // Something like a property or a dependency is not a valid schema, as it might describe properties named "$ref", "$id" or "const", etc - // Therefore don't treat it like a schema. - if k == KEY_PROPERTIES || k == KEY_DEPENDENCIES || k == KEY_PATTERN_PROPERTIES { - if child, ok := v.(map[string]interface{}); ok { - for _, v := range child { - p.parseReferencesRecursive(v, *localRef, draft) - } - } - } else { - p.parseReferencesRecursive(v, *localRef, draft) - } - } - } - return nil -} - -func (p *schemaPool) GetDocument(reference gojsonreference.JsonReference) (*schemaPoolDocument, error) { - - var ( - spd *schemaPoolDocument - draft *Draft - ok bool - err error - ) - - if internalLogEnabled { - internalLog("Get Document ( %s )", reference.String()) - } - - // Create a deep copy, so we can remove the fragment part later on without altering the original - refToURL, _ := gojsonreference.NewJsonReference(reference.String()) - - // First check if the given fragment is a location independent identifier - // http://json-schema.org/latest/json-schema-core.html#rfc.section.8.2.3 - - if spd, ok = p.schemaPoolDocuments[refToURL.String()]; ok { - if internalLogEnabled { - internalLog(" From pool") - } - return spd, nil - } - - // If the given reference is not a location independent identifier, - // strip the fragment and look for a document with it's base URI - - refToURL.GetUrl().Fragment = "" - - if cachedSpd, ok := p.schemaPoolDocuments[refToURL.String()]; ok { - document, _, err := reference.GetPointer().Get(cachedSpd.Document) - - if err != nil { - return nil, err - } - - if internalLogEnabled { - internalLog(" From pool") - } - - spd = &schemaPoolDocument{Document: document, Draft: cachedSpd.Draft} - p.schemaPoolDocuments[reference.String()] = spd - - return spd, nil - } - - // It is not possible to load anything remotely that is not canonical... 
- if !reference.IsCanonical() { - return nil, errors.New(formatErrorDescription( - Locale.ReferenceMustBeCanonical(), - ErrorDetails{"reference": reference.String()}, - )) - } - - jsonReferenceLoader := p.jsonLoaderFactory.New(reference.String()) - document, err := jsonReferenceLoader.LoadJSON() - - if err != nil { - return nil, err - } - - // add the whole document to the pool for potential re-use - p.parseReferences(document, refToURL, true) - - _, draft, _ = parseSchemaURL(document) - - // resolve the potential fragment and also cache it - document, _, err = reference.GetPointer().Get(document) - - if err != nil { - return nil, err - } - - return &schemaPoolDocument{Document: document, Draft: draft}, nil -} diff --git a/src/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go b/src/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go deleted file mode 100644 index 6e5e1b5cd..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Pool of referenced schemas. -// -// created 25-06-2013 - -package gojsonschema - -import ( - "fmt" -) - -type schemaReferencePool struct { - documents map[string]*subSchema -} - -func newSchemaReferencePool() *schemaReferencePool { - - p := &schemaReferencePool{} - p.documents = make(map[string]*subSchema) - - return p -} - -func (p *schemaReferencePool) Get(ref string) (r *subSchema, o bool) { - - if internalLogEnabled { - internalLog(fmt.Sprintf("Schema Reference ( %s )", ref)) - } - - if sch, ok := p.documents[ref]; ok { - if internalLogEnabled { - internalLog(fmt.Sprintf(" From pool")) - } - return sch, true - } - - return nil, false -} - -func (p *schemaReferencePool) Add(ref string, sch *subSchema) { - - if internalLogEnabled { - internalLog(fmt.Sprintf("Add Schema Reference %s to pool", ref)) - } - if _, ok := p.documents[ref]; !ok { - p.documents[ref] = sch - } -} diff --git a/src/vendor/github.com/xeipuuv/gojsonschema/schemaType.go b/src/vendor/github.com/xeipuuv/gojsonschema/schemaType.go deleted file mode 100644 index 36b447a29..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonschema/schemaType.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Helper structure to handle schema types, and the combination of them. -// -// created 28-02-2013 - -package gojsonschema - -import ( - "errors" - "fmt" - "strings" -) - -type jsonSchemaType struct { - types []string -} - -// Is the schema typed ? that is containing at least one type -// When not typed, the schema does not need any type validation -func (t *jsonSchemaType) IsTyped() bool { - return len(t.types) > 0 -} - -func (t *jsonSchemaType) Add(etype string) error { - - if !isStringInSlice(JSON_TYPES, etype) { - return errors.New(formatErrorDescription(Locale.NotAValidType(), ErrorDetails{"given": "/" + etype + "/", "expected": JSON_TYPES})) - } - - if t.Contains(etype) { - return errors.New(formatErrorDescription(Locale.Duplicated(), ErrorDetails{"type": etype})) - } - - t.types = append(t.types, etype) - - return nil -} - -func (t *jsonSchemaType) Contains(etype string) bool { - - for _, v := range t.types { - if v == etype { - return true - } - } - - return false -} - -func (t *jsonSchemaType) String() string { - - if len(t.types) == 0 { - return STRING_UNDEFINED // should never happen - } - - // Displayed as a list [type1,type2,...] - if len(t.types) > 1 { - return fmt.Sprintf("[%s]", strings.Join(t.types, ",")) - } - - // Only one type: name only - return t.types[0] -} diff --git a/src/vendor/github.com/xeipuuv/gojsonschema/subSchema.go b/src/vendor/github.com/xeipuuv/gojsonschema/subSchema.go deleted file mode 100644 index ec779812c..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonschema/subSchema.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Defines the structure of a sub-subSchema. -// A sub-subSchema can contain other sub-schemas. 
-// -// created 27-02-2013 - -package gojsonschema - -import ( - "github.com/xeipuuv/gojsonreference" - "math/big" - "regexp" -) - -// Constants -const ( - KEY_SCHEMA = "$schema" - KEY_ID = "id" - KEY_ID_NEW = "$id" - KEY_REF = "$ref" - KEY_TITLE = "title" - KEY_DESCRIPTION = "description" - KEY_TYPE = "type" - KEY_ITEMS = "items" - KEY_ADDITIONAL_ITEMS = "additionalItems" - KEY_PROPERTIES = "properties" - KEY_PATTERN_PROPERTIES = "patternProperties" - KEY_ADDITIONAL_PROPERTIES = "additionalProperties" - KEY_PROPERTY_NAMES = "propertyNames" - KEY_DEFINITIONS = "definitions" - KEY_MULTIPLE_OF = "multipleOf" - KEY_MINIMUM = "minimum" - KEY_MAXIMUM = "maximum" - KEY_EXCLUSIVE_MINIMUM = "exclusiveMinimum" - KEY_EXCLUSIVE_MAXIMUM = "exclusiveMaximum" - KEY_MIN_LENGTH = "minLength" - KEY_MAX_LENGTH = "maxLength" - KEY_PATTERN = "pattern" - KEY_FORMAT = "format" - KEY_MIN_PROPERTIES = "minProperties" - KEY_MAX_PROPERTIES = "maxProperties" - KEY_DEPENDENCIES = "dependencies" - KEY_REQUIRED = "required" - KEY_MIN_ITEMS = "minItems" - KEY_MAX_ITEMS = "maxItems" - KEY_UNIQUE_ITEMS = "uniqueItems" - KEY_CONTAINS = "contains" - KEY_CONST = "const" - KEY_ENUM = "enum" - KEY_ONE_OF = "oneOf" - KEY_ANY_OF = "anyOf" - KEY_ALL_OF = "allOf" - KEY_NOT = "not" - KEY_IF = "if" - KEY_THEN = "then" - KEY_ELSE = "else" -) - -type subSchema struct { - draft *Draft - - // basic subSchema meta properties - id *gojsonreference.JsonReference - title *string - description *string - - property string - - // Quick pass/fail for boolean schemas - pass *bool - - // Types associated with the subSchema - types jsonSchemaType - - // Reference url - ref *gojsonreference.JsonReference - // Schema referenced - refSchema *subSchema - - // hierarchy - parent *subSchema - itemsChildren []*subSchema - itemsChildrenIsSingleSchema bool - propertiesChildren []*subSchema - - // validation : number / integer - multipleOf *big.Rat - maximum *big.Rat - exclusiveMaximum *big.Rat - minimum *big.Rat - exclusiveMinimum *big.Rat - - // validation : string - minLength *int - maxLength *int - pattern *regexp.Regexp - format string - - // validation : object - minProperties *int - maxProperties *int - required []string - - dependencies map[string]interface{} - additionalProperties interface{} - patternProperties map[string]*subSchema - propertyNames *subSchema - - // validation : array - minItems *int - maxItems *int - uniqueItems bool - contains *subSchema - - additionalItems interface{} - - // validation : all - _const *string //const is a golang keyword - enum []string - - // validation : subSchema - oneOf []*subSchema - anyOf []*subSchema - allOf []*subSchema - not *subSchema - _if *subSchema // if/else are golang keywords - _then *subSchema - _else *subSchema -} diff --git a/src/vendor/github.com/xeipuuv/gojsonschema/types.go b/src/vendor/github.com/xeipuuv/gojsonschema/types.go deleted file mode 100644 index 0e6fd5173..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonschema/types.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Contains const types for schema and JSON. -// -// created 28-02-2013 - -package gojsonschema - -// Type constants -const ( - TYPE_ARRAY = `array` - TYPE_BOOLEAN = `boolean` - TYPE_INTEGER = `integer` - TYPE_NUMBER = `number` - TYPE_NULL = `null` - TYPE_OBJECT = `object` - TYPE_STRING = `string` -) - -// JSON_TYPES hosts the list of type that are supported in JSON -var JSON_TYPES []string - -// SCHEMA_TYPES hosts the list of type that are supported in schemas -var SCHEMA_TYPES []string - -func init() { - JSON_TYPES = []string{ - TYPE_ARRAY, - TYPE_BOOLEAN, - TYPE_INTEGER, - TYPE_NUMBER, - TYPE_NULL, - TYPE_OBJECT, - TYPE_STRING} - - SCHEMA_TYPES = []string{ - TYPE_ARRAY, - TYPE_BOOLEAN, - TYPE_INTEGER, - TYPE_NUMBER, - TYPE_OBJECT, - TYPE_STRING} -} diff --git a/src/vendor/github.com/xeipuuv/gojsonschema/utils.go b/src/vendor/github.com/xeipuuv/gojsonschema/utils.go deleted file mode 100644 index a17d22e3b..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonschema/utils.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Various utility functions. -// -// created 26-02-2013 - -package gojsonschema - -import ( - "encoding/json" - "math/big" - "reflect" -) - -func isKind(what interface{}, kinds ...reflect.Kind) bool { - target := what - if isJSONNumber(what) { - // JSON Numbers are strings! - target = *mustBeNumber(what) - } - targetKind := reflect.ValueOf(target).Kind() - for _, kind := range kinds { - if targetKind == kind { - return true - } - } - return false -} - -func existsMapKey(m map[string]interface{}, k string) bool { - _, ok := m[k] - return ok -} - -func isStringInSlice(s []string, what string) bool { - for i := range s { - if s[i] == what { - return true - } - } - return false -} - -// indexStringInSlice returns the index of the first instance of 'what' in s or -1 if it is not found in s. 
-func indexStringInSlice(s []string, what string) int { - for i := range s { - if s[i] == what { - return i - } - } - return -1 -} - -func marshalToJSONString(value interface{}) (*string, error) { - - mBytes, err := json.Marshal(value) - if err != nil { - return nil, err - } - - sBytes := string(mBytes) - return &sBytes, nil -} - -func marshalWithoutNumber(value interface{}) (*string, error) { - - // The JSON is decoded using https://golang.org/pkg/encoding/json/#Decoder.UseNumber - // This means the numbers are internally still represented as strings and therefore 1.00 is unequal to 1 - // One way to eliminate these differences is to decode and encode the JSON one more time without Decoder.UseNumber - // so that these differences in representation are removed - - jsonString, err := marshalToJSONString(value) - if err != nil { - return nil, err - } - - var document interface{} - - err = json.Unmarshal([]byte(*jsonString), &document) - if err != nil { - return nil, err - } - - return marshalToJSONString(document) -} - -func isJSONNumber(what interface{}) bool { - - switch what.(type) { - - case json.Number: - return true - } - - return false -} - -func checkJSONInteger(what interface{}) (isInt bool) { - - jsonNumber := what.(json.Number) - - bigFloat, isValidNumber := new(big.Rat).SetString(string(jsonNumber)) - - return isValidNumber && bigFloat.IsInt() - -} - -// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER -const ( - maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 - minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 -) - -func mustBeInteger(what interface{}) *int { - - if isJSONNumber(what) { - - number := what.(json.Number) - - isInt := checkJSONInteger(number) - - if isInt { - - int64Value, err := number.Int64() - if err != nil { - return nil - } - - int32Value := int(int64Value) - return &int32Value - } - - } - - return nil -} - -func mustBeNumber(what interface{}) *big.Rat { - - if isJSONNumber(what) { - number := what.(json.Number) - float64Value, success := new(big.Rat).SetString(string(number)) - if success { - return float64Value - } - } - - return nil - -} - -func convertDocumentNode(val interface{}) interface{} { - - if lval, ok := val.([]interface{}); ok { - - res := []interface{}{} - for _, v := range lval { - res = append(res, convertDocumentNode(v)) - } - - return res - - } - - if mval, ok := val.(map[interface{}]interface{}); ok { - - res := map[string]interface{}{} - - for k, v := range mval { - res[k.(string)] = convertDocumentNode(v) - } - - return res - - } - - return val -} diff --git a/src/vendor/github.com/xeipuuv/gojsonschema/validation.go b/src/vendor/github.com/xeipuuv/gojsonschema/validation.go deleted file mode 100644 index 74091bca1..000000000 --- a/src/vendor/github.com/xeipuuv/gojsonschema/validation.go +++ /dev/null @@ -1,858 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Extends Schema and subSchema, implements the validation phase. -// -// created 28-02-2013 - -package gojsonschema - -import ( - "encoding/json" - "math/big" - "reflect" - "regexp" - "strconv" - "strings" - "unicode/utf8" -) - -// Validate loads and validates a JSON schema -func Validate(ls JSONLoader, ld JSONLoader) (*Result, error) { - // load schema - schema, err := NewSchema(ls) - if err != nil { - return nil, err - } - return schema.Validate(ld) -} - -// Validate loads and validates a JSON document -func (v *Schema) Validate(l JSONLoader) (*Result, error) { - root, err := l.LoadJSON() - if err != nil { - return nil, err - } - return v.validateDocument(root), nil -} - -func (v *Schema) validateDocument(root interface{}) *Result { - result := &Result{} - context := NewJsonContext(STRING_CONTEXT_ROOT, nil) - v.rootSchema.validateRecursive(v.rootSchema, root, result, context) - return result -} - -func (v *subSchema) subValidateWithContext(document interface{}, context *JsonContext) *Result { - result := &Result{} - v.validateRecursive(v, document, result, context) - return result -} - -// Walker function to validate the json recursively against the subSchema -func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *JsonContext) { - - if internalLogEnabled { - internalLog("validateRecursive %s", context.String()) - internalLog(" %v", currentNode) - } - - // Handle true/false schema as early as possible as all other fields will be nil - if currentSubSchema.pass != nil { - if !*currentSubSchema.pass { - result.addInternalError( - new(FalseError), - context, - currentNode, - ErrorDetails{}, - ) - } - return - } - - // Handle referenced schemas, returns directly when a $ref is found - if currentSubSchema.refSchema != nil { - v.validateRecursive(currentSubSchema.refSchema, currentNode, result, context) - return - } - - // Check for null value - if currentNode == nil { - if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_NULL) { - result.addInternalError( - new(InvalidTypeError), - context, - currentNode, - ErrorDetails{ - "expected": currentSubSchema.types.String(), - "given": TYPE_NULL, - }, - ) - return - } - - currentSubSchema.validateSchema(currentSubSchema, currentNode, result, context) - v.validateCommon(currentSubSchema, currentNode, result, context) - - } else { // Not a null value - - if isJSONNumber(currentNode) { - - value := currentNode.(json.Number) - - isInt := checkJSONInteger(value) - - validType := currentSubSchema.types.Contains(TYPE_NUMBER) || (isInt && currentSubSchema.types.Contains(TYPE_INTEGER)) - - if currentSubSchema.types.IsTyped() && !validType { - - givenType := TYPE_INTEGER - if !isInt { - givenType = TYPE_NUMBER - } - - result.addInternalError( - new(InvalidTypeError), - context, - currentNode, - ErrorDetails{ - "expected": currentSubSchema.types.String(), - "given": givenType, - }, - ) - return - } - - currentSubSchema.validateSchema(currentSubSchema, value, result, context) - v.validateNumber(currentSubSchema, value, result, context) - v.validateCommon(currentSubSchema, value, result, context) - v.validateString(currentSubSchema, value, result, context) - - } else { - - rValue := reflect.ValueOf(currentNode) - rKind := rValue.Kind() 
- - switch rKind { - - // Slice => JSON array - - case reflect.Slice: - - if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_ARRAY) { - result.addInternalError( - new(InvalidTypeError), - context, - currentNode, - ErrorDetails{ - "expected": currentSubSchema.types.String(), - "given": TYPE_ARRAY, - }, - ) - return - } - - castCurrentNode := currentNode.([]interface{}) - - currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context) - - v.validateArray(currentSubSchema, castCurrentNode, result, context) - v.validateCommon(currentSubSchema, castCurrentNode, result, context) - - // Map => JSON object - - case reflect.Map: - if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_OBJECT) { - result.addInternalError( - new(InvalidTypeError), - context, - currentNode, - ErrorDetails{ - "expected": currentSubSchema.types.String(), - "given": TYPE_OBJECT, - }, - ) - return - } - - castCurrentNode, ok := currentNode.(map[string]interface{}) - if !ok { - castCurrentNode = convertDocumentNode(currentNode).(map[string]interface{}) - } - - currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context) - - v.validateObject(currentSubSchema, castCurrentNode, result, context) - v.validateCommon(currentSubSchema, castCurrentNode, result, context) - - for _, pSchema := range currentSubSchema.propertiesChildren { - nextNode, ok := castCurrentNode[pSchema.property] - if ok { - subContext := NewJsonContext(pSchema.property, context) - v.validateRecursive(pSchema, nextNode, result, subContext) - } - } - - // Simple JSON values : string, number, boolean - - case reflect.Bool: - - if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_BOOLEAN) { - result.addInternalError( - new(InvalidTypeError), - context, - currentNode, - ErrorDetails{ - "expected": currentSubSchema.types.String(), - "given": TYPE_BOOLEAN, - }, - ) - return - } - - value := currentNode.(bool) - - currentSubSchema.validateSchema(currentSubSchema, value, result, context) - v.validateNumber(currentSubSchema, value, result, context) - v.validateCommon(currentSubSchema, value, result, context) - v.validateString(currentSubSchema, value, result, context) - - case reflect.String: - - if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_STRING) { - result.addInternalError( - new(InvalidTypeError), - context, - currentNode, - ErrorDetails{ - "expected": currentSubSchema.types.String(), - "given": TYPE_STRING, - }, - ) - return - } - - value := currentNode.(string) - - currentSubSchema.validateSchema(currentSubSchema, value, result, context) - v.validateNumber(currentSubSchema, value, result, context) - v.validateCommon(currentSubSchema, value, result, context) - v.validateString(currentSubSchema, value, result, context) - - } - - } - - } - - result.incrementScore() -} - -// Different kinds of validation there, subSchema / common / array / object / string... 
-func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *JsonContext) { - - if internalLogEnabled { - internalLog("validateSchema %s", context.String()) - internalLog(" %v", currentNode) - } - - if len(currentSubSchema.anyOf) > 0 { - - validatedAnyOf := false - var bestValidationResult *Result - - for _, anyOfSchema := range currentSubSchema.anyOf { - if !validatedAnyOf { - validationResult := anyOfSchema.subValidateWithContext(currentNode, context) - validatedAnyOf = validationResult.Valid() - - if !validatedAnyOf && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) { - bestValidationResult = validationResult - } - } - } - if !validatedAnyOf { - - result.addInternalError(new(NumberAnyOfError), context, currentNode, ErrorDetails{}) - - if bestValidationResult != nil { - // add error messages of closest matching subSchema as - // that's probably the one the user was trying to match - result.mergeErrors(bestValidationResult) - } - } - } - - if len(currentSubSchema.oneOf) > 0 { - - nbValidated := 0 - var bestValidationResult *Result - - for _, oneOfSchema := range currentSubSchema.oneOf { - validationResult := oneOfSchema.subValidateWithContext(currentNode, context) - if validationResult.Valid() { - nbValidated++ - } else if nbValidated == 0 && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) { - bestValidationResult = validationResult - } - } - - if nbValidated != 1 { - - result.addInternalError(new(NumberOneOfError), context, currentNode, ErrorDetails{}) - - if nbValidated == 0 { - // add error messages of closest matching subSchema as - // that's probably the one the user was trying to match - result.mergeErrors(bestValidationResult) - } - } - - } - - if len(currentSubSchema.allOf) > 0 { - nbValidated := 0 - - for _, allOfSchema := range currentSubSchema.allOf { - validationResult := allOfSchema.subValidateWithContext(currentNode, context) - if validationResult.Valid() { - nbValidated++ - } - result.mergeErrors(validationResult) - } - - if nbValidated != len(currentSubSchema.allOf) { - result.addInternalError(new(NumberAllOfError), context, currentNode, ErrorDetails{}) - } - } - - if currentSubSchema.not != nil { - validationResult := currentSubSchema.not.subValidateWithContext(currentNode, context) - if validationResult.Valid() { - result.addInternalError(new(NumberNotError), context, currentNode, ErrorDetails{}) - } - } - - if currentSubSchema.dependencies != nil && len(currentSubSchema.dependencies) > 0 { - if isKind(currentNode, reflect.Map) { - for elementKey := range currentNode.(map[string]interface{}) { - if dependency, ok := currentSubSchema.dependencies[elementKey]; ok { - switch dependency := dependency.(type) { - - case []string: - for _, dependOnKey := range dependency { - if _, dependencyResolved := currentNode.(map[string]interface{})[dependOnKey]; !dependencyResolved { - result.addInternalError( - new(MissingDependencyError), - context, - currentNode, - ErrorDetails{"dependency": dependOnKey}, - ) - } - } - - case *subSchema: - dependency.validateRecursive(dependency, currentNode, result, context) - } - } - } - } - } - - if currentSubSchema._if != nil { - validationResultIf := currentSubSchema._if.subValidateWithContext(currentNode, context) - if currentSubSchema._then != nil && validationResultIf.Valid() { - validationResultThen := currentSubSchema._then.subValidateWithContext(currentNode, context) - if !validationResultThen.Valid() { - 
result.addInternalError(new(ConditionThenError), context, currentNode, ErrorDetails{}) - result.mergeErrors(validationResultThen) - } - } - if currentSubSchema._else != nil && !validationResultIf.Valid() { - validationResultElse := currentSubSchema._else.subValidateWithContext(currentNode, context) - if !validationResultElse.Valid() { - result.addInternalError(new(ConditionElseError), context, currentNode, ErrorDetails{}) - result.mergeErrors(validationResultElse) - } - } - } - - result.incrementScore() -} - -func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) { - - if internalLogEnabled { - internalLog("validateCommon %s", context.String()) - internalLog(" %v", value) - } - - // const: - if currentSubSchema._const != nil { - vString, err := marshalWithoutNumber(value) - if err != nil { - result.addInternalError(new(InternalError), context, value, ErrorDetails{"error": err}) - } - if *vString != *currentSubSchema._const { - result.addInternalError(new(ConstError), - context, - value, - ErrorDetails{ - "allowed": *currentSubSchema._const, - }, - ) - } - } - - // enum: - if len(currentSubSchema.enum) > 0 { - vString, err := marshalWithoutNumber(value) - if err != nil { - result.addInternalError(new(InternalError), context, value, ErrorDetails{"error": err}) - } - if !isStringInSlice(currentSubSchema.enum, *vString) { - result.addInternalError( - new(EnumError), - context, - value, - ErrorDetails{ - "allowed": strings.Join(currentSubSchema.enum, ", "), - }, - ) - } - } - - result.incrementScore() -} - -func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface{}, result *Result, context *JsonContext) { - - if internalLogEnabled { - internalLog("validateArray %s", context.String()) - internalLog(" %v", value) - } - - nbValues := len(value) - - // TODO explain - if currentSubSchema.itemsChildrenIsSingleSchema { - for i := range value { - subContext := NewJsonContext(strconv.Itoa(i), context) - validationResult := currentSubSchema.itemsChildren[0].subValidateWithContext(value[i], subContext) - result.mergeErrors(validationResult) - } - } else { - if currentSubSchema.itemsChildren != nil && len(currentSubSchema.itemsChildren) > 0 { - - nbItems := len(currentSubSchema.itemsChildren) - - // while we have both schemas and values, check them against each other - for i := 0; i != nbItems && i != nbValues; i++ { - subContext := NewJsonContext(strconv.Itoa(i), context) - validationResult := currentSubSchema.itemsChildren[i].subValidateWithContext(value[i], subContext) - result.mergeErrors(validationResult) - } - - if nbItems < nbValues { - // we have less schemas than elements in the instance array, - // but that might be ok if "additionalItems" is specified. 
- - switch currentSubSchema.additionalItems.(type) { - case bool: - if !currentSubSchema.additionalItems.(bool) { - result.addInternalError(new(ArrayNoAdditionalItemsError), context, value, ErrorDetails{}) - } - case *subSchema: - additionalItemSchema := currentSubSchema.additionalItems.(*subSchema) - for i := nbItems; i != nbValues; i++ { - subContext := NewJsonContext(strconv.Itoa(i), context) - validationResult := additionalItemSchema.subValidateWithContext(value[i], subContext) - result.mergeErrors(validationResult) - } - } - } - } - } - - // minItems & maxItems - if currentSubSchema.minItems != nil { - if nbValues < int(*currentSubSchema.minItems) { - result.addInternalError( - new(ArrayMinItemsError), - context, - value, - ErrorDetails{"min": *currentSubSchema.minItems}, - ) - } - } - if currentSubSchema.maxItems != nil { - if nbValues > int(*currentSubSchema.maxItems) { - result.addInternalError( - new(ArrayMaxItemsError), - context, - value, - ErrorDetails{"max": *currentSubSchema.maxItems}, - ) - } - } - - // uniqueItems: - if currentSubSchema.uniqueItems { - var stringifiedItems = make(map[string]int) - for j, v := range value { - vString, err := marshalWithoutNumber(v) - if err != nil { - result.addInternalError(new(InternalError), context, value, ErrorDetails{"err": err}) - } - if i, ok := stringifiedItems[*vString]; ok { - result.addInternalError( - new(ItemsMustBeUniqueError), - context, - value, - ErrorDetails{"type": TYPE_ARRAY, "i": i, "j": j}, - ) - } - stringifiedItems[*vString] = j - } - } - - // contains: - - if currentSubSchema.contains != nil { - validatedOne := false - var bestValidationResult *Result - - for i, v := range value { - subContext := NewJsonContext(strconv.Itoa(i), context) - - validationResult := currentSubSchema.contains.subValidateWithContext(v, subContext) - if validationResult.Valid() { - validatedOne = true - break - } else { - if bestValidationResult == nil || validationResult.score > bestValidationResult.score { - bestValidationResult = validationResult - } - } - } - if !validatedOne { - result.addInternalError( - new(ArrayContainsError), - context, - value, - ErrorDetails{}, - ) - if bestValidationResult != nil { - result.mergeErrors(bestValidationResult) - } - } - } - - result.incrementScore() -} - -func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string]interface{}, result *Result, context *JsonContext) { - - if internalLogEnabled { - internalLog("validateObject %s", context.String()) - internalLog(" %v", value) - } - - // minProperties & maxProperties: - if currentSubSchema.minProperties != nil { - if len(value) < int(*currentSubSchema.minProperties) { - result.addInternalError( - new(ArrayMinPropertiesError), - context, - value, - ErrorDetails{"min": *currentSubSchema.minProperties}, - ) - } - } - if currentSubSchema.maxProperties != nil { - if len(value) > int(*currentSubSchema.maxProperties) { - result.addInternalError( - new(ArrayMaxPropertiesError), - context, - value, - ErrorDetails{"max": *currentSubSchema.maxProperties}, - ) - } - } - - // required: - for _, requiredProperty := range currentSubSchema.required { - _, ok := value[requiredProperty] - if ok { - result.incrementScore() - } else { - result.addInternalError( - new(RequiredError), - context, - value, - ErrorDetails{"property": requiredProperty}, - ) - } - } - - // additionalProperty & patternProperty: - for pk := range value { - - // Check whether this property is described by "properties" - found := false - for _, spValue := range 
currentSubSchema.propertiesChildren { - if pk == spValue.property { - found = true - } - } - - // Check whether this property is described by "patternProperties" - ppMatch := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context) - - // If it is not described by neither "properties" nor "patternProperties" it must pass "additionalProperties" - if !found && !ppMatch { - switch ap := currentSubSchema.additionalProperties.(type) { - case bool: - // Handle the boolean case separately as it's cleaner to return a specific error than failing to pass the false schema - if !ap { - result.addInternalError( - new(AdditionalPropertyNotAllowedError), - context, - value[pk], - ErrorDetails{"property": pk}, - ) - - } - case *subSchema: - validationResult := ap.subValidateWithContext(value[pk], NewJsonContext(pk, context)) - result.mergeErrors(validationResult) - } - } - } - - // propertyNames: - if currentSubSchema.propertyNames != nil { - for pk := range value { - validationResult := currentSubSchema.propertyNames.subValidateWithContext(pk, context) - if !validationResult.Valid() { - result.addInternalError(new(InvalidPropertyNameError), - context, - value, ErrorDetails{ - "property": pk, - }) - result.mergeErrors(validationResult) - } - } - } - - result.incrementScore() -} - -func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key string, value interface{}, result *Result, context *JsonContext) bool { - - if internalLogEnabled { - internalLog("validatePatternProperty %s", context.String()) - internalLog(" %s %v", key, value) - } - - validated := false - - for pk, pv := range currentSubSchema.patternProperties { - if matches, _ := regexp.MatchString(pk, key); matches { - validated = true - subContext := NewJsonContext(key, context) - validationResult := pv.subValidateWithContext(value, subContext) - result.mergeErrors(validationResult) - } - } - - if !validated { - return false - } - - result.incrementScore() - return true -} - -func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) { - - // Ignore JSON numbers - if isJSONNumber(value) { - return - } - - // Ignore non strings - if !isKind(value, reflect.String) { - return - } - - if internalLogEnabled { - internalLog("validateString %s", context.String()) - internalLog(" %v", value) - } - - stringValue := value.(string) - - // minLength & maxLength: - if currentSubSchema.minLength != nil { - if utf8.RuneCount([]byte(stringValue)) < int(*currentSubSchema.minLength) { - result.addInternalError( - new(StringLengthGTEError), - context, - value, - ErrorDetails{"min": *currentSubSchema.minLength}, - ) - } - } - if currentSubSchema.maxLength != nil { - if utf8.RuneCount([]byte(stringValue)) > int(*currentSubSchema.maxLength) { - result.addInternalError( - new(StringLengthLTEError), - context, - value, - ErrorDetails{"max": *currentSubSchema.maxLength}, - ) - } - } - - // pattern: - if currentSubSchema.pattern != nil { - if !currentSubSchema.pattern.MatchString(stringValue) { - result.addInternalError( - new(DoesNotMatchPatternError), - context, - value, - ErrorDetails{"pattern": currentSubSchema.pattern}, - ) - - } - } - - // format - if currentSubSchema.format != "" { - if !FormatCheckers.IsFormat(currentSubSchema.format, stringValue) { - result.addInternalError( - new(DoesNotMatchFormatError), - context, - value, - ErrorDetails{"format": currentSubSchema.format}, - ) - } - } - - result.incrementScore() -} - -func (v *subSchema) 
validateNumber(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) { - - // Ignore non numbers - if !isJSONNumber(value) { - return - } - - if internalLogEnabled { - internalLog("validateNumber %s", context.String()) - internalLog(" %v", value) - } - - number := value.(json.Number) - float64Value, _ := new(big.Rat).SetString(string(number)) - - // multipleOf: - if currentSubSchema.multipleOf != nil { - if q := new(big.Rat).Quo(float64Value, currentSubSchema.multipleOf); !q.IsInt() { - result.addInternalError( - new(MultipleOfError), - context, - number, - ErrorDetails{ - "multiple": new(big.Float).SetRat(currentSubSchema.multipleOf), - }, - ) - } - } - - //maximum & exclusiveMaximum: - if currentSubSchema.maximum != nil { - if float64Value.Cmp(currentSubSchema.maximum) == 1 { - result.addInternalError( - new(NumberLTEError), - context, - number, - ErrorDetails{ - "max": new(big.Float).SetRat(currentSubSchema.maximum), - }, - ) - } - } - if currentSubSchema.exclusiveMaximum != nil { - if float64Value.Cmp(currentSubSchema.exclusiveMaximum) >= 0 { - result.addInternalError( - new(NumberLTError), - context, - number, - ErrorDetails{ - "max": new(big.Float).SetRat(currentSubSchema.exclusiveMaximum), - }, - ) - } - } - - //minimum & exclusiveMinimum: - if currentSubSchema.minimum != nil { - if float64Value.Cmp(currentSubSchema.minimum) == -1 { - result.addInternalError( - new(NumberGTEError), - context, - number, - ErrorDetails{ - "min": new(big.Float).SetRat(currentSubSchema.minimum), - }, - ) - } - } - if currentSubSchema.exclusiveMinimum != nil { - if float64Value.Cmp(currentSubSchema.exclusiveMinimum) <= 0 { - result.addInternalError( - new(NumberGTError), - context, - number, - ErrorDetails{ - "min": new(big.Float).SetRat(currentSubSchema.exclusiveMinimum), - }, - ) - } - } - - // format - if currentSubSchema.format != "" { - if !FormatCheckers.IsFormat(currentSubSchema.format, float64Value) { - result.addInternalError( - new(DoesNotMatchFormatError), - context, - value, - ErrorDetails{"format": currentSubSchema.format}, - ) - } - } - - result.incrementScore() -} diff --git a/src/vendor/github.com/xlab/treeprint/LICENSE b/src/vendor/github.com/xlab/treeprint/LICENSE deleted file mode 100644 index 5ab533ad2..000000000 --- a/src/vendor/github.com/xlab/treeprint/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) -Copyright © 2016 Maxim Kupriianov - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the “Softwareâ€), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS ISâ€, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
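The gojsonschema sources removed above are the JSON Schema validator this dependency tree no longer pulls in. For context, the entry point they define (`Validate`, taking a schema loader and a document loader, as seen in the deleted validation.go) is typically driven as in the sketch below; the schema and document literals are purely illustrative and do not appear anywhere in this patch.

```go
// Hypothetical usage sketch of the gojsonschema validator whose sources are
// removed above; the JSON literals are illustrative only.
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// NewStringLoader wraps raw JSON; the schema pool / reference machinery
	// deleted in this patch resolves any $id / $ref while the schema is built.
	schemaLoader := gojsonschema.NewStringLoader(`{"type": "object", "required": ["name"]}`)
	documentLoader := gojsonschema.NewStringLoader(`{"name": "harbor"}`)

	// Validate compiles the schema and runs the recursive validation walk
	// (validateRecursive / validateObject / validateNumber shown in the diff).
	result, err := gojsonschema.Validate(schemaLoader, documentLoader)
	if err != nil {
		panic(err)
	}

	if result.Valid() {
		fmt.Println("document is valid")
		return
	}
	for _, desc := range result.Errors() {
		fmt.Printf("- %s\n", desc)
	}
}
```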
diff --git a/src/vendor/github.com/xlab/treeprint/README.md b/src/vendor/github.com/xlab/treeprint/README.md deleted file mode 100644 index 59fb121fc..000000000 --- a/src/vendor/github.com/xlab/treeprint/README.md +++ /dev/null @@ -1,154 +0,0 @@ -treeprint [![GoDoc](https://godoc.org/github.com/xlab/treeprint?status.svg)](https://godoc.org/github.com/xlab/treeprint) ![test coverage](https://img.shields.io/badge/coverage-68.6%25-green.svg) -========= - -Package `treeprint` provides a simple ASCII tree composing tool. - -SYSTEME FIGURE - -If you are familiar with the [tree](http://mama.indstate.edu/users/ice/tree/) utility that is a recursive directory listing command that produces a depth indented listing of files, then you have the idea of what it would look like. - -On my system the command yields the following - -``` - $ tree -. -├── LICENSE -├── README.md -├── treeprint.go -└── treeprint_test.go - -0 directories, 4 files -``` - -and I'd like to have the same format for my Go data structures when I print them. - -## Installation - -``` -$ go get github.com/xlab/treeprint -``` - -## Concept of work - -The general idea is that you initialise a new tree with `treeprint.New()` and then add nodes and -branches into it. Use `AddNode()` when you want add a node on the same level as the target or -use `AddBranch()` when you want to go a level deeper. So `tree.AddBranch().AddNode().AddNode()` would -create a new level with two distinct nodes on it. So `tree.AddNode().AddNode()` is a flat thing and -`tree.AddBranch().AddBranch().AddBranch()` is a high thing. Use `String()` or `Bytes()` on a branch -to render a subtree, or use it on the root to print the whole tree. - -The utility will yield Unicode-friendly trees. The output is predictable and there is no platform-dependent exceptions, so if you have issues with displaying the tree in the console, all platform-related transformations can be done after the tree has been rendered: [an example](https://github.com/xlab/treeprint/issues/2#issuecomment-324944141) for Asian locales. - -## Use cases - -### When you want to render a complex data structure: - -```go -func main() { - // to add a custom root name use `treeprint.NewWithRoot()` instead - tree := treeprint.New() - - // create a new branch in the root - one := tree.AddBranch("one") - - // add some nodes - one.AddNode("subnode1").AddNode("subnode2") - - // create a new sub-branch - one.AddBranch("two"). - AddNode("subnode1").AddNode("subnode2"). // add some nodes - AddBranch("three"). // add a new sub-branch - AddNode("subnode1").AddNode("subnode2") // add some nodes too - - // add one more node that should surround the inner branch - one.AddNode("subnode3") - - // add a new node to the root - tree.AddNode("outernode") - - fmt.Println(tree.String()) -} -``` - -Will give you: - -``` -. -├── one -│   ├── subnode1 -│   ├── subnode2 -│   ├── two -│   │   ├── subnode1 -│   │   ├── subnode2 -│   │   └── three -│   │   ├── subnode1 -│   │   └── subnode2 -│   └── subnode3 -└── outernode -``` - -### Another case, when you have to make a tree where any leaf may have some meta-data (as `tree` is capable of it): - -```go -func main { - // to add a custom root name use `treeprint.NewWithRoot()` instead - tree := treeprint.New() - - tree.AddNode("Dockerfile") - tree.AddNode("Makefile") - tree.AddNode("aws.sh") - tree.AddMetaBranch(" 204", "bin"). - AddNode("dbmaker").AddNode("someserver").AddNode("testtool") - tree.AddMetaBranch(" 374", "deploy"). 
- AddNode("Makefile").AddNode("bootstrap.sh") - tree.AddMetaNode("122K", "testtool.a") - - fmt.Println(tree.String()) -} -``` - -Output: - -``` -. -├── Dockerfile -├── Makefile -├── aws.sh -├── [ 204] bin -│   ├── dbmaker -│   ├── someserver -│   └── testtool -├── [ 374] deploy -│   ├── Makefile -│   └── bootstrap.sh -└── [122K] testtool.a -``` - -### Iterating over the tree nodes - -```go -tree := New() - -one := tree.AddBranch("one") -one.AddNode("one-subnode1").AddNode("one-subnode2") -one.AddBranch("two").AddNode("two-subnode1").AddNode("two-subnode2"). - AddBranch("three").AddNode("three-subnode1").AddNode("three-subnode2") -tree.AddNode("outernode") - -// if you need to iterate over the whole tree -// call `VisitAll` from your top root node. -tree.VisitAll(func(item *node) { - if len(item.Nodes) > 0 { - // branch nodes - fmt.Println(item.Value) // will output one, two, three - } else { - // leaf nodes - fmt.Println(item.Value) // will output one-*, two-*, three-* and outernode - } -}) - -``` -Yay! So it works. - -## License -MIT diff --git a/src/vendor/github.com/xlab/treeprint/helpers.go b/src/vendor/github.com/xlab/treeprint/helpers.go deleted file mode 100644 index a091a5a0f..000000000 --- a/src/vendor/github.com/xlab/treeprint/helpers.go +++ /dev/null @@ -1,47 +0,0 @@ -package treeprint - -import ( - "reflect" - "strings" -) - -func isEmpty(v *reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func tagSpec(tag string) (name string, omit bool) { - parts := strings.Split(tag, ",") - if len(parts) < 2 { - return tag, false - } - if parts[1] == "omitempty" { - return parts[0], true - } - return parts[0], false -} - -func filterTags(tag reflect.StructTag) string { - tags := strings.Split(string(tag), " ") - filtered := make([]string, 0, len(tags)) - for i := range tags { - if strings.HasPrefix(tags[i], "tree:") { - continue - } - filtered = append(filtered, tags[i]) - } - return strings.Join(filtered, " ") -} diff --git a/src/vendor/github.com/xlab/treeprint/struct.go b/src/vendor/github.com/xlab/treeprint/struct.go deleted file mode 100644 index 4d5cc8254..000000000 --- a/src/vendor/github.com/xlab/treeprint/struct.go +++ /dev/null @@ -1,322 +0,0 @@ -package treeprint - -import ( - "fmt" - "reflect" - "strings" -) - -type StructTreeOption int - -const ( - StructNameTree StructTreeOption = iota - StructValueTree - StructTagTree - StructTypeTree - StructTypeSizeTree -) - -func FromStruct(v interface{}, opt ...StructTreeOption) (Tree, error) { - var treeOpt StructTreeOption - if len(opt) > 0 { - treeOpt = opt[0] - } - switch treeOpt { - case StructNameTree: - tree := New() - err := nameTree(tree, v) - return tree, err - case StructValueTree: - tree := New() - err := valueTree(tree, v) - return tree, err - case StructTagTree: - tree := New() - err := tagTree(tree, v) - return tree, err - case StructTypeTree: - tree := New() - err := typeTree(tree, v) - return tree, err - case StructTypeSizeTree: - tree := New() - err := typeSizeTree(tree, v) - return tree, err - default: - err := fmt.Errorf("treeprint: 
invalid StructTreeOption %v", treeOpt) - return nil, err - } -} - -type FmtFunc func(name string, v interface{}) (string, bool) - -func FromStructWithMeta(v interface{}, fmtFunc FmtFunc) (Tree, error) { - if fmtFunc == nil { - tree := New() - err := nameTree(tree, v) - return tree, err - } - tree := New() - err := metaTree(tree, v, fmtFunc) - return tree, err -} - -func Repr(v interface{}) string { - tree := New() - vType := reflect.TypeOf(v) - vValue := reflect.ValueOf(v) - _, val, isStruct := getValue(vType, &vValue) - if !isStruct { - return fmt.Sprintf("%+v", val.Interface()) - } - err := valueTree(tree, val.Interface()) - if err != nil { - return err.Error() - } - return tree.String() -} - -func nameTree(tree Tree, v interface{}) error { - typ, val, err := checkType(v) - if err != nil { - return err - } - fields := typ.NumField() - for i := 0; i < fields; i++ { - field := typ.Field(i) - fieldValue := val.Field(i) - name, skip, omit := getMeta(field.Name, field.Tag) - if skip || omit && isEmpty(&fieldValue) { - continue - } - typ, val, isStruct := getValue(field.Type, &fieldValue) - if !isStruct { - tree.AddNode(name) - continue - } else if subNum := typ.NumField(); subNum == 0 { - tree.AddNode(name) - continue - } - branch := tree.AddBranch(name) - if err := nameTree(branch, val.Interface()); err != nil { - err := fmt.Errorf("%v on struct branch %s", err, name) - return err - } - } - return nil -} - -func getMeta(fieldName string, tag reflect.StructTag) (name string, skip, omit bool) { - if tagStr := tag.Get("tree"); len(tagStr) > 0 { - name, omit = tagSpec(tagStr) - } - if name == "-" { - return fieldName, true, omit - } - if len(name) == 0 { - name = fieldName - } else if trimmed := strings.TrimSpace(name); len(trimmed) == 0 { - name = fieldName - } - return -} - -func valueTree(tree Tree, v interface{}) error { - typ, val, err := checkType(v) - if err != nil { - return err - } - fields := typ.NumField() - for i := 0; i < fields; i++ { - field := typ.Field(i) - fieldValue := val.Field(i) - name, skip, omit := getMeta(field.Name, field.Tag) - if skip || omit && isEmpty(&fieldValue) { - continue - } - typ, val, isStruct := getValue(field.Type, &fieldValue) - if !isStruct { - tree.AddMetaNode(val.Interface(), name) - continue - } else if subNum := typ.NumField(); subNum == 0 { - tree.AddMetaNode(val.Interface(), name) - continue - } - branch := tree.AddBranch(name) - if err := valueTree(branch, val.Interface()); err != nil { - err := fmt.Errorf("%v on struct branch %s", err, name) - return err - } - } - return nil -} - -func tagTree(tree Tree, v interface{}) error { - typ, val, err := checkType(v) - if err != nil { - return err - } - fields := typ.NumField() - for i := 0; i < fields; i++ { - field := typ.Field(i) - fieldValue := val.Field(i) - name, skip, omit := getMeta(field.Name, field.Tag) - if skip || omit && isEmpty(&fieldValue) { - continue - } - filteredTag := filterTags(field.Tag) - typ, val, isStruct := getValue(field.Type, &fieldValue) - if !isStruct { - tree.AddMetaNode(filteredTag, name) - continue - } else if subNum := typ.NumField(); subNum == 0 { - tree.AddMetaNode(filteredTag, name) - continue - } - branch := tree.AddMetaBranch(filteredTag, name) - if err := tagTree(branch, val.Interface()); err != nil { - err := fmt.Errorf("%v on struct branch %s", err, name) - return err - } - } - return nil -} - -func typeTree(tree Tree, v interface{}) error { - typ, val, err := checkType(v) - if err != nil { - return err - } - fields := typ.NumField() - for i := 0; i < fields; i++ 
{ - field := typ.Field(i) - fieldValue := val.Field(i) - name, skip, omit := getMeta(field.Name, field.Tag) - if skip || omit && isEmpty(&fieldValue) { - continue - } - typ, val, isStruct := getValue(field.Type, &fieldValue) - typename := fmt.Sprintf("%T", val.Interface()) - if !isStruct { - tree.AddMetaNode(typename, name) - continue - } else if subNum := typ.NumField(); subNum == 0 { - tree.AddMetaNode(typename, name) - continue - } - branch := tree.AddMetaBranch(typename, name) - if err := typeTree(branch, val.Interface()); err != nil { - err := fmt.Errorf("%v on struct branch %s", err, name) - return err - } - } - return nil -} - -func typeSizeTree(tree Tree, v interface{}) error { - typ, val, err := checkType(v) - if err != nil { - return err - } - fields := typ.NumField() - for i := 0; i < fields; i++ { - field := typ.Field(i) - fieldValue := val.Field(i) - name, skip, omit := getMeta(field.Name, field.Tag) - if skip || omit && isEmpty(&fieldValue) { - continue - } - typ, val, isStruct := getValue(field.Type, &fieldValue) - typesize := typ.Size() - if !isStruct { - tree.AddMetaNode(typesize, name) - continue - } else if subNum := typ.NumField(); subNum == 0 { - tree.AddMetaNode(typesize, name) - continue - } - branch := tree.AddMetaBranch(typesize, name) - if err := typeSizeTree(branch, val.Interface()); err != nil { - err := fmt.Errorf("%v on struct branch %s", err, name) - return err - } - } - return nil -} - -func metaTree(tree Tree, v interface{}, fmtFunc FmtFunc) error { - typ, val, err := checkType(v) - if err != nil { - return err - } - fields := typ.NumField() - for i := 0; i < fields; i++ { - field := typ.Field(i) - fieldValue := val.Field(i) - name, skip, omit := getMeta(field.Name, field.Tag) - if skip || omit && isEmpty(&fieldValue) { - continue - } - typ, val, isStruct := getValue(field.Type, &fieldValue) - formatted, show := fmtFunc(name, val.Interface()) - if !isStruct { - if show { - tree.AddMetaNode(formatted, name) - continue - } - tree.AddNode(name) - continue - } else if subNum := typ.NumField(); subNum == 0 { - if show { - tree.AddMetaNode(formatted, name) - continue - } - tree.AddNode(name) - continue - } - var branch Tree - if show { - branch = tree.AddMetaBranch(formatted, name) - } else { - branch = tree.AddBranch(name) - } - if err := metaTree(branch, val.Interface(), fmtFunc); err != nil { - err := fmt.Errorf("%v on struct branch %s", err, name) - return err - } - } - return nil -} - -func getValue(typ reflect.Type, val *reflect.Value) (reflect.Type, *reflect.Value, bool) { - switch typ.Kind() { - case reflect.Ptr: - typ = typ.Elem() - if typ.Kind() == reflect.Struct { - elem := val.Elem() - return typ, &elem, true - } - case reflect.Struct: - return typ, val, true - } - return typ, val, false -} - -func checkType(v interface{}) (reflect.Type, *reflect.Value, error) { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - switch typ.Kind() { - case reflect.Ptr: - typ = typ.Elem() - if typ.Kind() != reflect.Struct { - err := fmt.Errorf("treeprint: %T is not a struct we could work with", v) - return nil, nil, err - } - val = val.Elem() - case reflect.Struct: - default: - err := fmt.Errorf("treeprint: %T is not a struct we could work with", v) - return nil, nil, err - } - return typ, &val, nil -} diff --git a/src/vendor/github.com/xlab/treeprint/treeprint.go b/src/vendor/github.com/xlab/treeprint/treeprint.go deleted file mode 100644 index f90441461..000000000 --- a/src/vendor/github.com/xlab/treeprint/treeprint.go +++ /dev/null @@ -1,294 +0,0 @@ -// Package 
treeprint provides a simple ASCII tree composing tool. -package treeprint - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strings" -) - -// Value defines any value -type Value interface{} - -// MetaValue defines any meta value -type MetaValue interface{} - -// NodeVisitor function type for iterating over nodes -type NodeVisitor func(item *node) - -// Tree represents a tree structure with leaf-nodes and branch-nodes. -type Tree interface { - // AddNode adds a new node to a branch. - AddNode(v Value) Tree - // AddMetaNode adds a new node with meta value provided to a branch. - AddMetaNode(meta MetaValue, v Value) Tree - // AddBranch adds a new branch node (a level deeper). - AddBranch(v Value) Tree - // AddMetaBranch adds a new branch node (a level deeper) with meta value provided. - AddMetaBranch(meta MetaValue, v Value) Tree - // Branch converts a leaf-node to a branch-node, - // applying this on a branch-node does no effect. - Branch() Tree - // FindByMeta finds a node whose meta value matches the provided one by reflect.DeepEqual, - // returns nil if not found. - FindByMeta(meta MetaValue) Tree - // FindByValue finds a node whose value matches the provided one by reflect.DeepEqual, - // returns nil if not found. - FindByValue(value Value) Tree - // returns the last node of a tree - FindLastNode() Tree - // String renders the tree or subtree as a string. - String() string - // Bytes renders the tree or subtree as byteslice. - Bytes() []byte - - SetValue(value Value) - SetMetaValue(meta MetaValue) - - // VisitAll iterates over the tree, branches and nodes. - // If need to iterate over the whole tree, use the root node. - // Note this method uses a breadth-first approach. - VisitAll(fn NodeVisitor) -} - -type node struct { - Root *node - Meta MetaValue - Value Value - Nodes []*node -} - -func (n *node) FindLastNode() Tree { - ns := n.Nodes - if len(ns) == 0 { - return nil - } - return ns[len(ns)-1] -} - -func (n *node) AddNode(v Value) Tree { - n.Nodes = append(n.Nodes, &node{ - Root: n, - Value: v, - }) - return n -} - -func (n *node) AddMetaNode(meta MetaValue, v Value) Tree { - n.Nodes = append(n.Nodes, &node{ - Root: n, - Meta: meta, - Value: v, - }) - return n -} - -func (n *node) AddBranch(v Value) Tree { - branch := &node{ - Root: n, - Value: v, - } - n.Nodes = append(n.Nodes, branch) - return branch -} - -func (n *node) AddMetaBranch(meta MetaValue, v Value) Tree { - branch := &node{ - Root: n, - Meta: meta, - Value: v, - } - n.Nodes = append(n.Nodes, branch) - return branch -} - -func (n *node) Branch() Tree { - n.Root = nil - return n -} - -func (n *node) FindByMeta(meta MetaValue) Tree { - for _, node := range n.Nodes { - if reflect.DeepEqual(node.Meta, meta) { - return node - } - if v := node.FindByMeta(meta); v != nil { - return v - } - } - return nil -} - -func (n *node) FindByValue(value Value) Tree { - for _, node := range n.Nodes { - if reflect.DeepEqual(node.Value, value) { - return node - } - if v := node.FindByMeta(value); v != nil { - return v - } - } - return nil -} - -func (n *node) Bytes() []byte { - buf := new(bytes.Buffer) - level := 0 - var levelsEnded []int - if n.Root == nil { - if n.Meta != nil { - buf.WriteString(fmt.Sprintf("[%v] %v", n.Meta, n.Value)) - } else { - buf.WriteString(fmt.Sprintf("%v", n.Value)) - } - buf.WriteByte('\n') - } else { - edge := EdgeTypeMid - if len(n.Nodes) == 0 { - edge = EdgeTypeEnd - levelsEnded = append(levelsEnded, level) - } - printValues(buf, 0, levelsEnded, edge, n) - } - if len(n.Nodes) > 0 { - printNodes(buf, level, 
levelsEnded, n.Nodes) - } - return buf.Bytes() -} - -func (n *node) String() string { - return string(n.Bytes()) -} - -func (n *node) SetValue(value Value) { - n.Value = value -} - -func (n *node) SetMetaValue(meta MetaValue) { - n.Meta = meta -} - -func (n *node) VisitAll(fn NodeVisitor) { - for _, node := range n.Nodes { - fn(node) - - if len(node.Nodes) > 0 { - node.VisitAll(fn) - continue - } - } -} - -func printNodes(wr io.Writer, - level int, levelsEnded []int, nodes []*node) { - - for i, node := range nodes { - edge := EdgeTypeMid - if i == len(nodes)-1 { - levelsEnded = append(levelsEnded, level) - edge = EdgeTypeEnd - } - printValues(wr, level, levelsEnded, edge, node) - if len(node.Nodes) > 0 { - printNodes(wr, level+1, levelsEnded, node.Nodes) - } - } -} - -func printValues(wr io.Writer, - level int, levelsEnded []int, edge EdgeType, node *node) { - - for i := 0; i < level; i++ { - if isEnded(levelsEnded, i) { - fmt.Fprint(wr, strings.Repeat(" ", IndentSize+1)) - continue - } - fmt.Fprintf(wr, "%s%s", EdgeTypeLink, strings.Repeat(" ", IndentSize)) - } - - val := renderValue(level, node) - meta := node.Meta - - if meta != nil { - fmt.Fprintf(wr, "%s [%v] %v\n", edge, meta, val) - return - } - fmt.Fprintf(wr, "%s %v\n", edge, val) -} - -func isEnded(levelsEnded []int, level int) bool { - for _, l := range levelsEnded { - if l == level { - return true - } - } - return false -} - -func renderValue(level int, node *node) Value { - lines := strings.Split(fmt.Sprintf("%v", node.Value), "\n") - - // If value does not contain multiple lines, return itself. - if len(lines) < 2 { - return node.Value - } - - // If value contains multiple lines, - // generate a padding and prefix each line with it. - pad := padding(level, node) - - for i := 1; i < len(lines); i++ { - lines[i] = fmt.Sprintf("%s%s", pad, lines[i]) - } - - return strings.Join(lines, "\n") -} - -// padding returns a padding for the multiline values with correctly placed link edges. -// It is generated by traversing the tree upwards (from leaf to the root of the tree) -// and, on each level, checking if the node the last one of its siblings. -// If a node is the last one, the padding on that level should be empty (there's nothing to link to below it). -// If a node is not the last one, the padding on that level should be the link edge so the sibling below is correctly connected. -func padding(level int, node *node) string { - links := make([]string, level+1) - - for node.Root != nil { - if isLast(node) { - links[level] = strings.Repeat(" ", IndentSize+1) - } else { - links[level] = fmt.Sprintf("%s%s", EdgeTypeLink, strings.Repeat(" ", IndentSize)) - } - level-- - node = node.Root - } - - return strings.Join(links, "") -} - -// isLast checks if the node is the last one in the slice of its parent children -func isLast(n *node) bool { - return n == n.Root.FindLastNode() -} - -type EdgeType string - -var ( - EdgeTypeLink EdgeType = "│" - EdgeTypeMid EdgeType = "├──" - EdgeTypeEnd EdgeType = "└──" -) - -// IndentSize is the number of spaces per tree level. -var IndentSize = 3 - -// New Generates new tree -func New() Tree { - return &node{Value: "."} -} - -// NewWithRoot Generates new tree with the given root value -func NewWithRoot(root Value) Tree { - return &node{Value: root} -} diff --git a/src/vendor/go.starlark.net/LICENSE b/src/vendor/go.starlark.net/LICENSE deleted file mode 100644 index a6609a143..000000000 --- a/src/vendor/go.starlark.net/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -Copyright (c) 2017 The Bazel Authors. 
All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the - distribution. - -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/vendor/go.starlark.net/internal/compile/compile.go b/src/vendor/go.starlark.net/internal/compile/compile.go deleted file mode 100644 index eb8e16279..000000000 --- a/src/vendor/go.starlark.net/internal/compile/compile.go +++ /dev/null @@ -1,1903 +0,0 @@ -// Package compile defines the Starlark bytecode compiler. -// It is an internal package of the Starlark interpreter and is not directly accessible to clients. -// -// The compiler generates byte code with optional uint32 operands for a -// virtual machine with the following components: -// - a program counter, which is an index into the byte code array. -// - an operand stack, whose maximum size is computed for each function by the compiler. -// - an stack of active iterators. -// - an array of local variables. -// The number of local variables and their indices are computed by the resolver. -// Locals (possibly including parameters) that are shared with nested functions -// are 'cells': their locals array slot will contain a value of type 'cell', -// an indirect value in a box that is explicitly read/updated by instructions. -// - an array of free variables, for nested functions. -// Free variables are a subset of the ancestors' cell variables. -// As with locals and cells, these are computed by the resolver. -// - an array of global variables, shared among all functions in the same module. -// All elements are initially nil. -// - two maps of predeclared and universal identifiers. -// -// Each function has a line number table that maps each program counter -// offset to a source position, including the column number. -// -// Operands, logically uint32s, are encoded using little-endian 7-bit -// varints, the top bit indicating that more bytes follow. -// -package compile // import "go.starlark.net/internal/compile" - -import ( - "bytes" - "fmt" - "log" - "os" - "path/filepath" - "strconv" - "sync" - - "go.starlark.net/resolve" - "go.starlark.net/syntax" -) - -// Disassemble causes the assembly code for each function -// to be printed to stderr as it is generated. 
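// An illustrative sketch, not from the vendored source: the virtual-machine
// components listed in the package comment above, gathered into one
// hypothetical struct. Field names are invented for illustration only; the
// real evaluator state lives in the interpreter, not in this compiler package.
type vmState struct {
	pc        uint32                 // program counter: index into the byte code array
	stack     []interface{}          // operand stack; its maximum size is Funcode.MaxStack
	iterstack []interface{}          // stack of active iterators (ITERPUSH/ITERJMP/ITERPOP)
	locals    []interface{}          // local slots; shared locals hold a cell indirection
	freevars  []interface{}          // cells captured from enclosing functions
	globals   []interface{}          // module globals, shared by all functions; initially nil
	predecl   map[string]interface{} // predeclared identifiers of the module
	universe  map[string]interface{} // universal identifiers (e.g. len, None)
}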
-var Disassemble = false - -const debug = false // make code generation verbose, for debugging the compiler - -// Increment this to force recompilation of saved bytecode files. -const Version = 10 - -type Opcode uint8 - -// "x DUP x x" is a "stack picture" that describes the state of the -// stack before and after execution of the instruction. -// -// OP indicates an immediate operand that is an index into the -// specified table: locals, names, freevars, constants. -const ( - NOP Opcode = iota // - NOP - - - // stack operations - DUP // x DUP x x - DUP2 // x y DUP2 x y x y - POP // x POP - - EXCH // x y EXCH y x - - // binary comparisons - // (order must match Token) - LT - GT - GE - LE - EQL - NEQ - - // binary arithmetic - // (order must match Token) - PLUS - MINUS - STAR - SLASH - SLASHSLASH - PERCENT - AMP - PIPE - CIRCUMFLEX - LTLT - GTGT - - IN - - // unary operators - UPLUS // x UPLUS x - UMINUS // x UMINUS -x - TILDE // x TILDE ~x - - NONE // - NONE None - TRUE // - TRUE True - FALSE // - FALSE False - MANDATORY // - MANDATORY Mandatory [sentinel value for required kwonly args] - - ITERPUSH // iterable ITERPUSH - [pushes the iterator stack] - ITERPOP // - ITERPOP - [pops the iterator stack] - NOT // value NOT bool - RETURN // value RETURN - - SETINDEX // a i new SETINDEX - - INDEX // a i INDEX elem - SETDICT // dict key value SETDICT - - SETDICTUNIQ // dict key value SETDICTUNIQ - - APPEND // list elem APPEND - - SLICE // x lo hi step SLICE slice - INPLACE_ADD // x y INPLACE_ADD z where z is x+y or x.extend(y) - MAKEDICT // - MAKEDICT dict - SETCELL // value cell SETCELL - - CELL // cell CELL value - - // --- opcodes with an argument must go below this line --- - - // control flow - JMP // - JMP - - CJMP // cond CJMP - - ITERJMP // - ITERJMP elem (and fall through) [acts on topmost iterator] - // or: - ITERJMP - (and jump) - - CONSTANT // - CONSTANT value - MAKETUPLE // x1 ... xn MAKETUPLE tuple - MAKELIST // x1 ... xn MAKELIST list - MAKEFUNC // defaults+freevars MAKEFUNC fn - LOAD // from1 ... fromN module LOAD v1 ... vN - SETLOCAL // value SETLOCAL - - SETGLOBAL // value SETGLOBAL - - LOCAL // - LOCAL value - FREE // - FREE cell - GLOBAL // - GLOBAL value - PREDECLARED // - PREDECLARED value - UNIVERSAL // - UNIVERSAL value - ATTR // x ATTR y y = x.name - SETFIELD // x y SETFIELD - x.name = y - UNPACK // iterable UNPACK vn ... v1 - - // n>>8 is #positional args and n&0xff is #named args (pairs). - CALL // fn positional named CALL result - CALL_VAR // fn positional named *args CALL_VAR result - CALL_KW // fn positional named **kwargs CALL_KW result - CALL_VAR_KW // fn positional named *args **kwargs CALL_VAR_KW result - - OpcodeArgMin = JMP - OpcodeMax = CALL_VAR_KW -) - -// TODO(adonovan): add dynamic checks for missing opcodes in the tables below. 
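// An illustrative sketch, not from the vendored source: each opcode's "stack
// picture" above fixes its net effect on operand-stack depth, and the compiler
// simulates those effects per block to find the high-water mark it records as
// Funcode.MaxStack (see stackEffect and pcomp.function below). A minimal
// version of that simulation:
func maxDepth(effects []int8) int {
	depth, max := 0, 0
	for _, se := range effects {
		depth += int(se)
		if depth > max {
			max = depth
		}
	}
	return max
}

// For the sequence CONSTANT CONSTANT PLUS RETURN the net effects are
// +1, +1, -1, -1 (PLUS pops two operands and pushes one), so
// maxDepth([]int8{+1, +1, -1, -1}) == 2: at most two values are live at once.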
- -var opcodeNames = [...]string{ - AMP: "amp", - APPEND: "append", - ATTR: "attr", - CALL: "call", - CALL_KW: "call_kw ", - CALL_VAR: "call_var", - CALL_VAR_KW: "call_var_kw", - CELL: "cell", - CIRCUMFLEX: "circumflex", - CJMP: "cjmp", - CONSTANT: "constant", - DUP2: "dup2", - DUP: "dup", - EQL: "eql", - EXCH: "exch", - FALSE: "false", - FREE: "free", - GE: "ge", - GLOBAL: "global", - GT: "gt", - GTGT: "gtgt", - IN: "in", - INDEX: "index", - INPLACE_ADD: "inplace_add", - ITERJMP: "iterjmp", - ITERPOP: "iterpop", - ITERPUSH: "iterpush", - JMP: "jmp", - LE: "le", - LOAD: "load", - LOCAL: "local", - LT: "lt", - LTLT: "ltlt", - MAKEDICT: "makedict", - MAKEFUNC: "makefunc", - MAKELIST: "makelist", - MAKETUPLE: "maketuple", - MANDATORY: "mandatory", - MINUS: "minus", - NEQ: "neq", - NONE: "none", - NOP: "nop", - NOT: "not", - PERCENT: "percent", - PIPE: "pipe", - PLUS: "plus", - POP: "pop", - PREDECLARED: "predeclared", - RETURN: "return", - SETCELL: "setcell", - SETDICT: "setdict", - SETDICTUNIQ: "setdictuniq", - SETFIELD: "setfield", - SETGLOBAL: "setglobal", - SETINDEX: "setindex", - SETLOCAL: "setlocal", - SLASH: "slash", - SLASHSLASH: "slashslash", - SLICE: "slice", - STAR: "star", - TILDE: "tilde", - TRUE: "true", - UMINUS: "uminus", - UNIVERSAL: "universal", - UNPACK: "unpack", - UPLUS: "uplus", -} - -const variableStackEffect = 0x7f - -// stackEffect records the effect on the size of the operand stack of -// each kind of instruction. For some instructions this requires computation. -var stackEffect = [...]int8{ - AMP: -1, - APPEND: -2, - ATTR: 0, - CALL: variableStackEffect, - CALL_KW: variableStackEffect, - CALL_VAR: variableStackEffect, - CALL_VAR_KW: variableStackEffect, - CELL: 0, - CIRCUMFLEX: -1, - CJMP: -1, - CONSTANT: +1, - DUP2: +2, - DUP: +1, - EQL: -1, - FALSE: +1, - FREE: +1, - GE: -1, - GLOBAL: +1, - GT: -1, - GTGT: -1, - IN: -1, - INDEX: -1, - INPLACE_ADD: -1, - ITERJMP: variableStackEffect, - ITERPOP: 0, - ITERPUSH: -1, - JMP: 0, - LE: -1, - LOAD: -1, - LOCAL: +1, - LT: -1, - LTLT: -1, - MAKEDICT: +1, - MAKEFUNC: 0, - MAKELIST: variableStackEffect, - MAKETUPLE: variableStackEffect, - MANDATORY: +1, - MINUS: -1, - NEQ: -1, - NONE: +1, - NOP: 0, - NOT: 0, - PERCENT: -1, - PIPE: -1, - PLUS: -1, - POP: -1, - PREDECLARED: +1, - RETURN: -1, - SETCELL: -2, - SETDICT: -3, - SETDICTUNIQ: -3, - SETFIELD: -2, - SETGLOBAL: -1, - SETINDEX: -3, - SETLOCAL: -1, - SLASH: -1, - SLASHSLASH: -1, - SLICE: -3, - STAR: -1, - TRUE: +1, - UMINUS: 0, - UNIVERSAL: +1, - UNPACK: variableStackEffect, - UPLUS: 0, -} - -func (op Opcode) String() string { - if op < OpcodeMax { - if name := opcodeNames[op]; name != "" { - return name - } - } - return fmt.Sprintf("illegal op (%d)", op) -} - -// A Program is a Starlark file in executable form. -// -// Programs are serialized by the Program.Encode method, -// which must be updated whenever this declaration is changed. -type Program struct { - Loads []Binding // name (really, string) and position of each load stmt - Names []string // names of attributes and predeclared variables - Constants []interface{} // = string | int64 | float64 | *big.Int - Functions []*Funcode - Globals []Binding // for error messages and tracing - Toplevel *Funcode // module initialization function -} - -// A Funcode is the code of a compiled Starlark function. -// -// Funcodes are serialized by the encoder.function method, -// which must be updated whenever this declaration is changed. 
-type Funcode struct { - Prog *Program - Pos syntax.Position // position of def or lambda token - Name string // name of this function - Doc string // docstring of this function - Code []byte // the byte code - pclinetab []uint16 // mapping from pc to linenum - Locals []Binding // locals, parameters first - Cells []int // indices of Locals that require cells - Freevars []Binding // for tracing - MaxStack int - NumParams int - NumKwonlyParams int - HasVarargs, HasKwargs bool - - // -- transient state -- - - lntOnce sync.Once - lnt []pclinecol // decoded line number table -} - -type pclinecol struct { - pc uint32 - line, col int32 -} - -// A Binding is the name and position of a binding identifier. -type Binding struct { - Name string - Pos syntax.Position -} - -// A pcomp holds the compiler state for a Program. -type pcomp struct { - prog *Program // what we're building - - names map[string]uint32 - constants map[interface{}]uint32 - functions map[*Funcode]uint32 -} - -// An fcomp holds the compiler state for a Funcode. -type fcomp struct { - fn *Funcode // what we're building - - pcomp *pcomp - pos syntax.Position // current position of generated code - loops []loop - block *block -} - -type loop struct { - break_, continue_ *block -} - -type block struct { - insns []insn - - // If the last insn is a RETURN, jmp and cjmp are nil. - // If the last insn is a CJMP or ITERJMP, - // cjmp and jmp are the "true" and "false" successors. - // Otherwise, jmp is the sole successor. - jmp, cjmp *block - - initialstack int // for stack depth computation - - // Used during encoding - index int // -1 => not encoded yet - addr uint32 -} - -type insn struct { - op Opcode - arg uint32 - line, col int32 -} - -// Position returns the source position for program counter pc. -func (fn *Funcode) Position(pc uint32) syntax.Position { - fn.lntOnce.Do(fn.decodeLNT) - - // Binary search to find last LNT entry not greater than pc. - // To avoid dynamic dispatch, this is a specialization of - // sort.Search using this predicate: - // !(i < len(fn.lnt)-1 && fn.lnt[i+1].pc <= pc) - n := len(fn.lnt) - i, j := 0, n - for i < j { - h := int(uint(i+j) >> 1) - if !(h >= n-1 || fn.lnt[h+1].pc > pc) { - i = h + 1 - } else { - j = h - } - } - - var line, col int32 - if i < n { - line = fn.lnt[i].line - col = fn.lnt[i].col - } - - pos := fn.Pos // copy the (annoyingly inaccessible) filename - pos.Col = col - pos.Line = line - return pos -} - -// decodeLNT decodes the line number table and populates fn.lnt. -// It is called at most once. -func (fn *Funcode) decodeLNT() { - // Conceptually the table contains rows of the form - // (pc uint32, line int32, col int32), sorted by pc. - // We use a delta encoding, since the differences - // between successive pc, line, and column values - // are typically small and positive (though line and - // especially column differences may be negative). - // The delta encoding starts from - // {pc: 0, line: fn.Pos.Line, col: fn.Pos.Col}. - // - // Each entry is packed into one or more 16-bit values: - // Δpc uint4 - // Δline int5 - // Δcol int6 - // incomplete uint1 - // The top 4 bits are the unsigned delta pc. - // The next 5 bits are the signed line number delta. - // The next 6 bits are the signed column number delta. - // The bottom bit indicates that more rows follow because - // one of the deltas was maxed out. - // These field widths were chosen from a sample of real programs, - // and allow >97% of rows to be encoded in a single uint16. 
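// A worked illustration (editorial, not in the original source): for a
// single table entry x = 0x30FC,
//	Δpc   = x >> 12               = +3  (top 4 bits, unsigned)
//	Δline = (int16(x) << 4) >> 11 = +1  (next 5 bits, sign-extended)
//	Δcol  = (int16(x) << 9) >> 10 = -2  (next 6 bits, sign-extended)
// and x&1 == 0, so the row is complete and yields one pclinecol entry
// (pc+3, line+1, col-2) relative to the previous row.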
- - fn.lnt = make([]pclinecol, 0, len(fn.pclinetab)) // a minor overapproximation - entry := pclinecol{ - pc: 0, - line: fn.Pos.Line, - col: fn.Pos.Col, - } - for _, x := range fn.pclinetab { - entry.pc += uint32(x) >> 12 - entry.line += int32((int16(x) << 4) >> (16 - 5)) // sign extend Δline - entry.col += int32((int16(x) << 9) >> (16 - 6)) // sign extend Δcol - if (x & 1) == 0 { - fn.lnt = append(fn.lnt, entry) - } - } -} - -// bindings converts resolve.Bindings to compiled form. -func bindings(bindings []*resolve.Binding) []Binding { - res := make([]Binding, len(bindings)) - for i, bind := range bindings { - res[i].Name = bind.First.Name - res[i].Pos = bind.First.NamePos - } - return res -} - -// Expr compiles an expression to a program whose toplevel function evaluates it. -func Expr(expr syntax.Expr, name string, locals []*resolve.Binding) *Program { - pos := syntax.Start(expr) - stmts := []syntax.Stmt{&syntax.ReturnStmt{Result: expr}} - return File(stmts, pos, name, locals, nil) -} - -// File compiles the statements of a file into a program. -func File(stmts []syntax.Stmt, pos syntax.Position, name string, locals, globals []*resolve.Binding) *Program { - pcomp := &pcomp{ - prog: &Program{ - Globals: bindings(globals), - }, - names: make(map[string]uint32), - constants: make(map[interface{}]uint32), - functions: make(map[*Funcode]uint32), - } - pcomp.prog.Toplevel = pcomp.function(name, pos, stmts, locals, nil) - - return pcomp.prog -} - -func (pcomp *pcomp) function(name string, pos syntax.Position, stmts []syntax.Stmt, locals, freevars []*resolve.Binding) *Funcode { - fcomp := &fcomp{ - pcomp: pcomp, - pos: pos, - fn: &Funcode{ - Prog: pcomp.prog, - Pos: pos, - Name: name, - Doc: docStringFromBody(stmts), - Locals: bindings(locals), - Freevars: bindings(freevars), - }, - } - - // Record indices of locals that require cells. - for i, local := range locals { - if local.Scope == resolve.Cell { - fcomp.fn.Cells = append(fcomp.fn.Cells, i) - } - } - - if debug { - fmt.Fprintf(os.Stderr, "start function(%s @ %s)\n", name, pos) - } - - // Convert AST to a CFG of instructions. - entry := fcomp.newBlock() - fcomp.block = entry - fcomp.stmts(stmts) - if fcomp.block != nil { - fcomp.emit(NONE) - fcomp.emit(RETURN) - } - - var oops bool // something bad happened - - setinitialstack := func(b *block, depth int) { - if b.initialstack == -1 { - b.initialstack = depth - } else if b.initialstack != depth { - fmt.Fprintf(os.Stderr, "%d: setinitialstack: depth mismatch: %d vs %d\n", - b.index, b.initialstack, depth) - oops = true - } - } - - // Linearize the CFG: - // compute order, address, and initial - // stack depth of each reachable block. - var pc uint32 - var blocks []*block - var maxstack int - var visit func(b *block) - visit = func(b *block) { - if b.index >= 0 { - return // already visited - } - b.index = len(blocks) - b.addr = pc - blocks = append(blocks, b) - - stack := b.initialstack - if debug { - fmt.Fprintf(os.Stderr, "%s block %d: (stack = %d)\n", name, b.index, stack) - } - var cjmpAddr *uint32 - var isiterjmp int - for i, insn := range b.insns { - pc++ - - // Compute size of argument. - if insn.op >= OpcodeArgMin { - switch insn.op { - case ITERJMP: - isiterjmp = 1 - fallthrough - case CJMP: - cjmpAddr = &b.insns[i].arg - pc += 4 - default: - pc += uint32(argLen(insn.arg)) - } - } - - // Compute effect on stack. 
- se := insn.stackeffect() - if debug { - fmt.Fprintln(os.Stderr, "\t", insn.op, stack, stack+se) - } - stack += se - if stack < 0 { - fmt.Fprintf(os.Stderr, "After pc=%d: stack underflow\n", pc) - oops = true - } - if stack+isiterjmp > maxstack { - maxstack = stack + isiterjmp - } - } - - if debug { - fmt.Fprintf(os.Stderr, "successors of block %d (start=%d):\n", - b.addr, b.index) - if b.jmp != nil { - fmt.Fprintf(os.Stderr, "jmp to %d\n", b.jmp.index) - } - if b.cjmp != nil { - fmt.Fprintf(os.Stderr, "cjmp to %d\n", b.cjmp.index) - } - } - - // Place the jmp block next. - if b.jmp != nil { - // jump threading (empty cycles are impossible) - for b.jmp.insns == nil { - b.jmp = b.jmp.jmp - } - - setinitialstack(b.jmp, stack+isiterjmp) - if b.jmp.index < 0 { - // Successor is not yet visited: - // place it next and fall through. - visit(b.jmp) - } else { - // Successor already visited; - // explicit backward jump required. - pc += 5 - } - } - - // Then the cjmp block. - if b.cjmp != nil { - // jump threading (empty cycles are impossible) - for b.cjmp.insns == nil { - b.cjmp = b.cjmp.jmp - } - - setinitialstack(b.cjmp, stack) - visit(b.cjmp) - - // Patch the CJMP/ITERJMP, if present. - if cjmpAddr != nil { - *cjmpAddr = b.cjmp.addr - } - } - } - setinitialstack(entry, 0) - visit(entry) - - fn := fcomp.fn - fn.MaxStack = maxstack - - // Emit bytecode (and position table). - if Disassemble { - fmt.Fprintf(os.Stderr, "Function %s: (%d blocks, %d bytes)\n", name, len(blocks), pc) - } - fcomp.generate(blocks, pc) - - if debug { - fmt.Fprintf(os.Stderr, "code=%d maxstack=%d\n", fn.Code, fn.MaxStack) - } - - // Don't panic until we've completed printing of the function. - if oops { - panic("internal error") - } - - if debug { - fmt.Fprintf(os.Stderr, "end function(%s @ %s)\n", name, pos) - } - - return fn -} - -func docStringFromBody(body []syntax.Stmt) string { - if len(body) == 0 { - return "" - } - expr, ok := body[0].(*syntax.ExprStmt) - if !ok { - return "" - } - lit, ok := expr.X.(*syntax.Literal) - if !ok { - return "" - } - if lit.Token != syntax.STRING { - return "" - } - return lit.Value.(string) -} - -func (insn *insn) stackeffect() int { - se := int(stackEffect[insn.op]) - if se == variableStackEffect { - arg := int(insn.arg) - switch insn.op { - case CALL, CALL_KW, CALL_VAR, CALL_VAR_KW: - se = -int(2*(insn.arg&0xff) + insn.arg>>8) - if insn.op != CALL { - se-- - } - if insn.op == CALL_VAR_KW { - se-- - } - case ITERJMP: - // Stack effect differs by successor: - // +1 for jmp/false/ok - // 0 for cjmp/true/exhausted - // Handled specially in caller. - se = 0 - case MAKELIST, MAKETUPLE: - se = 1 - arg - case UNPACK: - se = arg - 1 - default: - panic(insn.op) - } - } - return se -} - -// generate emits the linear instruction stream from the CFG, -// and builds the PC-to-line number table. -func (fcomp *fcomp) generate(blocks []*block, codelen uint32) { - code := make([]byte, 0, codelen) - var pclinetab []uint16 - prev := pclinecol{ - pc: 0, - line: fcomp.fn.Pos.Line, - col: fcomp.fn.Pos.Col, - } - - for _, b := range blocks { - if Disassemble { - fmt.Fprintf(os.Stderr, "%d:\n", b.index) - } - pc := b.addr - for _, insn := range b.insns { - if insn.line != 0 { - // Instruction has a source position. Delta-encode it. - // See Funcode.Position for the encoding. 
- for { - var incomplete uint16 - - // Δpc, uint4 - deltapc := pc - prev.pc - if deltapc > 0x0f { - deltapc = 0x0f - incomplete = 1 - } - prev.pc += deltapc - - // Δline, int5 - deltaline, ok := clip(insn.line-prev.line, -0x10, 0x0f) - if !ok { - incomplete = 1 - } - prev.line += deltaline - - // Δcol, int6 - deltacol, ok := clip(insn.col-prev.col, -0x20, 0x1f) - if !ok { - incomplete = 1 - } - prev.col += deltacol - - entry := uint16(deltapc<<12) | uint16(deltaline&0x1f)<<7 | uint16(deltacol&0x3f)<<1 | incomplete - pclinetab = append(pclinetab, entry) - if incomplete == 0 { - break - } - } - - if Disassemble { - fmt.Fprintf(os.Stderr, "\t\t\t\t\t; %s:%d:%d\n", - filepath.Base(fcomp.fn.Pos.Filename()), insn.line, insn.col) - } - } - if Disassemble { - PrintOp(fcomp.fn, pc, insn.op, insn.arg) - } - code = append(code, byte(insn.op)) - pc++ - if insn.op >= OpcodeArgMin { - if insn.op == CJMP || insn.op == ITERJMP { - code = addUint32(code, insn.arg, 4) // pad arg to 4 bytes - } else { - code = addUint32(code, insn.arg, 0) - } - pc = uint32(len(code)) - } - } - - if b.jmp != nil && b.jmp.index != b.index+1 { - addr := b.jmp.addr - if Disassemble { - fmt.Fprintf(os.Stderr, "\t%d\tjmp\t\t%d\t; block %d\n", - pc, addr, b.jmp.index) - } - code = append(code, byte(JMP)) - code = addUint32(code, addr, 4) - } - } - if len(code) != int(codelen) { - panic("internal error: wrong code length") - } - - fcomp.fn.pclinetab = pclinetab - fcomp.fn.Code = code -} - -// clip returns the value nearest x in the range [min...max], -// and whether it equals x. -func clip(x, min, max int32) (int32, bool) { - if x > max { - return max, false - } else if x < min { - return min, false - } else { - return x, true - } -} - -// addUint32 encodes x as 7-bit little-endian varint. -// TODO(adonovan): opt: steal top two bits of opcode -// to encode the number of complete bytes that follow. -func addUint32(code []byte, x uint32, min int) []byte { - end := len(code) + min - for x >= 0x80 { - code = append(code, byte(x)|0x80) - x >>= 7 - } - code = append(code, byte(x)) - // Pad the operand with NOPs to exactly min bytes. - for len(code) < end { - code = append(code, byte(NOP)) - } - return code -} - -func argLen(x uint32) int { - n := 0 - for x >= 0x80 { - n++ - x >>= 7 - } - return n + 1 -} - -// PrintOp prints an instruction. -// It is provided for debugging. -func PrintOp(fn *Funcode, pc uint32, op Opcode, arg uint32) { - if op < OpcodeArgMin { - fmt.Fprintf(os.Stderr, "\t%d\t%s\n", pc, op) - return - } - - var comment string - switch op { - case CONSTANT: - switch x := fn.Prog.Constants[arg].(type) { - case string: - comment = strconv.Quote(x) - default: - comment = fmt.Sprint(x) - } - case MAKEFUNC: - comment = fn.Prog.Functions[arg].Name - case SETLOCAL, LOCAL: - comment = fn.Locals[arg].Name - case SETGLOBAL, GLOBAL: - comment = fn.Prog.Globals[arg].Name - case ATTR, SETFIELD, PREDECLARED, UNIVERSAL: - comment = fn.Prog.Names[arg] - case FREE: - comment = fn.Freevars[arg].Name - case CALL, CALL_VAR, CALL_KW, CALL_VAR_KW: - comment = fmt.Sprintf("%d pos, %d named", arg>>8, arg&0xff) - default: - // JMP, CJMP, ITERJMP, MAKETUPLE, MAKELIST, LOAD, UNPACK: - // arg is just a number - } - var buf bytes.Buffer - fmt.Fprintf(&buf, "\t%d\t%-10s\t%d", pc, op, arg) - if comment != "" { - fmt.Fprint(&buf, "\t; ", comment) - } - fmt.Fprintln(&buf) - os.Stderr.Write(buf.Bytes()) -} - -// newBlock returns a new block. 
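// An illustrative sketch, not from the vendored source: the little-endian
// base-128 ("7-bit varint") operand encoding that addUint32 and argLen above
// implement, as a self-contained encode/decode pair. The top bit of each byte
// marks "more bytes follow"; the low 7 bits carry the value, least-significant
// group first. Names are hypothetical.
func putUvarint7(dst []byte, x uint32) []byte {
	for x >= 0x80 {
		dst = append(dst, byte(x)|0x80) // low 7 bits, continuation bit set
		x >>= 7
	}
	return append(dst, byte(x)) // final byte, continuation bit clear
}

func getUvarint7(src []byte) (x uint32, n int) {
	for shift := uint(0); ; shift += 7 {
		b := src[n]
		n++
		x |= uint32(b&0x7f) << shift
		if b < 0x80 {
			return x, n
		}
	}
}

// For example, putUvarint7(nil, 300) yields [0xAC, 0x02], and decoding those
// two bytes returns (300, 2) — the same layout addUint32 emits, ignoring the
// NOP padding it adds for patchable jump operands.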
-func (fcomp) newBlock() *block { - return &block{index: -1, initialstack: -1} -} - -// emit emits an instruction to the current block. -func (fcomp *fcomp) emit(op Opcode) { - if op >= OpcodeArgMin { - panic("missing arg: " + op.String()) - } - insn := insn{op: op, line: fcomp.pos.Line, col: fcomp.pos.Col} - fcomp.block.insns = append(fcomp.block.insns, insn) - fcomp.pos.Line = 0 - fcomp.pos.Col = 0 -} - -// emit1 emits an instruction with an immediate operand. -func (fcomp *fcomp) emit1(op Opcode, arg uint32) { - if op < OpcodeArgMin { - panic("unwanted arg: " + op.String()) - } - insn := insn{op: op, arg: arg, line: fcomp.pos.Line, col: fcomp.pos.Col} - fcomp.block.insns = append(fcomp.block.insns, insn) - fcomp.pos.Line = 0 - fcomp.pos.Col = 0 -} - -// jump emits a jump to the specified block. -// On return, the current block is unset. -func (fcomp *fcomp) jump(b *block) { - if b == fcomp.block { - panic("self-jump") // unreachable: Starlark has no arbitrary looping constructs - } - fcomp.block.jmp = b - fcomp.block = nil -} - -// condjump emits a conditional jump (CJMP or ITERJMP) -// to the specified true/false blocks. -// (For ITERJMP, the cases are jmp/f/ok and cjmp/t/exhausted.) -// On return, the current block is unset. -func (fcomp *fcomp) condjump(op Opcode, t, f *block) { - if !(op == CJMP || op == ITERJMP) { - panic("not a conditional jump: " + op.String()) - } - fcomp.emit1(op, 0) // fill in address later - fcomp.block.cjmp = t - fcomp.jump(f) -} - -// nameIndex returns the index of the specified name -// within the name pool, adding it if necessary. -func (pcomp *pcomp) nameIndex(name string) uint32 { - index, ok := pcomp.names[name] - if !ok { - index = uint32(len(pcomp.prog.Names)) - pcomp.names[name] = index - pcomp.prog.Names = append(pcomp.prog.Names, name) - } - return index -} - -// constantIndex returns the index of the specified constant -// within the constant pool, adding it if necessary. -func (pcomp *pcomp) constantIndex(v interface{}) uint32 { - index, ok := pcomp.constants[v] - if !ok { - index = uint32(len(pcomp.prog.Constants)) - pcomp.constants[v] = index - pcomp.prog.Constants = append(pcomp.prog.Constants, v) - } - return index -} - -// functionIndex returns the index of the specified function -// AST the nestedfun pool, adding it if necessary. -func (pcomp *pcomp) functionIndex(fn *Funcode) uint32 { - index, ok := pcomp.functions[fn] - if !ok { - index = uint32(len(pcomp.prog.Functions)) - pcomp.functions[fn] = index - pcomp.prog.Functions = append(pcomp.prog.Functions, fn) - } - return index -} - -// string emits code to push the specified string. -func (fcomp *fcomp) string(s string) { - fcomp.emit1(CONSTANT, fcomp.pcomp.constantIndex(s)) -} - -// setPos sets the current source position. -// It should be called prior to any operation that can fail dynamically. -// All positions are assumed to belong to the same file. -func (fcomp *fcomp) setPos(pos syntax.Position) { - fcomp.pos = pos -} - -// set emits code to store the top-of-stack value -// to the specified local, cell, or global variable. -func (fcomp *fcomp) set(id *syntax.Ident) { - bind := id.Binding.(*resolve.Binding) - switch bind.Scope { - case resolve.Local: - fcomp.emit1(SETLOCAL, uint32(bind.Index)) - case resolve.Cell: - // TODO(adonovan): opt: make a single op for LOCAL, SETCELL. 
- fcomp.emit1(LOCAL, uint32(bind.Index)) - fcomp.emit(SETCELL) - case resolve.Global: - fcomp.emit1(SETGLOBAL, uint32(bind.Index)) - default: - log.Panicf("%s: set(%s): not global/local/cell (%d)", id.NamePos, id.Name, bind.Scope) - } -} - -// lookup emits code to push the value of the specified variable. -func (fcomp *fcomp) lookup(id *syntax.Ident) { - bind := id.Binding.(*resolve.Binding) - if bind.Scope != resolve.Universal { // (universal lookup can't fail) - fcomp.setPos(id.NamePos) - } - switch bind.Scope { - case resolve.Local: - fcomp.emit1(LOCAL, uint32(bind.Index)) - case resolve.Free: - // TODO(adonovan): opt: make a single op for FREE, CELL. - fcomp.emit1(FREE, uint32(bind.Index)) - fcomp.emit(CELL) - case resolve.Cell: - // TODO(adonovan): opt: make a single op for LOCAL, CELL. - fcomp.emit1(LOCAL, uint32(bind.Index)) - fcomp.emit(CELL) - case resolve.Global: - fcomp.emit1(GLOBAL, uint32(bind.Index)) - case resolve.Predeclared: - fcomp.emit1(PREDECLARED, fcomp.pcomp.nameIndex(id.Name)) - case resolve.Universal: - fcomp.emit1(UNIVERSAL, fcomp.pcomp.nameIndex(id.Name)) - default: - log.Panicf("%s: compiler.lookup(%s): scope = %d", id.NamePos, id.Name, bind.Scope) - } -} - -func (fcomp *fcomp) stmts(stmts []syntax.Stmt) { - for _, stmt := range stmts { - fcomp.stmt(stmt) - } -} - -func (fcomp *fcomp) stmt(stmt syntax.Stmt) { - switch stmt := stmt.(type) { - case *syntax.ExprStmt: - if _, ok := stmt.X.(*syntax.Literal); ok { - // Opt: don't compile doc comments only to pop them. - return - } - fcomp.expr(stmt.X) - fcomp.emit(POP) - - case *syntax.BranchStmt: - // Resolver invariant: break/continue appear only within loops. - switch stmt.Token { - case syntax.PASS: - // no-op - case syntax.BREAK: - b := fcomp.loops[len(fcomp.loops)-1].break_ - fcomp.jump(b) - fcomp.block = fcomp.newBlock() // dead code - case syntax.CONTINUE: - b := fcomp.loops[len(fcomp.loops)-1].continue_ - fcomp.jump(b) - fcomp.block = fcomp.newBlock() // dead code - } - - case *syntax.IfStmt: - // Keep consistent with CondExpr. - t := fcomp.newBlock() - f := fcomp.newBlock() - done := fcomp.newBlock() - - fcomp.ifelse(stmt.Cond, t, f) - - fcomp.block = t - fcomp.stmts(stmt.True) - fcomp.jump(done) - - fcomp.block = f - fcomp.stmts(stmt.False) - fcomp.jump(done) - - fcomp.block = done - - case *syntax.AssignStmt: - switch stmt.Op { - case syntax.EQ: - // simple assignment: x = y - fcomp.expr(stmt.RHS) - fcomp.assign(stmt.OpPos, stmt.LHS) - - case syntax.PLUS_EQ, - syntax.MINUS_EQ, - syntax.STAR_EQ, - syntax.SLASH_EQ, - syntax.SLASHSLASH_EQ, - syntax.PERCENT_EQ, - syntax.AMP_EQ, - syntax.PIPE_EQ, - syntax.CIRCUMFLEX_EQ, - syntax.LTLT_EQ, - syntax.GTGT_EQ: - // augmented assignment: x += y - - var set func() - - // Evaluate "address" of x exactly once to avoid duplicate side-effects. - switch lhs := unparen(stmt.LHS).(type) { - case *syntax.Ident: - // x = ... - fcomp.lookup(lhs) - set = func() { - fcomp.set(lhs) - } - - case *syntax.IndexExpr: - // x[y] = ... - fcomp.expr(lhs.X) - fcomp.expr(lhs.Y) - fcomp.emit(DUP2) - fcomp.setPos(lhs.Lbrack) - fcomp.emit(INDEX) - set = func() { - fcomp.setPos(lhs.Lbrack) - fcomp.emit(SETINDEX) - } - - case *syntax.DotExpr: - // x.f = ... 
- fcomp.expr(lhs.X) - fcomp.emit(DUP) - name := fcomp.pcomp.nameIndex(lhs.Name.Name) - fcomp.setPos(lhs.Dot) - fcomp.emit1(ATTR, name) - set = func() { - fcomp.setPos(lhs.Dot) - fcomp.emit1(SETFIELD, name) - } - - default: - panic(lhs) - } - - fcomp.expr(stmt.RHS) - - if stmt.Op == syntax.PLUS_EQ { - // Allow the runtime to optimize list += iterable. - fcomp.setPos(stmt.OpPos) - fcomp.emit(INPLACE_ADD) - } else { - fcomp.binop(stmt.OpPos, stmt.Op-syntax.PLUS_EQ+syntax.PLUS) - } - set() - } - - case *syntax.DefStmt: - fcomp.function(stmt.Function.(*resolve.Function)) - fcomp.set(stmt.Name) - - case *syntax.ForStmt: - // Keep consistent with ForClause. - head := fcomp.newBlock() - body := fcomp.newBlock() - tail := fcomp.newBlock() - - fcomp.expr(stmt.X) - fcomp.setPos(stmt.For) - fcomp.emit(ITERPUSH) - fcomp.jump(head) - - fcomp.block = head - fcomp.condjump(ITERJMP, tail, body) - - fcomp.block = body - fcomp.assign(stmt.For, stmt.Vars) - fcomp.loops = append(fcomp.loops, loop{break_: tail, continue_: head}) - fcomp.stmts(stmt.Body) - fcomp.loops = fcomp.loops[:len(fcomp.loops)-1] - fcomp.jump(head) - - fcomp.block = tail - fcomp.emit(ITERPOP) - - case *syntax.WhileStmt: - head := fcomp.newBlock() - body := fcomp.newBlock() - done := fcomp.newBlock() - - fcomp.jump(head) - fcomp.block = head - fcomp.ifelse(stmt.Cond, body, done) - - fcomp.block = body - fcomp.loops = append(fcomp.loops, loop{break_: done, continue_: head}) - fcomp.stmts(stmt.Body) - fcomp.loops = fcomp.loops[:len(fcomp.loops)-1] - fcomp.jump(head) - - fcomp.block = done - - case *syntax.ReturnStmt: - if stmt.Result != nil { - fcomp.expr(stmt.Result) - } else { - fcomp.emit(NONE) - } - fcomp.emit(RETURN) - fcomp.block = fcomp.newBlock() // dead code - - case *syntax.LoadStmt: - for i := range stmt.From { - fcomp.string(stmt.From[i].Name) - } - module := stmt.Module.Value.(string) - fcomp.pcomp.prog.Loads = append(fcomp.pcomp.prog.Loads, Binding{ - Name: module, - Pos: stmt.Module.TokenPos, - }) - fcomp.string(module) - fcomp.setPos(stmt.Load) - fcomp.emit1(LOAD, uint32(len(stmt.From))) - for i := range stmt.To { - fcomp.set(stmt.To[len(stmt.To)-1-i]) - } - - default: - start, _ := stmt.Span() - log.Panicf("%s: exec: unexpected statement %T", start, stmt) - } -} - -// assign implements lhs = rhs for arbitrary expressions lhs. -// RHS is on top of stack, consumed. 
-func (fcomp *fcomp) assign(pos syntax.Position, lhs syntax.Expr) { - switch lhs := lhs.(type) { - case *syntax.ParenExpr: - // (lhs) = rhs - fcomp.assign(pos, lhs.X) - - case *syntax.Ident: - // x = rhs - fcomp.set(lhs) - - case *syntax.TupleExpr: - // x, y = rhs - fcomp.assignSequence(pos, lhs.List) - - case *syntax.ListExpr: - // [x, y] = rhs - fcomp.assignSequence(pos, lhs.List) - - case *syntax.IndexExpr: - // x[y] = rhs - fcomp.expr(lhs.X) - fcomp.emit(EXCH) - fcomp.expr(lhs.Y) - fcomp.emit(EXCH) - fcomp.setPos(lhs.Lbrack) - fcomp.emit(SETINDEX) - - case *syntax.DotExpr: - // x.f = rhs - fcomp.expr(lhs.X) - fcomp.emit(EXCH) - fcomp.setPos(lhs.Dot) - fcomp.emit1(SETFIELD, fcomp.pcomp.nameIndex(lhs.Name.Name)) - - default: - panic(lhs) - } -} - -func (fcomp *fcomp) assignSequence(pos syntax.Position, lhs []syntax.Expr) { - fcomp.setPos(pos) - fcomp.emit1(UNPACK, uint32(len(lhs))) - for i := range lhs { - fcomp.assign(pos, lhs[i]) - } -} - -func (fcomp *fcomp) expr(e syntax.Expr) { - switch e := e.(type) { - case *syntax.ParenExpr: - fcomp.expr(e.X) - - case *syntax.Ident: - fcomp.lookup(e) - - case *syntax.Literal: - // e.Value is int64, float64, *bigInt, or string. - fcomp.emit1(CONSTANT, fcomp.pcomp.constantIndex(e.Value)) - - case *syntax.ListExpr: - for _, x := range e.List { - fcomp.expr(x) - } - fcomp.emit1(MAKELIST, uint32(len(e.List))) - - case *syntax.CondExpr: - // Keep consistent with IfStmt. - t := fcomp.newBlock() - f := fcomp.newBlock() - done := fcomp.newBlock() - - fcomp.ifelse(e.Cond, t, f) - - fcomp.block = t - fcomp.expr(e.True) - fcomp.jump(done) - - fcomp.block = f - fcomp.expr(e.False) - fcomp.jump(done) - - fcomp.block = done - - case *syntax.IndexExpr: - fcomp.expr(e.X) - fcomp.expr(e.Y) - fcomp.setPos(e.Lbrack) - fcomp.emit(INDEX) - - case *syntax.SliceExpr: - fcomp.setPos(e.Lbrack) - fcomp.expr(e.X) - if e.Lo != nil { - fcomp.expr(e.Lo) - } else { - fcomp.emit(NONE) - } - if e.Hi != nil { - fcomp.expr(e.Hi) - } else { - fcomp.emit(NONE) - } - if e.Step != nil { - fcomp.expr(e.Step) - } else { - fcomp.emit(NONE) - } - fcomp.emit(SLICE) - - case *syntax.Comprehension: - if e.Curly { - fcomp.emit(MAKEDICT) - } else { - fcomp.emit1(MAKELIST, 0) - } - fcomp.comprehension(e, 0) - - case *syntax.TupleExpr: - fcomp.tuple(e.List) - - case *syntax.DictExpr: - fcomp.emit(MAKEDICT) - for _, entry := range e.List { - entry := entry.(*syntax.DictEntry) - fcomp.emit(DUP) - fcomp.expr(entry.Key) - fcomp.expr(entry.Value) - fcomp.setPos(entry.Colon) - fcomp.emit(SETDICTUNIQ) - } - - case *syntax.UnaryExpr: - fcomp.expr(e.X) - fcomp.setPos(e.OpPos) - switch e.Op { - case syntax.MINUS: - fcomp.emit(UMINUS) - case syntax.PLUS: - fcomp.emit(UPLUS) - case syntax.NOT: - fcomp.emit(NOT) - case syntax.TILDE: - fcomp.emit(TILDE) - default: - log.Panicf("%s: unexpected unary op: %s", e.OpPos, e.Op) - } - - case *syntax.BinaryExpr: - switch e.Op { - // short-circuit operators - // TODO(adonovan): use ifelse to simplify conditions. 
- case syntax.OR: - // x or y => if x then x else y - done := fcomp.newBlock() - y := fcomp.newBlock() - - fcomp.expr(e.X) - fcomp.emit(DUP) - fcomp.condjump(CJMP, done, y) - - fcomp.block = y - fcomp.emit(POP) // discard X - fcomp.expr(e.Y) - fcomp.jump(done) - - fcomp.block = done - - case syntax.AND: - // x and y => if x then y else x - done := fcomp.newBlock() - y := fcomp.newBlock() - - fcomp.expr(e.X) - fcomp.emit(DUP) - fcomp.condjump(CJMP, y, done) - - fcomp.block = y - fcomp.emit(POP) // discard X - fcomp.expr(e.Y) - fcomp.jump(done) - - fcomp.block = done - - case syntax.PLUS: - fcomp.plus(e) - - default: - // all other strict binary operator (includes comparisons) - fcomp.expr(e.X) - fcomp.expr(e.Y) - fcomp.binop(e.OpPos, e.Op) - } - - case *syntax.DotExpr: - fcomp.expr(e.X) - fcomp.setPos(e.Dot) - fcomp.emit1(ATTR, fcomp.pcomp.nameIndex(e.Name.Name)) - - case *syntax.CallExpr: - fcomp.call(e) - - case *syntax.LambdaExpr: - fcomp.function(e.Function.(*resolve.Function)) - - default: - start, _ := e.Span() - log.Panicf("%s: unexpected expr %T", start, e) - } -} - -type summand struct { - x syntax.Expr - plusPos syntax.Position -} - -// plus emits optimized code for ((a+b)+...)+z that avoids naive -// quadratic behavior for strings, tuples, and lists, -// and folds together adjacent literals of the same type. -func (fcomp *fcomp) plus(e *syntax.BinaryExpr) { - // Gather all the right operands of the left tree of plusses. - // A tree (((a+b)+c)+d) becomes args=[a +b +c +d]. - args := make([]summand, 0, 2) // common case: 2 operands - for plus := e; ; { - args = append(args, summand{unparen(plus.Y), plus.OpPos}) - left := unparen(plus.X) - x, ok := left.(*syntax.BinaryExpr) - if !ok || x.Op != syntax.PLUS { - args = append(args, summand{x: left}) - break - } - plus = x - } - // Reverse args to syntactic order. - for i, n := 0, len(args)/2; i < n; i++ { - j := len(args) - 1 - i - args[i], args[j] = args[j], args[i] - } - - // Fold sums of adjacent literals of the same type: ""+"", []+[], ()+(). - out := args[:0] // compact in situ - for i := 0; i < len(args); { - j := i + 1 - if code := addable(args[i].x); code != 0 { - for j < len(args) && addable(args[j].x) == code { - j++ - } - if j > i+1 { - args[i].x = add(code, args[i:j]) - } - } - out = append(out, args[i]) - i = j - } - args = out - - // Emit code for an n-ary sum (n > 0). - fcomp.expr(args[0].x) - for _, summand := range args[1:] { - fcomp.expr(summand.x) - fcomp.setPos(summand.plusPos) - fcomp.emit(PLUS) - } - - // If len(args) > 2, use of an accumulator instead of a chain of - // PLUS operations may be more efficient. - // However, no gain was measured on a workload analogous to Bazel loading; - // TODO(adonovan): opt: re-evaluate on a Bazel analysis-like workload. - // - // We cannot use a single n-ary SUM operation - // a b c SUM<3> - // because we need to report a distinct error for each - // individual '+' operation, so three additional operations are - // needed: - // - // ACCSTART => create buffer and append to it - // ACCUM => append to buffer - // ACCEND => get contents of buffer - // - // For string, list, and tuple values, the interpreter can - // optimize these operations by using a mutable buffer. - // For all other types, ACCSTART and ACCEND would behave like - // the identity function and ACCUM behaves like PLUS. - // ACCUM must correctly support user-defined operations - // such as list+foo. 
- // - // fcomp.emit(ACCSTART) - // for _, summand := range args[1:] { - // fcomp.expr(summand.x) - // fcomp.setPos(summand.plusPos) - // fcomp.emit(ACCUM) - // } - // fcomp.emit(ACCEND) -} - -// addable reports whether e is a statically addable -// expression: a [s]tring, [l]ist, or [t]uple. -func addable(e syntax.Expr) rune { - switch e := e.(type) { - case *syntax.Literal: - // TODO(adonovan): opt: support INT/FLOAT/BIGINT constant folding. - switch e.Token { - case syntax.STRING: - return 's' - } - case *syntax.ListExpr: - return 'l' - case *syntax.TupleExpr: - return 't' - } - return 0 -} - -// add returns an expression denoting the sum of args, -// which are all addable values of the type indicated by code. -// The resulting syntax is degenerate, lacking position, etc. -func add(code rune, args []summand) syntax.Expr { - switch code { - case 's': - var buf bytes.Buffer - for _, arg := range args { - buf.WriteString(arg.x.(*syntax.Literal).Value.(string)) - } - return &syntax.Literal{Token: syntax.STRING, Value: buf.String()} - case 'l': - var elems []syntax.Expr - for _, arg := range args { - elems = append(elems, arg.x.(*syntax.ListExpr).List...) - } - return &syntax.ListExpr{List: elems} - case 't': - var elems []syntax.Expr - for _, arg := range args { - elems = append(elems, arg.x.(*syntax.TupleExpr).List...) - } - return &syntax.TupleExpr{List: elems} - } - panic(code) -} - -func unparen(e syntax.Expr) syntax.Expr { - if p, ok := e.(*syntax.ParenExpr); ok { - return unparen(p.X) - } - return e -} - -func (fcomp *fcomp) binop(pos syntax.Position, op syntax.Token) { - // TODO(adonovan): simplify by assuming syntax and compiler constants align. - fcomp.setPos(pos) - switch op { - // arithmetic - case syntax.PLUS: - fcomp.emit(PLUS) - case syntax.MINUS: - fcomp.emit(MINUS) - case syntax.STAR: - fcomp.emit(STAR) - case syntax.SLASH: - fcomp.emit(SLASH) - case syntax.SLASHSLASH: - fcomp.emit(SLASHSLASH) - case syntax.PERCENT: - fcomp.emit(PERCENT) - case syntax.AMP: - fcomp.emit(AMP) - case syntax.PIPE: - fcomp.emit(PIPE) - case syntax.CIRCUMFLEX: - fcomp.emit(CIRCUMFLEX) - case syntax.LTLT: - fcomp.emit(LTLT) - case syntax.GTGT: - fcomp.emit(GTGT) - case syntax.IN: - fcomp.emit(IN) - case syntax.NOT_IN: - fcomp.emit(IN) - fcomp.emit(NOT) - - // comparisons - case syntax.EQL, - syntax.NEQ, - syntax.GT, - syntax.LT, - syntax.LE, - syntax.GE: - fcomp.emit(Opcode(op-syntax.EQL) + EQL) - - default: - log.Panicf("%s: unexpected binary op: %s", pos, op) - } -} - -func (fcomp *fcomp) call(call *syntax.CallExpr) { - // TODO(adonovan): opt: Use optimized path for calling methods - // of built-ins: x.f(...) to avoid materializing a closure. - // if dot, ok := call.Fcomp.(*syntax.DotExpr); ok { - // fcomp.expr(dot.X) - // fcomp.args(call) - // fcomp.emit1(CALL_ATTR, fcomp.name(dot.Name.Name)) - // return - // } - - // usual case - fcomp.expr(call.Fn) - op, arg := fcomp.args(call) - fcomp.setPos(call.Lparen) - fcomp.emit1(op, arg) -} - -// args emits code to push a tuple of positional arguments -// and a tuple of named arguments containing alternating keys and values. -// Either or both tuples may be empty (TODO(adonovan): optimize). -func (fcomp *fcomp) args(call *syntax.CallExpr) (op Opcode, arg uint32) { - var callmode int - // Compute the number of each kind of parameter. 
- var p, n int // number of positional, named arguments - var varargs, kwargs syntax.Expr - for _, arg := range call.Args { - if binary, ok := arg.(*syntax.BinaryExpr); ok && binary.Op == syntax.EQ { - - // named argument (name, value) - fcomp.string(binary.X.(*syntax.Ident).Name) - fcomp.expr(binary.Y) - n++ - continue - } - if unary, ok := arg.(*syntax.UnaryExpr); ok { - if unary.Op == syntax.STAR { - callmode |= 1 - varargs = unary.X - continue - } else if unary.Op == syntax.STARSTAR { - callmode |= 2 - kwargs = unary.X - continue - } - } - - // positional argument - fcomp.expr(arg) - p++ - } - - // Python2 and Python3 both permit named arguments - // to appear both before and after a *args argument: - // f(1, 2, x=3, *[4], y=5, **dict(z=6)) - // - // They also differ in their evaluation order: - // Python2: 1 2 3 5 4 6 (*args and **kwargs evaluated last) - // Python3: 1 2 4 3 5 6 (positional args evaluated before named args) - // Starlark-in-Java historically used a third order: - // Lexical: 1 2 3 4 5 6 (all args evaluated left-to-right) - // - // After discussion in github.com/bazelbuild/starlark#13, the - // spec now requires Starlark to statically reject named - // arguments after *args (e.g. y=5), and to use Python2-style - // evaluation order. This is both easy to implement and - // consistent with lexical order: - // - // f(1, 2, x=3, *[4], **dict(z=6)) # 1 2 3 4 6 - - // *args - if varargs != nil { - fcomp.expr(varargs) - } - - // **kwargs - if kwargs != nil { - fcomp.expr(kwargs) - } - - // TODO(adonovan): avoid this with a more flexible encoding. - if p >= 256 || n >= 256 { - // resolve already checked this; should be unreachable - panic("too many arguments in call") - } - - return CALL + Opcode(callmode), uint32(p<<8 | n) -} - -func (fcomp *fcomp) tuple(elems []syntax.Expr) { - for _, elem := range elems { - fcomp.expr(elem) - } - fcomp.emit1(MAKETUPLE, uint32(len(elems))) -} - -func (fcomp *fcomp) comprehension(comp *syntax.Comprehension, clauseIndex int) { - if clauseIndex == len(comp.Clauses) { - fcomp.emit(DUP) // accumulator - if comp.Curly { - // dict: {k:v for ...} - // Parser ensures that body is of form k:v. - // Python-style set comprehensions {body for vars in x} - // are not supported. - entry := comp.Body.(*syntax.DictEntry) - fcomp.expr(entry.Key) - fcomp.expr(entry.Value) - fcomp.setPos(entry.Colon) - fcomp.emit(SETDICT) - } else { - // list: [body for vars in x] - fcomp.expr(comp.Body) - fcomp.emit(APPEND) - } - return - } - - clause := comp.Clauses[clauseIndex] - switch clause := clause.(type) { - case *syntax.IfClause: - t := fcomp.newBlock() - done := fcomp.newBlock() - fcomp.ifelse(clause.Cond, t, done) - - fcomp.block = t - fcomp.comprehension(comp, clauseIndex+1) - fcomp.jump(done) - - fcomp.block = done - return - - case *syntax.ForClause: - // Keep consistent with ForStmt. - head := fcomp.newBlock() - body := fcomp.newBlock() - tail := fcomp.newBlock() - - fcomp.expr(clause.X) - fcomp.setPos(clause.For) - fcomp.emit(ITERPUSH) - fcomp.jump(head) - - fcomp.block = head - fcomp.condjump(ITERJMP, tail, body) - - fcomp.block = body - fcomp.assign(clause.For, clause.Vars) - fcomp.comprehension(comp, clauseIndex+1) - fcomp.jump(head) - - fcomp.block = tail - fcomp.emit(ITERPOP) - return - } - - start, _ := clause.Span() - log.Panicf("%s: unexpected comprehension clause %T", start, clause) -} - -func (fcomp *fcomp) function(f *resolve.Function) { - // Evaluation of the defaults may fail, so record the position. 
- fcomp.setPos(f.Pos) - - // To reduce allocation, we emit a combined tuple - // for the defaults and the freevars. - // The function knows where to split it at run time. - - // Generate tuple of parameter defaults. For: - // def f(p1, p2=dp2, p3=dp3, *, k1, k2=dk2, k3, **kwargs) - // the tuple is: - // (dp2, dp3, MANDATORY, dk2, MANDATORY). - ndefaults := 0 - seenStar := false - for _, param := range f.Params { - switch param := param.(type) { - case *syntax.BinaryExpr: - fcomp.expr(param.Y) - ndefaults++ - case *syntax.UnaryExpr: - seenStar = true // * or *args (also **kwargs) - case *syntax.Ident: - if seenStar { - fcomp.emit(MANDATORY) - ndefaults++ - } - } - } - - // Capture the cells of the function's - // free variables from the lexical environment. - for _, freevar := range f.FreeVars { - // Don't call fcomp.lookup because we want - // the cell itself, not its content. - switch freevar.Scope { - case resolve.Free: - fcomp.emit1(FREE, uint32(freevar.Index)) - case resolve.Cell: - fcomp.emit1(LOCAL, uint32(freevar.Index)) - } - } - - fcomp.emit1(MAKETUPLE, uint32(ndefaults+len(f.FreeVars))) - - funcode := fcomp.pcomp.function(f.Name, f.Pos, f.Body, f.Locals, f.FreeVars) - - if debug { - // TODO(adonovan): do compilations sequentially not as a tree, - // to make the log easier to read. - // Simplify by identifying Toplevel and functionIndex 0. - fmt.Fprintf(os.Stderr, "resuming %s @ %s\n", fcomp.fn.Name, fcomp.pos) - } - - // def f(a, *, b=1) has only 2 parameters. - numParams := len(f.Params) - if f.NumKwonlyParams > 0 && !f.HasVarargs { - numParams-- - } - - funcode.NumParams = numParams - funcode.NumKwonlyParams = f.NumKwonlyParams - funcode.HasVarargs = f.HasVarargs - funcode.HasKwargs = f.HasKwargs - fcomp.emit1(MAKEFUNC, fcomp.pcomp.functionIndex(funcode)) -} - -// ifelse emits a Boolean control flow decision. -// On return, the current block is unset. -func (fcomp *fcomp) ifelse(cond syntax.Expr, t, f *block) { - switch cond := cond.(type) { - case *syntax.UnaryExpr: - if cond.Op == syntax.NOT { - // if not x then goto t else goto f - // => - // if x then goto f else goto t - fcomp.ifelse(cond.X, f, t) - return - } - - case *syntax.BinaryExpr: - switch cond.Op { - case syntax.AND: - // if x and y then goto t else goto f - // => - // if x then ifelse(y, t, f) else goto f - fcomp.expr(cond.X) - y := fcomp.newBlock() - fcomp.condjump(CJMP, y, f) - - fcomp.block = y - fcomp.ifelse(cond.Y, t, f) - return - - case syntax.OR: - // if x or y then goto t else goto f - // => - // if x then goto t else ifelse(y, t, f) - fcomp.expr(cond.X) - y := fcomp.newBlock() - fcomp.condjump(CJMP, t, y) - - fcomp.block = y - fcomp.ifelse(cond.Y, t, f) - return - case syntax.NOT_IN: - // if x not in y then goto t else goto f - // => - // if x in y then goto f else goto t - copy := *cond - copy.Op = syntax.IN - fcomp.expr(©) - fcomp.condjump(CJMP, f, t) - return - } - } - - // general case - fcomp.expr(cond) - fcomp.condjump(CJMP, t, f) -} diff --git a/src/vendor/go.starlark.net/internal/compile/serial.go b/src/vendor/go.starlark.net/internal/compile/serial.go deleted file mode 100644 index 0107ef9cd..000000000 --- a/src/vendor/go.starlark.net/internal/compile/serial.go +++ /dev/null @@ -1,389 +0,0 @@ -package compile - -// This file defines functions to read and write a compile.Program to a file. -// -// It is the client's responsibility to avoid version skew between the -// compiler used to produce a file and the interpreter that consumes it. -// The version number is provided as a constant. 
-// Incompatible protocol changes should also increment the version number. -// -// Encoding -// -// Program: -// "sky!" [4]byte # magic number -// str uint32le # offset of section -// version varint # must match Version -// filename string -// numloads varint -// loads []Ident -// numnames varint -// names []string -// numconsts varint -// consts []Constant -// numglobals varint -// globals []Ident -// toplevel Funcode -// numfuncs varint -// funcs []Funcode -// []byte # concatenation of all referenced strings -// EOF -// -// Funcode: -// id Ident -// code []byte -// pclinetablen varint -// pclinetab []varint -// numlocals varint -// locals []Ident -// numcells varint -// cells []int -// numfreevars varint -// freevar []Ident -// maxstack varint -// numparams varint -// numkwonlyparams varint -// hasvarargs varint (0 or 1) -// haskwargs varint (0 or 1) -// -// Ident: -// filename string -// line, col varint -// -// Constant: # type data -// type varint # 0=string string -// data ... # 1=int varint -// # 2=float varint (bits as uint64) -// # 3=bigint string (decimal ASCII text) -// -// The encoding starts with a four-byte magic number. -// The next four bytes are a little-endian uint32 -// that provides the offset of the string section -// at the end of the file, which contains the ordered -// concatenation of all strings referenced by the -// program. This design permits the decoder to read -// the first and second parts of the file into different -// memory allocations: the first (the encoded program) -// is transient, but the second (the strings) persists -// for the life of the Program. -// -// Within the encoded program, all strings are referred -// to by their length. As the encoder and decoder process -// the entire file sequentially, they are in lock step, -// so the start offset of each string is implicit. -// -// Program.Code is represented as a []byte slice to permit -// modification when breakpoints are set. All other strings -// are represented as strings. They all (unsafely) share the -// same backing byte slice. -// -// Aside from the str field, all integers are encoded as varints. - -import ( - "encoding/binary" - "fmt" - "math" - "math/big" - debugpkg "runtime/debug" - "unsafe" - - "go.starlark.net/syntax" -) - -const magic = "!sky" - -// Encode encodes a compiled Starlark program. -func (prog *Program) Encode() []byte { - var e encoder - e.p = append(e.p, magic...) - e.p = append(e.p, "????"...) // string data offset; filled in later - e.int(Version) - e.string(prog.Toplevel.Pos.Filename()) - e.bindings(prog.Loads) - e.int(len(prog.Names)) - for _, name := range prog.Names { - e.string(name) - } - e.int(len(prog.Constants)) - for _, c := range prog.Constants { - switch c := c.(type) { - case string: - e.int(0) - e.string(c) - case int64: - e.int(1) - e.int64(c) - case float64: - e.int(2) - e.uint64(math.Float64bits(c)) - case *big.Int: - e.int(3) - e.string(c.Text(10)) - } - } - e.bindings(prog.Globals) - e.function(prog.Toplevel) - e.int(len(prog.Functions)) - for _, fn := range prog.Functions { - e.function(fn) - } - - // Patch in the offset of the string data section. - binary.LittleEndian.PutUint32(e.p[4:8], uint32(len(e.p))) - - return append(e.p, e.s...) -} - -type encoder struct { - p []byte // encoded program - s []byte // strings - tmp [binary.MaxVarintLen64]byte -} - -func (e *encoder) int(x int) { - e.int64(int64(x)) -} - -func (e *encoder) int64(x int64) { - n := binary.PutVarint(e.tmp[:], x) - e.p = append(e.p, e.tmp[:n]...) 
-} - -func (e *encoder) uint64(x uint64) { - n := binary.PutUvarint(e.tmp[:], x) - e.p = append(e.p, e.tmp[:n]...) -} - -func (e *encoder) string(s string) { - e.int(len(s)) - e.s = append(e.s, s...) -} - -func (e *encoder) bytes(b []byte) { - e.int(len(b)) - e.s = append(e.s, b...) -} - -func (e *encoder) binding(bind Binding) { - e.string(bind.Name) - e.int(int(bind.Pos.Line)) - e.int(int(bind.Pos.Col)) -} - -func (e *encoder) bindings(binds []Binding) { - e.int(len(binds)) - for _, bind := range binds { - e.binding(bind) - } -} - -func (e *encoder) function(fn *Funcode) { - e.binding(Binding{fn.Name, fn.Pos}) - e.string(fn.Doc) - e.bytes(fn.Code) - e.int(len(fn.pclinetab)) - for _, x := range fn.pclinetab { - e.int64(int64(x)) - } - e.bindings(fn.Locals) - e.int(len(fn.Cells)) - for _, index := range fn.Cells { - e.int(index) - } - e.bindings(fn.Freevars) - e.int(fn.MaxStack) - e.int(fn.NumParams) - e.int(fn.NumKwonlyParams) - e.int(b2i(fn.HasVarargs)) - e.int(b2i(fn.HasKwargs)) -} - -func b2i(b bool) int { - if b { - return 1 - } else { - return 0 - } -} - -// DecodeProgram decodes a compiled Starlark program from data. -func DecodeProgram(data []byte) (_ *Program, err error) { - if len(data) < len(magic) { - return nil, fmt.Errorf("not a compiled module: no magic number") - } - if got := string(data[:4]); got != magic { - return nil, fmt.Errorf("not a compiled module: got magic number %q, want %q", - got, magic) - } - defer func() { - if x := recover(); x != nil { - debugpkg.PrintStack() - err = fmt.Errorf("internal error while decoding program: %v", x) - } - }() - - offset := binary.LittleEndian.Uint32(data[4:8]) - d := decoder{ - p: data[8:offset], - s: append([]byte(nil), data[offset:]...), // allocate a copy, which will persist - } - - if v := d.int(); v != Version { - return nil, fmt.Errorf("version mismatch: read %d, want %d", v, Version) - } - - filename := d.string() - d.filename = &filename - - loads := d.bindings() - - names := make([]string, d.int()) - for i := range names { - names[i] = d.string() - } - - // constants - constants := make([]interface{}, d.int()) - for i := range constants { - var c interface{} - switch d.int() { - case 0: - c = d.string() - case 1: - c = d.int64() - case 2: - c = math.Float64frombits(d.uint64()) - case 3: - c, _ = new(big.Int).SetString(d.string(), 10) - } - constants[i] = c - } - - globals := d.bindings() - toplevel := d.function() - funcs := make([]*Funcode, d.int()) - for i := range funcs { - funcs[i] = d.function() - } - - prog := &Program{ - Loads: loads, - Names: names, - Constants: constants, - Globals: globals, - Functions: funcs, - Toplevel: toplevel, - } - toplevel.Prog = prog - for _, f := range funcs { - f.Prog = prog - } - - if len(d.p)+len(d.s) > 0 { - return nil, fmt.Errorf("internal error: unconsumed data during decoding") - } - - return prog, nil -} - -type decoder struct { - p []byte // encoded program - s []byte // strings - filename *string // (indirect to avoid keeping decoder live) -} - -func (d *decoder) int() int { - return int(d.int64()) -} - -func (d *decoder) int64() int64 { - x, len := binary.Varint(d.p[:]) - d.p = d.p[len:] - return x -} - -func (d *decoder) uint64() uint64 { - x, len := binary.Uvarint(d.p[:]) - d.p = d.p[len:] - return x -} - -func (d *decoder) string() (s string) { - if slice := d.bytes(); len(slice) > 0 { - // Avoid a memory allocation for each string - // by unsafely aliasing slice. 
- type string struct { - data *byte - len int - } - ptr := (*string)(unsafe.Pointer(&s)) - ptr.data = &slice[0] - ptr.len = len(slice) - } - return s -} - -func (d *decoder) bytes() []byte { - len := d.int() - r := d.s[:len:len] - d.s = d.s[len:] - return r -} - -func (d *decoder) binding() Binding { - name := d.string() - line := int32(d.int()) - col := int32(d.int()) - return Binding{Name: name, Pos: syntax.MakePosition(d.filename, line, col)} -} - -func (d *decoder) bindings() []Binding { - bindings := make([]Binding, d.int()) - for i := range bindings { - bindings[i] = d.binding() - } - return bindings -} - -func (d *decoder) ints() []int { - ints := make([]int, d.int()) - for i := range ints { - ints[i] = d.int() - } - return ints -} - -func (d *decoder) bool() bool { return d.int() != 0 } - -func (d *decoder) function() *Funcode { - id := d.binding() - doc := d.string() - code := d.bytes() - pclinetab := make([]uint16, d.int()) - for i := range pclinetab { - pclinetab[i] = uint16(d.int()) - } - locals := d.bindings() - cells := d.ints() - freevars := d.bindings() - maxStack := d.int() - numParams := d.int() - numKwonlyParams := d.int() - hasVarargs := d.int() != 0 - hasKwargs := d.int() != 0 - return &Funcode{ - // Prog is filled in later. - Pos: id.Pos, - Name: id.Name, - Doc: doc, - Code: code, - pclinetab: pclinetab, - Locals: locals, - Cells: cells, - Freevars: freevars, - MaxStack: maxStack, - NumParams: numParams, - NumKwonlyParams: numKwonlyParams, - HasVarargs: hasVarargs, - HasKwargs: hasKwargs, - } -} diff --git a/src/vendor/go.starlark.net/internal/spell/spell.go b/src/vendor/go.starlark.net/internal/spell/spell.go deleted file mode 100644 index 7739fabaa..000000000 --- a/src/vendor/go.starlark.net/internal/spell/spell.go +++ /dev/null @@ -1,115 +0,0 @@ -// Package spell file defines a simple spelling checker for use in attribute errors -// such as "no such field .foo; did you mean .food?". -package spell - -import ( - "strings" - "unicode" -) - -// Nearest returns the element of candidates -// nearest to x using the Levenshtein metric, -// or "" if none were promising. -func Nearest(x string, candidates []string) string { - // Ignore underscores and case when matching. - fold := func(s string) string { - return strings.Map(func(r rune) rune { - if r == '_' { - return -1 - } - return unicode.ToLower(r) - }, s) - } - - x = fold(x) - - var best string - bestD := (len(x) + 1) / 2 // allow up to 50% typos - for _, c := range candidates { - d := levenshtein(x, fold(c), bestD) - if d < bestD { - bestD = d - best = c - } - } - return best -} - -// levenshtein returns the non-negative Levenshtein edit distance -// between the byte strings x and y. -// -// If the computed distance exceeds max, -// the function may return early with an approximate value > max. -func levenshtein(x, y string, max int) int { - // This implementation is derived from one by Laurent Le Brun in - // Bazel that uses the single-row space efficiency trick - // described at bitbucket.org/clearer/iosifovich. - - // Let x be the shorter string. - if len(x) > len(y) { - x, y = y, x - } - - // Remove common prefix. 
- for i := 0; i < len(x); i++ { - if x[i] != y[i] { - x = x[i:] - y = y[i:] - break - } - } - if x == "" { - return len(y) - } - - if d := abs(len(x) - len(y)); d > max { - return d // excessive length divergence - } - - row := make([]int, len(y)+1) - for i := range row { - row[i] = i - } - - for i := 1; i <= len(x); i++ { - row[0] = i - best := i - prev := i - 1 - for j := 1; j <= len(y); j++ { - a := prev + b2i(x[i-1] != y[j-1]) // substitution - b := 1 + row[j-1] // deletion - c := 1 + row[j] // insertion - k := min(a, min(b, c)) - prev, row[j] = row[j], k - best = min(best, k) - } - if best > max { - return best - } - } - return row[len(y)] -} - -func b2i(b bool) int { - if b { - return 1 - } else { - return 0 - } -} - -func min(x, y int) int { - if x < y { - return x - } else { - return y - } -} - -func abs(x int) int { - if x >= 0 { - return x - } else { - return -x - } -} diff --git a/src/vendor/go.starlark.net/resolve/binding.go b/src/vendor/go.starlark.net/resolve/binding.go deleted file mode 100644 index 6b99f4b97..000000000 --- a/src/vendor/go.starlark.net/resolve/binding.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2019 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package resolve - -import "go.starlark.net/syntax" - -// This file defines resolver data types saved in the syntax tree. -// We cannot guarantee API stability for these types -// as they are closely tied to the implementation. - -// A Binding contains resolver information about an identifer. -// The resolver populates the Binding field of each syntax.Identifier. -// The Binding ties together all identifiers that denote the same variable. -type Binding struct { - Scope Scope - - // Index records the index into the enclosing - // - {DefStmt,File}.Locals, if Scope==Local - // - DefStmt.FreeVars, if Scope==Free - // - File.Globals, if Scope==Global. - // It is zero if Scope is Predeclared, Universal, or Undefined. - Index int - - First *syntax.Ident // first binding use (iff Scope==Local/Free/Global) -} - -// The Scope of Binding indicates what kind of scope it has. -type Scope uint8 - -const ( - Undefined Scope = iota // name is not defined - Local // name is local to its function or file - Cell // name is function-local but shared with a nested function - Free // name is cell of some enclosing function - Global // name is global to module - Predeclared // name is predeclared for this module (e.g. glob) - Universal // name is universal (e.g. len) -) - -var scopeNames = [...]string{ - Undefined: "undefined", - Local: "local", - Cell: "cell", - Free: "free", - Global: "global", - Predeclared: "predeclared", - Universal: "universal", -} - -func (scope Scope) String() string { return scopeNames[scope] } - -// A Module contains resolver information about a file. -// The resolver populates the Module field of each syntax.File. -type Module struct { - Locals []*Binding // the file's (comprehension-)local variables - Globals []*Binding // the file's global variables -} - -// A Function contains resolver information about a named or anonymous function. -// The resolver populates the Function field of each syntax.DefStmt and syntax.LambdaExpr. 
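As the package comment above notes, spell.Nearest exists to power "did you mean" hints; the package is internal to go.starlark.net, so the sketch below exercises it indirectly through the public API (the file name and script are illustrative).

    package main

    import (
        "fmt"

        "go.starlark.net/starlark"
    )

    func main() {
        thread := &starlark.Thread{Name: "demo"}
        // "uppr" is one edit away from the real string method "upper", so the
        // attribute error should carry a hint, roughly:
        //   string has no .uppr field or method (did you mean .upper?)
        _, err := starlark.ExecFile(thread, "demo.star", `x = "abc".uppr()`, nil)
        fmt.Println(err)
    }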
-type Function struct { - Pos syntax.Position // of DEF or LAMBDA - Name string // name of def, or "lambda" - Params []syntax.Expr // param = ident | ident=expr | * | *ident | **ident - Body []syntax.Stmt // contains synthetic 'return expr' for lambda - - HasVarargs bool // whether params includes *args (convenience) - HasKwargs bool // whether params includes **kwargs (convenience) - NumKwonlyParams int // number of keyword-only optional parameters - Locals []*Binding // this function's local/cell variables, parameters first - FreeVars []*Binding // enclosing cells to capture in closure -} diff --git a/src/vendor/go.starlark.net/resolve/resolve.go b/src/vendor/go.starlark.net/resolve/resolve.go deleted file mode 100644 index 440bcf081..000000000 --- a/src/vendor/go.starlark.net/resolve/resolve.go +++ /dev/null @@ -1,978 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package resolve defines a name-resolution pass for Starlark abstract -// syntax trees. -// -// The resolver sets the Locals and FreeVars arrays of each DefStmt and -// the LocalIndex field of each syntax.Ident that refers to a local or -// free variable. It also sets the Locals array of a File for locals -// bound by top-level comprehensions and load statements. -// Identifiers for global variables do not get an index. -package resolve // import "go.starlark.net/resolve" - -// All references to names are statically resolved. Names may be -// predeclared, global, or local to a function or file. -// File-local variables include those bound by top-level comprehensions -// and by load statements. ("Top-level" means "outside of any function".) -// The resolver maps each global name to a small integer and each local -// name to a small integer; these integers enable a fast and compact -// representation of globals and locals in the evaluator. -// -// As an optimization, the resolver classifies each predeclared name as -// either universal (e.g. None, len) or per-module (e.g. glob in Bazel's -// build language), enabling the evaluator to share the representation -// of the universal environment across all modules. -// -// The lexical environment is a tree of blocks with the file block at -// its root. The file's child blocks may be of two kinds: functions -// and comprehensions, and these may have further children of either -// kind. -// -// Python-style resolution requires multiple passes because a name is -// determined to be local to a function only if the function contains a -// "binding" use of it; similarly, a name is determined to be global (as -// opposed to predeclared) if the module contains a top-level binding use. -// Unlike ordinary top-level assignments, the bindings created by load -// statements are local to the file block. -// A non-binding use may lexically precede the binding to which it is resolved. -// In the first pass, we inspect each function, recording in -// 'uses' each identifier and the environment block in which it occurs. -// If a use of a name is binding, such as a function parameter or -// assignment, we add the name to the block's bindings mapping and add a -// local variable to the enclosing function. -// -// As we finish resolving each function, we inspect all the uses within -// that function and discard ones that were found to be function-local. 
The -// remaining ones must be either free (local to some lexically enclosing -// function), or top-level (global, predeclared, or file-local), but we cannot tell -// which until we have finished inspecting the outermost enclosing -// function. At that point, we can distinguish local from top-level names -// (and this is when Python would compute free variables). -// -// However, Starlark additionally requires that all references to global -// names are satisfied by some declaration in the current module; -// Starlark permits a function to forward-reference a global or file-local -// that has not -// been declared yet so long as it is declared before the end of the -// module. So, instead of re-resolving the unresolved references after -// each top-level function, we defer this until the end of the module -// and ensure that all such references are satisfied by some definition. -// -// At the end of the module, we visit each of the nested function blocks -// in bottom-up order, doing a recursive lexical lookup for each -// unresolved name. If the name is found to be local to some enclosing -// function, we must create a DefStmt.FreeVar (capture) parameter for -// each intervening function. We enter these synthetic bindings into -// the bindings map so that we create at most one freevar per name. If -// the name was not local, we check that it was defined at module level. -// -// We resolve all uses of locals in the module (due to load statements -// and comprehensions) in a similar way and compute the file's set of -// local variables. -// -// Starlark enforces that all global names are assigned at most once on -// all control flow paths by forbidding if/else statements and loops at -// top level. A global may be used before it is defined, leading to a -// dynamic error. However, the AllowGlobalReassign flag (really: allow -// top-level reassign) makes the resolver allow multiple to a variable -// at top-level. It also allows if-, for-, and while-loops at top-level, -// which in turn may make the evaluator dynamically assign multiple -// values to a variable at top-level. (These two roles should be separated.) - -import ( - "fmt" - "log" - "sort" - "strings" - - "go.starlark.net/internal/spell" - "go.starlark.net/syntax" -) - -const debug = false -const doesnt = "this Starlark dialect does not " - -// global options -// These features are either not standard Starlark (yet), or deprecated -// features of the BUILD language, so we put them behind flags. -var ( - AllowNestedDef = false // allow def statements within function bodies - AllowLambda = false // allow lambda expressions - AllowFloat = false // allow floating point literals, the 'float' built-in, and x / y - AllowSet = false // allow the 'set' built-in - AllowGlobalReassign = false // allow reassignment to top-level names; also, allow if/for/while at top-level - AllowRecursion = false // allow while statements and recursive functions - AllowBitwise = true // obsolete; bitwise operations (&, |, ^, ~, <<, and >>) are always enabled - LoadBindsGlobally = false // load creates global not file-local bindings (deprecated) -) - -// File resolves the specified file and records information about the -// module in file.Module. -// -// The isPredeclared and isUniversal predicates report whether a name is -// a pre-declared identifier (visible in the current module) or a -// universal identifier (visible in every module). 
-// Clients should typically pass predeclared.Has for the first and -// starlark.Universe.Has for the second, where predeclared is the -// module's StringDict of predeclared names and starlark.Universe is the -// standard set of built-ins. -// The isUniverse predicate is supplied a parameter to avoid a cyclic -// dependency upon starlark.Universe, not because users should ever need -// to redefine it. -func File(file *syntax.File, isPredeclared, isUniversal func(name string) bool) error { - return REPLChunk(file, nil, isPredeclared, isUniversal) -} - -// REPLChunk is a generalization of the File function that supports a -// non-empty initial global block, as occurs in a REPL. -func REPLChunk(file *syntax.File, isGlobal, isPredeclared, isUniversal func(name string) bool) error { - r := newResolver(isGlobal, isPredeclared, isUniversal) - r.stmts(file.Stmts) - - r.env.resolveLocalUses() - - // At the end of the module, resolve all non-local variable references, - // computing closures. - // Function bodies may contain forward references to later global declarations. - r.resolveNonLocalUses(r.env) - - file.Module = &Module{ - Locals: r.moduleLocals, - Globals: r.moduleGlobals, - } - - if len(r.errors) > 0 { - return r.errors - } - return nil -} - -// Expr resolves the specified expression. -// It returns the local variables bound within the expression. -// -// The isPredeclared and isUniversal predicates behave as for the File function. -func Expr(expr syntax.Expr, isPredeclared, isUniversal func(name string) bool) ([]*Binding, error) { - r := newResolver(nil, isPredeclared, isUniversal) - r.expr(expr) - r.env.resolveLocalUses() - r.resolveNonLocalUses(r.env) // globals & universals - if len(r.errors) > 0 { - return nil, r.errors - } - return r.moduleLocals, nil -} - -// An ErrorList is a non-empty list of resolver error messages. -type ErrorList []Error // len > 0 - -func (e ErrorList) Error() string { return e[0].Error() } - -// An Error describes the nature and position of a resolver error. -type Error struct { - Pos syntax.Position - Msg string -} - -func (e Error) Error() string { return e.Pos.String() + ": " + e.Msg } - -func newResolver(isGlobal, isPredeclared, isUniversal func(name string) bool) *resolver { - file := new(block) - return &resolver{ - file: file, - env: file, - isGlobal: isGlobal, - isPredeclared: isPredeclared, - isUniversal: isUniversal, - globals: make(map[string]*Binding), - predeclared: make(map[string]*Binding), - } -} - -type resolver struct { - // env is the current local environment: - // a linked list of blocks, innermost first. - // The tail of the list is the file block. - env *block - file *block // file block (contains load bindings) - - // moduleLocals contains the local variables of the module - // (due to load statements and comprehensions outside any function). - // moduleGlobals contains the global variables of the module. - moduleLocals []*Binding - moduleGlobals []*Binding - - // globals maps each global name in the module to its binding. - // predeclared does the same for predeclared and universal names. - globals map[string]*Binding - predeclared map[string]*Binding - - // These predicates report whether a name is - // pre-declared, either in this module or universally, - // or already declared in the module globals (as in a REPL). - // isGlobal may be nil. 
- isGlobal, isPredeclared, isUniversal func(name string) bool - - loops int // number of enclosing for loops - - errors ErrorList -} - -// container returns the innermost enclosing "container" block: -// a function (function != nil) or file (function == nil). -// Container blocks accumulate local variable bindings. -func (r *resolver) container() *block { - for b := r.env; ; b = b.parent { - if b.function != nil || b == r.file { - return b - } - } -} - -func (r *resolver) push(b *block) { - r.env.children = append(r.env.children, b) - b.parent = r.env - r.env = b -} - -func (r *resolver) pop() { r.env = r.env.parent } - -type block struct { - parent *block // nil for file block - - // In the file (root) block, both these fields are nil. - function *Function // only for function blocks - comp *syntax.Comprehension // only for comprehension blocks - - // bindings maps a name to its binding. - // A local binding has an index into its innermost enclosing container's locals array. - // A free binding has an index into its innermost enclosing function's freevars array. - bindings map[string]*Binding - - // children records the child blocks of the current one. - children []*block - - // uses records all identifiers seen in this container (function or file), - // and a reference to the environment in which they appear. - // As we leave each container block, we resolve them, - // so that only free and global ones remain. - // At the end of each top-level function we compute closures. - uses []use -} - -func (b *block) bind(name string, bind *Binding) { - if b.bindings == nil { - b.bindings = make(map[string]*Binding) - } - b.bindings[name] = bind -} - -func (b *block) String() string { - if b.function != nil { - return "function block at " + fmt.Sprint(b.function.Pos) - } - if b.comp != nil { - return "comprehension block at " + fmt.Sprint(b.comp.Span()) - } - return "file block" -} - -func (r *resolver) errorf(posn syntax.Position, format string, args ...interface{}) { - r.errors = append(r.errors, Error{posn, fmt.Sprintf(format, args...)}) -} - -// A use records an identifier and the environment in which it appears. -type use struct { - id *syntax.Ident - env *block -} - -// bind creates a binding for id: a global (not file-local) -// binding at top-level, a local binding otherwise. -// At top-level, it reports an error if a global or file-local -// binding already exists, unless AllowGlobalReassign. -// It sets id.Binding to the binding (whether old or new), -// and returns whether a binding already existed. -func (r *resolver) bind(id *syntax.Ident) bool { - // Binding outside any local (comprehension/function) block? - if r.env == r.file { - bind, ok := r.file.bindings[id.Name] - if !ok { - bind, ok = r.globals[id.Name] - if !ok { - // first global binding of this name - bind = &Binding{ - First: id, - Scope: Global, - Index: len(r.moduleGlobals), - } - r.globals[id.Name] = bind - r.moduleGlobals = append(r.moduleGlobals, bind) - } - } - if ok && !AllowGlobalReassign { - r.errorf(id.NamePos, "cannot reassign %s %s declared at %s", - bind.Scope, id.Name, bind.First.NamePos) - } - id.Binding = bind - return ok - } - - return r.bindLocal(id) -} - -func (r *resolver) bindLocal(id *syntax.Ident) bool { - // Mark this name as local to current block. - // Assign it a new local (positive) index in the current container. 
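A small sketch of the top-level reassignment rule that bind above enforces, driven through the public API (file name illustrative): with resolve.AllowGlobalReassign left at its default of false, the second assignment is rejected at resolve time.

    package main

    import (
        "fmt"

        "go.starlark.net/starlark"
    )

    func main() {
        thread := &starlark.Thread{Name: "demo"}
        // Expect an error along the lines of:
        //   demo.star:2:1: cannot reassign global x declared at demo.star:1:1
        _, err := starlark.ExecFile(thread, "demo.star", "x = 1\nx = 2\n", nil)
        fmt.Println(err)
    }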
- _, ok := r.env.bindings[id.Name] - if !ok { - var locals *[]*Binding - if fn := r.container().function; fn != nil { - locals = &fn.Locals - } else { - locals = &r.moduleLocals - } - bind := &Binding{ - First: id, - Scope: Local, - Index: len(*locals), - } - r.env.bind(id.Name, bind) - *locals = append(*locals, bind) - } - - r.use(id) - return ok -} - -func (r *resolver) use(id *syntax.Ident) { - use := use{id, r.env} - - // The spec says that if there is a global binding of a name - // then all references to that name in that block refer to the - // global, even if the use precedes the def---just as for locals. - // For example, in this code, - // - // print(len); len=1; print(len) - // - // both occurrences of len refer to the len=1 binding, which - // completely shadows the predeclared len function. - // - // The rationale for these semantics, which differ from Python, - // is that the static meaning of len (a reference to a global) - // does not change depending on where it appears in the file. - // Of course, its dynamic meaning does change, from an error - // into a valid reference, so it's not clear these semantics - // have any practical advantage. - // - // In any case, the Bazel implementation lags behind the spec - // and follows Python behavior, so the first use of len refers - // to the predeclared function. This typically used in a BUILD - // file that redefines a predeclared name half way through, - // for example: - // - // proto_library(...) # built-in rule - // load("myproto.bzl", "proto_library") - // proto_library(...) # user-defined rule - // - // We will piggyback support for the legacy semantics on the - // AllowGlobalReassign flag, which is loosely related and also - // required for Bazel. - if AllowGlobalReassign && r.env == r.file { - r.useToplevel(use) - return - } - - b := r.container() - b.uses = append(b.uses, use) -} - -// useToplevel resolves use.id as a reference to a name visible at top-level. -// The use.env field captures the original environment for error reporting. 
-func (r *resolver) useToplevel(use use) (bind *Binding) { - id := use.id - - if prev, ok := r.file.bindings[id.Name]; ok { - // use of load-defined name in file block - bind = prev - } else if prev, ok := r.globals[id.Name]; ok { - // use of global declared by module - bind = prev - } else if r.isGlobal != nil && r.isGlobal(id.Name) { - // use of global defined in a previous REPL chunk - bind = &Binding{ - First: id, // wrong: this is not even a binding use - Scope: Global, - Index: len(r.moduleGlobals), - } - r.globals[id.Name] = bind - r.moduleGlobals = append(r.moduleGlobals, bind) - } else if prev, ok := r.predeclared[id.Name]; ok { - // repeated use of predeclared or universal - bind = prev - } else if r.isPredeclared(id.Name) { - // use of pre-declared name - bind = &Binding{Scope: Predeclared} - r.predeclared[id.Name] = bind // save it - } else if r.isUniversal(id.Name) { - // use of universal name - if !AllowFloat && id.Name == "float" { - r.errorf(id.NamePos, doesnt+"support floating point") - } - if !AllowSet && id.Name == "set" { - r.errorf(id.NamePos, doesnt+"support sets") - } - bind = &Binding{Scope: Universal} - r.predeclared[id.Name] = bind // save it - } else { - bind = &Binding{Scope: Undefined} - var hint string - if n := r.spellcheck(use); n != "" { - hint = fmt.Sprintf(" (did you mean %s?)", n) - } - r.errorf(id.NamePos, "undefined: %s%s", id.Name, hint) - } - id.Binding = bind - return bind -} - -// spellcheck returns the most likely misspelling of -// the name use.id in the environment use.env. -func (r *resolver) spellcheck(use use) string { - var names []string - - // locals - for b := use.env; b != nil; b = b.parent { - for name := range b.bindings { - names = append(names, name) - } - } - - // globals - // - // We have no way to enumerate the sets whose membership - // tests are isPredeclared, isUniverse, and isGlobal, - // which includes prior names in the REPL session. - for _, bind := range r.moduleGlobals { - names = append(names, bind.First.Name) - } - - sort.Strings(names) - return spell.Nearest(use.id.Name, names) -} - -// resolveLocalUses is called when leaving a container (function/module) -// block. It resolves all uses of locals/cells within that block. 
-func (b *block) resolveLocalUses() { - unresolved := b.uses[:0] - for _, use := range b.uses { - if bind := lookupLocal(use); bind != nil && (bind.Scope == Local || bind.Scope == Cell) { - use.id.Binding = bind - } else { - unresolved = append(unresolved, use) - } - } - b.uses = unresolved -} - -func (r *resolver) stmts(stmts []syntax.Stmt) { - for _, stmt := range stmts { - r.stmt(stmt) - } -} - -func (r *resolver) stmt(stmt syntax.Stmt) { - switch stmt := stmt.(type) { - case *syntax.ExprStmt: - r.expr(stmt.X) - - case *syntax.BranchStmt: - if r.loops == 0 && (stmt.Token == syntax.BREAK || stmt.Token == syntax.CONTINUE) { - r.errorf(stmt.TokenPos, "%s not in a loop", stmt.Token) - } - - case *syntax.IfStmt: - if !AllowGlobalReassign && r.container().function == nil { - r.errorf(stmt.If, "if statement not within a function") - } - r.expr(stmt.Cond) - r.stmts(stmt.True) - r.stmts(stmt.False) - - case *syntax.AssignStmt: - r.expr(stmt.RHS) - isAugmented := stmt.Op != syntax.EQ - r.assign(stmt.LHS, isAugmented) - - case *syntax.DefStmt: - if !AllowNestedDef && r.container().function != nil { - r.errorf(stmt.Def, doesnt+"support nested def") - } - r.bind(stmt.Name) - fn := &Function{ - Name: stmt.Name.Name, - Pos: stmt.Def, - Params: stmt.Params, - Body: stmt.Body, - } - stmt.Function = fn - r.function(fn, stmt.Def) - - case *syntax.ForStmt: - if !AllowGlobalReassign && r.container().function == nil { - r.errorf(stmt.For, "for loop not within a function") - } - r.expr(stmt.X) - const isAugmented = false - r.assign(stmt.Vars, isAugmented) - r.loops++ - r.stmts(stmt.Body) - r.loops-- - - case *syntax.WhileStmt: - if !AllowRecursion { - r.errorf(stmt.While, doesnt+"support while loops") - } - if !AllowGlobalReassign && r.container().function == nil { - r.errorf(stmt.While, "while loop not within a function") - } - r.expr(stmt.Cond) - r.loops++ - r.stmts(stmt.Body) - r.loops-- - - case *syntax.ReturnStmt: - if r.container().function == nil { - r.errorf(stmt.Return, "return statement not within a function") - } - if stmt.Result != nil { - r.expr(stmt.Result) - } - - case *syntax.LoadStmt: - if r.container().function != nil { - r.errorf(stmt.Load, "load statement within a function") - } - - for i, from := range stmt.From { - if from.Name == "" { - r.errorf(from.NamePos, "load: empty identifier") - continue - } - if from.Name[0] == '_' { - r.errorf(from.NamePos, "load: names with leading underscores are not exported: %s", from.Name) - } - - id := stmt.To[i] - if LoadBindsGlobally { - r.bind(id) - } else if r.bindLocal(id) && !AllowGlobalReassign { - // "Global" in AllowGlobalReassign is a misnomer for "toplevel". - // Sadly we can't report the previous declaration - // as id.Binding may not be set yet. - r.errorf(id.NamePos, "cannot reassign top-level %s", id.Name) - } - } - - default: - log.Panicf("unexpected stmt %T", stmt) - } -} - -func (r *resolver) assign(lhs syntax.Expr, isAugmented bool) { - switch lhs := lhs.(type) { - case *syntax.Ident: - // x = ... - r.bind(lhs) - - case *syntax.IndexExpr: - // x[i] = ... - r.expr(lhs.X) - r.expr(lhs.Y) - - case *syntax.DotExpr: - // x.f = ... - r.expr(lhs.X) - - case *syntax.TupleExpr: - // (x, y) = ... - if len(lhs.List) == 0 { - r.errorf(syntax.Start(lhs), "can't assign to ()") - } - if isAugmented { - r.errorf(syntax.Start(lhs), "can't use tuple expression in augmented assignment") - } - for _, elem := range lhs.List { - r.assign(elem, isAugmented) - } - - case *syntax.ListExpr: - // [x, y, z] = ... 
- if len(lhs.List) == 0 { - r.errorf(syntax.Start(lhs), "can't assign to []") - } - if isAugmented { - r.errorf(syntax.Start(lhs), "can't use list expression in augmented assignment") - } - for _, elem := range lhs.List { - r.assign(elem, isAugmented) - } - - case *syntax.ParenExpr: - r.assign(lhs.X, isAugmented) - - default: - name := strings.ToLower(strings.TrimPrefix(fmt.Sprintf("%T", lhs), "*syntax.")) - r.errorf(syntax.Start(lhs), "can't assign to %s", name) - } -} - -func (r *resolver) expr(e syntax.Expr) { - switch e := e.(type) { - case *syntax.Ident: - r.use(e) - - case *syntax.Literal: - if !AllowFloat && e.Token == syntax.FLOAT { - r.errorf(e.TokenPos, doesnt+"support floating point") - } - - case *syntax.ListExpr: - for _, x := range e.List { - r.expr(x) - } - - case *syntax.CondExpr: - r.expr(e.Cond) - r.expr(e.True) - r.expr(e.False) - - case *syntax.IndexExpr: - r.expr(e.X) - r.expr(e.Y) - - case *syntax.DictEntry: - r.expr(e.Key) - r.expr(e.Value) - - case *syntax.SliceExpr: - r.expr(e.X) - if e.Lo != nil { - r.expr(e.Lo) - } - if e.Hi != nil { - r.expr(e.Hi) - } - if e.Step != nil { - r.expr(e.Step) - } - - case *syntax.Comprehension: - // The 'in' operand of the first clause (always a ForClause) - // is resolved in the outer block; consider: [x for x in x]. - clause := e.Clauses[0].(*syntax.ForClause) - r.expr(clause.X) - - // A list/dict comprehension defines a new lexical block. - // Locals defined within the block will be allotted - // distinct slots in the locals array of the innermost - // enclosing container (function/module) block. - r.push(&block{comp: e}) - - const isAugmented = false - r.assign(clause.Vars, isAugmented) - - for _, clause := range e.Clauses[1:] { - switch clause := clause.(type) { - case *syntax.IfClause: - r.expr(clause.Cond) - case *syntax.ForClause: - r.assign(clause.Vars, isAugmented) - r.expr(clause.X) - } - } - r.expr(e.Body) // body may be *DictEntry - r.pop() - - case *syntax.TupleExpr: - for _, x := range e.List { - r.expr(x) - } - - case *syntax.DictExpr: - for _, entry := range e.List { - entry := entry.(*syntax.DictEntry) - r.expr(entry.Key) - r.expr(entry.Value) - } - - case *syntax.UnaryExpr: - r.expr(e.X) - - case *syntax.BinaryExpr: - if !AllowFloat && e.Op == syntax.SLASH { - r.errorf(e.OpPos, doesnt+"support floating point (use //)") - } - r.expr(e.X) - r.expr(e.Y) - - case *syntax.DotExpr: - r.expr(e.X) - // ignore e.Name - - case *syntax.CallExpr: - r.expr(e.Fn) - var seenVarargs, seenKwargs bool - var seenName map[string]bool - var n, p int - for _, arg := range e.Args { - pos, _ := arg.Span() - if unop, ok := arg.(*syntax.UnaryExpr); ok && unop.Op == syntax.STARSTAR { - // **kwargs - if seenKwargs { - r.errorf(pos, "multiple **kwargs not allowed") - } - seenKwargs = true - r.expr(arg) - } else if ok && unop.Op == syntax.STAR { - // *args - if seenKwargs { - r.errorf(pos, "*args may not follow **kwargs") - } else if seenVarargs { - r.errorf(pos, "multiple *args not allowed") - } - seenVarargs = true - r.expr(arg) - } else if binop, ok := arg.(*syntax.BinaryExpr); ok && binop.Op == syntax.EQ { - // k=v - n++ - if seenKwargs { - r.errorf(pos, "argument may not follow **kwargs") - } - x := binop.X.(*syntax.Ident) - if seenName[x.Name] { - r.errorf(x.NamePos, "keyword argument %s repeated", x.Name) - } else { - if seenName == nil { - seenName = make(map[string]bool) - } - seenName[x.Name] = true - } - r.expr(binop.Y) - } else { - // positional argument - p++ - if seenVarargs { - r.errorf(pos, "argument may not follow *args") - } 
else if seenKwargs { - r.errorf(pos, "argument may not follow **kwargs") - } else if len(seenName) > 0 { - r.errorf(pos, "positional argument may not follow named") - } - r.expr(arg) - } - } - - // Fail gracefully if compiler-imposed limit is exceeded. - if p >= 256 { - pos, _ := e.Span() - r.errorf(pos, "%v positional arguments in call, limit is 255", p) - } - if n >= 256 { - pos, _ := e.Span() - r.errorf(pos, "%v keyword arguments in call, limit is 255", n) - } - - case *syntax.LambdaExpr: - if !AllowLambda { - r.errorf(e.Lambda, doesnt+"support lambda") - } - fn := &Function{ - Name: "lambda", - Pos: e.Lambda, - Params: e.Params, - Body: []syntax.Stmt{&syntax.ReturnStmt{Result: e.Body}}, - } - e.Function = fn - r.function(fn, e.Lambda) - - case *syntax.ParenExpr: - r.expr(e.X) - - default: - log.Panicf("unexpected expr %T", e) - } -} - -func (r *resolver) function(function *Function, pos syntax.Position) { - // Resolve defaults in enclosing environment. - for _, param := range function.Params { - if binary, ok := param.(*syntax.BinaryExpr); ok { - r.expr(binary.Y) - } - } - - // Enter function block. - b := &block{function: function} - r.push(b) - - var seenOptional bool - var star *syntax.UnaryExpr // * or *args param - var starStar *syntax.Ident // **kwargs ident - var numKwonlyParams int - for _, param := range function.Params { - switch param := param.(type) { - case *syntax.Ident: - // e.g. x - if starStar != nil { - r.errorf(param.NamePos, "required parameter may not follow **%s", starStar.Name) - } else if star != nil { - numKwonlyParams++ - } else if seenOptional { - r.errorf(param.NamePos, "required parameter may not follow optional") - } - if r.bind(param) { - r.errorf(param.NamePos, "duplicate parameter: %s", param.Name) - } - - case *syntax.BinaryExpr: - // e.g. y=dflt - if starStar != nil { - r.errorf(param.OpPos, "optional parameter may not follow **%s", starStar.Name) - } else if star != nil { - numKwonlyParams++ - } - if id := param.X.(*syntax.Ident); r.bind(id) { - r.errorf(param.OpPos, "duplicate parameter: %s", id.Name) - } - seenOptional = true - - case *syntax.UnaryExpr: - // * or *args or **kwargs - if param.Op == syntax.STAR { - if starStar != nil { - r.errorf(param.OpPos, "* parameter may not follow **%s", starStar.Name) - } else if star != nil { - r.errorf(param.OpPos, "multiple * parameters not allowed") - } else { - star = param - } - } else { - if starStar != nil { - r.errorf(param.OpPos, "multiple ** parameters not allowed") - } - starStar = param.X.(*syntax.Ident) - } - } - } - - // Bind the *args and **kwargs parameters at the end, - // so that regular parameters a/b/c are contiguous and - // there is no hole for the "*": - // def f(a, b, *args, c=0, **kwargs) - // def f(a, b, *, c=0, **kwargs) - if star != nil { - if id, _ := star.X.(*syntax.Ident); id != nil { - // *args - if r.bind(id) { - r.errorf(id.NamePos, "duplicate parameter: %s", id.Name) - } - function.HasVarargs = true - } else if numKwonlyParams == 0 { - r.errorf(star.OpPos, "bare * must be followed by keyword-only parameters") - } - } - if starStar != nil { - if r.bind(starStar) { - r.errorf(starStar.NamePos, "duplicate parameter: %s", starStar.Name) - } - function.HasKwargs = true - } - - function.NumKwonlyParams = numKwonlyParams - r.stmts(function.Body) - - // Resolve all uses of this function's local vars, - // and keep just the remaining uses of free/global vars. - b.resolveLocalUses() - - // Leave function block. 
- r.pop() - - // References within the function body to globals are not - // resolved until the end of the module. -} - -func (r *resolver) resolveNonLocalUses(b *block) { - // First resolve inner blocks. - for _, child := range b.children { - r.resolveNonLocalUses(child) - } - for _, use := range b.uses { - use.id.Binding = r.lookupLexical(use, use.env) - } -} - -// lookupLocal looks up an identifier within its immediately enclosing function. -func lookupLocal(use use) *Binding { - for env := use.env; env != nil; env = env.parent { - if bind, ok := env.bindings[use.id.Name]; ok { - if bind.Scope == Free { - // shouldn't exist till later - log.Panicf("%s: internal error: %s, %v", use.id.NamePos, use.id.Name, bind) - } - return bind // found - } - if env.function != nil { - break - } - } - return nil // not found in this function -} - -// lookupLexical looks up an identifier use.id within its lexically enclosing environment. -// The use.env field captures the original environment for error reporting. -func (r *resolver) lookupLexical(use use, env *block) (bind *Binding) { - if debug { - fmt.Printf("lookupLexical %s in %s = ...\n", use.id.Name, env) - defer func() { fmt.Printf("= %v\n", bind) }() - } - - // Is this the file block? - if env == r.file { - return r.useToplevel(use) // file-local, global, predeclared, or not found - } - - // Defined in this block? - bind, ok := env.bindings[use.id.Name] - if !ok { - // Defined in parent block? - bind = r.lookupLexical(use, env.parent) - if env.function != nil && (bind.Scope == Local || bind.Scope == Free || bind.Scope == Cell) { - // Found in parent block, which belongs to enclosing function. - // Add the parent's binding to the function's freevars, - // and add a new 'free' binding to the inner function's block, - // and turn the parent's local into cell. - if bind.Scope == Local { - bind.Scope = Cell - } - index := len(env.function.FreeVars) - env.function.FreeVars = append(env.function.FreeVars, bind) - bind = &Binding{ - First: bind.First, - Scope: Free, - Index: index, - } - if debug { - fmt.Printf("creating freevar %v in function at %s: %s\n", - len(env.function.FreeVars), env.function.Pos, use.id.Name) - } - } - - // Memoize, to avoid duplicate free vars - // and redundant global (failing) lookups. - env.bind(use.id.Name, bind) - } - return bind -} diff --git a/src/vendor/go.starlark.net/starlark/debug.go b/src/vendor/go.starlark.net/starlark/debug.go deleted file mode 100644 index 22a21240f..000000000 --- a/src/vendor/go.starlark.net/starlark/debug.go +++ /dev/null @@ -1,42 +0,0 @@ -package starlark - -import "go.starlark.net/syntax" - -// This file defines an experimental API for the debugging tools. -// Some of these declarations expose details of internal packages. -// (The debugger makes liberal use of exported fields of unexported types.) -// Breaking changes may occur without notice. - -// Local returns the value of the i'th local variable. -// It may be nil if not yet assigned. -// -// Local may be called only for frames whose Callable is a *Function (a -// function defined by Starlark source code), and only while the frame -// is active; it will panic otherwise. -// -// This function is provided only for debugging tools. -// -// THIS API IS EXPERIMENTAL AND MAY CHANGE WITHOUT NOTICE. -func (fr *frame) Local(i int) Value { return fr.locals[i] } - -// DebugFrame is the debugger API for a frame of the interpreter's call stack. -// -// Most applications have no need for this API; use CallFrame instead. 
-// -// Clients must not retain a DebugFrame nor call any of its methods once -// the current built-in call has returned or execution has resumed -// after a breakpoint as this may have unpredictable effects, including -// but not limited to retention of object that would otherwise be garbage. -type DebugFrame interface { - Callable() Callable // returns the frame's function - Local(i int) Value // returns the value of the (Starlark) frame's ith local variable - Position() syntax.Position // returns the current position of execution in this frame -} - -// DebugFrame returns the debugger interface for -// the specified frame of the interpreter's call stack. -// Frame numbering is as for Thread.CallFrame. -// -// This function is intended for use in debugging tools. -// Most applications should have no need for it; use CallFrame instead. -func (thread *Thread) DebugFrame(depth int) DebugFrame { return thread.frameAt(depth) } diff --git a/src/vendor/go.starlark.net/starlark/empty.s b/src/vendor/go.starlark.net/starlark/empty.s deleted file mode 100644 index 3b8216999..000000000 --- a/src/vendor/go.starlark.net/starlark/empty.s +++ /dev/null @@ -1,3 +0,0 @@ -// The presence of this file allows the package to use the -// "go:linkname" hack to call non-exported functions in the -// Go runtime, such as hardware-accelerated string hashing. diff --git a/src/vendor/go.starlark.net/starlark/eval.go b/src/vendor/go.starlark.net/starlark/eval.go deleted file mode 100644 index de492ca3c..000000000 --- a/src/vendor/go.starlark.net/starlark/eval.go +++ /dev/null @@ -1,1497 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package starlark - -import ( - "fmt" - "io" - "io/ioutil" - "log" - "math" - "math/big" - "sort" - "strings" - "time" - "unicode" - "unicode/utf8" - - "go.starlark.net/internal/compile" - "go.starlark.net/internal/spell" - "go.starlark.net/resolve" - "go.starlark.net/syntax" -) - -// A Thread contains the state of a Starlark thread, -// such as its call stack and thread-local storage. -// The Thread is threaded throughout the evaluator. -type Thread struct { - // Name is an optional name that describes the thread, for debugging. - Name string - - // stack is the stack of (internal) call frames. - stack []*frame - - // Print is the client-supplied implementation of the Starlark - // 'print' function. If nil, fmt.Fprintln(os.Stderr, msg) is - // used instead. - Print func(thread *Thread, msg string) - - // Load is the client-supplied implementation of module loading. - // Repeated calls with the same module name must return the same - // module environment or error. - // The error message need not include the module name. - // - // See example_test.go for some example implementations of Load. - Load func(thread *Thread, module string) (StringDict, error) - - // locals holds arbitrary "thread-local" Go values belonging to the client. - // They are accessible to the client but not to any Starlark program. - locals map[string]interface{} - - // proftime holds the accumulated execution time since the last profile event. - proftime time.Duration -} - -// SetLocal sets the thread-local value associated with the specified key. -// It must not be called after execution begins. 
-func (thread *Thread) SetLocal(key string, value interface{}) { - if thread.locals == nil { - thread.locals = make(map[string]interface{}) - } - thread.locals[key] = value -} - -// Local returns the thread-local value associated with the specified key. -func (thread *Thread) Local(key string) interface{} { - return thread.locals[key] -} - -// CallFrame returns a copy of the specified frame of the callstack. -// It should only be used in built-ins called from Starlark code. -// Depth 0 means the frame of the built-in itself, 1 is its caller, and so on. -// -// It is equivalent to CallStack().At(depth), but more efficient. -func (thread *Thread) CallFrame(depth int) CallFrame { - return thread.frameAt(depth).asCallFrame() -} - -func (thread *Thread) frameAt(depth int) *frame { - return thread.stack[len(thread.stack)-1-depth] -} - -// CallStack returns a new slice containing the thread's stack of call frames. -func (thread *Thread) CallStack() CallStack { - frames := make([]CallFrame, len(thread.stack)) - for i, fr := range thread.stack { - frames[i] = fr.asCallFrame() - } - return frames -} - -// CallStackDepth returns the number of frames in the current call stack. -func (thread *Thread) CallStackDepth() int { return len(thread.stack) } - -// A StringDict is a mapping from names to values, and represents -// an environment such as the global variables of a module. -// It is not a true starlark.Value. -type StringDict map[string]Value - -// Keys returns a new sorted slice of d's keys. -func (d StringDict) Keys() []string { - names := make([]string, 0, len(d)) - for name := range d { - names = append(names, name) - } - sort.Strings(names) - return names -} - -func (d StringDict) String() string { - buf := new(strings.Builder) - buf.WriteByte('{') - sep := "" - for _, name := range d.Keys() { - buf.WriteString(sep) - buf.WriteString(name) - buf.WriteString(": ") - writeValue(buf, d[name], nil) - sep = ", " - } - buf.WriteByte('}') - return buf.String() -} - -func (d StringDict) Freeze() { - for _, v := range d { - v.Freeze() - } -} - -// Has reports whether the dictionary contains the specified key. -func (d StringDict) Has(key string) bool { _, ok := d[key]; return ok } - -// A frame records a call to a Starlark function (including module toplevel) -// or a built-in function or method. -type frame struct { - callable Callable // current function (or toplevel) or built-in - pc uint32 // program counter (Starlark frames only) - locals []Value // local variables (Starlark frames only) - spanStart int64 // start time of current profiler span -} - -// Position returns the source position of the current point of execution in this frame. -func (fr *frame) Position() syntax.Position { - switch c := fr.callable.(type) { - case *Function: - // Starlark function - return c.funcode.Position(fr.pc) - case callableWithPosition: - // If a built-in Callable defines - // a Position method, use it. - return c.Position() - } - return syntax.MakePosition(&builtinFilename, 0, 0) -} - -var builtinFilename = "" - -// Function returns the frame's function or built-in. -func (fr *frame) Callable() Callable { return fr.callable } - -// A CallStack is a stack of call frames, outermost first. -type CallStack []CallFrame - -// At returns a copy of the frame at depth i. -// At(0) returns the topmost frame. -func (stack CallStack) At(i int) CallFrame { return stack[len(stack)-1-i] } - -// Pop removes and returns the topmost frame. 
-func (stack *CallStack) Pop() CallFrame { - last := len(*stack) - 1 - top := (*stack)[last] - *stack = (*stack)[:last] - return top -} - -// String returns a user-friendly description of the stack. -func (stack CallStack) String() string { - out := new(strings.Builder) - fmt.Fprintf(out, "Traceback (most recent call last):\n") - for _, fr := range stack { - fmt.Fprintf(out, " %s: in %s\n", fr.Pos, fr.Name) - } - return out.String() -} - -// An EvalError is a Starlark evaluation error and -// a copy of the thread's stack at the moment of the error. -type EvalError struct { - Msg string - CallStack CallStack - cause error -} - -// A CallFrame represents the function name and current -// position of execution of an enclosing call frame. -type CallFrame struct { - Name string - Pos syntax.Position -} - -func (fr *frame) asCallFrame() CallFrame { - return CallFrame{ - Name: fr.Callable().Name(), - Pos: fr.Position(), - } -} - -func (thread *Thread) evalError(err error) *EvalError { - return &EvalError{ - Msg: err.Error(), - CallStack: thread.CallStack(), - cause: err, - } -} - -func (e *EvalError) Error() string { return e.Msg } - -// Backtrace returns a user-friendly error message describing the stack -// of calls that led to this error. -func (e *EvalError) Backtrace() string { - return fmt.Sprintf("%sError: %s", e.CallStack, e.Msg) -} - -func (e *EvalError) Unwrap() error { return e.cause } - -// A Program is a compiled Starlark program. -// -// Programs are immutable, and contain no Values. -// A Program may be created by parsing a source file (see SourceProgram) -// or by loading a previously saved compiled program (see CompiledProgram). -type Program struct { - compiled *compile.Program -} - -// CompilerVersion is the version number of the protocol for compiled -// files. Applications must not run programs compiled by one version -// with an interpreter at another version, and should thus incorporate -// the compiler version into the cache key when reusing compiled code. -const CompilerVersion = compile.Version - -// Filename returns the name of the file from which this program was loaded. -func (prog *Program) Filename() string { return prog.compiled.Toplevel.Pos.Filename() } - -func (prog *Program) String() string { return prog.Filename() } - -// NumLoads returns the number of load statements in the compiled program. -func (prog *Program) NumLoads() int { return len(prog.compiled.Loads) } - -// Load(i) returns the name and position of the i'th module directly -// loaded by this one, where 0 <= i < NumLoads(). -// The name is unresolved---exactly as it appears in the source. -func (prog *Program) Load(i int) (string, syntax.Position) { - id := prog.compiled.Loads[i] - return id.Name, id.Pos -} - -// WriteTo writes the compiled module to the specified output stream. -func (prog *Program) Write(out io.Writer) error { - data := prog.compiled.Encode() - _, err := out.Write(data) - return err -} - -// ExecFile parses, resolves, and executes a Starlark file in the -// specified global environment, which may be modified during execution. -// -// Thread is the state associated with the Starlark thread. -// -// The filename and src parameters are as for syntax.Parse: -// filename is the name of the file to execute, -// and the name that appears in error messages; -// src is an optional source of bytes to use -// instead of filename. -// -// predeclared defines the predeclared names specific to this module. -// Execution does not modify this dictionary, though it may mutate -// its values. 
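When a program fails while running (rather than while resolving), the error is the *EvalError defined above, which snapshots the call stack; a minimal sketch (file name and source are illustrative):

    package main

    import (
        "errors"
        "fmt"

        "go.starlark.net/starlark"
    )

    func main() {
        thread := &starlark.Thread{Name: "demo"}
        _, err := starlark.ExecFile(thread, "demo.star", "x = 1 // 0", nil)
        var evalErr *starlark.EvalError
        if errors.As(err, &evalErr) {
            // Backtrace prints a "Traceback (most recent call last):" section
            // ending with "Error: floored division by zero".
            fmt.Print(evalErr.Backtrace())
        }
    }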
-// -// If ExecFile fails during evaluation, it returns an *EvalError -// containing a backtrace. -func ExecFile(thread *Thread, filename string, src interface{}, predeclared StringDict) (StringDict, error) { - // Parse, resolve, and compile a Starlark source file. - _, mod, err := SourceProgram(filename, src, predeclared.Has) - if err != nil { - return nil, err - } - - g, err := mod.Init(thread, predeclared) - g.Freeze() - return g, err -} - -// SourceProgram produces a new program by parsing, resolving, -// and compiling a Starlark source file. -// On success, it returns the parsed file and the compiled program. -// The filename and src parameters are as for syntax.Parse. -// -// The isPredeclared predicate reports whether a name is -// a pre-declared identifier of the current module. -// Its typical value is predeclared.Has, -// where predeclared is a StringDict of pre-declared values. -func SourceProgram(filename string, src interface{}, isPredeclared func(string) bool) (*syntax.File, *Program, error) { - f, err := syntax.Parse(filename, src, 0) - if err != nil { - return nil, nil, err - } - prog, err := FileProgram(f, isPredeclared) - return f, prog, err -} - -// FileProgram produces a new program by resolving, -// and compiling the Starlark source file syntax tree. -// On success, it returns the compiled program. -// -// Resolving a syntax tree mutates it. -// Do not call FileProgram more than once on the same file. -// -// The isPredeclared predicate reports whether a name is -// a pre-declared identifier of the current module. -// Its typical value is predeclared.Has, -// where predeclared is a StringDict of pre-declared values. -func FileProgram(f *syntax.File, isPredeclared func(string) bool) (*Program, error) { - if err := resolve.File(f, isPredeclared, Universe.Has); err != nil { - return nil, err - } - - var pos syntax.Position - if len(f.Stmts) > 0 { - pos = syntax.Start(f.Stmts[0]) - } else { - pos = syntax.MakePosition(&f.Path, 1, 1) - } - - module := f.Module.(*resolve.Module) - compiled := compile.File(f.Stmts, pos, "", module.Locals, module.Globals) - - return &Program{compiled}, nil -} - -// CompiledProgram produces a new program from the representation -// of a compiled program previously saved by Program.Write. -func CompiledProgram(in io.Reader) (*Program, error) { - data, err := ioutil.ReadAll(in) - if err != nil { - return nil, err - } - compiled, err := compile.DecodeProgram(data) - if err != nil { - return nil, err - } - return &Program{compiled}, nil -} - -// Init creates a set of global variables for the program, -// executes the toplevel code of the specified program, -// and returns a new, unfrozen dictionary of the globals. -func (prog *Program) Init(thread *Thread, predeclared StringDict) (StringDict, error) { - toplevel := makeToplevelFunction(prog.compiled, predeclared) - - _, err := Call(thread, toplevel, nil, nil) - - // Convert the global environment to a map. - // We return a (partial) map even in case of error. - return toplevel.Globals(), err -} - -// ExecREPLChunk compiles and executes file f in the specified thread -// and global environment. This is a variant of ExecFile specialized to -// the needs of a REPL, in which a sequence of input chunks, each -// syntactically a File, manipulates the same set of module globals, -// which are not frozen after execution. -// -// This function is intended to support only go.starlark.net/repl. -// Its API stability is not guaranteed. 
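A minimal end-to-end use of ExecFile as documented above (file name and script are illustrative): the returned StringDict holds the module's globals, frozen after execution.

    package main

    import (
        "fmt"

        "go.starlark.net/starlark"
    )

    func main() {
        src := "def greet(name):\n    return 'hello, ' + name\nmessage = greet('world')\n"
        thread := &starlark.Thread{Name: "demo"}
        globals, err := starlark.ExecFile(thread, "demo.star", src, nil)
        if err != nil {
            panic(err)
        }
        fmt.Println(globals["message"]) // "hello, world"
    }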
-func ExecREPLChunk(f *syntax.File, thread *Thread, globals StringDict) error { - var predeclared StringDict - - // -- variant of FileProgram -- - - if err := resolve.REPLChunk(f, globals.Has, predeclared.Has, Universe.Has); err != nil { - return err - } - - var pos syntax.Position - if len(f.Stmts) > 0 { - pos = syntax.Start(f.Stmts[0]) - } else { - pos = syntax.MakePosition(&f.Path, 1, 1) - } - - module := f.Module.(*resolve.Module) - compiled := compile.File(f.Stmts, pos, "", module.Locals, module.Globals) - prog := &Program{compiled} - - // -- variant of Program.Init -- - - toplevel := makeToplevelFunction(prog.compiled, predeclared) - - // Initialize module globals from parameter. - for i, id := range prog.compiled.Globals { - if v := globals[id.Name]; v != nil { - toplevel.module.globals[i] = v - } - } - - _, err := Call(thread, toplevel, nil, nil) - - // Reflect changes to globals back to parameter, even after an error. - for i, id := range prog.compiled.Globals { - if v := toplevel.module.globals[i]; v != nil { - globals[id.Name] = v - } - } - - return err -} - -func makeToplevelFunction(prog *compile.Program, predeclared StringDict) *Function { - // Create the Starlark value denoted by each program constant c. - constants := make([]Value, len(prog.Constants)) - for i, c := range prog.Constants { - var v Value - switch c := c.(type) { - case int64: - v = MakeInt64(c) - case *big.Int: - v = MakeBigInt(c) - case string: - v = String(c) - case float64: - v = Float(c) - default: - log.Panicf("unexpected constant %T: %v", c, c) - } - constants[i] = v - } - - return &Function{ - funcode: prog.Toplevel, - module: &module{ - program: prog, - predeclared: predeclared, - globals: make([]Value, len(prog.Globals)), - constants: constants, - }, - } -} - -// Eval parses, resolves, and evaluates an expression within the -// specified (predeclared) environment. -// -// Evaluation cannot mutate the environment dictionary itself, -// though it may modify variables reachable from the dictionary. -// -// The filename and src parameters are as for syntax.Parse. -// -// If Eval fails during evaluation, it returns an *EvalError -// containing a backtrace. -func Eval(thread *Thread, filename string, src interface{}, env StringDict) (Value, error) { - expr, err := syntax.ParseExpr(filename, src, 0) - if err != nil { - return nil, err - } - f, err := makeExprFunc(expr, env) - if err != nil { - return nil, err - } - return Call(thread, f, nil, nil) -} - -// EvalExpr resolves and evaluates an expression within the -// specified (predeclared) environment. -// Evaluating a comma-separated list of expressions yields a tuple value. -// -// Resolving an expression mutates it. -// Do not call EvalExpr more than once for the same expression. -// -// Evaluation cannot mutate the environment dictionary itself, -// though it may modify variables reachable from the dictionary. -// -// If Eval fails during evaluation, it returns an *EvalError -// containing a backtrace. -func EvalExpr(thread *Thread, expr syntax.Expr, env StringDict) (Value, error) { - fn, err := makeExprFunc(expr, env) - if err != nil { - return nil, err - } - return Call(thread, fn, nil, nil) -} - -// ExprFunc returns a no-argument function -// that evaluates the expression whose source is src. 
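A short sketch of the expression-only entry point (environment and expression are illustrative): Eval parses, resolves, and evaluates a single expression against the supplied environment.

    package main

    import (
        "fmt"

        "go.starlark.net/starlark"
    )

    func main() {
        thread := &starlark.Thread{Name: "demo"}
        env := starlark.StringDict{"x": starlark.MakeInt(6)}
        v, err := starlark.Eval(thread, "<expr>", "x * 7", env)
        fmt.Println(v, err) // 42 <nil>
    }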
-func ExprFunc(filename string, src interface{}, env StringDict) (*Function, error) { - expr, err := syntax.ParseExpr(filename, src, 0) - if err != nil { - return nil, err - } - return makeExprFunc(expr, env) -} - -// makeExprFunc returns a no-argument function whose body is expr. -func makeExprFunc(expr syntax.Expr, env StringDict) (*Function, error) { - locals, err := resolve.Expr(expr, env.Has, Universe.Has) - if err != nil { - return nil, err - } - - return makeToplevelFunction(compile.Expr(expr, "", locals), env), nil -} - -// The following functions are primitive operations of the byte code interpreter. - -// list += iterable -func listExtend(x *List, y Iterable) { - if ylist, ok := y.(*List); ok { - // fast path: list += list - x.elems = append(x.elems, ylist.elems...) - } else { - iter := y.Iterate() - defer iter.Done() - var z Value - for iter.Next(&z) { - x.elems = append(x.elems, z) - } - } -} - -// getAttr implements x.dot. -func getAttr(x Value, name string) (Value, error) { - hasAttr, ok := x.(HasAttrs) - if !ok { - return nil, fmt.Errorf("%s has no .%s field or method", x.Type(), name) - } - - var errmsg string - v, err := hasAttr.Attr(name) - if err == nil { - if v != nil { - return v, nil // success - } - // (nil, nil) => generic error - errmsg = fmt.Sprintf("%s has no .%s field or method", x.Type(), name) - } else if nsa, ok := err.(NoSuchAttrError); ok { - errmsg = string(nsa) - } else { - return nil, err // return error as is - } - - // add spelling hint - if n := spell.Nearest(name, hasAttr.AttrNames()); n != "" { - errmsg = fmt.Sprintf("%s (did you mean .%s?)", errmsg, n) - } - - return nil, fmt.Errorf("%s", errmsg) -} - -// setField implements x.name = y. -func setField(x Value, name string, y Value) error { - if x, ok := x.(HasSetField); ok { - err := x.SetField(name, y) - if _, ok := err.(NoSuchAttrError); ok { - // No such field: check spelling. - if n := spell.Nearest(name, x.AttrNames()); n != "" { - err = fmt.Errorf("%s (did you mean .%s?)", err, n) - } - } - return err - } - - return fmt.Errorf("can't assign to .%s field of %s", name, x.Type()) -} - -// getIndex implements x[y]. -func getIndex(x, y Value) (Value, error) { - switch x := x.(type) { - case Mapping: // dict - z, found, err := x.Get(y) - if err != nil { - return nil, err - } - if !found { - return nil, fmt.Errorf("key %v not in %s", y, x.Type()) - } - return z, nil - - case Indexable: // string, list, tuple - n := x.Len() - i, err := AsInt32(y) - if err != nil { - return nil, fmt.Errorf("%s index: %s", x.Type(), err) - } - origI := i - if i < 0 { - i += n - } - if i < 0 || i >= n { - return nil, outOfRange(origI, n, x) - } - return x.Index(i), nil - } - return nil, fmt.Errorf("unhandled index operation %s[%s]", x.Type(), y.Type()) -} - -func outOfRange(i, n int, x Value) error { - if n == 0 { - return fmt.Errorf("index %d out of range: empty %s", i, x.Type()) - } else { - return fmt.Errorf("%s index %d out of range [%d:%d]", x.Type(), i, -n, n-1) - } -} - -// setIndex implements x[y] = z. 
-func setIndex(x, y, z Value) error { - switch x := x.(type) { - case HasSetKey: - if err := x.SetKey(y, z); err != nil { - return err - } - - case HasSetIndex: - n := x.Len() - i, err := AsInt32(y) - if err != nil { - return err - } - origI := i - if i < 0 { - i += n - } - if i < 0 || i >= n { - return outOfRange(origI, n, x) - } - return x.SetIndex(i, z) - - default: - return fmt.Errorf("%s value does not support item assignment", x.Type()) - } - return nil -} - -// Unary applies a unary operator (+, -, ~, not) to its operand. -func Unary(op syntax.Token, x Value) (Value, error) { - // The NOT operator is not customizable. - if op == syntax.NOT { - return !x.Truth(), nil - } - - // Int, Float, and user-defined types - if x, ok := x.(HasUnary); ok { - // (nil, nil) => unhandled - y, err := x.Unary(op) - if y != nil || err != nil { - return y, err - } - } - - return nil, fmt.Errorf("unknown unary op: %s %s", op, x.Type()) -} - -// Binary applies a strict binary operator (not AND or OR) to its operands. -// For equality tests or ordered comparisons, use Compare instead. -func Binary(op syntax.Token, x, y Value) (Value, error) { - switch op { - case syntax.PLUS: - switch x := x.(type) { - case String: - if y, ok := y.(String); ok { - return x + y, nil - } - case Int: - switch y := y.(type) { - case Int: - return x.Add(y), nil - case Float: - return x.Float() + y, nil - } - case Float: - switch y := y.(type) { - case Float: - return x + y, nil - case Int: - return x + y.Float(), nil - } - case *List: - if y, ok := y.(*List); ok { - z := make([]Value, 0, x.Len()+y.Len()) - z = append(z, x.elems...) - z = append(z, y.elems...) - return NewList(z), nil - } - case Tuple: - if y, ok := y.(Tuple); ok { - z := make(Tuple, 0, len(x)+len(y)) - z = append(z, x...) - z = append(z, y...) 
- return z, nil - } - } - - case syntax.MINUS: - switch x := x.(type) { - case Int: - switch y := y.(type) { - case Int: - return x.Sub(y), nil - case Float: - return x.Float() - y, nil - } - case Float: - switch y := y.(type) { - case Float: - return x - y, nil - case Int: - return x - y.Float(), nil - } - } - - case syntax.STAR: - switch x := x.(type) { - case Int: - switch y := y.(type) { - case Int: - return x.Mul(y), nil - case Float: - return x.Float() * y, nil - case String: - return stringRepeat(y, x) - case *List: - elems, err := tupleRepeat(Tuple(y.elems), x) - if err != nil { - return nil, err - } - return NewList(elems), nil - case Tuple: - return tupleRepeat(y, x) - } - case Float: - switch y := y.(type) { - case Float: - return x * y, nil - case Int: - return x * y.Float(), nil - } - case String: - if y, ok := y.(Int); ok { - return stringRepeat(x, y) - } - case *List: - if y, ok := y.(Int); ok { - elems, err := tupleRepeat(Tuple(x.elems), y) - if err != nil { - return nil, err - } - return NewList(elems), nil - } - case Tuple: - if y, ok := y.(Int); ok { - return tupleRepeat(x, y) - } - - } - - case syntax.SLASH: - switch x := x.(type) { - case Int: - switch y := y.(type) { - case Int: - yf := y.Float() - if yf == 0.0 { - return nil, fmt.Errorf("real division by zero") - } - return x.Float() / yf, nil - case Float: - if y == 0.0 { - return nil, fmt.Errorf("real division by zero") - } - return x.Float() / y, nil - } - case Float: - switch y := y.(type) { - case Float: - if y == 0.0 { - return nil, fmt.Errorf("real division by zero") - } - return x / y, nil - case Int: - yf := y.Float() - if yf == 0.0 { - return nil, fmt.Errorf("real division by zero") - } - return x / yf, nil - } - } - - case syntax.SLASHSLASH: - switch x := x.(type) { - case Int: - switch y := y.(type) { - case Int: - if y.Sign() == 0 { - return nil, fmt.Errorf("floored division by zero") - } - return x.Div(y), nil - case Float: - if y == 0.0 { - return nil, fmt.Errorf("floored division by zero") - } - return floor((x.Float() / y)), nil - } - case Float: - switch y := y.(type) { - case Float: - if y == 0.0 { - return nil, fmt.Errorf("floored division by zero") - } - return floor(x / y), nil - case Int: - yf := y.Float() - if yf == 0.0 { - return nil, fmt.Errorf("floored division by zero") - } - return floor(x / yf), nil - } - } - - case syntax.PERCENT: - switch x := x.(type) { - case Int: - switch y := y.(type) { - case Int: - if y.Sign() == 0 { - return nil, fmt.Errorf("integer modulo by zero") - } - return x.Mod(y), nil - case Float: - if y == 0 { - return nil, fmt.Errorf("float modulo by zero") - } - return x.Float().Mod(y), nil - } - case Float: - switch y := y.(type) { - case Float: - if y == 0.0 { - return nil, fmt.Errorf("float modulo by zero") - } - return Float(math.Mod(float64(x), float64(y))), nil - case Int: - if y.Sign() == 0 { - return nil, fmt.Errorf("float modulo by zero") - } - return x.Mod(y.Float()), nil - } - case String: - return interpolate(string(x), y) - } - - case syntax.NOT_IN: - z, err := Binary(syntax.IN, x, y) - if err != nil { - return nil, err - } - return !z.Truth(), nil - - case syntax.IN: - switch y := y.(type) { - case *List: - for _, elem := range y.elems { - if eq, err := Equal(elem, x); err != nil { - return nil, err - } else if eq { - return True, nil - } - } - return False, nil - case Tuple: - for _, elem := range y { - if eq, err := Equal(elem, x); err != nil { - return nil, err - } else if eq { - return True, nil - } - } - return False, nil - case Mapping: // e.g. 
dict - // Ignore error from Get as we cannot distinguish true - // errors (value cycle, type error) from "key not found". - _, found, _ := y.Get(x) - return Bool(found), nil - case *Set: - ok, err := y.Has(x) - return Bool(ok), err - case String: - needle, ok := x.(String) - if !ok { - return nil, fmt.Errorf("'in ' requires string as left operand, not %s", x.Type()) - } - return Bool(strings.Contains(string(y), string(needle))), nil - case rangeValue: - i, err := NumberToInt(x) - if err != nil { - return nil, fmt.Errorf("'in ' requires integer as left operand, not %s", x.Type()) - } - return Bool(y.contains(i)), nil - } - - case syntax.PIPE: - switch x := x.(type) { - case Int: - if y, ok := y.(Int); ok { - return x.Or(y), nil - } - case *Set: // union - if y, ok := y.(*Set); ok { - iter := Iterate(y) - defer iter.Done() - return x.Union(iter) - } - } - - case syntax.AMP: - switch x := x.(type) { - case Int: - if y, ok := y.(Int); ok { - return x.And(y), nil - } - case *Set: // intersection - if y, ok := y.(*Set); ok { - set := new(Set) - if x.Len() > y.Len() { - x, y = y, x // opt: range over smaller set - } - for _, xelem := range x.elems() { - // Has, Insert cannot fail here. - if found, _ := y.Has(xelem); found { - set.Insert(xelem) - } - } - return set, nil - } - } - - case syntax.CIRCUMFLEX: - switch x := x.(type) { - case Int: - if y, ok := y.(Int); ok { - return x.Xor(y), nil - } - case *Set: // symmetric difference - if y, ok := y.(*Set); ok { - set := new(Set) - for _, xelem := range x.elems() { - if found, _ := y.Has(xelem); !found { - set.Insert(xelem) - } - } - for _, yelem := range y.elems() { - if found, _ := x.Has(yelem); !found { - set.Insert(yelem) - } - } - return set, nil - } - } - - case syntax.LTLT, syntax.GTGT: - if x, ok := x.(Int); ok { - y, err := AsInt32(y) - if err != nil { - return nil, err - } - if y < 0 { - return nil, fmt.Errorf("negative shift count: %v", y) - } - if op == syntax.LTLT { - if y >= 512 { - return nil, fmt.Errorf("shift count too large: %v", y) - } - return x.Lsh(uint(y)), nil - } else { - return x.Rsh(uint(y)), nil - } - } - - default: - // unknown operator - goto unknown - } - - // user-defined types - // (nil, nil) => unhandled - if x, ok := x.(HasBinary); ok { - z, err := x.Binary(op, y, Left) - if z != nil || err != nil { - return z, err - } - } - if y, ok := y.(HasBinary); ok { - z, err := y.Binary(op, x, Right) - if z != nil || err != nil { - return z, err - } - } - - // unsupported operand types -unknown: - return nil, fmt.Errorf("unknown binary op: %s %s %s", x.Type(), op, y.Type()) -} - -// It's always possible to overeat in small bites but we'll -// try to stop someone swallowing the world in one gulp. 
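// Illustrative aside (editorial sketch, not from the vendored source): the
// maxAlloc guard below is 1 << 30 = 1,073,741,824, and tupleRepeat/stringRepeat
// reject any repeat whose element count reaches it (or overflows int), e.g.:
//
//	(1, 2, 3) * 400000000   # sz = 3 * 400,000,000 = 1,200,000,000 >= 1<<30
//	                        # => error "excessive repeat (1200000000 elements)"
//	(1, 2, 3) * 1000        # sz = 3,000, allocated once and filled by
//	                        # repeatedly doubling the copied prefix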
-const maxAlloc = 1 << 30 - -func tupleRepeat(elems Tuple, n Int) (Tuple, error) { - if len(elems) == 0 { - return nil, nil - } - i, err := AsInt32(n) - if err != nil { - return nil, fmt.Errorf("repeat count %s too large", n) - } - if i < 1 { - return nil, nil - } - // Inv: i > 0, len > 0 - sz := len(elems) * i - if sz < 0 || sz >= maxAlloc { // sz < 0 => overflow - return nil, fmt.Errorf("excessive repeat (%d elements)", sz) - } - res := make([]Value, sz) - // copy elems into res, doubling each time - x := copy(res, elems) - for x < len(res) { - copy(res[x:], res[:x]) - x *= 2 - } - return res, nil -} - -func stringRepeat(s String, n Int) (String, error) { - if s == "" { - return "", nil - } - i, err := AsInt32(n) - if err != nil { - return "", fmt.Errorf("repeat count %s too large", n) - } - if i < 1 { - return "", nil - } - // Inv: i > 0, len > 0 - sz := len(s) * i - if sz < 0 || sz >= maxAlloc { // sz < 0 => overflow - return "", fmt.Errorf("excessive repeat (%d elements)", sz) - } - return String(strings.Repeat(string(s), i)), nil -} - -// Call calls the function fn with the specified positional and keyword arguments. -func Call(thread *Thread, fn Value, args Tuple, kwargs []Tuple) (Value, error) { - c, ok := fn.(Callable) - if !ok { - return nil, fmt.Errorf("invalid call of non-function (%s)", fn.Type()) - } - - // Allocate and push a new frame. - var fr *frame - // Optimization: use slack portion of thread.stack - // slice as a freelist of empty frames. - if n := len(thread.stack); n < cap(thread.stack) { - fr = thread.stack[n : n+1][0] - } - if fr == nil { - fr = new(frame) - } - thread.stack = append(thread.stack, fr) // push - - fr.callable = c - - thread.beginProfSpan() - result, err := c.CallInternal(thread, args, kwargs) - thread.endProfSpan() - - // Sanity check: nil is not a valid Starlark value. - if result == nil && err == nil { - err = fmt.Errorf("internal error: nil (not None) returned from %s", fn) - } - - // Always return an EvalError with an accurate frame. - if err != nil { - if _, ok := err.(*EvalError); !ok { - err = thread.evalError(err) - } - } - - *fr = frame{} // clear out any references - thread.stack = thread.stack[:len(thread.stack)-1] // pop - - return result, err -} - -func slice(x, lo, hi, step_ Value) (Value, error) { - sliceable, ok := x.(Sliceable) - if !ok { - return nil, fmt.Errorf("invalid slice operand %s", x.Type()) - } - - n := sliceable.Len() - step := 1 - if step_ != None { - var err error - step, err = AsInt32(step_) - if err != nil { - return nil, fmt.Errorf("got %s for slice step, want int", step_.Type()) - } - if step == 0 { - return nil, fmt.Errorf("zero is not a valid slice step") - } - } - - // TODO(adonovan): opt: preallocate result array. - - var start, end int - if step > 0 { - // positive stride - // default indices are [0:n]. - var err error - start, end, err = indices(lo, hi, n) - if err != nil { - return nil, err - } - - if end < start { - end = start // => empty result - } - } else { - // negative stride - // default indices are effectively [n-1:-1], though to - // get this effect using explicit indices requires - // [n-1:-1-n:-1] because of the treatment of -ve values. 
- start = n - 1 - if err := asIndex(lo, n, &start); err != nil { - return nil, fmt.Errorf("invalid start index: %s", err) - } - if start >= n { - start = n - 1 - } - - end = -1 - if err := asIndex(hi, n, &end); err != nil { - return nil, fmt.Errorf("invalid end index: %s", err) - } - if end < -1 { - end = -1 - } - - if start < end { - start = end // => empty result - } - } - - return sliceable.Slice(start, end, step), nil -} - -// From Hacker's Delight, section 2.8. -func signum64(x int64) int { return int(uint64(x>>63) | uint64(-x)>>63) } -func signum(x int) int { return signum64(int64(x)) } - -// indices converts start_ and end_ to indices in the range [0:len]. -// The start index defaults to 0 and the end index defaults to len. -// An index -len < i < 0 is treated like i+len. -// All other indices outside the range are clamped to the nearest value in the range. -// Beware: start may be greater than end. -// This function is suitable only for slices with positive strides. -func indices(start_, end_ Value, len int) (start, end int, err error) { - start = 0 - if err := asIndex(start_, len, &start); err != nil { - return 0, 0, fmt.Errorf("invalid start index: %s", err) - } - // Clamp to [0:len]. - if start < 0 { - start = 0 - } else if start > len { - start = len - } - - end = len - if err := asIndex(end_, len, &end); err != nil { - return 0, 0, fmt.Errorf("invalid end index: %s", err) - } - // Clamp to [0:len]. - if end < 0 { - end = 0 - } else if end > len { - end = len - } - - return start, end, nil -} - -// asIndex sets *result to the integer value of v, adding len to it -// if it is negative. If v is nil or None, *result is unchanged. -func asIndex(v Value, len int, result *int) error { - if v != nil && v != None { - var err error - *result, err = AsInt32(v) - if err != nil { - return fmt.Errorf("got %s, want int", v.Type()) - } - if *result < 0 { - *result += len - } - } - return nil -} - -// setArgs sets the values of the formal parameters of function fn in -// based on the actual parameter values in args and kwargs. -func setArgs(locals []Value, fn *Function, args Tuple, kwargs []Tuple) error { - - // This is the general schema of a function: - // - // def f(p1, p2=dp2, p3=dp3, *args, k1, k2=dk2, k3, **kwargs) - // - // The p parameters are non-kwonly, and may be specified positionally. - // The k parameters are kwonly, and must be specified by name. - // The defaults tuple is (dp2, dp3, mandatory, dk2, mandatory). - // - // Arguments are processed as follows: - // - positional arguments are bound to a prefix of [p1, p2, p3]. - // - surplus positional arguments are bound to *args. - // - keyword arguments are bound to any of {p1, p2, p3, k1, k2, k3}; - // duplicate bindings are rejected. - // - surplus keyword arguments are bound to **kwargs. - // - defaults are bound to each parameter from p2 to k3 if no value was set. - // default values come from the tuple above. - // It is an error if the tuple entry for an unset parameter is 'mandatory'. - - // Nullary function? - if fn.NumParams() == 0 { - if nactual := len(args) + len(kwargs); nactual > 0 { - return fmt.Errorf("function %s accepts no arguments (%d given)", fn.Name(), nactual) - } - return nil - } - - cond := func(x bool, y, z interface{}) interface{} { - if x { - return y - } - return z - } - - // nparams is the number of ordinary parameters (sans *args and **kwargs). 
- nparams := fn.NumParams() - var kwdict *Dict - if fn.HasKwargs() { - nparams-- - kwdict = new(Dict) - locals[nparams] = kwdict - } - if fn.HasVarargs() { - nparams-- - } - - // nonkwonly is the number of non-kwonly parameters. - nonkwonly := nparams - fn.NumKwonlyParams() - - // Too many positional args? - n := len(args) - if len(args) > nonkwonly { - if !fn.HasVarargs() { - return fmt.Errorf("function %s accepts %s%d positional argument%s (%d given)", - fn.Name(), - cond(len(fn.defaults) > fn.NumKwonlyParams(), "at most ", ""), - nonkwonly, - cond(nonkwonly == 1, "", "s"), - len(args)) - } - n = nonkwonly - } - - // Bind positional arguments to non-kwonly parameters. - for i := 0; i < n; i++ { - locals[i] = args[i] - } - - // Bind surplus positional arguments to *args parameter. - if fn.HasVarargs() { - tuple := make(Tuple, len(args)-n) - for i := n; i < len(args); i++ { - tuple[i-n] = args[i] - } - locals[nparams] = tuple - } - - // Bind keyword arguments to parameters. - paramIdents := fn.funcode.Locals[:nparams] - for _, pair := range kwargs { - k, v := pair[0].(String), pair[1] - if i := findParam(paramIdents, string(k)); i >= 0 { - if locals[i] != nil { - return fmt.Errorf("function %s got multiple values for parameter %s", fn.Name(), k) - } - locals[i] = v - continue - } - if kwdict == nil { - return fmt.Errorf("function %s got an unexpected keyword argument %s", fn.Name(), k) - } - oldlen := kwdict.Len() - kwdict.SetKey(k, v) - if kwdict.Len() == oldlen { - return fmt.Errorf("function %s got multiple values for parameter %s", fn.Name(), k) - } - } - - // Are defaults required? - if n < nparams || fn.NumKwonlyParams() > 0 { - m := nparams - len(fn.defaults) // first default - - // Report errors for missing required arguments. - var missing []string - var i int - for i = n; i < m; i++ { - if locals[i] == nil { - missing = append(missing, paramIdents[i].Name) - } - } - - // Bind default values to parameters. - for ; i < nparams; i++ { - if locals[i] == nil { - dflt := fn.defaults[i-m] - if _, ok := dflt.(mandatory); ok { - missing = append(missing, paramIdents[i].Name) - continue - } - locals[i] = dflt - } - } - - if missing != nil { - return fmt.Errorf("function %s missing %d argument%s (%s)", - fn.Name(), len(missing), cond(len(missing) > 1, "s", ""), strings.Join(missing, ", ")) - } - } - return nil -} - -func findParam(params []compile.Binding, name string) int { - for i, param := range params { - if param.Name == name { - return i - } - } - return -1 -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string-interpolation -func interpolate(format string, x Value) (Value, error) { - buf := new(strings.Builder) - index := 0 - nargs := 1 - if tuple, ok := x.(Tuple); ok { - nargs = len(tuple) - } - for { - i := strings.IndexByte(format, '%') - if i < 0 { - buf.WriteString(format) - break - } - buf.WriteString(format[:i]) - format = format[i+1:] - - if format != "" && format[0] == '%' { - buf.WriteByte('%') - format = format[1:] - continue - } - - var arg Value - if format != "" && format[0] == '(' { - // keyword argument: %(name)s. - format = format[1:] - j := strings.IndexByte(format, ')') - if j < 0 { - return nil, fmt.Errorf("incomplete format key") - } - key := format[:j] - if dict, ok := x.(Mapping); !ok { - return nil, fmt.Errorf("format requires a mapping") - } else if v, found, _ := dict.Get(String(key)); found { - arg = v - } else { - return nil, fmt.Errorf("key not found: %s", key) - } - format = format[j+1:] - } else { - // positional argument: %s. 
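// Illustrative aside (editorial sketch, not part of the vendored file): behaviour
// of the interpolation loop above and below, assuming ordinary Starlark values:
//
//	"%s at %d" % ("digest", 2)        # => "digest at 2"
//	"%(name)s" % {"name": "harbor"}   # => "harbor" (keyword form requires a Mapping)
//	"%5d" % 3                         # => error "unknown conversion %5", since
//	                                  #    width/precision/flag syntax is unsupported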
- if index >= nargs { - return nil, fmt.Errorf("not enough arguments for format string") - } - if tuple, ok := x.(Tuple); ok { - arg = tuple[index] - } else { - arg = x - } - } - - // NOTE: Starlark does not support any of these optional Python features: - // - optional conversion flags: [#0- +], etc. - // - optional minimum field width (number or *). - // - optional precision (.123 or *) - // - optional length modifier - - // conversion type - if format == "" { - return nil, fmt.Errorf("incomplete format") - } - switch c := format[0]; c { - case 's', 'r': - if str, ok := AsString(arg); ok && c == 's' { - buf.WriteString(str) - } else { - writeValue(buf, arg, nil) - } - case 'd', 'i', 'o', 'x', 'X': - i, err := NumberToInt(arg) - if err != nil { - return nil, fmt.Errorf("%%%c format requires integer: %v", c, err) - } - switch c { - case 'd', 'i': - fmt.Fprintf(buf, "%d", i) - case 'o': - fmt.Fprintf(buf, "%o", i) - case 'x': - fmt.Fprintf(buf, "%x", i) - case 'X': - fmt.Fprintf(buf, "%X", i) - } - case 'e', 'f', 'g', 'E', 'F', 'G': - f, ok := AsFloat(arg) - if !ok { - return nil, fmt.Errorf("%%%c format requires float, not %s", c, arg.Type()) - } - switch c { - case 'e': - fmt.Fprintf(buf, "%e", f) - case 'f': - fmt.Fprintf(buf, "%f", f) - case 'g': - fmt.Fprintf(buf, "%g", f) - case 'E': - fmt.Fprintf(buf, "%E", f) - case 'F': - fmt.Fprintf(buf, "%F", f) - case 'G': - fmt.Fprintf(buf, "%G", f) - } - case 'c': - switch arg := arg.(type) { - case Int: - // chr(int) - r, err := AsInt32(arg) - if err != nil || r < 0 || r > unicode.MaxRune { - return nil, fmt.Errorf("%%c format requires a valid Unicode code point, got %s", arg) - } - buf.WriteRune(rune(r)) - case String: - r, size := utf8.DecodeRuneInString(string(arg)) - if size != len(arg) || len(arg) == 0 { - return nil, fmt.Errorf("%%c format requires a single-character string") - } - buf.WriteRune(r) - default: - return nil, fmt.Errorf("%%c format requires int or single-character string, not %s", arg.Type()) - } - case '%': - buf.WriteByte('%') - default: - return nil, fmt.Errorf("unknown conversion %%%c", c) - } - format = format[1:] - index++ - } - - if index < nargs { - return nil, fmt.Errorf("too many arguments for format string") - } - - return String(buf.String()), nil -} diff --git a/src/vendor/go.starlark.net/starlark/hashtable.go b/src/vendor/go.starlark.net/starlark/hashtable.go deleted file mode 100644 index d4250194a..000000000 --- a/src/vendor/go.starlark.net/starlark/hashtable.go +++ /dev/null @@ -1,373 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package starlark - -import ( - "fmt" - _ "unsafe" // for go:linkname hack -) - -// hashtable is used to represent Starlark dict and set values. -// It is a hash table whose key/value entries form a doubly-linked list -// in the order the entries were inserted. -type hashtable struct { - table []bucket // len is zero or a power of two - bucket0 [1]bucket // inline allocation for small maps. 
- len uint32 - itercount uint32 // number of active iterators (ignored if frozen) - head *entry // insertion order doubly-linked list; may be nil - tailLink **entry // address of nil link at end of list (perhaps &head) - frozen bool -} - -const bucketSize = 8 - -type bucket struct { - entries [bucketSize]entry - next *bucket // linked list of buckets -} - -type entry struct { - hash uint32 // nonzero => in use - key, value Value - next *entry // insertion order doubly-linked list; may be nil - prevLink **entry // address of link to this entry (perhaps &head) -} - -func (ht *hashtable) init(size int) { - if size < 0 { - panic("size < 0") - } - nb := 1 - for overloaded(size, nb) { - nb = nb << 1 - } - if nb < 2 { - ht.table = ht.bucket0[:1] - } else { - ht.table = make([]bucket, nb) - } - ht.tailLink = &ht.head -} - -func (ht *hashtable) freeze() { - if !ht.frozen { - ht.frozen = true - for i := range ht.table { - for p := &ht.table[i]; p != nil; p = p.next { - for i := range p.entries { - e := &p.entries[i] - if e.hash != 0 { - e.key.Freeze() - e.value.Freeze() - } - } - } - } - } -} - -func (ht *hashtable) insert(k, v Value) error { - if ht.frozen { - return fmt.Errorf("cannot insert into frozen hash table") - } - if ht.itercount > 0 { - return fmt.Errorf("cannot insert into hash table during iteration") - } - if ht.table == nil { - ht.init(1) - } - h, err := k.Hash() - if err != nil { - return err - } - if h == 0 { - h = 1 // zero is reserved - } - -retry: - var insert *entry - - // Inspect each bucket in the bucket list. - p := &ht.table[h&(uint32(len(ht.table)-1))] - for { - for i := range p.entries { - e := &p.entries[i] - if e.hash != h { - if e.hash == 0 { - // Found empty entry; make a note. - insert = e - } - continue - } - if eq, err := Equal(k, e.key); err != nil { - return err // e.g. excessively recursive tuple - } else if !eq { - continue - } - // Key already present; update value. - e.value = v - return nil - } - if p.next == nil { - break - } - p = p.next - } - - // Key not found. p points to the last bucket. - - // Does the number of elements exceed the buckets' load factor? - if overloaded(int(ht.len), len(ht.table)) { - ht.grow() - goto retry - } - - if insert == nil { - // No space in existing buckets. Add a new one to the bucket list. - b := new(bucket) - p.next = b - insert = &b.entries[0] - } - - // Insert key/value pair. - insert.hash = h - insert.key = k - insert.value = v - - // Append entry to doubly-linked list. - insert.prevLink = ht.tailLink - *ht.tailLink = insert - ht.tailLink = &insert.next - - ht.len++ - - return nil -} - -func overloaded(elems, buckets int) bool { - const loadFactor = 6.5 // just a guess - return elems >= bucketSize && float64(elems) >= loadFactor*float64(buckets) -} - -func (ht *hashtable) grow() { - // Double the number of buckets and rehash. - // TODO(adonovan): opt: - // - avoid reentrant calls to ht.insert, and specialize it. - // e.g. we know the calls to Equals will return false since - // there are no duplicates among the old keys. - // - saving the entire hash in the bucket would avoid the need to - // recompute the hash. - // - save the old buckets on a free list. 
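// Illustrative aside (editorial sketch, not from the vendored source): with
// bucketSize = 8 and loadFactor = 6.5 as defined above, overloaded(elems, buckets)
// is true once elems >= 8 && elems >= 6.5*buckets. So the inline single bucket is
// doubled to 2 buckets by the insertion that finds 8 entries already present
// (8 >= 8 and 8 >= 6.5), and 2 buckets double to 4 once 13 entries are held
// (13 >= 6.5 * 2).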
- ht.table = make([]bucket, len(ht.table)<<1) - oldhead := ht.head - ht.head = nil - ht.tailLink = &ht.head - ht.len = 0 - for e := oldhead; e != nil; e = e.next { - ht.insert(e.key, e.value) - } - ht.bucket0[0] = bucket{} // clear out unused initial bucket -} - -func (ht *hashtable) lookup(k Value) (v Value, found bool, err error) { - h, err := k.Hash() - if err != nil { - return nil, false, err // unhashable - } - if h == 0 { - h = 1 // zero is reserved - } - if ht.table == nil { - return None, false, nil // empty - } - - // Inspect each bucket in the bucket list. - for p := &ht.table[h&(uint32(len(ht.table)-1))]; p != nil; p = p.next { - for i := range p.entries { - e := &p.entries[i] - if e.hash == h { - if eq, err := Equal(k, e.key); err != nil { - return nil, false, err // e.g. excessively recursive tuple - } else if eq { - return e.value, true, nil // found - } - } - } - } - return None, false, nil // not found -} - -// Items returns all the items in the map (as key/value pairs) in insertion order. -func (ht *hashtable) items() []Tuple { - items := make([]Tuple, 0, ht.len) - array := make([]Value, ht.len*2) // allocate a single backing array - for e := ht.head; e != nil; e = e.next { - pair := Tuple(array[:2:2]) - array = array[2:] - pair[0] = e.key - pair[1] = e.value - items = append(items, pair) - } - return items -} - -func (ht *hashtable) first() (Value, bool) { - if ht.head != nil { - return ht.head.key, true - } - return None, false -} - -func (ht *hashtable) keys() []Value { - keys := make([]Value, 0, ht.len) - for e := ht.head; e != nil; e = e.next { - keys = append(keys, e.key) - } - return keys -} - -func (ht *hashtable) delete(k Value) (v Value, found bool, err error) { - if ht.frozen { - return nil, false, fmt.Errorf("cannot delete from frozen hash table") - } - if ht.itercount > 0 { - return nil, false, fmt.Errorf("cannot delete from hash table during iteration") - } - if ht.table == nil { - return None, false, nil // empty - } - h, err := k.Hash() - if err != nil { - return nil, false, err // unhashable - } - if h == 0 { - h = 1 // zero is reserved - } - - // Inspect each bucket in the bucket list. - for p := &ht.table[h&(uint32(len(ht.table)-1))]; p != nil; p = p.next { - for i := range p.entries { - e := &p.entries[i] - if e.hash == h { - if eq, err := Equal(k, e.key); err != nil { - return nil, false, err - } else if eq { - // Remove e from doubly-linked list. - *e.prevLink = e.next - if e.next == nil { - ht.tailLink = e.prevLink // deletion of last entry - } else { - e.next.prevLink = e.prevLink - } - - v := e.value - *e = entry{} - ht.len-- - return v, true, nil // found - } - } - } - } - - // TODO(adonovan): opt: remove completely empty bucket from bucket list. - - return None, false, nil // not found -} - -func (ht *hashtable) clear() error { - if ht.frozen { - return fmt.Errorf("cannot clear frozen hash table") - } - if ht.itercount > 0 { - return fmt.Errorf("cannot clear hash table during iteration") - } - if ht.table != nil { - for i := range ht.table { - ht.table[i] = bucket{} - } - } - ht.head = nil - ht.tailLink = &ht.head - ht.len = 0 - return nil -} - -// dump is provided as an aid to debugging. 
-func (ht *hashtable) dump() { - fmt.Printf("hashtable %p len=%d head=%p tailLink=%p", - ht, ht.len, ht.head, ht.tailLink) - if ht.tailLink != nil { - fmt.Printf(" *tailLink=%p", *ht.tailLink) - } - fmt.Println() - for j := range ht.table { - fmt.Printf("bucket chain %d\n", j) - for p := &ht.table[j]; p != nil; p = p.next { - fmt.Printf("bucket %p\n", p) - for i := range p.entries { - e := &p.entries[i] - fmt.Printf("\tentry %d @ %p hash=%d key=%v value=%v\n", - i, e, e.hash, e.key, e.value) - fmt.Printf("\t\tnext=%p &next=%p prev=%p", - e.next, &e.next, e.prevLink) - if e.prevLink != nil { - fmt.Printf(" *prev=%p", *e.prevLink) - } - fmt.Println() - } - } - } -} - -func (ht *hashtable) iterate() *keyIterator { - if !ht.frozen { - ht.itercount++ - } - return &keyIterator{ht: ht, e: ht.head} -} - -type keyIterator struct { - ht *hashtable - e *entry -} - -func (it *keyIterator) Next(k *Value) bool { - if it.e != nil { - *k = it.e.key - it.e = it.e.next - return true - } - return false -} - -func (it *keyIterator) Done() { - if !it.ht.frozen { - it.ht.itercount-- - } -} - -// hashString computes the hash of s. -func hashString(s string) uint32 { - if len(s) >= 12 { - // Call the Go runtime's optimized hash implementation, - // which uses the AESENC instruction on amd64 machines. - return uint32(goStringHash(s, 0)) - } - return softHashString(s) -} - -//go:linkname goStringHash runtime.stringHash -func goStringHash(s string, seed uintptr) uintptr - -// softHashString computes the FNV hash of s in software. -func softHashString(s string) uint32 { - var h uint32 - for i := 0; i < len(s); i++ { - h ^= uint32(s[i]) - h *= 16777619 - } - return h -} diff --git a/src/vendor/go.starlark.net/starlark/int.go b/src/vendor/go.starlark.net/starlark/int.go deleted file mode 100644 index 35bd42b3f..000000000 --- a/src/vendor/go.starlark.net/starlark/int.go +++ /dev/null @@ -1,350 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package starlark - -import ( - "fmt" - "math" - "math/big" - "strconv" - - "go.starlark.net/syntax" -) - -// Int is the type of a Starlark int. -type Int struct { - // We use only the signed 32 bit range of small to ensure - // that small+small and small*small do not overflow. - - small int64 // minint32 <= small <= maxint32 - big *big.Int // big != nil <=> value is not representable as int32 -} - -// newBig allocates a new big.Int. -func newBig(x int64) *big.Int { - if 0 <= x && int64(big.Word(x)) == x { - // x is guaranteed to fit into a single big.Word. - // Most starlark ints are small, - // but math/big assumes that since you've chosen to use math/big, - // your big.Ints will probably grow, so it over-allocates. - // Avoid that over-allocation by manually constructing a single-word slice. - // See https://golang.org/cl/150999, which will hopefully land in Go 1.13. - return new(big.Int).SetBits([]big.Word{big.Word(x)}) - } - return big.NewInt(x) -} - -// MakeInt returns a Starlark int for the specified signed integer. -func MakeInt(x int) Int { return MakeInt64(int64(x)) } - -// MakeInt64 returns a Starlark int for the specified int64. -func MakeInt64(x int64) Int { - if math.MinInt32 <= x && x <= math.MaxInt32 { - return Int{small: x} - } - return Int{big: newBig(x)} -} - -// MakeUint returns a Starlark int for the specified unsigned integer. 
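// Illustrative aside (editorial sketch, not part of the vendored file): the
// small/big split above keeps values in the signed 32-bit range inline, e.g.:
//
//	MakeInt64(1 << 20)   // stored in small (fits int32)
//	MakeInt64(1 << 40)   // stored as *big.Int (exceeds math.MaxInt32)
//
// Restricting small to the int32 range is what lets Add/Mul below compute
// x.small + y.small and x.small * y.small in int64 without overflow.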
-func MakeUint(x uint) Int { return MakeUint64(uint64(x)) } - -// MakeUint64 returns a Starlark int for the specified uint64. -func MakeUint64(x uint64) Int { - if x <= math.MaxInt32 { - return Int{small: int64(x)} - } - if uint64(big.Word(x)) == x { - // See comment in newBig for an explanation of this optimization. - return Int{big: new(big.Int).SetBits([]big.Word{big.Word(x)})} - } - return Int{big: new(big.Int).SetUint64(x)} -} - -// MakeBigInt returns a Starlark int for the specified big.Int. -// The caller must not subsequently modify x. -func MakeBigInt(x *big.Int) Int { - if n := x.BitLen(); n < 32 || n == 32 && x.Int64() == math.MinInt32 { - return Int{small: x.Int64()} - } - return Int{big: x} -} - -var ( - zero, one = Int{small: 0}, Int{small: 1} - oneBig = newBig(1) - - _ HasUnary = Int{} -) - -// Unary implements the operations +int, -int, and ~int. -func (i Int) Unary(op syntax.Token) (Value, error) { - switch op { - case syntax.MINUS: - return zero.Sub(i), nil - case syntax.PLUS: - return i, nil - case syntax.TILDE: - return i.Not(), nil - } - return nil, nil -} - -// Int64 returns the value as an int64. -// If it is not exactly representable the result is undefined and ok is false. -func (i Int) Int64() (_ int64, ok bool) { - if i.big != nil { - x, acc := bigintToInt64(i.big) - if acc != big.Exact { - return // inexact - } - return x, true - } - return i.small, true -} - -// BigInt returns the value as a big.Int. -// The returned variable must not be modified by the client. -func (i Int) BigInt() *big.Int { - if i.big != nil { - return i.big - } - return newBig(i.small) -} - -// Uint64 returns the value as a uint64. -// If it is not exactly representable the result is undefined and ok is false. -func (i Int) Uint64() (_ uint64, ok bool) { - if i.big != nil { - x, acc := bigintToUint64(i.big) - if acc != big.Exact { - return // inexact - } - return x, true - } - if i.small < 0 { - return // inexact - } - return uint64(i.small), true -} - -// The math/big API should provide this function. -func bigintToInt64(i *big.Int) (int64, big.Accuracy) { - sign := i.Sign() - if sign > 0 { - if i.Cmp(maxint64) > 0 { - return math.MaxInt64, big.Below - } - } else if sign < 0 { - if i.Cmp(minint64) < 0 { - return math.MinInt64, big.Above - } - } - return i.Int64(), big.Exact -} - -// The math/big API should provide this function. 
-func bigintToUint64(i *big.Int) (uint64, big.Accuracy) { - sign := i.Sign() - if sign > 0 { - if i.BitLen() > 64 { - return math.MaxUint64, big.Below - } - } else if sign < 0 { - return 0, big.Above - } - return i.Uint64(), big.Exact -} - -var ( - minint64 = new(big.Int).SetInt64(math.MinInt64) - maxint64 = new(big.Int).SetInt64(math.MaxInt64) -) - -func (i Int) Format(s fmt.State, ch rune) { - if i.big != nil { - i.big.Format(s, ch) - return - } - newBig(i.small).Format(s, ch) -} -func (i Int) String() string { - if i.big != nil { - return i.big.Text(10) - } - return strconv.FormatInt(i.small, 10) -} -func (i Int) Type() string { return "int" } -func (i Int) Freeze() {} // immutable -func (i Int) Truth() Bool { return i.Sign() != 0 } -func (i Int) Hash() (uint32, error) { - var lo big.Word - if i.big != nil { - lo = i.big.Bits()[0] - } else { - lo = big.Word(i.small) - } - return 12582917 * uint32(lo+3), nil -} -func (x Int) CompareSameType(op syntax.Token, v Value, depth int) (bool, error) { - y := v.(Int) - if x.big != nil || y.big != nil { - return threeway(op, x.BigInt().Cmp(y.BigInt())), nil - } - return threeway(op, signum64(x.small-y.small)), nil -} - -// Float returns the float value nearest i. -func (i Int) Float() Float { - if i.big != nil { - f, _ := new(big.Float).SetInt(i.big).Float64() - return Float(f) - } - return Float(i.small) -} - -func (x Int) Sign() int { - if x.big != nil { - return x.big.Sign() - } - return signum64(x.small) -} - -func (x Int) Add(y Int) Int { - if x.big != nil || y.big != nil { - return MakeBigInt(new(big.Int).Add(x.BigInt(), y.BigInt())) - } - return MakeInt64(x.small + y.small) -} -func (x Int) Sub(y Int) Int { - if x.big != nil || y.big != nil { - return MakeBigInt(new(big.Int).Sub(x.BigInt(), y.BigInt())) - } - return MakeInt64(x.small - y.small) -} -func (x Int) Mul(y Int) Int { - if x.big != nil || y.big != nil { - return MakeBigInt(new(big.Int).Mul(x.BigInt(), y.BigInt())) - } - return MakeInt64(x.small * y.small) -} -func (x Int) Or(y Int) Int { - if x.big != nil || y.big != nil { - return Int{big: new(big.Int).Or(x.BigInt(), y.BigInt())} - } - return Int{small: x.small | y.small} -} -func (x Int) And(y Int) Int { - if x.big != nil || y.big != nil { - return MakeBigInt(new(big.Int).And(x.BigInt(), y.BigInt())) - } - return Int{small: x.small & y.small} -} -func (x Int) Xor(y Int) Int { - if x.big != nil || y.big != nil { - return MakeBigInt(new(big.Int).Xor(x.BigInt(), y.BigInt())) - } - return Int{small: x.small ^ y.small} -} -func (x Int) Not() Int { - if x.big != nil { - return MakeBigInt(new(big.Int).Not(x.big)) - } - return Int{small: ^x.small} -} -func (x Int) Lsh(y uint) Int { return MakeBigInt(new(big.Int).Lsh(x.BigInt(), y)) } -func (x Int) Rsh(y uint) Int { return MakeBigInt(new(big.Int).Rsh(x.BigInt(), y)) } - -// Precondition: y is nonzero. -func (x Int) Div(y Int) Int { - // http://python-history.blogspot.com/2010/08/why-pythons-integer-division-floors.html - if x.big != nil || y.big != nil { - xb, yb := x.BigInt(), y.BigInt() - - var quo, rem big.Int - quo.QuoRem(xb, yb, &rem) - if (xb.Sign() < 0) != (yb.Sign() < 0) && rem.Sign() != 0 { - quo.Sub(&quo, oneBig) - } - return MakeBigInt(&quo) - } - quo := x.small / y.small - rem := x.small % y.small - if (x.small < 0) != (y.small < 0) && rem != 0 { - quo -= 1 - } - return MakeInt64(quo) -} - -// Precondition: y is nonzero. 
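// Illustrative aside (editorial sketch, not from the vendored source): Div above
// and Mod below implement Python-style floored division, so the results satisfy
// x == (x // y)*y + (x % y) with the remainder taking the sign of y:
//
//	7 // -2  ->  Go quotient -3, remainder 1; signs differ, so quo -= 1  => -4
//	7 %  -2  ->  Go remainder 1; signs differ, so rem += y               => -1
//	            check: (-4)*(-2) + (-1) == 7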
-func (x Int) Mod(y Int) Int { - if x.big != nil || y.big != nil { - xb, yb := x.BigInt(), y.BigInt() - - var quo, rem big.Int - quo.QuoRem(xb, yb, &rem) - if (xb.Sign() < 0) != (yb.Sign() < 0) && rem.Sign() != 0 { - rem.Add(&rem, yb) - } - return MakeBigInt(&rem) - } - rem := x.small % y.small - if (x.small < 0) != (y.small < 0) && rem != 0 { - rem += y.small - } - return Int{small: rem} -} - -func (i Int) rational() *big.Rat { - if i.big != nil { - return new(big.Rat).SetInt(i.big) - } - return new(big.Rat).SetInt64(i.small) -} - -// AsInt32 returns the value of x if is representable as an int32. -func AsInt32(x Value) (int, error) { - i, ok := x.(Int) - if !ok { - return 0, fmt.Errorf("got %s, want int", x.Type()) - } - if i.big != nil { - return 0, fmt.Errorf("%s out of range", i) - } - return int(i.small), nil -} - -// NumberToInt converts a number x to an integer value. -// An int is returned unchanged, a float is truncated towards zero. -// NumberToInt reports an error for all other values. -func NumberToInt(x Value) (Int, error) { - switch x := x.(type) { - case Int: - return x, nil - case Float: - f := float64(x) - if math.IsInf(f, 0) { - return zero, fmt.Errorf("cannot convert float infinity to integer") - } else if math.IsNaN(f) { - return zero, fmt.Errorf("cannot convert float NaN to integer") - } - return finiteFloatToInt(x), nil - - } - return zero, fmt.Errorf("cannot convert %s to int", x.Type()) -} - -// finiteFloatToInt converts f to an Int, truncating towards zero. -// f must be finite. -func finiteFloatToInt(f Float) Int { - if math.MinInt64 <= f && f <= math.MaxInt64 { - // small values - return MakeInt64(int64(f)) - } - rat := f.rational() - if rat == nil { - panic(f) // non-finite - } - return MakeBigInt(new(big.Int).Div(rat.Num(), rat.Denom())) -} diff --git a/src/vendor/go.starlark.net/starlark/interp.go b/src/vendor/go.starlark.net/starlark/interp.go deleted file mode 100644 index 529073008..000000000 --- a/src/vendor/go.starlark.net/starlark/interp.go +++ /dev/null @@ -1,637 +0,0 @@ -package starlark - -// This file defines the bytecode interpreter. - -import ( - "fmt" - "os" - - "go.starlark.net/internal/compile" - "go.starlark.net/internal/spell" - "go.starlark.net/resolve" - "go.starlark.net/syntax" -) - -const vmdebug = false // TODO(adonovan): use a bitfield of specific kinds of error. - -// TODO(adonovan): -// - optimize position table. -// - opt: record MaxIterStack during compilation and preallocate the stack. - -func (fn *Function) CallInternal(thread *Thread, args Tuple, kwargs []Tuple) (Value, error) { - if !resolve.AllowRecursion { - // detect recursion - for _, fr := range thread.stack[:len(thread.stack)-1] { - // We look for the same function code, - // not function value, otherwise the user could - // defeat the check by writing the Y combinator. - if frfn, ok := fr.Callable().(*Function); ok && frfn.funcode == fn.funcode { - return nil, fmt.Errorf("function %s called recursively", fn.Name()) - } - } - } - - f := fn.funcode - fr := thread.frameAt(0) - - // Allocate space for stack and locals. - // Logically these do not escape from this frame - // (See https://github.com/golang/go/issues/20533.) 
- // - // This heap allocation looks expensive, but I was unable to get - // more than 1% real time improvement in a large alloc-heavy - // benchmark (in which this alloc was 8% of alloc-bytes) - // by allocating space for 8 Values in each frame, or - // by allocating stack by slicing an array held by the Thread - // that is expanded in chunks of min(k, nspace), for k=256 or 1024. - nlocals := len(f.Locals) - nspace := nlocals + f.MaxStack - space := make([]Value, nspace) - locals := space[:nlocals:nlocals] // local variables, starting with parameters - stack := space[nlocals:] // operand stack - - // Digest arguments and set parameters. - err := setArgs(locals, fn, args, kwargs) - if err != nil { - return nil, thread.evalError(err) - } - - fr.locals = locals - - if vmdebug { - fmt.Printf("Entering %s @ %s\n", f.Name, f.Position(0)) - fmt.Printf("%d stack, %d locals\n", len(stack), len(locals)) - defer fmt.Println("Leaving ", f.Name) - } - - // Spill indicated locals to cells. - // Each cell is a separate alloc to avoid spurious liveness. - for _, index := range f.Cells { - locals[index] = &cell{locals[index]} - } - - // TODO(adonovan): add static check that beneath this point - // - there is exactly one return statement - // - there is no redefinition of 'err'. - - var iterstack []Iterator // stack of active iterators - - sp := 0 - var pc uint32 - var result Value - code := f.Code -loop: - for { - fr.pc = pc - - op := compile.Opcode(code[pc]) - pc++ - var arg uint32 - if op >= compile.OpcodeArgMin { - // TODO(adonovan): opt: profile this. - // Perhaps compiling big endian would be less work to decode? - for s := uint(0); ; s += 7 { - b := code[pc] - pc++ - arg |= uint32(b&0x7f) << s - if b < 0x80 { - break - } - } - } - if vmdebug { - fmt.Fprintln(os.Stderr, stack[:sp]) // very verbose! - compile.PrintOp(f, fr.pc, op, arg) - } - - switch op { - case compile.NOP: - // nop - - case compile.DUP: - stack[sp] = stack[sp-1] - sp++ - - case compile.DUP2: - stack[sp] = stack[sp-2] - stack[sp+1] = stack[sp-1] - sp += 2 - - case compile.POP: - sp-- - - case compile.EXCH: - stack[sp-2], stack[sp-1] = stack[sp-1], stack[sp-2] - - case compile.EQL, compile.NEQ, compile.GT, compile.LT, compile.LE, compile.GE: - op := syntax.Token(op-compile.EQL) + syntax.EQL - y := stack[sp-1] - x := stack[sp-2] - sp -= 2 - ok, err2 := Compare(op, x, y) - if err2 != nil { - err = err2 - break loop - } - stack[sp] = Bool(ok) - sp++ - - case compile.PLUS, - compile.MINUS, - compile.STAR, - compile.SLASH, - compile.SLASHSLASH, - compile.PERCENT, - compile.AMP, - compile.PIPE, - compile.CIRCUMFLEX, - compile.LTLT, - compile.GTGT, - compile.IN: - binop := syntax.Token(op-compile.PLUS) + syntax.PLUS - if op == compile.IN { - binop = syntax.IN // IN token is out of order - } - y := stack[sp-1] - x := stack[sp-2] - sp -= 2 - z, err2 := Binary(binop, x, y) - if err2 != nil { - err = err2 - break loop - } - stack[sp] = z - sp++ - - case compile.UPLUS, compile.UMINUS, compile.TILDE: - var unop syntax.Token - if op == compile.TILDE { - unop = syntax.TILDE - } else { - unop = syntax.Token(op-compile.UPLUS) + syntax.PLUS - } - x := stack[sp-1] - y, err2 := Unary(unop, x) - if err2 != nil { - err = err2 - break loop - } - stack[sp-1] = y - - case compile.INPLACE_ADD: - y := stack[sp-1] - x := stack[sp-2] - sp -= 2 - - // It's possible that y is not Iterable but - // nonetheless defines x+y, in which case we - // should fall back to the general case. 
- var z Value - if xlist, ok := x.(*List); ok { - if yiter, ok := y.(Iterable); ok { - if err = xlist.checkMutable("apply += to"); err != nil { - break loop - } - listExtend(xlist, yiter) - z = xlist - } - } - if z == nil { - z, err = Binary(syntax.PLUS, x, y) - if err != nil { - break loop - } - } - - stack[sp] = z - sp++ - - case compile.NONE: - stack[sp] = None - sp++ - - case compile.TRUE: - stack[sp] = True - sp++ - - case compile.FALSE: - stack[sp] = False - sp++ - - case compile.MANDATORY: - stack[sp] = mandatory{} - sp++ - - case compile.JMP: - pc = arg - - case compile.CALL, compile.CALL_VAR, compile.CALL_KW, compile.CALL_VAR_KW: - var kwargs Value - if op == compile.CALL_KW || op == compile.CALL_VAR_KW { - kwargs = stack[sp-1] - sp-- - } - - var args Value - if op == compile.CALL_VAR || op == compile.CALL_VAR_KW { - args = stack[sp-1] - sp-- - } - - // named args (pairs) - var kvpairs []Tuple - if nkvpairs := int(arg & 0xff); nkvpairs > 0 { - kvpairs = make([]Tuple, 0, nkvpairs) - kvpairsAlloc := make(Tuple, 2*nkvpairs) // allocate a single backing array - sp -= 2 * nkvpairs - for i := 0; i < nkvpairs; i++ { - pair := kvpairsAlloc[:2:2] - kvpairsAlloc = kvpairsAlloc[2:] - pair[0] = stack[sp+2*i] // name - pair[1] = stack[sp+2*i+1] // value - kvpairs = append(kvpairs, pair) - } - } - if kwargs != nil { - // Add key/value items from **kwargs dictionary. - dict, ok := kwargs.(IterableMapping) - if !ok { - err = fmt.Errorf("argument after ** must be a mapping, not %s", kwargs.Type()) - break loop - } - items := dict.Items() - for _, item := range items { - if _, ok := item[0].(String); !ok { - err = fmt.Errorf("keywords must be strings, not %s", item[0].Type()) - break loop - } - } - if len(kvpairs) == 0 { - kvpairs = items - } else { - kvpairs = append(kvpairs, items...) - } - } - - // positional args - var positional Tuple - if npos := int(arg >> 8); npos > 0 { - positional = make(Tuple, npos) - sp -= npos - copy(positional, stack[sp:]) - } - if args != nil { - // Add elements from *args sequence. 
- iter := Iterate(args) - if iter == nil { - err = fmt.Errorf("argument after * must be iterable, not %s", args.Type()) - break loop - } - var elem Value - for iter.Next(&elem) { - positional = append(positional, elem) - } - iter.Done() - } - - function := stack[sp-1] - - if vmdebug { - fmt.Printf("VM call %s args=%s kwargs=%s @%s\n", - function, positional, kvpairs, f.Position(fr.pc)) - } - - thread.endProfSpan() - z, err2 := Call(thread, function, positional, kvpairs) - thread.beginProfSpan() - if err2 != nil { - err = err2 - break loop - } - if vmdebug { - fmt.Printf("Resuming %s @ %s\n", f.Name, f.Position(0)) - } - stack[sp-1] = z - - case compile.ITERPUSH: - x := stack[sp-1] - sp-- - iter := Iterate(x) - if iter == nil { - err = fmt.Errorf("%s value is not iterable", x.Type()) - break loop - } - iterstack = append(iterstack, iter) - - case compile.ITERJMP: - iter := iterstack[len(iterstack)-1] - if iter.Next(&stack[sp]) { - sp++ - } else { - pc = arg - } - - case compile.ITERPOP: - n := len(iterstack) - 1 - iterstack[n].Done() - iterstack = iterstack[:n] - - case compile.NOT: - stack[sp-1] = !stack[sp-1].Truth() - - case compile.RETURN: - result = stack[sp-1] - break loop - - case compile.SETINDEX: - z := stack[sp-1] - y := stack[sp-2] - x := stack[sp-3] - sp -= 3 - err = setIndex(x, y, z) - if err != nil { - break loop - } - - case compile.INDEX: - y := stack[sp-1] - x := stack[sp-2] - sp -= 2 - z, err2 := getIndex(x, y) - if err2 != nil { - err = err2 - break loop - } - stack[sp] = z - sp++ - - case compile.ATTR: - x := stack[sp-1] - name := f.Prog.Names[arg] - y, err2 := getAttr(x, name) - if err2 != nil { - err = err2 - break loop - } - stack[sp-1] = y - - case compile.SETFIELD: - y := stack[sp-1] - x := stack[sp-2] - sp -= 2 - name := f.Prog.Names[arg] - if err2 := setField(x, name, y); err2 != nil { - err = err2 - break loop - } - - case compile.MAKEDICT: - stack[sp] = new(Dict) - sp++ - - case compile.SETDICT, compile.SETDICTUNIQ: - dict := stack[sp-3].(*Dict) - k := stack[sp-2] - v := stack[sp-1] - sp -= 3 - oldlen := dict.Len() - if err2 := dict.SetKey(k, v); err2 != nil { - err = err2 - break loop - } - if op == compile.SETDICTUNIQ && dict.Len() == oldlen { - err = fmt.Errorf("duplicate key: %v", k) - break loop - } - - case compile.APPEND: - elem := stack[sp-1] - list := stack[sp-2].(*List) - sp -= 2 - list.elems = append(list.elems, elem) - - case compile.SLICE: - x := stack[sp-4] - lo := stack[sp-3] - hi := stack[sp-2] - step := stack[sp-1] - sp -= 4 - res, err2 := slice(x, lo, hi, step) - if err2 != nil { - err = err2 - break loop - } - stack[sp] = res - sp++ - - case compile.UNPACK: - n := int(arg) - iterable := stack[sp-1] - sp-- - iter := Iterate(iterable) - if iter == nil { - err = fmt.Errorf("got %s in sequence assignment", iterable.Type()) - break loop - } - i := 0 - sp += n - for i < n && iter.Next(&stack[sp-1-i]) { - i++ - } - var dummy Value - if iter.Next(&dummy) { - // NB: Len may return -1 here in obscure cases. 
- err = fmt.Errorf("too many values to unpack (got %d, want %d)", Len(iterable), n) - break loop - } - iter.Done() - if i < n { - err = fmt.Errorf("too few values to unpack (got %d, want %d)", i, n) - break loop - } - - case compile.CJMP: - if stack[sp-1].Truth() { - pc = arg - } - sp-- - - case compile.CONSTANT: - stack[sp] = fn.module.constants[arg] - sp++ - - case compile.MAKETUPLE: - n := int(arg) - tuple := make(Tuple, n) - sp -= n - copy(tuple, stack[sp:]) - stack[sp] = tuple - sp++ - - case compile.MAKELIST: - n := int(arg) - elems := make([]Value, n) - sp -= n - copy(elems, stack[sp:]) - stack[sp] = NewList(elems) - sp++ - - case compile.MAKEFUNC: - funcode := f.Prog.Functions[arg] - tuple := stack[sp-1].(Tuple) - n := len(tuple) - len(funcode.Freevars) - defaults := tuple[:n:n] - freevars := tuple[n:] - stack[sp-1] = &Function{ - funcode: funcode, - module: fn.module, - defaults: defaults, - freevars: freevars, - } - - case compile.LOAD: - n := int(arg) - module := string(stack[sp-1].(String)) - sp-- - - if thread.Load == nil { - err = fmt.Errorf("load not implemented by this application") - break loop - } - - thread.endProfSpan() - dict, err2 := thread.Load(thread, module) - thread.beginProfSpan() - if err2 != nil { - err = wrappedError{ - msg: fmt.Sprintf("cannot load %s: %v", module, err2), - cause: err2, - } - break loop - } - - for i := 0; i < n; i++ { - from := string(stack[sp-1-i].(String)) - v, ok := dict[from] - if !ok { - err = fmt.Errorf("load: name %s not found in module %s", from, module) - if n := spell.Nearest(from, dict.Keys()); n != "" { - err = fmt.Errorf("%s (did you mean %s?)", err, n) - } - break loop - } - stack[sp-1-i] = v - } - - case compile.SETLOCAL: - locals[arg] = stack[sp-1] - sp-- - - case compile.SETCELL: - x := stack[sp-2] - y := stack[sp-1] - sp -= 2 - y.(*cell).v = x - - case compile.SETGLOBAL: - fn.module.globals[arg] = stack[sp-1] - sp-- - - case compile.LOCAL: - x := locals[arg] - if x == nil { - err = fmt.Errorf("local variable %s referenced before assignment", f.Locals[arg].Name) - break loop - } - stack[sp] = x - sp++ - - case compile.FREE: - stack[sp] = fn.freevars[arg] - sp++ - - case compile.CELL: - x := stack[sp-1] - stack[sp-1] = x.(*cell).v - - case compile.GLOBAL: - x := fn.module.globals[arg] - if x == nil { - err = fmt.Errorf("global variable %s referenced before assignment", f.Prog.Globals[arg].Name) - break loop - } - stack[sp] = x - sp++ - - case compile.PREDECLARED: - name := f.Prog.Names[arg] - x := fn.module.predeclared[name] - if x == nil { - err = fmt.Errorf("internal error: predeclared variable %s is uninitialized", name) - break loop - } - stack[sp] = x - sp++ - - case compile.UNIVERSAL: - stack[sp] = Universe[f.Prog.Names[arg]] - sp++ - - default: - err = fmt.Errorf("unimplemented: %s", op) - break loop - } - } - - // ITERPOP the rest of the iterator stack. - for _, iter := range iterstack { - iter.Done() - } - - fr.locals = nil - - return result, err -} - -type wrappedError struct { - msg string - cause error -} - -func (e wrappedError) Error() string { - return e.msg -} - -// Implements the xerrors.Wrapper interface -// https://godoc.org/golang.org/x/xerrors#Wrapper -func (e wrappedError) Unwrap() error { - return e.cause -} - -// mandatory is a sentinel value used in a function's defaults tuple -// to indicate that a (keyword-only) parameter is mandatory. 
-type mandatory struct{}
-
-func (mandatory) String() string { return "mandatory" }
-func (mandatory) Type() string { return "mandatory" }
-func (mandatory) Freeze() {} // immutable
-func (mandatory) Truth() Bool { return False }
-func (mandatory) Hash() (uint32, error) { return 0, nil }
-
-// A cell is a box containing a Value.
-// Local variables marked as cells hold their value indirectly
-// so that they may be shared by outer and inner nested functions.
-// Cells are always accessed using indirect CELL/SETCELL instructions.
-// The FreeVars tuple contains only cells.
-// The FREE instruction always yields a cell.
-type cell struct{ v Value }
-
-func (c *cell) String() string { return "cell" }
-func (c *cell) Type() string { return "cell" }
-func (c *cell) Freeze() {
-	if c.v != nil {
-		c.v.Freeze()
-	}
-}
-func (c *cell) Truth() Bool { panic("unreachable") }
-func (c *cell) Hash() (uint32, error) { panic("unreachable") }
diff --git a/src/vendor/go.starlark.net/starlark/library.go b/src/vendor/go.starlark.net/starlark/library.go
deleted file mode 100644
index 7a9440ed1..000000000
--- a/src/vendor/go.starlark.net/starlark/library.go
+++ /dev/null
@@ -1,2104 +0,0 @@
-// Copyright 2017 The Bazel Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package starlark
-
-// This file defines the library of built-ins.
-//
-// Built-ins must explicitly check the "frozen" flag before updating
-// mutable types such as lists and dicts.
-
-import (
-	"errors"
-	"fmt"
-	"math/big"
-	"os"
-	"sort"
-	"strconv"
-	"strings"
-	"unicode"
-	"unicode/utf16"
-	"unicode/utf8"
-
-	"go.starlark.net/syntax"
-)
-
-// Universe defines the set of universal built-ins, such as None, True, and len.
-//
-// The Go application may add or remove items from the
-// universe dictionary before Starlark evaluation begins.
-// All values in the dictionary must be immutable.
-// Starlark programs cannot modify the dictionary.
-var Universe StringDict - -func init() { - // https://github.com/google/starlark-go/blob/master/doc/spec.md#built-in-constants-and-functions - Universe = StringDict{ - "None": None, - "True": True, - "False": False, - "any": NewBuiltin("any", any), - "all": NewBuiltin("all", all), - "bool": NewBuiltin("bool", bool_), - "chr": NewBuiltin("chr", chr), - "dict": NewBuiltin("dict", dict), - "dir": NewBuiltin("dir", dir), - "enumerate": NewBuiltin("enumerate", enumerate), - "fail": NewBuiltin("fail", fail), - "float": NewBuiltin("float", float), // requires resolve.AllowFloat - "getattr": NewBuiltin("getattr", getattr), - "hasattr": NewBuiltin("hasattr", hasattr), - "hash": NewBuiltin("hash", hash), - "int": NewBuiltin("int", int_), - "len": NewBuiltin("len", len_), - "list": NewBuiltin("list", list), - "max": NewBuiltin("max", minmax), - "min": NewBuiltin("min", minmax), - "ord": NewBuiltin("ord", ord), - "print": NewBuiltin("print", print), - "range": NewBuiltin("range", range_), - "repr": NewBuiltin("repr", repr), - "reversed": NewBuiltin("reversed", reversed), - "set": NewBuiltin("set", set), // requires resolve.AllowSet - "sorted": NewBuiltin("sorted", sorted), - "str": NewBuiltin("str", str), - "tuple": NewBuiltin("tuple", tuple), - "type": NewBuiltin("type", type_), - "zip": NewBuiltin("zip", zip), - } -} - -// methods of built-in types -// https://github.com/google/starlark-go/blob/master/doc/spec.md#built-in-methods -var ( - dictMethods = map[string]*Builtin{ - "clear": NewBuiltin("clear", dict_clear), - "get": NewBuiltin("get", dict_get), - "items": NewBuiltin("items", dict_items), - "keys": NewBuiltin("keys", dict_keys), - "pop": NewBuiltin("pop", dict_pop), - "popitem": NewBuiltin("popitem", dict_popitem), - "setdefault": NewBuiltin("setdefault", dict_setdefault), - "update": NewBuiltin("update", dict_update), - "values": NewBuiltin("values", dict_values), - } - - listMethods = map[string]*Builtin{ - "append": NewBuiltin("append", list_append), - "clear": NewBuiltin("clear", list_clear), - "extend": NewBuiltin("extend", list_extend), - "index": NewBuiltin("index", list_index), - "insert": NewBuiltin("insert", list_insert), - "pop": NewBuiltin("pop", list_pop), - "remove": NewBuiltin("remove", list_remove), - } - - stringMethods = map[string]*Builtin{ - "capitalize": NewBuiltin("capitalize", string_capitalize), - "codepoint_ords": NewBuiltin("codepoint_ords", string_iterable), - "codepoints": NewBuiltin("codepoints", string_iterable), // sic - "count": NewBuiltin("count", string_count), - "elem_ords": NewBuiltin("elem_ords", string_iterable), - "elems": NewBuiltin("elems", string_iterable), // sic - "endswith": NewBuiltin("endswith", string_startswith), // sic - "find": NewBuiltin("find", string_find), - "format": NewBuiltin("format", string_format), - "index": NewBuiltin("index", string_index), - "isalnum": NewBuiltin("isalnum", string_isalnum), - "isalpha": NewBuiltin("isalpha", string_isalpha), - "isdigit": NewBuiltin("isdigit", string_isdigit), - "islower": NewBuiltin("islower", string_islower), - "isspace": NewBuiltin("isspace", string_isspace), - "istitle": NewBuiltin("istitle", string_istitle), - "isupper": NewBuiltin("isupper", string_isupper), - "join": NewBuiltin("join", string_join), - "lower": NewBuiltin("lower", string_lower), - "lstrip": NewBuiltin("lstrip", string_strip), // sic - "partition": NewBuiltin("partition", string_partition), - "replace": NewBuiltin("replace", string_replace), - "rfind": NewBuiltin("rfind", string_rfind), - "rindex": NewBuiltin("rindex", 
string_rindex), - "rpartition": NewBuiltin("rpartition", string_partition), // sic - "rsplit": NewBuiltin("rsplit", string_split), // sic - "rstrip": NewBuiltin("rstrip", string_strip), // sic - "split": NewBuiltin("split", string_split), - "splitlines": NewBuiltin("splitlines", string_splitlines), - "startswith": NewBuiltin("startswith", string_startswith), - "strip": NewBuiltin("strip", string_strip), - "title": NewBuiltin("title", string_title), - "upper": NewBuiltin("upper", string_upper), - } - - setMethods = map[string]*Builtin{ - "union": NewBuiltin("union", set_union), - } -) - -func builtinAttr(recv Value, name string, methods map[string]*Builtin) (Value, error) { - b := methods[name] - if b == nil { - return nil, nil // no such method - } - return b.BindReceiver(recv), nil -} - -func builtinAttrNames(methods map[string]*Builtin) []string { - names := make([]string, 0, len(methods)) - for name := range methods { - names = append(names, name) - } - sort.Strings(names) - return names -} - -// ---- built-in functions ---- - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#all -func all(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var iterable Iterable - if err := UnpackPositionalArgs("all", args, kwargs, 1, &iterable); err != nil { - return nil, err - } - iter := iterable.Iterate() - defer iter.Done() - var x Value - for iter.Next(&x) { - if !x.Truth() { - return False, nil - } - } - return True, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#any -func any(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var iterable Iterable - if err := UnpackPositionalArgs("any", args, kwargs, 1, &iterable); err != nil { - return nil, err - } - iter := iterable.Iterate() - defer iter.Done() - var x Value - for iter.Next(&x) { - if x.Truth() { - return True, nil - } - } - return False, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#bool -func bool_(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var x Value = False - if err := UnpackPositionalArgs("bool", args, kwargs, 0, &x); err != nil { - return nil, err - } - return x.Truth(), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#chr -func chr(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(kwargs) > 0 { - return nil, fmt.Errorf("chr does not accept keyword arguments") - } - if len(args) != 1 { - return nil, fmt.Errorf("chr: got %d arguments, want 1", len(args)) - } - i, err := AsInt32(args[0]) - if err != nil { - return nil, fmt.Errorf("chr: got %s, want int", args[0].Type()) - } - if i < 0 { - return nil, fmt.Errorf("chr: Unicode code point %d out of range (<0)", i) - } - if i > unicode.MaxRune { - return nil, fmt.Errorf("chr: Unicode code point U+%X out of range (>0x10FFFF)", i) - } - return String(string(i)), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict -func dict(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(args) > 1 { - return nil, fmt.Errorf("dict: got %d arguments, want at most 1", len(args)) - } - dict := new(Dict) - if err := updateDict(dict, args, kwargs); err != nil { - return nil, fmt.Errorf("dict: %v", err) - } - return dict, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dir -func dir(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(kwargs) > 0 { - return nil, fmt.Errorf("dir does not accept 
keyword arguments") - } - if len(args) != 1 { - return nil, fmt.Errorf("dir: got %d arguments, want 1", len(args)) - } - - var names []string - if x, ok := args[0].(HasAttrs); ok { - names = x.AttrNames() - } - sort.Strings(names) - elems := make([]Value, len(names)) - for i, name := range names { - elems[i] = String(name) - } - return NewList(elems), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#enumerate -func enumerate(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var iterable Iterable - var start int - if err := UnpackPositionalArgs("enumerate", args, kwargs, 1, &iterable, &start); err != nil { - return nil, err - } - - iter := iterable.Iterate() - if iter == nil { - return nil, fmt.Errorf("enumerate: got %s, want iterable", iterable.Type()) - } - defer iter.Done() - - var pairs []Value - var x Value - - if n := Len(iterable); n >= 0 { - // common case: known length - pairs = make([]Value, 0, n) - array := make(Tuple, 2*n) // allocate a single backing array - for i := 0; iter.Next(&x); i++ { - pair := array[:2:2] - array = array[2:] - pair[0] = MakeInt(start + i) - pair[1] = x - pairs = append(pairs, pair) - } - } else { - // non-sequence (unknown length) - for i := 0; iter.Next(&x); i++ { - pair := Tuple{MakeInt(start + i), x} - pairs = append(pairs, pair) - } - } - - return NewList(pairs), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#fail -func fail(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - sep := " " - if err := UnpackArgs("fail", nil, kwargs, "sep?", &sep); err != nil { - return nil, err - } - buf := new(strings.Builder) - buf.WriteString("fail: ") - for i, v := range args { - if i > 0 { - buf.WriteString(sep) - } - if s, ok := AsString(v); ok { - buf.WriteString(s) - } else { - writeValue(buf, v, nil) - } - } - - return nil, errors.New(buf.String()) -} - -func float(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(kwargs) > 0 { - return nil, fmt.Errorf("float does not accept keyword arguments") - } - if len(args) == 0 { - return Float(0.0), nil - } - if len(args) != 1 { - return nil, fmt.Errorf("float got %d arguments, wants 1", len(args)) - } - switch x := args[0].(type) { - case Bool: - if x { - return Float(1.0), nil - } else { - return Float(0.0), nil - } - case Int: - return x.Float(), nil - case Float: - return x, nil - case String: - f, err := strconv.ParseFloat(string(x), 64) - if err != nil { - return nil, nameErr(b, err) - } - return Float(f), nil - default: - return nil, fmt.Errorf("float got %s, want number or string", x.Type()) - } -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#getattr -func getattr(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var object, dflt Value - var name string - if err := UnpackPositionalArgs("getattr", args, kwargs, 2, &object, &name, &dflt); err != nil { - return nil, err - } - if object, ok := object.(HasAttrs); ok { - v, err := object.Attr(name) - if err != nil { - // An error could mean the field doesn't exist, - // or it exists but could not be computed. 
- if dflt != nil { - return dflt, nil - } - return nil, nameErr(b, err) - } - if v != nil { - return v, nil - } - // (nil, nil) => no such field - } - if dflt != nil { - return dflt, nil - } - return nil, fmt.Errorf("getattr: %s has no .%s field or method", object.Type(), name) -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#hasattr -func hasattr(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var object Value - var name string - if err := UnpackPositionalArgs("hasattr", args, kwargs, 2, &object, &name); err != nil { - return nil, err - } - if object, ok := object.(HasAttrs); ok { - v, err := object.Attr(name) - if err == nil { - return Bool(v != nil), nil - } - - // An error does not conclusively indicate presence or - // absence of a field: it could occur while computing - // the value of a present attribute, or it could be a - // "no such attribute" error with details. - for _, x := range object.AttrNames() { - if x == name { - return True, nil - } - } - } - return False, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#hash -func hash(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var s string - if err := UnpackPositionalArgs("hash", args, kwargs, 1, &s); err != nil { - return nil, err - } - - // The Starlark spec requires that the hash function be - // deterministic across all runs, motivated by the need - // for reproducibility of builds. Thus we cannot call - // String.Hash, which uses the fastest implementation - // available, because as varies across process restarts, - // and may evolve with the implementation. - - return MakeInt(int(javaStringHash(s))), nil -} - -// javaStringHash returns the same hash as would be produced by -// java.lang.String.hashCode. This requires transcoding the string to -// UTF-16; transcoding may introduce Unicode replacement characters -// U+FFFD if s does not contain valid UTF-8. -func javaStringHash(s string) (h int32) { - for _, r := range s { - if utf16.IsSurrogate(r) { - c1, c2 := utf16.EncodeRune(r) - h = 31*h + c1 - h = 31*h + c2 - } else { - h = 31*h + r // r may be U+FFFD - } - } - return h -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#int -func int_(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var x Value = zero - var base Value - if err := UnpackArgs("int", args, kwargs, "x", &x, "base?", &base); err != nil { - return nil, err - } - - // "If x is not a number or base is given, x must be a string." - if s, ok := AsString(x); ok { - b := 10 - if base != nil { - var err error - b, err = AsInt32(base) - if err != nil || b != 0 && (b < 2 || b > 36) { - return nil, fmt.Errorf("int: base must be an integer >= 2 && <= 36") - } - } - - orig := s // save original for error message - - // remove sign - var neg bool - if s != "" { - if s[0] == '+' { - s = s[1:] - } else if s[0] == '-' { - neg = true - s = s[1:] - } - } - - // remove base prefix - baseprefix := 0 - if len(s) > 1 && s[0] == '0' { - if len(s) > 2 { - switch s[1] { - case 'o', 'O': - s = s[2:] - baseprefix = 8 - case 'x', 'X': - s = s[2:] - baseprefix = 16 - case 'b', 'B': - s = s[2:] - baseprefix = 2 - } - } - - // For automatic base detection, - // a string starting with zero - // must be all zeros. - // Thus we reject int("0755", 0). 
- if baseprefix == 0 && b == 0 { - for i := 1; i < len(s); i++ { - if s[i] != '0' { - goto invalid - } - } - return zero, nil - } - - if b != 0 && baseprefix != 0 && baseprefix != b { - // Explicit base doesn't match prefix, - // e.g. int("0o755", 16). - goto invalid - } - } - - // select base - if b == 0 { - if baseprefix != 0 { - b = baseprefix - } else { - b = 10 - } - } - - // we explicitly handled sign above. - // if a sign remains, it is invalid. - if s != "" && (s[0] == '-' || s[0] == '+') { - goto invalid - } - - // s has no sign or base prefix. - // - // int(x) permits arbitrary precision, unlike the scanner. - if i, ok := new(big.Int).SetString(s, b); ok { - res := MakeBigInt(i) - if neg { - res = zero.Sub(res) - } - return res, nil - } - - invalid: - return nil, fmt.Errorf("int: invalid literal with base %d: %s", b, orig) - } - - if base != nil { - return nil, fmt.Errorf("int: can't convert non-string with explicit base") - } - - if b, ok := x.(Bool); ok { - if b { - return one, nil - } else { - return zero, nil - } - } - - i, err := NumberToInt(x) - if err != nil { - return nil, fmt.Errorf("int: %s", err) - } - return i, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#len -func len_(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var x Value - if err := UnpackPositionalArgs("len", args, kwargs, 1, &x); err != nil { - return nil, err - } - len := Len(x) - if len < 0 { - return nil, fmt.Errorf("len: value of type %s has no len", x.Type()) - } - return MakeInt(len), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#list -func list(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var iterable Iterable - if err := UnpackPositionalArgs("list", args, kwargs, 0, &iterable); err != nil { - return nil, err - } - var elems []Value - if iterable != nil { - iter := iterable.Iterate() - defer iter.Done() - if n := Len(iterable); n > 0 { - elems = make([]Value, 0, n) // preallocate if length known - } - var x Value - for iter.Next(&x) { - elems = append(elems, x) - } - } - return NewList(elems), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#min -func minmax(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(args) == 0 { - return nil, fmt.Errorf("%s requires at least one positional argument", b.Name()) - } - var keyFunc Callable - if err := UnpackArgs(b.Name(), nil, kwargs, "key?", &keyFunc); err != nil { - return nil, err - } - var op syntax.Token - if b.Name() == "max" { - op = syntax.GT - } else { - op = syntax.LT - } - var iterable Value - if len(args) == 1 { - iterable = args[0] - } else { - iterable = args - } - iter := Iterate(iterable) - if iter == nil { - return nil, fmt.Errorf("%s: %s value is not iterable", b.Name(), iterable.Type()) - } - defer iter.Done() - var extremum Value - if !iter.Next(&extremum) { - return nil, nameErr(b, "argument is an empty sequence") - } - - var extremeKey Value - var keyargs Tuple - if keyFunc == nil { - extremeKey = extremum - } else { - keyargs = Tuple{extremum} - res, err := Call(thread, keyFunc, keyargs, nil) - if err != nil { - return nil, err // to preserve backtrace, don't modify error - } - extremeKey = res - } - - var x Value - for iter.Next(&x) { - var key Value - if keyFunc == nil { - key = x - } else { - keyargs[0] = x - res, err := Call(thread, keyFunc, keyargs, nil) - if err != nil { - return nil, err // to preserve backtrace, don't modify error - } - key = res - } - - if ok, 
err := Compare(op, key, extremeKey); err != nil { - return nil, nameErr(b, err) - } else if ok { - extremum = x - extremeKey = key - } - } - return extremum, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#ord -func ord(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(kwargs) > 0 { - return nil, fmt.Errorf("ord does not accept keyword arguments") - } - if len(args) != 1 { - return nil, fmt.Errorf("ord: got %d arguments, want 1", len(args)) - } - s, ok := AsString(args[0]) - if !ok { - return nil, fmt.Errorf("ord: got %s, want string", args[0].Type()) - } - r, sz := utf8.DecodeRuneInString(s) - if sz == 0 || sz != len(s) { - n := utf8.RuneCountInString(s) - return nil, fmt.Errorf("ord: string encodes %d Unicode code points, want 1", n) - } - return MakeInt(int(r)), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#print -func print(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - sep := " " - if err := UnpackArgs("print", nil, kwargs, "sep?", &sep); err != nil { - return nil, err - } - buf := new(strings.Builder) - for i, v := range args { - if i > 0 { - buf.WriteString(sep) - } - if s, ok := AsString(v); ok { - buf.WriteString(s) - } else { - writeValue(buf, v, nil) - } - } - - s := buf.String() - if thread.Print != nil { - thread.Print(thread, s) - } else { - fmt.Fprintln(os.Stderr, s) - } - return None, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#range -func range_(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var start, stop, step int - step = 1 - if err := UnpackPositionalArgs("range", args, kwargs, 1, &start, &stop, &step); err != nil { - return nil, err - } - - // TODO(adonovan): analyze overflow/underflows cases for 32-bit implementations. - if len(args) == 1 { - // range(stop) - start, stop = 0, start - } - if step == 0 { - // we were given range(start, stop, 0) - return nil, nameErr(b, "step argument must not be zero") - } - - return rangeValue{start: start, stop: stop, step: step, len: rangeLen(start, stop, step)}, nil -} - -// A rangeValue is a comparable, immutable, indexable sequence of integers -// defined by the three parameters to a range(...) call. -// Invariant: step != 0. -type rangeValue struct{ start, stop, step, len int } - -var ( - _ Indexable = rangeValue{} - _ Sequence = rangeValue{} - _ Comparable = rangeValue{} - _ Sliceable = rangeValue{} -) - -func (r rangeValue) Len() int { return r.len } -func (r rangeValue) Index(i int) Value { return MakeInt(r.start + i*r.step) } -func (r rangeValue) Iterate() Iterator { return &rangeIterator{r, 0} } - -// rangeLen calculates the length of a range with the provided start, stop, and step. -// caller must ensure that step is non-zero. 
-func rangeLen(start, stop, step int) int { - switch { - case step > 0: - if stop > start { - return (stop-1-start)/step + 1 - } - case step < 0: - if start > stop { - return (start-1-stop)/-step + 1 - } - default: - panic("rangeLen: zero step") - } - return 0 -} - -func (r rangeValue) Slice(start, end, step int) Value { - newStart := r.start + r.step*start - newStop := r.start + r.step*end - newStep := r.step * step - return rangeValue{ - start: newStart, - stop: newStop, - step: newStep, - len: rangeLen(newStart, newStop, newStep), - } -} - -func (r rangeValue) Freeze() {} // immutable -func (r rangeValue) String() string { - if r.step != 1 { - return fmt.Sprintf("range(%d, %d, %d)", r.start, r.stop, r.step) - } else if r.start != 0 { - return fmt.Sprintf("range(%d, %d)", r.start, r.stop) - } else { - return fmt.Sprintf("range(%d)", r.stop) - } -} -func (r rangeValue) Type() string { return "range" } -func (r rangeValue) Truth() Bool { return r.len > 0 } -func (r rangeValue) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable: range") } - -func (x rangeValue) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { - y := y_.(rangeValue) - switch op { - case syntax.EQL: - return rangeEqual(x, y), nil - case syntax.NEQ: - return !rangeEqual(x, y), nil - default: - return false, fmt.Errorf("%s %s %s not implemented", x.Type(), op, y.Type()) - } -} - -func rangeEqual(x, y rangeValue) bool { - // Two ranges compare equal if they denote the same sequence. - if x.len != y.len { - return false // sequences differ in length - } - if x.len == 0 { - return true // both sequences are empty - } - if x.start != y.start { - return false // first element differs - } - return x.len == 1 || x.step == y.step -} - -func (r rangeValue) contains(x Int) bool { - x32, err := AsInt32(x) - if err != nil { - return false // out of range - } - delta := x32 - r.start - quo, rem := delta/r.step, delta%r.step - return rem == 0 && 0 <= quo && quo < r.len -} - -type rangeIterator struct { - r rangeValue - i int -} - -func (it *rangeIterator) Next(p *Value) bool { - if it.i < it.r.len { - *p = it.r.Index(it.i) - it.i++ - return true - } - return false -} -func (*rangeIterator) Done() {} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#repr -func repr(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var x Value - if err := UnpackPositionalArgs("repr", args, kwargs, 1, &x); err != nil { - return nil, err - } - return String(x.String()), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#reversed -func reversed(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var iterable Iterable - if err := UnpackPositionalArgs("reversed", args, kwargs, 1, &iterable); err != nil { - return nil, err - } - iter := iterable.Iterate() - defer iter.Done() - var elems []Value - if n := Len(args[0]); n >= 0 { - elems = make([]Value, 0, n) // preallocate if length known - } - var x Value - for iter.Next(&x) { - elems = append(elems, x) - } - n := len(elems) - for i := 0; i < n>>1; i++ { - elems[i], elems[n-1-i] = elems[n-1-i], elems[i] - } - return NewList(elems), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#set -func set(thread *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var iterable Iterable - if err := UnpackPositionalArgs("set", args, kwargs, 0, &iterable); err != nil { - return nil, err - } - set := new(Set) - if iterable != nil { - iter := iterable.Iterate() - defer 
iter.Done() - var x Value - for iter.Next(&x) { - if err := set.Insert(x); err != nil { - return nil, nameErr(b, err) - } - } - } - return set, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#sorted -func sorted(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - // Oddly, Python's sorted permits all arguments to be positional, thus so do we. - var iterable Iterable - var key Callable - var reverse bool - if err := UnpackArgs("sorted", args, kwargs, - "iterable", &iterable, - "key?", &key, - "reverse?", &reverse, - ); err != nil { - return nil, err - } - - iter := iterable.Iterate() - defer iter.Done() - var values []Value - if n := Len(iterable); n > 0 { - values = make(Tuple, 0, n) // preallocate if length is known - } - var x Value - for iter.Next(&x) { - values = append(values, x) - } - - // Derive keys from values by applying key function. - var keys []Value - if key != nil { - keys = make([]Value, len(values)) - for i, v := range values { - k, err := Call(thread, key, Tuple{v}, nil) - if err != nil { - return nil, err // to preserve backtrace, don't modify error - } - keys[i] = k - } - } - - slice := &sortSlice{keys: keys, values: values} - if reverse { - sort.Stable(sort.Reverse(slice)) - } else { - sort.Stable(slice) - } - return NewList(slice.values), slice.err -} - -type sortSlice struct { - keys []Value // nil => values[i] is key - values []Value - err error -} - -func (s *sortSlice) Len() int { return len(s.values) } -func (s *sortSlice) Less(i, j int) bool { - keys := s.keys - if s.keys == nil { - keys = s.values - } - ok, err := Compare(syntax.LT, keys[i], keys[j]) - if err != nil { - s.err = err - } - return ok -} -func (s *sortSlice) Swap(i, j int) { - if s.keys != nil { - s.keys[i], s.keys[j] = s.keys[j], s.keys[i] - } - s.values[i], s.values[j] = s.values[j], s.values[i] -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#str -func str(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(kwargs) > 0 { - return nil, fmt.Errorf("str does not accept keyword arguments") - } - if len(args) != 1 { - return nil, fmt.Errorf("str: got %d arguments, want exactly 1", len(args)) - } - x := args[0] - if _, ok := AsString(x); !ok { - x = String(x.String()) - } - return x, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#tuple -func tuple(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var iterable Iterable - if err := UnpackPositionalArgs("tuple", args, kwargs, 0, &iterable); err != nil { - return nil, err - } - if len(args) == 0 { - return Tuple(nil), nil - } - iter := iterable.Iterate() - defer iter.Done() - var elems Tuple - if n := Len(iterable); n > 0 { - elems = make(Tuple, 0, n) // preallocate if length is known - } - var x Value - for iter.Next(&x) { - elems = append(elems, x) - } - return elems, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#type -func type_(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(kwargs) > 0 { - return nil, fmt.Errorf("type does not accept keyword arguments") - } - if len(args) != 1 { - return nil, fmt.Errorf("type: got %d arguments, want exactly 1", len(args)) - } - return String(args[0].Type()), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#zip -func zip(thread *Thread, _ *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(kwargs) > 0 { - return nil, fmt.Errorf("zip does not accept keyword 
arguments") - } - rows, cols := 0, len(args) - iters := make([]Iterator, cols) - defer func() { - for _, iter := range iters { - if iter != nil { - iter.Done() - } - } - }() - for i, seq := range args { - it := Iterate(seq) - if it == nil { - return nil, fmt.Errorf("zip: argument #%d is not iterable: %s", i+1, seq.Type()) - } - iters[i] = it - n := Len(seq) - if i == 0 || n < rows { - rows = n // possibly -1 - } - } - var result []Value - if rows >= 0 { - // length known - result = make([]Value, rows) - array := make(Tuple, cols*rows) // allocate a single backing array - for i := 0; i < rows; i++ { - tuple := array[:cols:cols] - array = array[cols:] - for j, iter := range iters { - iter.Next(&tuple[j]) - } - result[i] = tuple - } - } else { - // length not known - outer: - for { - tuple := make(Tuple, cols) - for i, iter := range iters { - if !iter.Next(&tuple[i]) { - break outer - } - } - result = append(result, tuple) - } - } - return NewList(result), nil -} - -// ---- methods of built-in types --- - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·get -func dict_get(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var key, dflt Value - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &key, &dflt); err != nil { - return nil, err - } - if v, ok, err := b.Receiver().(*Dict).Get(key); err != nil { - return nil, nameErr(b, err) - } else if ok { - return v, nil - } else if dflt != nil { - return dflt, nil - } - return None, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·clear -func dict_clear(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - return None, b.Receiver().(*Dict).Clear() -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·items -func dict_items(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - items := b.Receiver().(*Dict).Items() - res := make([]Value, len(items)) - for i, item := range items { - res[i] = item // convert [2]Value to Value - } - return NewList(res), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·keys -func dict_keys(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - return NewList(b.Receiver().(*Dict).Keys()), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·pop -func dict_pop(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var k, d Value - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &k, &d); err != nil { - return nil, err - } - if v, found, err := b.Receiver().(*Dict).Delete(k); err != nil { - return nil, nameErr(b, err) // dict is frozen or key is unhashable - } else if found { - return v, nil - } else if d != nil { - return d, nil - } - return nil, nameErr(b, "missing key") -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·popitem -func dict_popitem(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - recv := b.Receiver().(*Dict) - k, ok := recv.ht.first() - if !ok { - return nil, nameErr(b, "empty dict") - } - v, _, err := recv.Delete(k) - if err != nil { - return nil, 
nameErr(b, err) // dict is frozen - } - return Tuple{k, v}, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·setdefault -func dict_setdefault(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var key, dflt Value = nil, None - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &key, &dflt); err != nil { - return nil, err - } - dict := b.Receiver().(*Dict) - if v, ok, err := dict.Get(key); err != nil { - return nil, nameErr(b, err) - } else if ok { - return v, nil - } else if err := dict.SetKey(key, dflt); err != nil { - return nil, nameErr(b, err) - } else { - return dflt, nil - } -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·update -func dict_update(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if len(args) > 1 { - return nil, fmt.Errorf("update: got %d arguments, want at most 1", len(args)) - } - if err := updateDict(b.Receiver().(*Dict), args, kwargs); err != nil { - return nil, fmt.Errorf("update: %v", err) - } - return None, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·update -func dict_values(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - items := b.Receiver().(*Dict).Items() - res := make([]Value, len(items)) - for i, item := range items { - res[i] = item[1] - } - return NewList(res), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·append -func list_append(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var object Value - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &object); err != nil { - return nil, err - } - recv := b.Receiver().(*List) - if err := recv.checkMutable("append to"); err != nil { - return nil, nameErr(b, err) - } - recv.elems = append(recv.elems, object) - return None, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·clear -func list_clear(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - if err := b.Receiver().(*List).Clear(); err != nil { - return nil, nameErr(b, err) - } - return None, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·extend -func list_extend(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := b.Receiver().(*List) - var iterable Iterable - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &iterable); err != nil { - return nil, err - } - if err := recv.checkMutable("extend"); err != nil { - return nil, nameErr(b, err) - } - listExtend(recv, iterable) - return None, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·index -func list_index(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var value, start_, end_ Value - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &value, &start_, &end_); err != nil { - return nil, err - } - - recv := b.Receiver().(*List) - start, end, err := indices(start_, end_, recv.Len()) - if err != nil { - return nil, nameErr(b, err) - } - - for i := start; i < end; i++ { - if eq, err := Equal(recv.elems[i], value); err != nil { - return nil, nameErr(b, err) - } else if eq { - return MakeInt(i), nil - } - } - return nil, nameErr(b, "value not in list") -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·insert 
-func list_insert(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := b.Receiver().(*List) - var index int - var object Value - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 2, &index, &object); err != nil { - return nil, err - } - if err := recv.checkMutable("insert into"); err != nil { - return nil, nameErr(b, err) - } - - if index < 0 { - index += recv.Len() - } - - if index >= recv.Len() { - // end - recv.elems = append(recv.elems, object) - } else { - if index < 0 { - index = 0 // start - } - recv.elems = append(recv.elems, nil) - copy(recv.elems[index+1:], recv.elems[index:]) // slide up one - recv.elems[index] = object - } - return None, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·remove -func list_remove(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := b.Receiver().(*List) - var value Value - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &value); err != nil { - return nil, err - } - if err := recv.checkMutable("remove from"); err != nil { - return nil, nameErr(b, err) - } - for i, elem := range recv.elems { - if eq, err := Equal(elem, value); err != nil { - return nil, fmt.Errorf("remove: %v", err) - } else if eq { - recv.elems = append(recv.elems[:i], recv.elems[i+1:]...) - return None, nil - } - } - return nil, fmt.Errorf("remove: element not found") -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#list·pop -func list_pop(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := b.Receiver() - list := recv.(*List) - n := list.Len() - i := n - 1 - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0, &i); err != nil { - return nil, err - } - origI := i - if i < 0 { - i += n - } - if i < 0 || i >= n { - return nil, nameErr(b, outOfRange(origI, n, list)) - } - if err := list.checkMutable("pop from"); err != nil { - return nil, nameErr(b, err) - } - res := list.elems[i] - list.elems = append(list.elems[:i], list.elems[i+1:]...) - return res, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·capitalize -func string_capitalize(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - s := string(b.Receiver().(String)) - res := new(strings.Builder) - res.Grow(len(s)) - for i, r := range s { - if i == 0 { - r = unicode.ToTitle(r) - } else { - r = unicode.ToLower(r) - } - res.WriteRune(r) - } - return String(res.String()), nil -} - -// string_iterable returns an unspecified iterable value whose iterator yields: -// - elems: successive 1-byte substrings -// - codepoints: successive substrings that encode a single Unicode code point. 
-// - elem_ords: numeric values of successive bytes -// - codepoint_ords: numeric values of successive Unicode code points -func string_iterable(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - return stringIterable{ - s: b.Receiver().(String), - ords: b.Name()[len(b.Name())-2] == 'd', - codepoints: b.Name()[0] == 'c', - }, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·count -func string_count(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var sub string - var start_, end_ Value - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &sub, &start_, &end_); err != nil { - return nil, err - } - - recv := string(b.Receiver().(String)) - start, end, err := indices(start_, end_, len(recv)) - if err != nil { - return nil, nameErr(b, err) - } - - var slice string - if start < end { - slice = recv[start:end] - } - return MakeInt(strings.Count(slice, sub)), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·isalnum -func string_isalnum(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - recv := string(b.Receiver().(String)) - for _, r := range recv { - if !unicode.IsLetter(r) && !unicode.IsDigit(r) { - return False, nil - } - } - return Bool(recv != ""), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·isalpha -func string_isalpha(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - recv := string(b.Receiver().(String)) - for _, r := range recv { - if !unicode.IsLetter(r) { - return False, nil - } - } - return Bool(recv != ""), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·isdigit -func string_isdigit(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - recv := string(b.Receiver().(String)) - for _, r := range recv { - if !unicode.IsDigit(r) { - return False, nil - } - } - return Bool(recv != ""), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·islower -func string_islower(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - recv := string(b.Receiver().(String)) - return Bool(isCasedString(recv) && recv == strings.ToLower(recv)), nil -} - -// isCasedString reports whether its argument contains any cased code points. -func isCasedString(s string) bool { - for _, r := range s { - if isCasedRune(r) { - return true - } - } - return false -} - -func isCasedRune(r rune) bool { - // It's unclear what the correct behavior is for a rune such as 'ffi', - // a lowercase letter with no upper or title case and no SimpleFold. 
- return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || unicode.SimpleFold(r) != r -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·isspace -func string_isspace(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - recv := string(b.Receiver().(String)) - for _, r := range recv { - if !unicode.IsSpace(r) { - return False, nil - } - } - return Bool(recv != ""), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·istitle -func string_istitle(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - recv := string(b.Receiver().(String)) - - // Python semantics differ from x==strings.{To,}Title(x) in Go: - // "uppercase characters may only follow uncased characters and - // lowercase characters only cased ones." - var cased, prevCased bool - for _, r := range recv { - if 'A' <= r && r <= 'Z' || unicode.IsTitle(r) { // e.g. "Ç…" - if prevCased { - return False, nil - } - prevCased = true - cased = true - } else if unicode.IsLower(r) { - if !prevCased { - return False, nil - } - prevCased = true - cased = true - } else if unicode.IsUpper(r) { - return False, nil - } else { - prevCased = false - } - } - return Bool(cased), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·isupper -func string_isupper(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - recv := string(b.Receiver().(String)) - return Bool(isCasedString(recv) && recv == strings.ToUpper(recv)), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·find -func string_find(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - return string_find_impl(b, args, kwargs, true, false) -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·format -func string_format(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - format := string(b.Receiver().(String)) - var auto, manual bool // kinds of positional indexing used - buf := new(strings.Builder) - index := 0 - for { - literal := format - i := strings.IndexByte(format, '{') - if i >= 0 { - literal = format[:i] - } - - // Replace "}}" with "}" in non-field portion, rejecting a lone '}'. 
- for { - j := strings.IndexByte(literal, '}') - if j < 0 { - buf.WriteString(literal) - break - } - if len(literal) == j+1 || literal[j+1] != '}' { - return nil, fmt.Errorf("format: single '}' in format") - } - buf.WriteString(literal[:j+1]) - literal = literal[j+2:] - } - - if i < 0 { - break // end of format string - } - - if i+1 < len(format) && format[i+1] == '{' { - // "{{" means a literal '{' - buf.WriteByte('{') - format = format[i+2:] - continue - } - - format = format[i+1:] - i = strings.IndexByte(format, '}') - if i < 0 { - return nil, fmt.Errorf("format: unmatched '{' in format") - } - - var arg Value - conv := "s" - var spec string - - field := format[:i] - format = format[i+1:] - - var name string - if i := strings.IndexByte(field, '!'); i < 0 { - // "name" or "name:spec" - if i := strings.IndexByte(field, ':'); i < 0 { - name = field - } else { - name = field[:i] - spec = field[i+1:] - } - } else { - // "name!conv" or "name!conv:spec" - name = field[:i] - field = field[i+1:] - // "conv" or "conv:spec" - if i := strings.IndexByte(field, ':'); i < 0 { - conv = field - } else { - conv = field[:i] - spec = field[i+1:] - } - } - - if name == "" { - // "{}": automatic indexing - if manual { - return nil, fmt.Errorf("format: cannot switch from manual field specification to automatic field numbering") - } - auto = true - if index >= len(args) { - return nil, fmt.Errorf("format: tuple index out of range") - } - arg = args[index] - index++ - } else if num, ok := decimal(name); ok { - // positional argument - if auto { - return nil, fmt.Errorf("format: cannot switch from automatic field numbering to manual field specification") - } - manual = true - if num >= len(args) { - return nil, fmt.Errorf("format: tuple index out of range") - } else { - arg = args[num] - } - } else { - // keyword argument - for _, kv := range kwargs { - if string(kv[0].(String)) == name { - arg = kv[1] - break - } - } - if arg == nil { - // Starlark does not support Python's x.y or a[i] syntaxes, - // or nested use of {...}. - if strings.Contains(name, ".") { - return nil, fmt.Errorf("format: attribute syntax x.y is not supported in replacement fields: %s", name) - } - if strings.Contains(name, "[") { - return nil, fmt.Errorf("format: element syntax a[i] is not supported in replacement fields: %s", name) - } - if strings.Contains(name, "{") { - return nil, fmt.Errorf("format: nested replacement fields not supported") - } - return nil, fmt.Errorf("format: keyword %s not found", name) - } - } - - if spec != "" { - // Starlark does not support Python's format_spec features. - return nil, fmt.Errorf("format spec features not supported in replacement fields: %s", spec) - } - - switch conv { - case "s": - if str, ok := AsString(arg); ok { - buf.WriteString(str) - } else { - writeValue(buf, arg, nil) - } - case "r": - writeValue(buf, arg, nil) - default: - return nil, fmt.Errorf("format: unknown conversion %q", conv) - } - } - return String(buf.String()), nil -} - -// decimal interprets s as a sequence of decimal digits. 
-func decimal(s string) (x int, ok bool) { - n := len(s) - for i := 0; i < n; i++ { - digit := s[i] - '0' - if digit > 9 { - return 0, false - } - x = x*10 + int(digit) - if x < 0 { - return 0, false // underflow - } - } - return x, true -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·index -func string_index(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - return string_find_impl(b, args, kwargs, false, false) -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·join -func string_join(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := string(b.Receiver().(String)) - var iterable Iterable - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &iterable); err != nil { - return nil, err - } - iter := iterable.Iterate() - defer iter.Done() - buf := new(strings.Builder) - var x Value - for i := 0; iter.Next(&x); i++ { - if i > 0 { - buf.WriteString(recv) - } - s, ok := AsString(x) - if !ok { - return nil, fmt.Errorf("join: in list, want string, got %s", x.Type()) - } - buf.WriteString(s) - } - return String(buf.String()), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·lower -func string_lower(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - return String(strings.ToLower(string(b.Receiver().(String)))), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·partition -func string_partition(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := string(b.Receiver().(String)) - var sep string - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &sep); err != nil { - return nil, err - } - if sep == "" { - return nil, nameErr(b, "empty separator") - } - var i int - if b.Name()[0] == 'p' { - i = strings.Index(recv, sep) // partition - } else { - i = strings.LastIndex(recv, sep) // rpartition - } - tuple := make(Tuple, 0, 3) - if i < 0 { - if b.Name()[0] == 'p' { - tuple = append(tuple, String(recv), String(""), String("")) - } else { - tuple = append(tuple, String(""), String(""), String(recv)) - } - } else { - tuple = append(tuple, String(recv[:i]), String(sep), String(recv[i+len(sep):])) - } - return tuple, nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·replace -func string_replace(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := string(b.Receiver().(String)) - var old, new string - count := -1 - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 2, &old, &new, &count); err != nil { - return nil, err - } - return String(strings.Replace(recv, old, new, count)), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·rfind -func string_rfind(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - return string_find_impl(b, args, kwargs, true, true) -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·rindex -func string_rindex(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - return string_find_impl(b, args, kwargs, false, true) -} - -// https://github.com/google/starlark-go/starlark/blob/master/doc/spec.md#string·startswith -// https://github.com/google/starlark-go/starlark/blob/master/doc/spec.md#string·endswith -func string_startswith(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var x Value - var start, end 
Value = None, None - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &x, &start, &end); err != nil { - return nil, err - } - - // compute effective substring. - s := string(b.Receiver().(String)) - if start, end, err := indices(start, end, len(s)); err != nil { - return nil, nameErr(b, err) - } else { - if end < start { - end = start // => empty result - } - s = s[start:end] - } - - f := strings.HasPrefix - if b.Name()[0] == 'e' { // endswith - f = strings.HasSuffix - } - - switch x := x.(type) { - case Tuple: - for i, x := range x { - prefix, ok := AsString(x) - if !ok { - return nil, fmt.Errorf("%s: want string, got %s, for element %d", - b.Name(), x.Type(), i) - } - if f(s, prefix) { - return True, nil - } - } - return False, nil - case String: - return Bool(f(s, string(x))), nil - } - return nil, fmt.Errorf("%s: got %s, want string or tuple of string", b.Name(), x.Type()) -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·strip -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·lstrip -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·rstrip -func string_strip(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var chars string - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0, &chars); err != nil { - return nil, err - } - recv := string(b.Receiver().(String)) - var s string - switch b.Name()[0] { - case 's': // strip - if chars != "" { - s = strings.Trim(recv, chars) - } else { - s = strings.TrimSpace(recv) - } - case 'l': // lstrip - if chars != "" { - s = strings.TrimLeft(recv, chars) - } else { - s = strings.TrimLeftFunc(recv, unicode.IsSpace) - } - case 'r': // rstrip - if chars != "" { - s = strings.TrimRight(recv, chars) - } else { - s = strings.TrimRightFunc(recv, unicode.IsSpace) - } - } - return String(s), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·title -func string_title(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - - s := string(b.Receiver().(String)) - - // Python semantics differ from x==strings.{To,}Title(x) in Go: - // "uppercase characters may only follow uncased characters and - // lowercase characters only cased ones." 
- buf := new(strings.Builder) - buf.Grow(len(s)) - var prevCased bool - for _, r := range s { - if prevCased { - r = unicode.ToLower(r) - } else { - r = unicode.ToTitle(r) - } - prevCased = isCasedRune(r) - buf.WriteRune(r) - } - return String(buf.String()), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·upper -func string_upper(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { - return nil, err - } - return String(strings.ToUpper(string(b.Receiver().(String)))), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·split -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·rsplit -func string_split(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - recv := string(b.Receiver().(String)) - var sep_ Value - maxsplit := -1 - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0, &sep_, &maxsplit); err != nil { - return nil, err - } - - var res []string - - if sep_ == nil || sep_ == None { - // special case: split on whitespace - if maxsplit < 0 { - res = strings.Fields(recv) - } else if b.Name() == "split" { - res = splitspace(recv, maxsplit) - } else { // rsplit - res = rsplitspace(recv, maxsplit) - } - - } else if sep, ok := AsString(sep_); ok { - if sep == "" { - return nil, fmt.Errorf("split: empty separator") - } - // usual case: split on non-empty separator - if maxsplit < 0 { - res = strings.Split(recv, sep) - } else if b.Name() == "split" { - res = strings.SplitN(recv, sep, maxsplit+1) - } else { // rsplit - res = strings.Split(recv, sep) - if excess := len(res) - maxsplit; excess > 0 { - res[0] = strings.Join(res[:excess], sep) - res = append(res[:1], res[excess:]...) - } - } - - } else { - return nil, fmt.Errorf("split: got %s for separator, want string", sep_.Type()) - } - - list := make([]Value, len(res)) - for i, x := range res { - list[i] = String(x) - } - return NewList(list), nil -} - -// Precondition: max >= 0. -func rsplitspace(s string, max int) []string { - res := make([]string, 0, max+1) - end := -1 // index of field end, or -1 in a region of spaces. - for i := len(s); i > 0; { - r, sz := utf8.DecodeLastRuneInString(s[:i]) - if unicode.IsSpace(r) { - if end >= 0 { - if len(res) == max { - break // let this field run to the start - } - res = append(res, s[i:end]) - end = -1 - } - } else if end < 0 { - end = i - } - i -= sz - } - if end >= 0 { - res = append(res, s[:end]) - } - - resLen := len(res) - for i := 0; i < resLen/2; i++ { - res[i], res[resLen-1-i] = res[resLen-1-i], res[i] - } - - return res -} - -// Precondition: max >= 0. -func splitspace(s string, max int) []string { - var res []string - start := -1 // index of field start, or -1 in a region of spaces - for i, r := range s { - if unicode.IsSpace(r) { - if start >= 0 { - if len(res) == max { - break // let this field run to the end - } - res = append(res, s[start:i]) - start = -1 - } - } else if start == -1 { - start = i - } - } - if start >= 0 { - res = append(res, s[start:]) - } - return res -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#string·splitlines -func string_splitlines(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var keepends bool - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0, &keepends); err != nil { - return nil, err - } - var lines []string - if s := string(b.Receiver().(String)); s != "" { - // TODO(adonovan): handle CRLF correctly. 
- if keepends { - lines = strings.SplitAfter(s, "\n") - } else { - lines = strings.Split(s, "\n") - } - if strings.HasSuffix(s, "\n") { - lines = lines[:len(lines)-1] - } - } - list := make([]Value, len(lines)) - for i, x := range lines { - list[i] = String(x) - } - return NewList(list), nil -} - -// https://github.com/google/starlark-go/blob/master/doc/spec.md#set·union. -func set_union(_ *Thread, b *Builtin, args Tuple, kwargs []Tuple) (Value, error) { - var iterable Iterable - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 0, &iterable); err != nil { - return nil, err - } - iter := iterable.Iterate() - defer iter.Done() - union, err := b.Receiver().(*Set).Union(iter) - if err != nil { - return nil, nameErr(b, err) - } - return union, nil -} - -// Common implementation of string_{r}{find,index}. -func string_find_impl(b *Builtin, args Tuple, kwargs []Tuple, allowError, last bool) (Value, error) { - var sub string - var start_, end_ Value - if err := UnpackPositionalArgs(b.Name(), args, kwargs, 1, &sub, &start_, &end_); err != nil { - return nil, err - } - - s := string(b.Receiver().(String)) - start, end, err := indices(start_, end_, len(s)) - if err != nil { - return nil, nameErr(b, err) - } - var slice string - if start < end { - slice = s[start:end] - } - - var i int - if last { - i = strings.LastIndex(slice, sub) - } else { - i = strings.Index(slice, sub) - } - if i < 0 { - if !allowError { - return nil, nameErr(b, "substring not found") - } - return MakeInt(-1), nil - } - return MakeInt(i + start), nil -} - -// Common implementation of builtin dict function and dict.update method. -// Precondition: len(updates) == 0 or 1. -func updateDict(dict *Dict, updates Tuple, kwargs []Tuple) error { - if len(updates) == 1 { - switch updates := updates[0].(type) { - case IterableMapping: - // Iterate over dict's key/value pairs, not just keys. - for _, item := range updates.Items() { - if err := dict.SetKey(item[0], item[1]); err != nil { - return err // dict is frozen - } - } - default: - // all other sequences - iter := Iterate(updates) - if iter == nil { - return fmt.Errorf("got %s, want iterable", updates.Type()) - } - defer iter.Done() - var pair Value - for i := 0; iter.Next(&pair); i++ { - iter2 := Iterate(pair) - if iter2 == nil { - return fmt.Errorf("dictionary update sequence element #%d is not iterable (%s)", i, pair.Type()) - - } - defer iter2.Done() - len := Len(pair) - if len < 0 { - return fmt.Errorf("dictionary update sequence element #%d has unknown length (%s)", i, pair.Type()) - } else if len != 2 { - return fmt.Errorf("dictionary update sequence element #%d has length %d, want 2", i, len) - } - var k, v Value - iter2.Next(&k) - iter2.Next(&v) - if err := dict.SetKey(k, v); err != nil { - return err - } - } - } - } - - // Then add the kwargs. - before := dict.Len() - for _, pair := range kwargs { - if err := dict.SetKey(pair[0], pair[1]); err != nil { - return err // dict is frozen - } - } - // In the common case, each kwarg will add another dict entry. - // If that's not so, check whether it is because there was a duplicate kwarg. - if dict.Len() < before+len(kwargs) { - keys := make(map[String]bool, len(kwargs)) - for _, kv := range kwargs { - k := kv[0].(String) - if keys[k] { - return fmt.Errorf("duplicate keyword arg: %v", k) - } - keys[k] = true - } - } - - return nil -} - -// nameErr returns an error message of the form "name: msg" -// where name is b.Name() and msg is a string or error. 
-func nameErr(b *Builtin, msg interface{}) error { - return fmt.Errorf("%s: %v", b.Name(), msg) -} diff --git a/src/vendor/go.starlark.net/starlark/profile.go b/src/vendor/go.starlark.net/starlark/profile.go deleted file mode 100644 index 38da2b2e9..000000000 --- a/src/vendor/go.starlark.net/starlark/profile.go +++ /dev/null @@ -1,449 +0,0 @@ -// Copyright 2019 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package starlark - -// This file defines a simple execution-time profiler for Starlark. -// It measures the wall time spent executing Starlark code, and emits a -// gzipped protocol message in pprof format (github.com/google/pprof). -// -// When profiling is enabled, the interpreter calls the profiler to -// indicate the start and end of each "span" or time interval. A leaf -// function (whether Go or Starlark) has a single span. A function that -// calls another function has spans for each interval in which it is the -// top of the stack. (A LOAD instruction also ends a span.) -// -// At the start of a span, the interpreter records the current time in -// the thread's topmost frame. At the end of the span, it obtains the -// time again and subtracts the span start time. The difference is added -// to an accumulator variable in the thread. If the accumulator exceeds -// some fixed quantum (10ms, say), the profiler records the current call -// stack and sends it to the profiler goroutine, along with the number -// of quanta, which are subtracted. For example, if the accumulator -// holds 3ms and then a completed span adds 25ms to it, its value is 28ms, -// which exceeeds 10ms. The profiler records a stack with the value 20ms -// (2 quanta), and the accumulator is left with 8ms. -// -// The profiler goroutine converts the stacks into the pprof format and -// emits a gzip-compressed protocol message to the designated output -// file. We use a hand-written streaming proto encoder to avoid -// dependencies on pprof and proto, and to avoid the need to -// materialize the profile data structure in memory. -// -// A limitation of this profiler is that it measures wall time, which -// does not necessarily correspond to CPU time. A CPU profiler requires -// that only running (not runnable) threads are sampled; this is -// commonly achieved by having the kernel deliver a (PROF) signal to an -// arbitrary running thread, through setitimer(2). The CPU profiler in the -// Go runtime uses this mechanism, but it is not possible for a Go -// application to register a SIGPROF handler, nor is it possible for a -// Go handler for some other signal to read the stack pointer of -// the interrupted thread. -// -// Two caveats: -// (1) it is tempting to send the leaf Frame directly to the profiler -// goroutine instead of making a copy of the stack, since a Frame is a -// spaghetti stack--a linked list. However, as soon as execution -// resumes, the stack's Frame.pc values may be mutated, so Frames are -// not safe to share with the asynchronous profiler goroutine. -// (2) it is tempting to use Callables as keys in a map when tabulating -// the pprof protocols's Function entities. However, we cannot assume -// that Callables are valid map keys, and furthermore we must not -// pin function values in memory indefinitely as this may cause lambda -// values to keep their free variables live much longer than necessary. - -// TODO(adonovan): -// - make Start/Stop fully thread-safe. -// - fix the pc hack. 
-// - experiment with other values of quantum. - -import ( - "bufio" - "bytes" - "compress/gzip" - "encoding/binary" - "fmt" - "io" - "log" - "reflect" - "sync/atomic" - "time" - "unsafe" - - "go.starlark.net/syntax" -) - -// StartProfile enables time profiling of all Starlark threads, -// and writes a profile in pprof format to w. -// It must be followed by a call to StopProfiler to stop -// the profiler and finalize the profile. -// -// StartProfile returns an error if profiling was already enabled. -// -// StartProfile must not be called concurrently with Starlark execution. -func StartProfile(w io.Writer) error { - if !atomic.CompareAndSwapUint32(&profiler.on, 0, 1) { - return fmt.Errorf("profiler already running") - } - - // TODO(adonovan): make the API fully concurrency-safe. - // The main challenge is racy reads/writes of profiler.events, - // and of send/close races on the channel it refers to. - // It's easy to solve them with a mutex but harder to do - // it efficiently. - - profiler.events = make(chan *profEvent, 1) - profiler.done = make(chan error) - - go profile(w) - - return nil -} - -// StopProfiler stops the profiler started by a prior call to -// StartProfile and finalizes the profile. It returns an error if the -// profile could not be completed. -// -// StopProfiler must not be called concurrently with Starlark execution. -func StopProfile() error { - // Terminate the profiler goroutine and get its result. - close(profiler.events) - err := <-profiler.done - - profiler.done = nil - profiler.events = nil - atomic.StoreUint32(&profiler.on, 0) - - return err -} - -// globals -var profiler struct { - on uint32 // nonzero => profiler running - events chan *profEvent // profile events from interpreter threads - done chan error // indicates profiler goroutine is ready -} - -func (thread *Thread) beginProfSpan() { - if profiler.events == nil { - return // profiling not enabled - } - - thread.frameAt(0).spanStart = nanotime() -} - -// TODO(adonovan): experiment with smaller values, -// which trade space and time for greater precision. -const quantum = 10 * time.Millisecond - -func (thread *Thread) endProfSpan() { - if profiler.events == nil { - return // profiling not enabled - } - - // Add the span to the thread's accumulator. - thread.proftime += time.Duration(nanotime() - thread.frameAt(0).spanStart) - if thread.proftime < quantum { - return - } - - // Only record complete quanta. - n := thread.proftime / quantum - thread.proftime -= n * quantum - - // Copy the stack. - // (We can't save thread.frame because its pc will change.) - ev := &profEvent{ - thread: thread, - time: n * quantum, - } - ev.stack = ev.stackSpace[:0] - for i := range thread.stack { - fr := thread.frameAt(i) - ev.stack = append(ev.stack, profFrame{ - pos: fr.Position(), - fn: fr.Callable(), - pc: fr.pc, - }) - } - - profiler.events <- ev -} - -type profEvent struct { - thread *Thread // currently unused - time time.Duration - stack []profFrame - stackSpace [8]profFrame // initial space for stack -} - -type profFrame struct { - fn Callable // don't hold this live for too long (prevents GC of lambdas) - pc uint32 // program counter (Starlark frames only) - pos syntax.Position // position of pc within this frame -} - -// profile is the profiler goroutine. -// It runs until StopProfiler is called. -func profile(w io.Writer) { - // Field numbers from pprof protocol. 
- // See https://github.com/google/pprof/blob/master/proto/profile.proto - const ( - Profile_sample_type = 1 // repeated ValueType - Profile_sample = 2 // repeated Sample - Profile_mapping = 3 // repeated Mapping - Profile_location = 4 // repeated Location - Profile_function = 5 // repeated Function - Profile_string_table = 6 // repeated string - Profile_time_nanos = 9 // int64 - Profile_duration_nanos = 10 // int64 - Profile_period_type = 11 // ValueType - Profile_period = 12 // int64 - - ValueType_type = 1 // int64 - ValueType_unit = 2 // int64 - - Sample_location_id = 1 // repeated uint64 - Sample_value = 2 // repeated int64 - Sample_label = 3 // repeated Label - - Label_key = 1 // int64 - Label_str = 2 // int64 - Label_num = 3 // int64 - Label_num_unit = 4 // int64 - - Location_id = 1 // uint64 - Location_mapping_id = 2 // uint64 - Location_address = 3 // uint64 - Location_line = 4 // repeated Line - - Line_function_id = 1 // uint64 - Line_line = 2 // int64 - - Function_id = 1 // uint64 - Function_name = 2 // int64 - Function_system_name = 3 // int64 - Function_filename = 4 // int64 - Function_start_line = 5 // int64 - ) - - bufw := bufio.NewWriter(w) // write file in 4KB (not 240B flate-sized) chunks - gz := gzip.NewWriter(bufw) - enc := protoEncoder{w: gz} - - // strings - stringIndex := make(map[string]int64) - str := func(s string) int64 { - i, ok := stringIndex[s] - if !ok { - i = int64(len(stringIndex)) - enc.string(Profile_string_table, s) - stringIndex[s] = i - } - return i - } - str("") // entry 0 - - // functions - // - // function returns the ID of a Callable for use in Line.FunctionId. - // The ID is the same as the function's logical address, - // which is supplied by the caller to avoid the need to recompute it. - functionId := make(map[uintptr]uint64) - function := func(fn Callable, addr uintptr) uint64 { - id, ok := functionId[addr] - if !ok { - id = uint64(addr) - - var pos syntax.Position - if fn, ok := fn.(callableWithPosition); ok { - pos = fn.Position() - } - - name := fn.Name() - if name == "" { - name = pos.Filename() - } - - nameIndex := str(name) - - fun := new(bytes.Buffer) - funenc := protoEncoder{w: fun} - funenc.uint(Function_id, id) - funenc.int(Function_name, nameIndex) - funenc.int(Function_system_name, nameIndex) - funenc.int(Function_filename, str(pos.Filename())) - funenc.int(Function_start_line, int64(pos.Line)) - enc.bytes(Profile_function, fun.Bytes()) - - functionId[addr] = id - } - return id - } - - // locations - // - // location returns the ID of the location denoted by fr. - // For Starlark frames, this is the Frame pc. - locationId := make(map[uintptr]uint64) - location := func(fr profFrame) uint64 { - fnAddr := profFuncAddr(fr.fn) - - // For Starlark functions, the frame position - // represents the current PC value. - // Mix it into the low bits of the address. - // This is super hacky and may result in collisions - // in large functions or if functions are numerous. - // TODO(adonovan): fix: try making this cleaner by treating - // each bytecode segment as a Profile.Mapping. 
- pcAddr := fnAddr - if _, ok := fr.fn.(*Function); ok { - pcAddr = (pcAddr << 16) ^ uintptr(fr.pc) - } - - id, ok := locationId[pcAddr] - if !ok { - id = uint64(pcAddr) - - line := new(bytes.Buffer) - lineenc := protoEncoder{w: line} - lineenc.uint(Line_function_id, function(fr.fn, fnAddr)) - lineenc.int(Line_line, int64(fr.pos.Line)) - loc := new(bytes.Buffer) - locenc := protoEncoder{w: loc} - locenc.uint(Location_id, id) - locenc.uint(Location_address, uint64(pcAddr)) - locenc.bytes(Location_line, line.Bytes()) - enc.bytes(Profile_location, loc.Bytes()) - - locationId[pcAddr] = id - } - return id - } - - wallNanos := new(bytes.Buffer) - wnenc := protoEncoder{w: wallNanos} - wnenc.int(ValueType_type, str("wall")) - wnenc.int(ValueType_unit, str("nanoseconds")) - - // informational fields of Profile - enc.bytes(Profile_sample_type, wallNanos.Bytes()) - enc.int(Profile_period, quantum.Nanoseconds()) // magnitude of sampling period - enc.bytes(Profile_period_type, wallNanos.Bytes()) // dimension and unit of period - enc.int(Profile_time_nanos, time.Now().UnixNano()) // start (real) time of profile - - startNano := nanotime() - - // Read profile events from the channel - // until it is closed by StopProfiler. - for e := range profiler.events { - sample := new(bytes.Buffer) - sampleenc := protoEncoder{w: sample} - sampleenc.int(Sample_value, e.time.Nanoseconds()) // wall nanoseconds - for _, fr := range e.stack { - sampleenc.uint(Sample_location_id, location(fr)) - } - enc.bytes(Profile_sample, sample.Bytes()) - } - - endNano := nanotime() - enc.int(Profile_duration_nanos, endNano-startNano) - - err := gz.Close() // Close reports any prior write error - if flushErr := bufw.Flush(); err == nil { - err = flushErr - } - profiler.done <- err -} - -// nanotime returns the time in nanoseconds since epoch. -// It is implemented by runtime.nanotime using the linkname hack; -// runtime.nanotime is defined for all OSs/ARCHS and uses the -// monotonic system clock, which there is no portable way to access. -// Should that function ever go away, these alternatives exist: -// -// // POSIX only. REALTIME not MONOTONIC. 17ns. -// var tv syscall.Timeval -// syscall.Gettimeofday(&tv) // can't fail -// return tv.Nano() -// -// // Portable. REALTIME not MONOTONIC. 46ns. -// return time.Now().Nanoseconds() -// -// // POSIX only. Adds a dependency. -// import "golang.org/x/sys/unix" -// var ts unix.Timespec -// unix.ClockGettime(CLOCK_MONOTONIC, &ts) // can't fail -// return unix.TimespecToNsec(ts) -// -//go:linkname nanotime runtime.nanotime -func nanotime() int64 - -// profFuncAddr returns the canonical "address" -// of a Callable for use by the profiler. -func profFuncAddr(fn Callable) uintptr { - switch fn := fn.(type) { - case *Builtin: - return reflect.ValueOf(fn.fn).Pointer() - case *Function: - return uintptr(unsafe.Pointer(fn.funcode)) - } - - // User-defined callable types are typically of - // of kind pointer-to-struct. Handle them specially. - if v := reflect.ValueOf(fn); v.Type().Kind() == reflect.Ptr { - return v.Pointer() - } - - // Address zero is reserved by the protocol. - // Use 1 for callables we don't recognize. - log.Printf("Starlark profiler: no address for Callable %T", fn) - return 1 -} - -// We encode the protocol message by hand to avoid making -// the interpreter depend on both github.com/google/pprof -// and github.com/golang/protobuf. -// -// This also avoids the need to materialize a protocol message object -// tree of unbounded size and serialize it all at the end. 
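// Illustrative sketch (not part of this patch or of the vendored file): the
// protobuf wire format that the hand-written streaming encoder below relies
// on. Each field is a key varint (field_number<<3 | wire_type) followed by the
// payload: wire type 0 is a varint scalar, wire type 2 is length-delimited
// bytes. The field numbers 12 (period) and 6 (string_table) are the pprof
// constants listed above.
package main

import (
	"encoding/binary"
	"fmt"
)

func appendUvarint(b []byte, x uint64) []byte {
	var tmp [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(tmp[:], x)
	return append(b, tmp[:n]...)
}

func main() {
	var msg []byte

	// Profile.period = 10ms in nanoseconds (field 12, wire type 0: varint).
	msg = appendUvarint(msg, 12<<3|0)
	msg = appendUvarint(msg, 10_000_000)

	// Profile.string_table entry "wall" (field 6, wire type 2: length-delimited).
	s := "wall"
	msg = appendUvarint(msg, 6<<3|2)
	msg = appendUvarint(msg, uint64(len(s)))
	msg = append(msg, s...)

	fmt.Printf("% x\n", msg) // 60 80 ad e2 04 32 04 77 61 6c 6c
}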
-// The pprof format appears to have been designed to -// permit streaming implementations such as this one. -// -// See https://developers.google.com/protocol-buffers/docs/encoding. -type protoEncoder struct { - w io.Writer // *bytes.Buffer or *gzip.Writer - tmp [binary.MaxVarintLen64]byte -} - -func (e *protoEncoder) uvarint(x uint64) { - n := binary.PutUvarint(e.tmp[:], x) - e.w.Write(e.tmp[:n]) -} - -func (e *protoEncoder) tag(field, wire uint) { - e.uvarint(uint64(field<<3 | wire)) -} - -func (e *protoEncoder) string(field uint, s string) { - e.tag(field, 2) // length-delimited - e.uvarint(uint64(len(s))) - io.WriteString(e.w, s) -} - -func (e *protoEncoder) bytes(field uint, b []byte) { - e.tag(field, 2) // length-delimited - e.uvarint(uint64(len(b))) - e.w.Write(b) -} - -func (e *protoEncoder) uint(field uint, x uint64) { - e.tag(field, 0) // varint - e.uvarint(x) -} - -func (e *protoEncoder) int(field uint, x int64) { - e.tag(field, 0) // varint - e.uvarint(uint64(x)) -} diff --git a/src/vendor/go.starlark.net/starlark/unpack.go b/src/vendor/go.starlark.net/starlark/unpack.go deleted file mode 100644 index 6c870f951..000000000 --- a/src/vendor/go.starlark.net/starlark/unpack.go +++ /dev/null @@ -1,258 +0,0 @@ -package starlark - -// This file defines the Unpack helper functions used by -// built-in functions to interpret their call arguments. - -import ( - "fmt" - "log" - "reflect" - "strings" -) - -// UnpackArgs unpacks the positional and keyword arguments into the -// supplied parameter variables. pairs is an alternating list of names -// and pointers to variables. -// -// If the variable is a bool, int, string, *List, *Dict, Callable, -// Iterable, or user-defined implementation of Value, -// UnpackArgs performs the appropriate type check. -// An int uses the AsInt32 check. -// If the parameter name ends with "?", -// it and all following parameters are optional. -// -// If the variable implements Value, UnpackArgs may call -// its Type() method while constructing the error message. -// -// Beware: an optional *List, *Dict, Callable, Iterable, or Value variable that is -// not assigned is not a valid Starlark Value, so the caller must -// explicitly handle such cases by interpreting nil as None or some -// computed default. -func UnpackArgs(fnname string, args Tuple, kwargs []Tuple, pairs ...interface{}) error { - nparams := len(pairs) / 2 - var defined intset - defined.init(nparams) - - paramName := func(x interface{}) string { // (no free variables) - name := x.(string) - if name[len(name)-1] == '?' 
{ - name = name[:len(name)-1] - } - return name - } - - // positional arguments - if len(args) > nparams { - return fmt.Errorf("%s: got %d arguments, want at most %d", - fnname, len(args), nparams) - } - for i, arg := range args { - defined.set(i) - if err := unpackOneArg(arg, pairs[2*i+1]); err != nil { - name := paramName(pairs[2*i]) - return fmt.Errorf("%s: for parameter %s: %s", fnname, name, err) - } - } - - // keyword arguments -kwloop: - for _, item := range kwargs { - name, arg := item[0].(String), item[1] - for i := 0; i < nparams; i++ { - if paramName(pairs[2*i]) == string(name) { - // found it - if defined.set(i) { - return fmt.Errorf("%s: got multiple values for keyword argument %s", - fnname, name) - } - ptr := pairs[2*i+1] - if err := unpackOneArg(arg, ptr); err != nil { - return fmt.Errorf("%s: for parameter %s: %s", fnname, name, err) - } - continue kwloop - } - } - return fmt.Errorf("%s: unexpected keyword argument %s", fnname, name) - } - - // Check that all non-optional parameters are defined. - // (We needn't check the first len(args).) - for i := len(args); i < nparams; i++ { - name := pairs[2*i].(string) - if strings.HasSuffix(name, "?") { - break // optional - } - if !defined.get(i) { - return fmt.Errorf("%s: missing argument for %s", fnname, name) - } - } - - return nil -} - -// UnpackPositionalArgs unpacks the positional arguments into -// corresponding variables. Each element of vars is a pointer; see -// UnpackArgs for allowed types and conversions. -// -// UnpackPositionalArgs reports an error if the number of arguments is -// less than min or greater than len(vars), if kwargs is nonempty, or if -// any conversion fails. -func UnpackPositionalArgs(fnname string, args Tuple, kwargs []Tuple, min int, vars ...interface{}) error { - if len(kwargs) > 0 { - return fmt.Errorf("%s: unexpected keyword arguments", fnname) - } - max := len(vars) - if len(args) < min { - var atleast string - if min < max { - atleast = "at least " - } - return fmt.Errorf("%s: got %d arguments, want %s%d", fnname, len(args), atleast, min) - } - if len(args) > max { - var atmost string - if max > min { - atmost = "at most " - } - return fmt.Errorf("%s: got %d arguments, want %s%d", fnname, len(args), atmost, max) - } - for i, arg := range args { - if err := unpackOneArg(arg, vars[i]); err != nil { - return fmt.Errorf("%s: for parameter %d: %s", fnname, i+1, err) - } - } - return nil -} - -func unpackOneArg(v Value, ptr interface{}) error { - // On failure, don't clobber *ptr. - switch ptr := ptr.(type) { - case *Value: - *ptr = v - case *string: - s, ok := AsString(v) - if !ok { - return fmt.Errorf("got %s, want string", v.Type()) - } - *ptr = s - case *bool: - b, ok := v.(Bool) - if !ok { - return fmt.Errorf("got %s, want bool", v.Type()) - } - *ptr = bool(b) - case *int: - i, err := AsInt32(v) - if err != nil { - return err - } - *ptr = i - case **List: - list, ok := v.(*List) - if !ok { - return fmt.Errorf("got %s, want list", v.Type()) - } - *ptr = list - case **Dict: - dict, ok := v.(*Dict) - if !ok { - return fmt.Errorf("got %s, want dict", v.Type()) - } - *ptr = dict - case *Callable: - f, ok := v.(Callable) - if !ok { - return fmt.Errorf("got %s, want callable", v.Type()) - } - *ptr = f - case *Iterable: - it, ok := v.(Iterable) - if !ok { - return fmt.Errorf("got %s, want iterable", v.Type()) - } - *ptr = it - default: - // v must have type *V, where V is some subtype of starlark.Value. 
- ptrv := reflect.ValueOf(ptr) - if ptrv.Kind() != reflect.Ptr { - log.Panicf("internal error: not a pointer: %T", ptr) - } - paramVar := ptrv.Elem() - if !reflect.TypeOf(v).AssignableTo(paramVar.Type()) { - // The value is not assignable to the variable. - - // Detect a possible bug in the Go program that called Unpack: - // If the variable *ptr is not a subtype of Value, - // no value of v can possibly work. - if !paramVar.Type().AssignableTo(reflect.TypeOf(new(Value)).Elem()) { - log.Panicf("pointer element type does not implement Value: %T", ptr) - } - - // Report Starlark dynamic type error. - // - // We prefer the Starlark Value.Type name over - // its Go reflect.Type name, but calling the - // Value.Type method on the variable is not safe - // in general. If the variable is an interface, - // the call will fail. Even if the variable has - // a concrete type, it might not be safe to call - // Type() on a zero instance. Thus we must use - // recover. - - // Default to Go reflect.Type name - paramType := paramVar.Type().String() - - // Attempt to call Value.Type method. - func() { - defer func() { recover() }() - paramType = paramVar.MethodByName("Type").Call(nil)[0].String() - }() - return fmt.Errorf("got %s, want %s", v.Type(), paramType) - } - paramVar.Set(reflect.ValueOf(v)) - } - return nil -} - -type intset struct { - small uint64 // bitset, used if n < 64 - large map[int]bool // set, used if n >= 64 -} - -func (is *intset) init(n int) { - if n >= 64 { - is.large = make(map[int]bool) - } -} - -func (is *intset) set(i int) (prev bool) { - if is.large == nil { - prev = is.small&(1< Hash(x) == Hash(y). - // Hash may fail if the value's type is not hashable, or if the value - // contains a non-hashable value. The hash is used only by dictionaries and - // is not exposed to the Starlark program. - Hash() (uint32, error) -} - -// A Comparable is a value that defines its own equivalence relation and -// perhaps ordered comparisons. -type Comparable interface { - Value - // CompareSameType compares one value to another of the same Type(). - // The comparison operation must be one of EQL, NEQ, LT, LE, GT, or GE. - // CompareSameType returns an error if an ordered comparison was - // requested for a type that does not support it. - // - // Implementations that recursively compare subcomponents of - // the value should use the CompareDepth function, not Compare, to - // avoid infinite recursion on cyclic structures. - // - // The depth parameter is used to bound comparisons of cyclic - // data structures. Implementations should decrement depth - // before calling CompareDepth and should return an error if depth - // < 1. - // - // Client code should not call this method. Instead, use the - // standalone Compare or Equals functions, which are defined for - // all pairs of operands. - CompareSameType(op syntax.Token, y Value, depth int) (bool, error) -} - -var ( - _ Comparable = None - _ Comparable = Int{} - _ Comparable = False - _ Comparable = Float(0) - _ Comparable = String("") - _ Comparable = (*Dict)(nil) - _ Comparable = (*List)(nil) - _ Comparable = Tuple(nil) - _ Comparable = (*Set)(nil) -) - -// A Callable value f may be the operand of a function call, f(x). -// -// Clients should use the Call function, never the CallInternal method. 
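// Illustrative sketch (not part of this patch or of the vendored file): a
// Go-implemented builtin that uses UnpackArgs exactly as documented above.
// The builtin name "repeat", its parameters, and the use of starlark.ExecFile
// are assumptions for the example; a trailing "?" marks a parameter (and all
// following ones) as optional.
package main

import (
	"fmt"
	"log"
	"strings"

	"go.starlark.net/starlark"
)

func repeat(thread *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
	var s string
	count := 1 // default used when the optional parameter is omitted
	if err := starlark.UnpackArgs(b.Name(), args, kwargs, "s", &s, "count?", &count); err != nil {
		return nil, err
	}
	return starlark.String(strings.Repeat(s, count)), nil
}

func main() {
	predeclared := starlark.StringDict{
		"repeat": starlark.NewBuiltin("repeat", repeat),
	}
	thread := &starlark.Thread{Name: "main"}
	globals, err := starlark.ExecFile(thread, "demo.star", `x = repeat("ab", count=3)`, predeclared)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(globals["x"]) // "ababab"
}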
-type Callable interface { - Value - Name() string - CallInternal(thread *Thread, args Tuple, kwargs []Tuple) (Value, error) -} - -type callableWithPosition interface { - Callable - Position() syntax.Position -} - -var ( - _ Callable = (*Builtin)(nil) - _ Callable = (*Function)(nil) - _ callableWithPosition = (*Function)(nil) -) - -// An Iterable abstracts a sequence of values. -// An iterable value may be iterated over by a 'for' loop or used where -// any other Starlark iterable is allowed. Unlike a Sequence, the length -// of an Iterable is not necessarily known in advance of iteration. -type Iterable interface { - Value - Iterate() Iterator // must be followed by call to Iterator.Done -} - -// A Sequence is a sequence of values of known length. -type Sequence interface { - Iterable - Len() int -} - -var ( - _ Sequence = (*Dict)(nil) - _ Sequence = (*Set)(nil) -) - -// An Indexable is a sequence of known length that supports efficient random access. -// It is not necessarily iterable. -type Indexable interface { - Value - Index(i int) Value // requires 0 <= i < Len() - Len() int -} - -// A Sliceable is a sequence that can be cut into pieces with the slice operator (x[i:j:step]). -// -// All native indexable objects are sliceable. -// This is a separate interface for backwards-compatibility. -type Sliceable interface { - Indexable - // For positive strides (step > 0), 0 <= start <= end <= n. - // For negative strides (step < 0), -1 <= end <= start < n. - // The caller must ensure that the start and end indices are valid - // and that step is non-zero. - Slice(start, end, step int) Value -} - -// A HasSetIndex is an Indexable value whose elements may be assigned (x[i] = y). -// -// The implementation should not add Len to a negative index as the -// evaluator does this before the call. -type HasSetIndex interface { - Indexable - SetIndex(index int, v Value) error -} - -var ( - _ HasSetIndex = (*List)(nil) - _ Indexable = Tuple(nil) - _ Indexable = String("") - _ Sliceable = Tuple(nil) - _ Sliceable = String("") - _ Sliceable = (*List)(nil) -) - -// An Iterator provides a sequence of values to the caller. -// -// The caller must call Done when the iterator is no longer needed. -// Operations that modify a sequence will fail if it has active iterators. -// -// Example usage: -// -// iter := iterable.Iterator() -// defer iter.Done() -// var x Value -// for iter.Next(&x) { -// ... -// } -// -type Iterator interface { - // If the iterator is exhausted, Next returns false. - // Otherwise it sets *p to the current element of the sequence, - // advances the iterator, and returns true. - Next(p *Value) bool - Done() -} - -// A Mapping is a mapping from keys to values, such as a dictionary. -// -// If a type satisfies both Mapping and Iterable, the iterator yields -// the keys of the mapping. -type Mapping interface { - Value - // Get returns the value corresponding to the specified key, - // or !found if the mapping does not contain the key. - // - // Get also defines the behavior of "v in mapping". - // The 'in' operator reports the 'found' component, ignoring errors. - Get(Value) (v Value, found bool, err error) -} - -// An IterableMapping is a mapping that supports key enumeration. -type IterableMapping interface { - Mapping - Iterate() Iterator // see Iterable interface - Items() []Tuple // a new slice containing all key/value pairs -} - -var _ IterableMapping = (*Dict)(nil) - -// A HasSetKey supports map update using x[k]=v syntax, like a dictionary. 
-type HasSetKey interface { - Mapping - SetKey(k, v Value) error -} - -var _ HasSetKey = (*Dict)(nil) - -// A HasBinary value may be used as either operand of these binary operators: -// + - * / // % in not in | & ^ << >> -// -// The Side argument indicates whether the receiver is the left or right operand. -// -// An implementation may decline to handle an operation by returning (nil, nil). -// For this reason, clients should always call the standalone Binary(op, x, y) -// function rather than calling the method directly. -type HasBinary interface { - Value - Binary(op syntax.Token, y Value, side Side) (Value, error) -} - -type Side bool - -const ( - Left Side = false - Right Side = true -) - -// A HasUnary value may be used as the operand of these unary operators: -// + - ~ -// -// An implementation may decline to handle an operation by returning (nil, nil). -// For this reason, clients should always call the standalone Unary(op, x) -// function rather than calling the method directly. -type HasUnary interface { - Value - Unary(op syntax.Token) (Value, error) -} - -// A HasAttrs value has fields or methods that may be read by a dot expression (y = x.f). -// Attribute names may be listed using the built-in 'dir' function. -// -// For implementation convenience, a result of (nil, nil) from Attr is -// interpreted as a "no such field or method" error. Implementations are -// free to return a more precise error. -type HasAttrs interface { - Value - Attr(name string) (Value, error) // returns (nil, nil) if attribute not present - AttrNames() []string // callers must not modify the result. -} - -var ( - _ HasAttrs = String("") - _ HasAttrs = new(List) - _ HasAttrs = new(Dict) - _ HasAttrs = new(Set) -) - -// A HasSetField value has fields that may be written by a dot expression (x.f = y). -// -// An implementation of SetField may return a NoSuchAttrError, -// in which case the runtime may augment the error message to -// warn of possible misspelling. -type HasSetField interface { - HasAttrs - SetField(name string, val Value) error -} - -// A NoSuchAttrError may be returned by an implementation of -// HasAttrs.Attr or HasSetField.SetField to indicate that no such field -// exists. In that case the runtime may augment the error message to -// warn of possible misspelling. -type NoSuchAttrError string - -func (e NoSuchAttrError) Error() string { return string(e) } - -// NoneType is the type of None. Its only legal value is None. -// (We represent it as a number, not struct{}, so that None may be constant.) -type NoneType byte - -const None = NoneType(0) - -func (NoneType) String() string { return "None" } -func (NoneType) Type() string { return "NoneType" } -func (NoneType) Freeze() {} // immutable -func (NoneType) Truth() Bool { return False } -func (NoneType) Hash() (uint32, error) { return 0, nil } -func (NoneType) CompareSameType(op syntax.Token, y Value, depth int) (bool, error) { - return threeway(op, 0), nil -} - -// Bool is the type of a Starlark bool. 
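// Illustrative sketch (not part of this patch or of the vendored file): a
// user-defined Go type exposed to Starlark through the HasAttrs interface
// described above. The "point" type, its fields, and the script are
// assumptions for the example.
package main

import (
	"fmt"
	"log"

	"go.starlark.net/starlark"
)

type point struct{ x, y int }

var _ starlark.HasAttrs = point{}

// Value interface.
func (p point) String() string        { return fmt.Sprintf("point(%d, %d)", p.x, p.y) }
func (p point) Type() string          { return "point" }
func (p point) Freeze()               {} // immutable
func (p point) Truth() starlark.Bool  { return true }
func (p point) Hash() (uint32, error) { return uint32(p.x)*31 + uint32(p.y), nil }

// HasAttrs: dot expressions p.x and p.y; dir(p) lists the names.
func (p point) Attr(name string) (starlark.Value, error) {
	switch name {
	case "x":
		return starlark.MakeInt(p.x), nil
	case "y":
		return starlark.MakeInt(p.y), nil
	}
	return nil, nil // interpreted as "no such field or method"
}
func (p point) AttrNames() []string { return []string{"x", "y"} }

func main() {
	predeclared := starlark.StringDict{"origin": point{3, 4}}
	thread := &starlark.Thread{Name: "main"}
	globals, err := starlark.ExecFile(thread, "demo.star",
		`d = origin.x * origin.x + origin.y * origin.y`, predeclared)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(globals["d"]) // 25
}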
-type Bool bool - -const ( - False Bool = false - True Bool = true -) - -func (b Bool) String() string { - if b { - return "True" - } else { - return "False" - } -} -func (b Bool) Type() string { return "bool" } -func (b Bool) Freeze() {} // immutable -func (b Bool) Truth() Bool { return b } -func (b Bool) Hash() (uint32, error) { return uint32(b2i(bool(b))), nil } -func (x Bool) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { - y := y_.(Bool) - return threeway(op, b2i(bool(x))-b2i(bool(y))), nil -} - -// Float is the type of a Starlark float. -type Float float64 - -func (f Float) String() string { return strconv.FormatFloat(float64(f), 'g', 6, 64) } -func (f Float) Type() string { return "float" } -func (f Float) Freeze() {} // immutable -func (f Float) Truth() Bool { return f != 0.0 } -func (f Float) Hash() (uint32, error) { - // Equal float and int values must yield the same hash. - // TODO(adonovan): opt: if f is non-integral, and thus not equal - // to any Int, we can avoid the Int conversion and use a cheaper hash. - if isFinite(float64(f)) { - return finiteFloatToInt(f).Hash() - } - return 1618033, nil // NaN, +/-Inf -} - -func floor(f Float) Float { return Float(math.Floor(float64(f))) } - -// isFinite reports whether f represents a finite rational value. -// It is equivalent to !math.IsNan(f) && !math.IsInf(f, 0). -func isFinite(f float64) bool { - return math.Abs(f) <= math.MaxFloat64 -} - -func (x Float) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { - y := y_.(Float) - switch op { - case syntax.EQL: - return x == y, nil - case syntax.NEQ: - return x != y, nil - case syntax.LE: - return x <= y, nil - case syntax.LT: - return x < y, nil - case syntax.GE: - return x >= y, nil - case syntax.GT: - return x > y, nil - } - panic(op) -} - -func (f Float) rational() *big.Rat { return new(big.Rat).SetFloat64(float64(f)) } - -// AsFloat returns the float64 value closest to x. -// The f result is undefined if x is not a float or int. -func AsFloat(x Value) (f float64, ok bool) { - switch x := x.(type) { - case Float: - return float64(x), true - case Int: - return float64(x.Float()), true - } - return 0, false -} - -func (x Float) Mod(y Float) Float { return Float(math.Mod(float64(x), float64(y))) } - -// Unary implements the operations +float and -float. -func (f Float) Unary(op syntax.Token) (Value, error) { - switch op { - case syntax.MINUS: - return -f, nil - case syntax.PLUS: - return +f, nil - } - return nil, nil -} - -// String is the type of a Starlark string. -// -// A String encapsulates an an immutable sequence of bytes, -// but strings are not directly iterable. Instead, iterate -// over the result of calling one of these four methods: -// codepoints, codepoint_ords, elems, elem_ords. -// -// Warning: the contract of the Value interface's String method is that -// it returns the value printed in Starlark notation, -// so s.String() or fmt.Sprintf("%s", s) returns a quoted string. -// Use string(s) or s.GoString() or fmt.Sprintf("%#v", s) to obtain the raw contents -// of a Starlark string as a Go string. 
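// Illustrative sketch (not part of this patch or of the vendored file): the
// quoting pitfall warned about above. String() renders the Starlark notation
// (quoted); string(s), GoString, or AsString yield the raw contents.
package main

import (
	"fmt"

	"go.starlark.net/starlark"
)

func main() {
	s := starlark.String("hello")
	fmt.Println(s.String())   // "hello"  (quoted Starlark notation)
	fmt.Println(string(s))    // hello    (raw contents)
	fmt.Println(s.GoString()) // hello

	if raw, ok := starlark.AsString(s); ok {
		fmt.Println(len(raw)) // 5
	}
}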
-type String string - -func (s String) String() string { return strconv.Quote(string(s)) } -func (s String) GoString() string { return string(s) } -func (s String) Type() string { return "string" } -func (s String) Freeze() {} // immutable -func (s String) Truth() Bool { return len(s) > 0 } -func (s String) Hash() (uint32, error) { return hashString(string(s)), nil } -func (s String) Len() int { return len(s) } // bytes -func (s String) Index(i int) Value { return s[i : i+1] } - -func (s String) Slice(start, end, step int) Value { - if step == 1 { - return s[start:end] - } - - sign := signum(step) - var str []byte - for i := start; signum(end-i) == sign; i += step { - str = append(str, s[i]) - } - return String(str) -} - -func (s String) Attr(name string) (Value, error) { return builtinAttr(s, name, stringMethods) } -func (s String) AttrNames() []string { return builtinAttrNames(stringMethods) } - -func (x String) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { - y := y_.(String) - return threeway(op, strings.Compare(string(x), string(y))), nil -} - -func AsString(x Value) (string, bool) { v, ok := x.(String); return string(v), ok } - -// A stringIterable is an iterable whose iterator yields a sequence of -// either Unicode code points or elements (bytes), -// either numerically or as successive substrings. -type stringIterable struct { - s String - ords bool - codepoints bool -} - -var _ Iterable = (*stringIterable)(nil) - -func (si stringIterable) String() string { - var etype string - if si.codepoints { - etype = "codepoint" - } else { - etype = "elem" - } - if si.ords { - return si.s.String() + "." + etype + "_ords()" - } else { - return si.s.String() + "." + etype + "s()" - } -} -func (si stringIterable) Type() string { - if si.codepoints { - return "codepoints" - } else { - return "elems" - } -} -func (si stringIterable) Freeze() {} // immutable -func (si stringIterable) Truth() Bool { return True } -func (si stringIterable) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable: %s", si.Type()) } -func (si stringIterable) Iterate() Iterator { return &stringIterator{si, 0} } - -type stringIterator struct { - si stringIterable - i int -} - -func (it *stringIterator) Next(p *Value) bool { - s := it.si.s[it.i:] - if s == "" { - return false - } - if it.si.codepoints { - r, sz := utf8.DecodeRuneInString(string(s)) - if !it.si.ords { - *p = s[:sz] - } else { - *p = MakeInt(int(r)) - } - it.i += sz - } else { - b := int(s[0]) - if !it.si.ords { - *p = s[:1] - } else { - *p = MakeInt(b) - } - it.i += 1 - } - return true -} - -func (*stringIterator) Done() {} - -// A Function is a function defined by a Starlark def statement or lambda expression. -// The initialization behavior of a Starlark module is also represented by a Function. -type Function struct { - funcode *compile.Funcode - module *module - defaults Tuple - freevars Tuple -} - -// A module is the dynamic counterpart to a Program. -// All functions in the same program share a module. -type module struct { - program *compile.Program - predeclared StringDict - globals []Value - constants []Value -} - -// makeGlobalDict returns a new, unfrozen StringDict containing all global -// variables so far defined in the module. 
-func (m *module) makeGlobalDict() StringDict { - r := make(StringDict, len(m.program.Globals)) - for i, id := range m.program.Globals { - if v := m.globals[i]; v != nil { - r[id.Name] = v - } - } - return r -} - -func (fn *Function) Name() string { return fn.funcode.Name } // "lambda" for anonymous functions -func (fn *Function) Doc() string { return fn.funcode.Doc } -func (fn *Function) Hash() (uint32, error) { return hashString(fn.funcode.Name), nil } -func (fn *Function) Freeze() { fn.defaults.Freeze(); fn.freevars.Freeze() } -func (fn *Function) String() string { return toString(fn) } -func (fn *Function) Type() string { return "function" } -func (fn *Function) Truth() Bool { return true } - -// Globals returns a new, unfrozen StringDict containing all global -// variables so far defined in the function's module. -func (fn *Function) Globals() StringDict { return fn.module.makeGlobalDict() } - -func (fn *Function) Position() syntax.Position { return fn.funcode.Pos } -func (fn *Function) NumParams() int { return fn.funcode.NumParams } -func (fn *Function) NumKwonlyParams() int { return fn.funcode.NumKwonlyParams } - -// Param returns the name and position of the ith parameter, -// where 0 <= i < NumParams(). -// The *args and **kwargs parameters are at the end -// even if there were optional parameters after *args. -func (fn *Function) Param(i int) (string, syntax.Position) { - if i >= fn.NumParams() { - panic(i) - } - id := fn.funcode.Locals[i] - return id.Name, id.Pos -} -func (fn *Function) HasVarargs() bool { return fn.funcode.HasVarargs } -func (fn *Function) HasKwargs() bool { return fn.funcode.HasKwargs } - -// A Builtin is a function implemented in Go. -type Builtin struct { - name string - fn func(thread *Thread, fn *Builtin, args Tuple, kwargs []Tuple) (Value, error) - recv Value // for bound methods (e.g. "".startswith) -} - -func (b *Builtin) Name() string { return b.name } -func (b *Builtin) Freeze() { - if b.recv != nil { - b.recv.Freeze() - } -} -func (b *Builtin) Hash() (uint32, error) { - h := hashString(b.name) - if b.recv != nil { - h ^= 5521 - } - return h, nil -} -func (b *Builtin) Receiver() Value { return b.recv } -func (b *Builtin) String() string { return toString(b) } -func (b *Builtin) Type() string { return "builtin_function_or_method" } -func (b *Builtin) CallInternal(thread *Thread, args Tuple, kwargs []Tuple) (Value, error) { - return b.fn(thread, b, args, kwargs) -} -func (b *Builtin) Truth() Bool { return true } - -// NewBuiltin returns a new 'builtin_function_or_method' value with the specified name -// and implementation. It compares unequal with all other values. -func NewBuiltin(name string, fn func(thread *Thread, fn *Builtin, args Tuple, kwargs []Tuple) (Value, error)) *Builtin { - return &Builtin{name: name, fn: fn} -} - -// BindReceiver returns a new Builtin value representing a method -// closure, that is, a built-in function bound to a receiver value. -// -// In the example below, the value of f is the string.index -// built-in method bound to the receiver value "abc": -// -// f = "abc".index; f("a"); f("b") -// -// In the common case, the receiver is bound only during the call, -// but this still results in the creation of a temporary method closure: -// -// "abc".index("a") -// -func (b *Builtin) BindReceiver(recv Value) *Builtin { - return &Builtin{name: b.name, fn: b.fn, recv: recv} -} - -// A *Dict represents a Starlark dictionary. -// The zero value of Dict is a valid empty dictionary. 
-// If you know the exact final number of entries, -// it is more efficient to call NewDict. -type Dict struct { - ht hashtable -} - -// NewDict returns a set with initial space for -// at least size insertions before rehashing. -func NewDict(size int) *Dict { - dict := new(Dict) - dict.ht.init(size) - return dict -} - -func (d *Dict) Clear() error { return d.ht.clear() } -func (d *Dict) Delete(k Value) (v Value, found bool, err error) { return d.ht.delete(k) } -func (d *Dict) Get(k Value) (v Value, found bool, err error) { return d.ht.lookup(k) } -func (d *Dict) Items() []Tuple { return d.ht.items() } -func (d *Dict) Keys() []Value { return d.ht.keys() } -func (d *Dict) Len() int { return int(d.ht.len) } -func (d *Dict) Iterate() Iterator { return d.ht.iterate() } -func (d *Dict) SetKey(k, v Value) error { return d.ht.insert(k, v) } -func (d *Dict) String() string { return toString(d) } -func (d *Dict) Type() string { return "dict" } -func (d *Dict) Freeze() { d.ht.freeze() } -func (d *Dict) Truth() Bool { return d.Len() > 0 } -func (d *Dict) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable type: dict") } - -func (d *Dict) Attr(name string) (Value, error) { return builtinAttr(d, name, dictMethods) } -func (d *Dict) AttrNames() []string { return builtinAttrNames(dictMethods) } - -func (x *Dict) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { - y := y_.(*Dict) - switch op { - case syntax.EQL: - ok, err := dictsEqual(x, y, depth) - return ok, err - case syntax.NEQ: - ok, err := dictsEqual(x, y, depth) - return !ok, err - default: - return false, fmt.Errorf("%s %s %s not implemented", x.Type(), op, y.Type()) - } -} - -func dictsEqual(x, y *Dict, depth int) (bool, error) { - if x.Len() != y.Len() { - return false, nil - } - for _, xitem := range x.Items() { - key, xval := xitem[0], xitem[1] - - if yval, found, _ := y.Get(key); !found { - return false, nil - } else if eq, err := EqualDepth(xval, yval, depth-1); err != nil { - return false, err - } else if !eq { - return false, nil - } - } - return true, nil -} - -// A *List represents a Starlark list value. -type List struct { - elems []Value - frozen bool - itercount uint32 // number of active iterators (ignored if frozen) -} - -// NewList returns a list containing the specified elements. -// Callers should not subsequently modify elems. -func NewList(elems []Value) *List { return &List{elems: elems} } - -func (l *List) Freeze() { - if !l.frozen { - l.frozen = true - for _, elem := range l.elems { - elem.Freeze() - } - } -} - -// checkMutable reports an error if the list should not be mutated. -// verb+" list" should describe the operation. -func (l *List) checkMutable(verb string) error { - if l.frozen { - return fmt.Errorf("cannot %s frozen list", verb) - } - if l.itercount > 0 { - return fmt.Errorf("cannot %s list during iteration", verb) - } - return nil -} - -func (l *List) String() string { return toString(l) } -func (l *List) Type() string { return "list" } -func (l *List) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable type: list") } -func (l *List) Truth() Bool { return l.Len() > 0 } -func (l *List) Len() int { return len(l.elems) } -func (l *List) Index(i int) Value { return l.elems[i] } - -func (l *List) Slice(start, end, step int) Value { - if step == 1 { - elems := append([]Value{}, l.elems[start:end]...) 
- return NewList(elems) - } - - sign := signum(step) - var list []Value - for i := start; signum(end-i) == sign; i += step { - list = append(list, l.elems[i]) - } - return NewList(list) -} - -func (l *List) Attr(name string) (Value, error) { return builtinAttr(l, name, listMethods) } -func (l *List) AttrNames() []string { return builtinAttrNames(listMethods) } - -func (l *List) Iterate() Iterator { - if !l.frozen { - l.itercount++ - } - return &listIterator{l: l} -} - -func (x *List) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { - y := y_.(*List) - // It's tempting to check x == y as an optimization here, - // but wrong because a list containing NaN is not equal to itself. - return sliceCompare(op, x.elems, y.elems, depth) -} - -func sliceCompare(op syntax.Token, x, y []Value, depth int) (bool, error) { - // Fast path: check length. - if len(x) != len(y) && (op == syntax.EQL || op == syntax.NEQ) { - return op == syntax.NEQ, nil - } - - // Find first element that is not equal in both lists. - for i := 0; i < len(x) && i < len(y); i++ { - if eq, err := EqualDepth(x[i], y[i], depth-1); err != nil { - return false, err - } else if !eq { - switch op { - case syntax.EQL: - return false, nil - case syntax.NEQ: - return true, nil - default: - return CompareDepth(op, x[i], y[i], depth-1) - } - } - } - - return threeway(op, len(x)-len(y)), nil -} - -type listIterator struct { - l *List - i int -} - -func (it *listIterator) Next(p *Value) bool { - if it.i < it.l.Len() { - *p = it.l.elems[it.i] - it.i++ - return true - } - return false -} - -func (it *listIterator) Done() { - if !it.l.frozen { - it.l.itercount-- - } -} - -func (l *List) SetIndex(i int, v Value) error { - if err := l.checkMutable("assign to element of"); err != nil { - return err - } - l.elems[i] = v - return nil -} - -func (l *List) Append(v Value) error { - if err := l.checkMutable("append to"); err != nil { - return err - } - l.elems = append(l.elems, v) - return nil -} - -func (l *List) Clear() error { - if err := l.checkMutable("clear"); err != nil { - return err - } - for i := range l.elems { - l.elems[i] = nil // aid GC - } - l.elems = l.elems[:0] - return nil -} - -// A Tuple represents a Starlark tuple value. -type Tuple []Value - -func (t Tuple) Len() int { return len(t) } -func (t Tuple) Index(i int) Value { return t[i] } - -func (t Tuple) Slice(start, end, step int) Value { - if step == 1 { - return t[start:end] - } - - sign := signum(step) - var tuple Tuple - for i := start; signum(end-i) == sign; i += step { - tuple = append(tuple, t[i]) - } - return tuple -} - -func (t Tuple) Iterate() Iterator { return &tupleIterator{elems: t} } -func (t Tuple) Freeze() { - for _, elem := range t { - elem.Freeze() - } -} -func (t Tuple) String() string { return toString(t) } -func (t Tuple) Type() string { return "tuple" } -func (t Tuple) Truth() Bool { return len(t) > 0 } - -func (x Tuple) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { - y := y_.(Tuple) - return sliceCompare(op, x, y, depth) -} - -func (t Tuple) Hash() (uint32, error) { - // Use same algorithm as Python. 
- var x, mult uint32 = 0x345678, 1000003 - for _, elem := range t { - y, err := elem.Hash() - if err != nil { - return 0, err - } - x = x ^ y*mult - mult += 82520 + uint32(len(t)+len(t)) - } - return x, nil -} - -type tupleIterator struct{ elems Tuple } - -func (it *tupleIterator) Next(p *Value) bool { - if len(it.elems) > 0 { - *p = it.elems[0] - it.elems = it.elems[1:] - return true - } - return false -} - -func (it *tupleIterator) Done() {} - -// A Set represents a Starlark set value. -// The zero value of Set is a valid empty set. -// If you know the exact final number of elements, -// it is more efficient to call NewSet. -type Set struct { - ht hashtable // values are all None -} - -// NewSet returns a dictionary with initial space for -// at least size insertions before rehashing. -func NewSet(size int) *Set { - set := new(Set) - set.ht.init(size) - return set -} - -func (s *Set) Delete(k Value) (found bool, err error) { _, found, err = s.ht.delete(k); return } -func (s *Set) Clear() error { return s.ht.clear() } -func (s *Set) Has(k Value) (found bool, err error) { _, found, err = s.ht.lookup(k); return } -func (s *Set) Insert(k Value) error { return s.ht.insert(k, None) } -func (s *Set) Len() int { return int(s.ht.len) } -func (s *Set) Iterate() Iterator { return s.ht.iterate() } -func (s *Set) String() string { return toString(s) } -func (s *Set) Type() string { return "set" } -func (s *Set) elems() []Value { return s.ht.keys() } -func (s *Set) Freeze() { s.ht.freeze() } -func (s *Set) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable type: set") } -func (s *Set) Truth() Bool { return s.Len() > 0 } - -func (s *Set) Attr(name string) (Value, error) { return builtinAttr(s, name, setMethods) } -func (s *Set) AttrNames() []string { return builtinAttrNames(setMethods) } - -func (x *Set) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { - y := y_.(*Set) - switch op { - case syntax.EQL: - ok, err := setsEqual(x, y, depth) - return ok, err - case syntax.NEQ: - ok, err := setsEqual(x, y, depth) - return !ok, err - default: - return false, fmt.Errorf("%s %s %s not implemented", x.Type(), op, y.Type()) - } -} - -func setsEqual(x, y *Set, depth int) (bool, error) { - if x.Len() != y.Len() { - return false, nil - } - for _, elem := range x.elems() { - if found, _ := y.Has(elem); !found { - return false, nil - } - } - return true, nil -} - -func (s *Set) Union(iter Iterator) (Value, error) { - set := new(Set) - for _, elem := range s.elems() { - set.Insert(elem) // can't fail - } - var x Value - for iter.Next(&x) { - if err := set.Insert(x); err != nil { - return nil, err - } - } - return set, nil -} - -// toString returns the string form of value v. -// It may be more efficient than v.String() for larger values. -func toString(v Value) string { - buf := new(strings.Builder) - writeValue(buf, v, nil) - return buf.String() -} - -// writeValue writes x to out. -// -// path is used to detect cycles. -// It contains the list of *List and *Dict values we're currently printing. -// (These are the only potentially cyclic structures.) -// Callers should generally pass nil for path. -// It is safe to re-use the same path slice for multiple calls. 
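// Illustrative sketch (not part of this patch or of the vendored file): basic
// use from Go of the Dict, List, and Set containers defined above, plus the
// standalone Equal helper defined just below.
package main

import (
	"fmt"
	"log"

	"go.starlark.net/starlark"
)

func main() {
	// Dict: insertion and lookup go through SetKey/Get (keys must be hashable).
	d := starlark.NewDict(2)
	if err := d.SetKey(starlark.String("answer"), starlark.MakeInt(42)); err != nil {
		log.Fatal(err)
	}
	v, found, _ := d.Get(starlark.String("answer"))
	fmt.Println(found, v) // true 42

	// List: mutable until frozen or while iterators are active.
	l := starlark.NewList([]starlark.Value{starlark.MakeInt(1)})
	if err := l.Append(starlark.MakeInt(2)); err != nil {
		log.Fatal(err)
	}
	fmt.Println(l) // [1, 2]

	// Set: membership via Has.
	s := starlark.NewSet(1)
	_ = s.Insert(starlark.String("x"))
	ok, _ := s.Has(starlark.String("x"))
	fmt.Println(ok) // true

	// Equal compares Starlark values structurally.
	other := starlark.NewList([]starlark.Value{starlark.MakeInt(1), starlark.MakeInt(2)})
	eq, _ := starlark.Equal(l, other)
	fmt.Println(eq) // true
}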
-func writeValue(out *strings.Builder, x Value, path []Value) { - switch x := x.(type) { - case nil: - out.WriteString("") // indicates a bug - - case NoneType: - out.WriteString("None") - - case Int: - out.WriteString(x.String()) - - case Bool: - if x { - out.WriteString("True") - } else { - out.WriteString("False") - } - - case String: - fmt.Fprintf(out, "%q", string(x)) - - case *List: - out.WriteByte('[') - if pathContains(path, x) { - out.WriteString("...") // list contains itself - } else { - for i, elem := range x.elems { - if i > 0 { - out.WriteString(", ") - } - writeValue(out, elem, append(path, x)) - } - } - out.WriteByte(']') - - case Tuple: - out.WriteByte('(') - for i, elem := range x { - if i > 0 { - out.WriteString(", ") - } - writeValue(out, elem, path) - } - if len(x) == 1 { - out.WriteByte(',') - } - out.WriteByte(')') - - case *Function: - fmt.Fprintf(out, "", x.Name()) - - case *Builtin: - if x.recv != nil { - fmt.Fprintf(out, "", x.Name(), x.recv.Type()) - } else { - fmt.Fprintf(out, "", x.Name()) - } - - case *Dict: - out.WriteByte('{') - if pathContains(path, x) { - out.WriteString("...") // dict contains itself - } else { - sep := "" - for _, item := range x.Items() { - k, v := item[0], item[1] - out.WriteString(sep) - writeValue(out, k, path) - out.WriteString(": ") - writeValue(out, v, append(path, x)) // cycle check - sep = ", " - } - } - out.WriteByte('}') - - case *Set: - out.WriteString("set([") - for i, elem := range x.elems() { - if i > 0 { - out.WriteString(", ") - } - writeValue(out, elem, path) - } - out.WriteString("])") - - default: - out.WriteString(x.String()) - } -} - -func pathContains(path []Value, x Value) bool { - for _, y := range path { - if x == y { - return true - } - } - return false -} - -const maxdepth = 10 - -// Equal reports whether two Starlark values are equal. -func Equal(x, y Value) (bool, error) { - if x, ok := x.(String); ok { - return x == y, nil // fast path for an important special case - } - return EqualDepth(x, y, maxdepth) -} - -// EqualDepth reports whether two Starlark values are equal. -// -// Recursive comparisons by implementations of Value.CompareSameType -// should use EqualDepth to prevent infinite recursion. -func EqualDepth(x, y Value, depth int) (bool, error) { - return CompareDepth(syntax.EQL, x, y, depth) -} - -// Compare compares two Starlark values. -// The comparison operation must be one of EQL, NEQ, LT, LE, GT, or GE. -// Compare returns an error if an ordered comparison was -// requested for a type that does not support it. -// -// Recursive comparisons by implementations of Value.CompareSameType -// should use CompareDepth to prevent infinite recursion. -func Compare(op syntax.Token, x, y Value) (bool, error) { - return CompareDepth(op, x, y, maxdepth) -} - -// CompareDepth compares two Starlark values. -// The comparison operation must be one of EQL, NEQ, LT, LE, GT, or GE. -// CompareDepth returns an error if an ordered comparison was -// requested for a pair of values that do not support it. -// -// The depth parameter limits the maximum depth of recursion -// in cyclic data structures. 
-func CompareDepth(op syntax.Token, x, y Value, depth int) (bool, error) { - if depth < 1 { - return false, fmt.Errorf("comparison exceeded maximum recursion depth") - } - if sameType(x, y) { - if xcomp, ok := x.(Comparable); ok { - return xcomp.CompareSameType(op, y, depth) - } - - // use identity comparison - switch op { - case syntax.EQL: - return x == y, nil - case syntax.NEQ: - return x != y, nil - } - return false, fmt.Errorf("%s %s %s not implemented", x.Type(), op, y.Type()) - } - - // different types - - // int/float ordered comparisons - switch x := x.(type) { - case Int: - if y, ok := y.(Float); ok { - if y != y { - return false, nil // y is NaN - } - var cmp int - if !math.IsInf(float64(y), 0) { - cmp = x.rational().Cmp(y.rational()) // y is finite - } else if y > 0 { - cmp = -1 // y is +Inf - } else { - cmp = +1 // y is -Inf - } - return threeway(op, cmp), nil - } - case Float: - if y, ok := y.(Int); ok { - if x != x { - return false, nil // x is NaN - } - var cmp int - if !math.IsInf(float64(x), 0) { - cmp = x.rational().Cmp(y.rational()) // x is finite - } else if x > 0 { - cmp = -1 // x is +Inf - } else { - cmp = +1 // x is -Inf - } - return threeway(op, cmp), nil - } - } - - // All other values of different types compare unequal. - switch op { - case syntax.EQL: - return false, nil - case syntax.NEQ: - return true, nil - } - return false, fmt.Errorf("%s %s %s not implemented", x.Type(), op, y.Type()) -} - -func sameType(x, y Value) bool { - return reflect.TypeOf(x) == reflect.TypeOf(y) || x.Type() == y.Type() -} - -// threeway interprets a three-way comparison value cmp (-1, 0, +1) -// as a boolean comparison (e.g. x < y). -func threeway(op syntax.Token, cmp int) bool { - switch op { - case syntax.EQL: - return cmp == 0 - case syntax.NEQ: - return cmp != 0 - case syntax.LE: - return cmp <= 0 - case syntax.LT: - return cmp < 0 - case syntax.GE: - return cmp >= 0 - case syntax.GT: - return cmp > 0 - } - panic(op) -} - -func b2i(b bool) int { - if b { - return 1 - } else { - return 0 - } -} - -// Len returns the length of a string or sequence value, -// and -1 for all others. -// -// Warning: Len(x) >= 0 does not imply Iterate(x) != nil. -// A string has a known length but is not directly iterable. -func Len(x Value) int { - switch x := x.(type) { - case String: - return x.Len() - case Sequence: - return x.Len() - } - return -1 -} - -// Iterate return a new iterator for the value if iterable, nil otherwise. -// If the result is non-nil, the caller must call Done when finished with it. -// -// Warning: Iterate(x) != nil does not imply Len(x) >= 0. -// Some iterables may have unknown length. -func Iterate(x Value) Iterator { - if x, ok := x.(Iterable); ok { - return x.Iterate() - } - return nil -} diff --git a/src/vendor/go.starlark.net/starlarkstruct/module.go b/src/vendor/go.starlark.net/starlarkstruct/module.go deleted file mode 100644 index 735c98ae3..000000000 --- a/src/vendor/go.starlark.net/starlarkstruct/module.go +++ /dev/null @@ -1,43 +0,0 @@ -package starlarkstruct - -import ( - "fmt" - - "go.starlark.net/starlark" -) - -// A Module is a named collection of values, -// typically a suite of functions imported by a load statement. -// -// It differs from Struct primarily in that its string representation -// does not enumerate its fields. 
-type Module struct { - Name string - Members starlark.StringDict -} - -var _ starlark.HasAttrs = (*Module)(nil) - -func (m *Module) Attr(name string) (starlark.Value, error) { return m.Members[name], nil } -func (m *Module) AttrNames() []string { return m.Members.Keys() } -func (m *Module) Freeze() { m.Members.Freeze() } -func (m *Module) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable: %s", m.Type()) } -func (m *Module) String() string { return fmt.Sprintf("", m.Name) } -func (m *Module) Truth() starlark.Bool { return true } -func (m *Module) Type() string { return "module" } - -// MakeModule may be used as the implementation of a Starlark built-in -// function, module(name, **kwargs). It returns a new module with the -// specified name and members. -func MakeModule(thread *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - var name string - if err := starlark.UnpackPositionalArgs(b.Name(), args, nil, 1, &name); err != nil { - return nil, err - } - members := make(starlark.StringDict, len(kwargs)) - for _, kwarg := range kwargs { - k := string(kwarg[0].(starlark.String)) - members[k] = kwarg[1] - } - return &Module{name, members}, nil -} diff --git a/src/vendor/go.starlark.net/starlarkstruct/struct.go b/src/vendor/go.starlark.net/starlarkstruct/struct.go deleted file mode 100644 index 1982cc085..000000000 --- a/src/vendor/go.starlark.net/starlarkstruct/struct.go +++ /dev/null @@ -1,281 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package starlarkstruct defines the Starlark types 'struct' and -// 'module', both optional language extensions. -// -package starlarkstruct // import "go.starlark.net/starlarkstruct" - -// It is tempting to introduce a variant of Struct that is a wrapper -// around a Go struct value, for stronger typing guarantees and more -// efficient and convenient field lookup. However: -// 1) all fields of Starlark structs are optional, so we cannot represent -// them using more specific types such as String, Int, *Depset, and -// *File, as such types give no way to represent missing fields. -// 2) the efficiency gain of direct struct field access is rather -// marginal: finding the index of a field by binary searching on the -// sorted list of field names is quite fast compared to the other -// overheads. -// 3) the gains in compactness and spatial locality are also rather -// marginal: the array behind the []entry slice is (due to field name -// strings) only a factor of 2 larger than the corresponding Go struct -// would be, and, like the Go struct, requires only a single allocation. - -import ( - "fmt" - "sort" - "strings" - - "go.starlark.net/starlark" - "go.starlark.net/syntax" -) - -// Make is the implementation of a built-in function that instantiates -// an immutable struct from the specified keyword arguments. 
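// Illustrative sketch (not part of this patch or of the vendored file):
// predeclaring a Module, as defined above, to group builtins under a dotted
// namespace. The module name "mathx", its member, and the use of
// starlark.ExecFile are assumptions for the example.
package main

import (
	"fmt"
	"log"

	"go.starlark.net/starlark"
	"go.starlark.net/starlarkstruct"
)

func main() {
	mathx := &starlarkstruct.Module{
		Name: "mathx",
		Members: starlark.StringDict{
			"square": starlark.NewBuiltin("square", func(_ *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
				var n int
				if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &n); err != nil {
					return nil, err
				}
				return starlark.MakeInt(n * n), nil
			}),
		},
	}

	thread := &starlark.Thread{Name: "main"}
	globals, err := starlark.ExecFile(thread, "demo.star", `y = mathx.square(7)`,
		starlark.StringDict{"mathx": mathx})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(globals["y"]) // 49
}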
-// -// An application can add 'struct' to the Starlark environment like so: -// -// globals := starlark.StringDict{ -// "struct": starlark.NewBuiltin("struct", starlarkstruct.Make), -// } -// -func Make(_ *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - if len(args) > 0 { - return nil, fmt.Errorf("struct: unexpected positional arguments") - } - return FromKeywords(Default, kwargs), nil -} - -// FromKeywords returns a new struct instance whose fields are specified by the -// key/value pairs in kwargs. (Each kwargs[i][0] must be a starlark.String.) -func FromKeywords(constructor starlark.Value, kwargs []starlark.Tuple) *Struct { - if constructor == nil { - panic("nil constructor") - } - s := &Struct{ - constructor: constructor, - entries: make(entries, 0, len(kwargs)), - } - for _, kwarg := range kwargs { - k := string(kwarg[0].(starlark.String)) - v := kwarg[1] - s.entries = append(s.entries, entry{k, v}) - } - sort.Sort(s.entries) - return s -} - -// FromStringDict returns a whose elements are those of d. -// The constructor parameter specifies the constructor; use Default for an ordinary struct. -func FromStringDict(constructor starlark.Value, d starlark.StringDict) *Struct { - if constructor == nil { - panic("nil constructor") - } - s := &Struct{ - constructor: constructor, - entries: make(entries, 0, len(d)), - } - for k, v := range d { - s.entries = append(s.entries, entry{k, v}) - } - sort.Sort(s.entries) - return s -} - -// Struct is an immutable Starlark type that maps field names to values. -// It is not iterable and does not support len. -// -// A struct has a constructor, a distinct value that identifies a class -// of structs, and which appears in the struct's string representation. -// -// Operations such as x+y fail if the constructors of the two operands -// are not equal. -// -// The default constructor, Default, is the string "struct", but -// clients may wish to 'brand' structs for their own purposes. -// The constructor value appears in the printed form of the value, -// and is accessible using the Constructor method. -// -// Use Attr to access its fields and AttrNames to enumerate them. -type Struct struct { - constructor starlark.Value - entries entries // sorted by name -} - -// Default is the default constructor for structs. -// It is merely the string "struct". -const Default = starlark.String("struct") - -type entries []entry - -func (a entries) Len() int { return len(a) } -func (a entries) Less(i, j int) bool { return a[i].name < a[j].name } -func (a entries) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type entry struct { - name string - value starlark.Value -} - -var ( - _ starlark.HasAttrs = (*Struct)(nil) - _ starlark.HasBinary = (*Struct)(nil) -) - -// ToStringDict adds a name/value entry to d for each field of the struct. -func (s *Struct) ToStringDict(d starlark.StringDict) { - for _, e := range s.entries { - d[e.name] = e.value - } -} - -func (s *Struct) String() string { - buf := new(strings.Builder) - if s.constructor == Default { - // NB: The Java implementation always prints struct - // even for Bazel provider instances. 
- buf.WriteString("struct") // avoid String()'s quotation - } else { - buf.WriteString(s.constructor.String()) - } - buf.WriteByte('(') - for i, e := range s.entries { - if i > 0 { - buf.WriteString(", ") - } - buf.WriteString(e.name) - buf.WriteString(" = ") - buf.WriteString(e.value.String()) - } - buf.WriteByte(')') - return buf.String() -} - -// Constructor returns the constructor used to create this struct. -func (s *Struct) Constructor() starlark.Value { return s.constructor } - -func (s *Struct) Type() string { return "struct" } -func (s *Struct) Truth() starlark.Bool { return true } // even when empty -func (s *Struct) Hash() (uint32, error) { - // Same algorithm as Tuple.hash, but with different primes. - var x, m uint32 = 8731, 9839 - for _, e := range s.entries { - namehash, _ := starlark.String(e.name).Hash() - x = x ^ 3*namehash - y, err := e.value.Hash() - if err != nil { - return 0, err - } - x = x ^ y*m - m += 7349 - } - return x, nil -} -func (s *Struct) Freeze() { - for _, e := range s.entries { - e.value.Freeze() - } -} - -func (x *Struct) Binary(op syntax.Token, y starlark.Value, side starlark.Side) (starlark.Value, error) { - if y, ok := y.(*Struct); ok && op == syntax.PLUS { - if side == starlark.Right { - x, y = y, x - } - - if eq, err := starlark.Equal(x.constructor, y.constructor); err != nil { - return nil, fmt.Errorf("in %s + %s: error comparing constructors: %v", - x.constructor, y.constructor, err) - } else if !eq { - return nil, fmt.Errorf("cannot add structs of different constructors: %s + %s", - x.constructor, y.constructor) - } - - z := make(starlark.StringDict, x.len()+y.len()) - for _, e := range x.entries { - z[e.name] = e.value - } - for _, e := range y.entries { - z[e.name] = e.value - } - - return FromStringDict(x.constructor, z), nil - } - return nil, nil // unhandled -} - -// Attr returns the value of the specified field. -func (s *Struct) Attr(name string) (starlark.Value, error) { - // Binary search the entries. - // This implementation is a specialization of - // sort.Search that avoids dynamic dispatch. - n := len(s.entries) - i, j := 0, n - for i < j { - h := int(uint(i+j) >> 1) - if s.entries[h].name < name { - i = h + 1 - } else { - j = h - } - } - if i < n && s.entries[i].name == name { - return s.entries[i].value, nil - } - - var ctor string - if s.constructor != Default { - ctor = s.constructor.String() + " " - } - return nil, starlark.NoSuchAttrError( - fmt.Sprintf("%sstruct has no .%s attribute", ctor, name)) -} - -func (s *Struct) len() int { return len(s.entries) } - -// AttrNames returns a new sorted list of the struct fields. 
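// Illustrative sketch (not part of this patch or of the vendored file):
// constructing a struct value from Go with FromStringDict and reading a field
// back through Attr, mirroring what a script would build as struct(x=1, y=2).
package main

import (
	"fmt"
	"log"

	"go.starlark.net/starlark"
	"go.starlark.net/starlarkstruct"
)

func main() {
	s := starlarkstruct.FromStringDict(starlarkstruct.Default, starlark.StringDict{
		"x": starlark.MakeInt(1),
		"y": starlark.MakeInt(2),
	})
	fmt.Println(s) // struct(x = 1, y = 2)

	v, err := s.Attr("x")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v) // 1

	// A missing field yields a NoSuchAttrError, which the runtime may extend
	// with a spelling suggestion.
	if _, err := s.Attr("z"); err != nil {
		fmt.Println(err) // struct has no .z attribute
	}
}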
-func (s *Struct) AttrNames() []string { - names := make([]string, len(s.entries)) - for i, e := range s.entries { - names[i] = e.name - } - return names -} - -func (x *Struct) CompareSameType(op syntax.Token, y_ starlark.Value, depth int) (bool, error) { - y := y_.(*Struct) - switch op { - case syntax.EQL: - return structsEqual(x, y, depth) - case syntax.NEQ: - eq, err := structsEqual(x, y, depth) - return !eq, err - default: - return false, fmt.Errorf("%s %s %s not implemented", x.Type(), op, y.Type()) - } -} - -func structsEqual(x, y *Struct, depth int) (bool, error) { - if x.len() != y.len() { - return false, nil - } - - if eq, err := starlark.Equal(x.constructor, y.constructor); err != nil { - return false, fmt.Errorf("error comparing struct constructors %v and %v: %v", - x.constructor, y.constructor, err) - } else if !eq { - return false, nil - } - - for i, n := 0, x.len(); i < n; i++ { - if x.entries[i].name != y.entries[i].name { - return false, nil - } else if eq, err := starlark.EqualDepth(x.entries[i].value, y.entries[i].value, depth-1); err != nil { - return false, err - } else if !eq { - return false, nil - } - } - return true, nil -} diff --git a/src/vendor/go.starlark.net/syntax/grammar.txt b/src/vendor/go.starlark.net/syntax/grammar.txt deleted file mode 100644 index 7f5dfc811..000000000 --- a/src/vendor/go.starlark.net/syntax/grammar.txt +++ /dev/null @@ -1,129 +0,0 @@ - -Grammar of Starlark -================== - -File = {Statement | newline} eof . - -Statement = DefStmt | IfStmt | ForStmt | WhileStmt | SimpleStmt . - -DefStmt = 'def' identifier '(' [Parameters [',']] ')' ':' Suite . - -Parameters = Parameter {',' Parameter}. - -Parameter = identifier | identifier '=' Test | '*' | '*' identifier | '**' identifier . - -IfStmt = 'if' Test ':' Suite {'elif' Test ':' Suite} ['else' ':' Suite] . - -ForStmt = 'for' LoopVariables 'in' Expression ':' Suite . - -WhileStmt = 'while' Test ':' Suite . - -Suite = [newline indent {Statement} outdent] | SimpleStmt . - -SimpleStmt = SmallStmt {';' SmallStmt} [';'] '\n' . -# NOTE: '\n' optional at EOF - -SmallStmt = ReturnStmt - | BreakStmt | ContinueStmt | PassStmt - | AssignStmt - | ExprStmt - | LoadStmt - . - -ReturnStmt = 'return' [Expression] . -BreakStmt = 'break' . -ContinueStmt = 'continue' . -PassStmt = 'pass' . -AssignStmt = Expression ('=' | '+=' | '-=' | '*=' | '/=' | '//=' | '%=' | '&=' | '|=' | '^=' | '<<=' | '>>=') Expression . -ExprStmt = Expression . - -LoadStmt = 'load' '(' string {',' [identifier '='] string} [','] ')' . - -Test = LambdaExpr - | IfExpr - | PrimaryExpr - | UnaryExpr - | BinaryExpr - . - -LambdaExpr = 'lambda' [Parameters] ':' Test . - -IfExpr = Test 'if' Test 'else' Test . - -PrimaryExpr = Operand - | PrimaryExpr DotSuffix - | PrimaryExpr CallSuffix - | PrimaryExpr SliceSuffix - . - -Operand = identifier - | int | float | string - | ListExpr | ListComp - | DictExpr | DictComp - | '(' [Expression [',']] ')' - | ('-' | '+') PrimaryExpr - . - -DotSuffix = '.' identifier . -CallSuffix = '(' [Arguments [',']] ')' . -SliceSuffix = '[' [Expression] [':' Test [':' Test]] ']' . - -Arguments = Argument {',' Argument} . -Argument = Test | identifier '=' Test | '*' Test | '**' Test . - -ListExpr = '[' [Expression [',']] ']' . -ListComp = '[' Test {CompClause} ']'. - -DictExpr = '{' [Entries [',']] '}' . -DictComp = '{' Entry {CompClause} '}' . -Entries = Entry {',' Entry} . -Entry = Test ':' Test . - -CompClause = 'for' LoopVariables 'in' Test | 'if' Test . - -UnaryExpr = 'not' Test . 
- -BinaryExpr = Test {Binop Test} . - -Binop = 'or' - | 'and' - | '==' | '!=' | '<' | '>' | '<=' | '>=' | 'in' | 'not' 'in' - | '|' - | '^' - | '&' - | '-' | '+' - | '*' | '%' | '/' | '//' - . - -Expression = Test {',' Test} . -# NOTE: trailing comma permitted only when within [...] or (...). - -LoopVariables = PrimaryExpr {',' PrimaryExpr} . - - -# Notation (similar to Go spec): -- lowercase and 'quoted' items are lexical tokens. -- Capitalized names denote grammar productions. -- (...) implies grouping -- x | y means either x or y. -- [x] means x is optional -- {x} means x is repeated zero or more times -- The end of each declaration is marked with a period. - -# Tokens -- spaces: newline, eof, indent, outdent. -- identifier. -- literals: string, int, float. -- plus all quoted tokens such as '+=', 'return'. - -# Notes: -- Ambiguity is resolved using operator precedence. -- The grammar does not enforce the legal order of params and args, - nor that the first compclause must be a 'for'. - -TODO: -- explain how the lexer generates indent, outdent, and newline tokens. -- why is unary NOT separated from unary - and +? -- the grammar is (mostly) in LL(1) style so, for example, - dot expressions are formed suffixes, not complete expressions, - which makes the spec harder to read. Reorganize into non-LL(1) form? diff --git a/src/vendor/go.starlark.net/syntax/parse.go b/src/vendor/go.starlark.net/syntax/parse.go deleted file mode 100644 index 0281e4b87..000000000 --- a/src/vendor/go.starlark.net/syntax/parse.go +++ /dev/null @@ -1,1029 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package syntax - -// This file defines a recursive-descent parser for Starlark. -// The LL(1) grammar of Starlark and the names of many productions follow Python 2.7. -// -// TODO(adonovan): use syntax.Error more systematically throughout the -// package. Verify that error positions are correct using the -// chunkedfile mechanism. - -import "log" - -// Enable this flag to print the token stream and log.Fatal on the first error. -const debug = false - -// A Mode value is a set of flags (or 0) that controls optional parser functionality. -type Mode uint - -const ( - RetainComments Mode = 1 << iota // retain comments in AST; see Node.Comments -) - -// Parse parses the input data and returns the corresponding parse tree. -// -// If src != nil, ParseFile parses the source from src and the filename -// is only used when recording position information. -// The type of the argument for the src parameter must be string, -// []byte, or io.Reader. -// If src == nil, ParseFile parses the file specified by filename. -func Parse(filename string, src interface{}, mode Mode) (f *File, err error) { - in, err := newScanner(filename, src, mode&RetainComments != 0) - if err != nil { - return nil, err - } - p := parser{in: in} - defer p.in.recover(&err) - - p.nextToken() // read first lookahead token - f = p.parseFile() - if f != nil { - f.Path = filename - } - p.assignComments(f) - return f, nil -} - -// ParseCompoundStmt parses a single compound statement: -// a blank line, a def, for, while, or if statement, or a -// semicolon-separated list of simple statements followed -// by a newline. These are the units on which the REPL operates. -// ParseCompoundStmt does not consume any following input. -// The parser calls the readline function each -// time it needs a new line of input. 
-func ParseCompoundStmt(filename string, readline func() ([]byte, error)) (f *File, err error) { - in, err := newScanner(filename, readline, false) - if err != nil { - return nil, err - } - - p := parser{in: in} - defer p.in.recover(&err) - - p.nextToken() // read first lookahead token - - var stmts []Stmt - switch p.tok { - case DEF, IF, FOR, WHILE: - stmts = p.parseStmt(stmts) - case NEWLINE: - // blank line - default: - stmts = p.parseSimpleStmt(stmts, false) - // Require but don't consume newline, to avoid blocking again. - if p.tok != NEWLINE { - p.in.errorf(p.in.pos, "invalid syntax") - } - } - - return &File{Path: filename, Stmts: stmts}, nil -} - -// ParseExpr parses a Starlark expression. -// A comma-separated list of expressions is parsed as a tuple. -// See Parse for explanation of parameters. -func ParseExpr(filename string, src interface{}, mode Mode) (expr Expr, err error) { - in, err := newScanner(filename, src, mode&RetainComments != 0) - if err != nil { - return nil, err - } - p := parser{in: in} - defer p.in.recover(&err) - - p.nextToken() // read first lookahead token - - // Use parseExpr, not parseTest, to permit an unparenthesized tuple. - expr = p.parseExpr(false) - - // A following newline (e.g. "f()\n") appears outside any brackets, - // on a non-blank line, and thus results in a NEWLINE token. - if p.tok == NEWLINE { - p.nextToken() - } - - if p.tok != EOF { - p.in.errorf(p.in.pos, "got %#v after expression, want EOF", p.tok) - } - p.assignComments(expr) - return expr, nil -} - -type parser struct { - in *scanner - tok Token - tokval tokenValue -} - -// nextToken advances the scanner and returns the position of the -// previous token. -func (p *parser) nextToken() Position { - oldpos := p.tokval.pos - p.tok = p.in.nextToken(&p.tokval) - // enable to see the token stream - if debug { - log.Printf("nextToken: %-20s%+v\n", p.tok, p.tokval.pos) - } - return oldpos -} - -// file_input = (NEWLINE | stmt)* EOF -func (p *parser) parseFile() *File { - var stmts []Stmt - for p.tok != EOF { - if p.tok == NEWLINE { - p.nextToken() - continue - } - stmts = p.parseStmt(stmts) - } - return &File{Stmts: stmts} -} - -func (p *parser) parseStmt(stmts []Stmt) []Stmt { - if p.tok == DEF { - return append(stmts, p.parseDefStmt()) - } else if p.tok == IF { - return append(stmts, p.parseIfStmt()) - } else if p.tok == FOR { - return append(stmts, p.parseForStmt()) - } else if p.tok == WHILE { - return append(stmts, p.parseWhileStmt()) - } - return p.parseSimpleStmt(stmts, true) -} - -func (p *parser) parseDefStmt() Stmt { - defpos := p.nextToken() // consume DEF - id := p.parseIdent() - p.consume(LPAREN) - params := p.parseParams() - p.consume(RPAREN) - p.consume(COLON) - body := p.parseSuite() - return &DefStmt{ - Def: defpos, - Name: id, - Params: params, - Body: body, - } -} - -func (p *parser) parseIfStmt() Stmt { - ifpos := p.nextToken() // consume IF - cond := p.parseTest() - p.consume(COLON) - body := p.parseSuite() - ifStmt := &IfStmt{ - If: ifpos, - Cond: cond, - True: body, - } - tail := ifStmt - for p.tok == ELIF { - elifpos := p.nextToken() // consume ELIF - cond := p.parseTest() - p.consume(COLON) - body := p.parseSuite() - elif := &IfStmt{ - If: elifpos, - Cond: cond, - True: body, - } - tail.ElsePos = elifpos - tail.False = []Stmt{elif} - tail = elif - } - if p.tok == ELSE { - tail.ElsePos = p.nextToken() // consume ELSE - p.consume(COLON) - tail.False = p.parseSuite() - } - return ifStmt -} - -func (p *parser) parseForStmt() Stmt { - forpos := p.nextToken() // consume FOR 
- vars := p.parseForLoopVariables() - p.consume(IN) - x := p.parseExpr(false) - p.consume(COLON) - body := p.parseSuite() - return &ForStmt{ - For: forpos, - Vars: vars, - X: x, - Body: body, - } -} - -func (p *parser) parseWhileStmt() Stmt { - whilepos := p.nextToken() // consume WHILE - cond := p.parseTest() - p.consume(COLON) - body := p.parseSuite() - return &WhileStmt{ - While: whilepos, - Cond: cond, - Body: body, - } -} - -// Equivalent to 'exprlist' production in Python grammar. -// -// loop_variables = primary_with_suffix (COMMA primary_with_suffix)* COMMA? -func (p *parser) parseForLoopVariables() Expr { - // Avoid parseExpr because it would consume the IN token - // following x in "for x in y: ...". - v := p.parsePrimaryWithSuffix() - if p.tok != COMMA { - return v - } - - list := []Expr{v} - for p.tok == COMMA { - p.nextToken() - if terminatesExprList(p.tok) { - break - } - list = append(list, p.parsePrimaryWithSuffix()) - } - return &TupleExpr{List: list} -} - -// simple_stmt = small_stmt (SEMI small_stmt)* SEMI? NEWLINE -// In REPL mode, it does not consume the NEWLINE. -func (p *parser) parseSimpleStmt(stmts []Stmt, consumeNL bool) []Stmt { - for { - stmts = append(stmts, p.parseSmallStmt()) - if p.tok != SEMI { - break - } - p.nextToken() // consume SEMI - if p.tok == NEWLINE || p.tok == EOF { - break - } - } - // EOF without NEWLINE occurs in `if x: pass`, for example. - if p.tok != EOF && consumeNL { - p.consume(NEWLINE) - } - - return stmts -} - -// small_stmt = RETURN expr? -// | PASS | BREAK | CONTINUE -// | LOAD ... -// | expr ('=' | '+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' | '<<=' | '>>=') expr // assign -// | expr -func (p *parser) parseSmallStmt() Stmt { - switch p.tok { - case RETURN: - pos := p.nextToken() // consume RETURN - var result Expr - if p.tok != EOF && p.tok != NEWLINE && p.tok != SEMI { - result = p.parseExpr(false) - } - return &ReturnStmt{Return: pos, Result: result} - - case BREAK, CONTINUE, PASS: - tok := p.tok - pos := p.nextToken() // consume it - return &BranchStmt{Token: tok, TokenPos: pos} - - case LOAD: - return p.parseLoadStmt() - } - - // Assignment - x := p.parseExpr(false) - switch p.tok { - case EQ, PLUS_EQ, MINUS_EQ, STAR_EQ, SLASH_EQ, SLASHSLASH_EQ, PERCENT_EQ, AMP_EQ, PIPE_EQ, CIRCUMFLEX_EQ, LTLT_EQ, GTGT_EQ: - op := p.tok - pos := p.nextToken() // consume op - rhs := p.parseExpr(false) - return &AssignStmt{OpPos: pos, Op: op, LHS: x, RHS: rhs} - } - - // Expression statement (e.g. function call, doc string). - return &ExprStmt{X: x} -} - -// stmt = LOAD '(' STRING {',' (IDENT '=')? STRING} [','] ')' -func (p *parser) parseLoadStmt() *LoadStmt { - loadPos := p.nextToken() // consume LOAD - lparen := p.consume(LPAREN) - - if p.tok != STRING { - p.in.errorf(p.in.pos, "first operand of load statement must be a string literal") - } - module := p.parsePrimary().(*Literal) - - var from, to []*Ident - for p.tok != RPAREN && p.tok != EOF { - p.consume(COMMA) - if p.tok == RPAREN { - break // allow trailing comma - } - switch p.tok { - case STRING: - // load("module", "id") - // To name is same as original. 
- lit := p.parsePrimary().(*Literal) - id := &Ident{ - NamePos: lit.TokenPos.add(`"`), - Name: lit.Value.(string), - } - to = append(to, id) - from = append(from, id) - - case IDENT: - // load("module", to="from") - id := p.parseIdent() - to = append(to, id) - if p.tok != EQ { - p.in.errorf(p.in.pos, `load operand must be "%[1]s" or %[1]s="originalname" (want '=' after %[1]s)`, id.Name) - } - p.consume(EQ) - if p.tok != STRING { - p.in.errorf(p.in.pos, `original name of loaded symbol must be quoted: %s="originalname"`, id.Name) - } - lit := p.parsePrimary().(*Literal) - from = append(from, &Ident{ - NamePos: lit.TokenPos.add(`"`), - Name: lit.Value.(string), - }) - - case RPAREN: - p.in.errorf(p.in.pos, "trailing comma in load statement") - - default: - p.in.errorf(p.in.pos, `load operand must be "name" or localname="name" (got %#v)`, p.tok) - } - } - rparen := p.consume(RPAREN) - - if len(to) == 0 { - p.in.errorf(lparen, "load statement must import at least 1 symbol") - } - return &LoadStmt{ - Load: loadPos, - Module: module, - To: to, - From: from, - Rparen: rparen, - } -} - -// suite is typically what follows a COLON (e.g. after DEF or FOR). -// suite = simple_stmt | NEWLINE INDENT stmt+ OUTDENT -func (p *parser) parseSuite() []Stmt { - if p.tok == NEWLINE { - p.nextToken() // consume NEWLINE - p.consume(INDENT) - var stmts []Stmt - for p.tok != OUTDENT && p.tok != EOF { - stmts = p.parseStmt(stmts) - } - p.consume(OUTDENT) - return stmts - } - - return p.parseSimpleStmt(nil, true) -} - -func (p *parser) parseIdent() *Ident { - if p.tok != IDENT { - p.in.error(p.in.pos, "not an identifier") - } - id := &Ident{ - NamePos: p.tokval.pos, - Name: p.tokval.raw, - } - p.nextToken() - return id -} - -func (p *parser) consume(t Token) Position { - if p.tok != t { - p.in.errorf(p.in.pos, "got %#v, want %#v", p.tok, t) - } - return p.nextToken() -} - -// params = (param COMMA)* param COMMA? -// | -// -// param = IDENT -// | IDENT EQ test -// | STAR -// | STAR IDENT -// | STARSTAR IDENT -// -// parseParams parses a parameter list. The resulting expressions are of the form: -// -// *Ident x -// *Binary{Op: EQ, X: *Ident, Y: Expr} x=y -// *Unary{Op: STAR} * -// *Unary{Op: STAR, X: *Ident} *args -// *Unary{Op: STARSTAR, X: *Ident} **kwargs -func (p *parser) parseParams() []Expr { - var params []Expr - for p.tok != RPAREN && p.tok != COLON && p.tok != EOF { - if len(params) > 0 { - p.consume(COMMA) - } - if p.tok == RPAREN { - break - } - - // * or *args or **kwargs - if p.tok == STAR || p.tok == STARSTAR { - op := p.tok - pos := p.nextToken() - var x Expr - if op == STARSTAR || p.tok == IDENT { - x = p.parseIdent() - } - params = append(params, &UnaryExpr{ - OpPos: pos, - Op: op, - X: x, - }) - continue - } - - // IDENT - // IDENT = test - id := p.parseIdent() - if p.tok == EQ { // default value - eq := p.nextToken() - dflt := p.parseTest() - params = append(params, &BinaryExpr{ - X: id, - OpPos: eq, - Op: EQ, - Y: dflt, - }) - continue - } - - params = append(params, id) - } - return params -} - -// parseExpr parses an expression, possible consisting of a -// comma-separated list of 'test' expressions. -// -// In many cases we must use parseTest to avoid ambiguity such as -// f(x, y) vs. f((x, y)). -func (p *parser) parseExpr(inParens bool) Expr { - x := p.parseTest() - if p.tok != COMMA { - return x - } - - // tuple - exprs := p.parseExprs([]Expr{x}, inParens) - return &TupleExpr{List: exprs} -} - -// parseExprs parses a comma-separated list of expressions, starting with the comma. 
-// It is used to parse tuples and list elements. -// expr_list = (',' expr)* ','? -func (p *parser) parseExprs(exprs []Expr, allowTrailingComma bool) []Expr { - for p.tok == COMMA { - pos := p.nextToken() - if terminatesExprList(p.tok) { - if !allowTrailingComma { - p.in.error(pos, "unparenthesized tuple with trailing comma") - } - break - } - exprs = append(exprs, p.parseTest()) - } - return exprs -} - -// parseTest parses a 'test', a single-component expression. -func (p *parser) parseTest() Expr { - if p.tok == LAMBDA { - return p.parseLambda(true) - } - - x := p.parseTestPrec(0) - - // conditional expression (t IF cond ELSE f) - if p.tok == IF { - ifpos := p.nextToken() - cond := p.parseTestPrec(0) - if p.tok != ELSE { - p.in.error(ifpos, "conditional expression without else clause") - } - elsepos := p.nextToken() - else_ := p.parseTest() - return &CondExpr{If: ifpos, Cond: cond, True: x, ElsePos: elsepos, False: else_} - } - - return x -} - -// parseTestNoCond parses a a single-component expression without -// consuming a trailing 'if expr else expr'. -func (p *parser) parseTestNoCond() Expr { - if p.tok == LAMBDA { - return p.parseLambda(false) - } - return p.parseTestPrec(0) -} - -// parseLambda parses a lambda expression. -// The allowCond flag allows the body to be an 'a if b else c' conditional. -func (p *parser) parseLambda(allowCond bool) Expr { - lambda := p.nextToken() - var params []Expr - if p.tok != COLON { - params = p.parseParams() - } - p.consume(COLON) - - var body Expr - if allowCond { - body = p.parseTest() - } else { - body = p.parseTestNoCond() - } - - return &LambdaExpr{ - Lambda: lambda, - Params: params, - Body: body, - } -} - -func (p *parser) parseTestPrec(prec int) Expr { - if prec >= len(preclevels) { - return p.parsePrimaryWithSuffix() - } - - // expr = NOT expr - if p.tok == NOT && prec == int(precedence[NOT]) { - pos := p.nextToken() - x := p.parseTestPrec(prec) - return &UnaryExpr{ - OpPos: pos, - Op: NOT, - X: x, - } - } - - return p.parseBinopExpr(prec) -} - -// expr = test (OP test)* -// Uses precedence climbing; see http://www.engr.mun.ca/~theo/Misc/exp_parsing.htm#climbing. -func (p *parser) parseBinopExpr(prec int) Expr { - x := p.parseTestPrec(prec + 1) - for first := true; ; first = false { - if p.tok == NOT { - p.nextToken() // consume NOT - // In this context, NOT must be followed by IN. - // Replace NOT IN by a single NOT_IN token. - if p.tok != IN { - p.in.errorf(p.in.pos, "got %#v, want in", p.tok) - } - p.tok = NOT_IN - } - - // Binary operator of specified precedence? - opprec := int(precedence[p.tok]) - if opprec < prec { - return x - } - - // Comparisons are non-associative. - if !first && opprec == int(precedence[EQL]) { - p.in.errorf(p.in.pos, "%s does not associate with %s (use parens)", - x.(*BinaryExpr).Op, p.tok) - } - - op := p.tok - pos := p.nextToken() - y := p.parseTestPrec(opprec + 1) - x = &BinaryExpr{OpPos: pos, Op: op, X: x, Y: y} - } -} - -// precedence maps each operator to its precedence (0-7), or -1 for other tokens. -var precedence [maxToken]int8 - -// preclevels groups operators of equal precedence. -// Comparisons are nonassociative; other binary operators associate to the left. -// Unary MINUS, unary PLUS, and TILDE have higher precedence so are handled in parsePrimary. 
-// See https://github.com/google/starlark-go/blob/master/doc/spec.md#binary-operators -var preclevels = [...][]Token{ - {OR}, // or - {AND}, // and - {NOT}, // not (unary) - {EQL, NEQ, LT, GT, LE, GE, IN, NOT_IN}, // == != < > <= >= in not in - {PIPE}, // | - {CIRCUMFLEX}, // ^ - {AMP}, // & - {LTLT, GTGT}, // << >> - {MINUS, PLUS}, // - - {STAR, PERCENT, SLASH, SLASHSLASH}, // * % / // -} - -func init() { - // populate precedence table - for i := range precedence { - precedence[i] = -1 - } - for level, tokens := range preclevels { - for _, tok := range tokens { - precedence[tok] = int8(level) - } - } -} - -// primary_with_suffix = primary -// | primary '.' IDENT -// | primary slice_suffix -// | primary call_suffix -func (p *parser) parsePrimaryWithSuffix() Expr { - x := p.parsePrimary() - for { - switch p.tok { - case DOT: - dot := p.nextToken() - id := p.parseIdent() - x = &DotExpr{Dot: dot, X: x, Name: id} - case LBRACK: - x = p.parseSliceSuffix(x) - case LPAREN: - x = p.parseCallSuffix(x) - default: - return x - } - } -} - -// slice_suffix = '[' expr? ':' expr? ':' expr? ']' -func (p *parser) parseSliceSuffix(x Expr) Expr { - lbrack := p.nextToken() - var lo, hi, step Expr - if p.tok != COLON { - y := p.parseExpr(false) - - // index x[y] - if p.tok == RBRACK { - rbrack := p.nextToken() - return &IndexExpr{X: x, Lbrack: lbrack, Y: y, Rbrack: rbrack} - } - - lo = y - } - - // slice or substring x[lo:hi:step] - if p.tok == COLON { - p.nextToken() - if p.tok != COLON && p.tok != RBRACK { - hi = p.parseTest() - } - } - if p.tok == COLON { - p.nextToken() - if p.tok != RBRACK { - step = p.parseTest() - } - } - rbrack := p.consume(RBRACK) - return &SliceExpr{X: x, Lbrack: lbrack, Lo: lo, Hi: hi, Step: step, Rbrack: rbrack} -} - -// call_suffix = '(' arg_list? ')' -func (p *parser) parseCallSuffix(fn Expr) Expr { - lparen := p.consume(LPAREN) - var rparen Position - var args []Expr - if p.tok == RPAREN { - rparen = p.nextToken() - } else { - args = p.parseArgs() - rparen = p.consume(RPAREN) - } - return &CallExpr{Fn: fn, Lparen: lparen, Args: args, Rparen: rparen} -} - -// parseArgs parses a list of actual parameter values (arguments). -// It mirrors the structure of parseParams. -// arg_list = ((arg COMMA)* arg COMMA?)? -func (p *parser) parseArgs() []Expr { - var args []Expr - for p.tok != RPAREN && p.tok != EOF { - if len(args) > 0 { - p.consume(COMMA) - } - if p.tok == RPAREN { - break - } - - // *args or **kwargs - if p.tok == STAR || p.tok == STARSTAR { - op := p.tok - pos := p.nextToken() - x := p.parseTest() - args = append(args, &UnaryExpr{ - OpPos: pos, - Op: op, - X: x, - }) - continue - } - - // We use a different strategy from Bazel here to stay within LL(1). - // Instead of looking ahead two tokens (IDENT, EQ) we parse - // 'test = test' then check that the first was an IDENT. - x := p.parseTest() - - if p.tok == EQ { - // name = value - if _, ok := x.(*Ident); !ok { - p.in.errorf(p.in.pos, "keyword argument must have form name=expr") - } - eq := p.nextToken() - y := p.parseTest() - x = &BinaryExpr{ - X: x, - OpPos: eq, - Op: EQ, - Y: y, - } - } - - args = append(args, x) - } - return args -} - -// primary = IDENT -// | INT | FLOAT -// | STRING -// | '[' ... // list literal or comprehension -// | '{' ... // dict literal or comprehension -// | '(' ... 
// tuple or parenthesized expression -// | ('-'|'+'|'~') primary_with_suffix -func (p *parser) parsePrimary() Expr { - switch p.tok { - case IDENT: - return p.parseIdent() - - case INT, FLOAT, STRING: - var val interface{} - tok := p.tok - switch tok { - case INT: - if p.tokval.bigInt != nil { - val = p.tokval.bigInt - } else { - val = p.tokval.int - } - case FLOAT: - val = p.tokval.float - case STRING: - val = p.tokval.string - } - raw := p.tokval.raw - pos := p.nextToken() - return &Literal{Token: tok, TokenPos: pos, Raw: raw, Value: val} - - case LBRACK: - return p.parseList() - - case LBRACE: - return p.parseDict() - - case LPAREN: - lparen := p.nextToken() - if p.tok == RPAREN { - // empty tuple - rparen := p.nextToken() - return &TupleExpr{Lparen: lparen, Rparen: rparen} - } - e := p.parseExpr(true) // allow trailing comma - rparen := p.consume(RPAREN) - return &ParenExpr{ - Lparen: lparen, - X: e, - Rparen: rparen, - } - - case MINUS, PLUS, TILDE: // unary - tok := p.tok - pos := p.nextToken() - x := p.parsePrimaryWithSuffix() - return &UnaryExpr{ - OpPos: pos, - Op: tok, - X: x, - } - } - p.in.errorf(p.in.pos, "got %#v, want primary expression", p.tok) - panic("unreachable") -} - -// list = '[' ']' -// | '[' expr ']' -// | '[' expr expr_list ']' -// | '[' expr (FOR loop_variables IN expr)+ ']' -func (p *parser) parseList() Expr { - lbrack := p.nextToken() - if p.tok == RBRACK { - // empty List - rbrack := p.nextToken() - return &ListExpr{Lbrack: lbrack, Rbrack: rbrack} - } - - x := p.parseTest() - - if p.tok == FOR { - // list comprehension - return p.parseComprehensionSuffix(lbrack, x, RBRACK) - } - - exprs := []Expr{x} - if p.tok == COMMA { - // multi-item list literal - exprs = p.parseExprs(exprs, true) // allow trailing comma - } - - rbrack := p.consume(RBRACK) - return &ListExpr{Lbrack: lbrack, List: exprs, Rbrack: rbrack} -} - -// dict = '{' '}' -// | '{' dict_entry_list '}' -// | '{' dict_entry FOR loop_variables IN expr '}' -func (p *parser) parseDict() Expr { - lbrace := p.nextToken() - if p.tok == RBRACE { - // empty dict - rbrace := p.nextToken() - return &DictExpr{Lbrace: lbrace, Rbrace: rbrace} - } - - x := p.parseDictEntry() - - if p.tok == FOR { - // dict comprehension - return p.parseComprehensionSuffix(lbrace, x, RBRACE) - } - - entries := []Expr{x} - for p.tok == COMMA { - p.nextToken() - if p.tok == RBRACE { - break - } - entries = append(entries, p.parseDictEntry()) - } - - rbrace := p.consume(RBRACE) - return &DictExpr{Lbrace: lbrace, List: entries, Rbrace: rbrace} -} - -// dict_entry = test ':' test -func (p *parser) parseDictEntry() *DictEntry { - k := p.parseTest() - colon := p.consume(COLON) - v := p.parseTest() - return &DictEntry{Key: k, Colon: colon, Value: v} -} - -// comp_suffix = FOR loopvars IN expr comp_suffix -// | IF expr comp_suffix -// | ']' or ')' (end) -// -// There can be multiple FOR/IF clauses; the first is always a FOR. -func (p *parser) parseComprehensionSuffix(lbrace Position, body Expr, endBrace Token) Expr { - var clauses []Node - for p.tok != endBrace { - if p.tok == FOR { - pos := p.nextToken() - vars := p.parseForLoopVariables() - in := p.consume(IN) - // Following Python 3, the operand of IN cannot be: - // - a conditional expression ('x if y else z'), - // due to conflicts in Python grammar - // ('if' is used by the comprehension); - // - a lambda expression - // - an unparenthesized tuple. 
- x := p.parseTestPrec(0) - clauses = append(clauses, &ForClause{For: pos, Vars: vars, In: in, X: x}) - } else if p.tok == IF { - pos := p.nextToken() - cond := p.parseTestNoCond() - clauses = append(clauses, &IfClause{If: pos, Cond: cond}) - } else { - p.in.errorf(p.in.pos, "got %#v, want '%s', for, or if", p.tok, endBrace) - } - } - rbrace := p.nextToken() - - return &Comprehension{ - Curly: endBrace == RBRACE, - Lbrack: lbrace, - Body: body, - Clauses: clauses, - Rbrack: rbrace, - } -} - -func terminatesExprList(tok Token) bool { - switch tok { - case EOF, NEWLINE, EQ, RBRACE, RBRACK, RPAREN, SEMI: - return true - } - return false -} - -// Comment assignment. -// We build two lists of all subnodes, preorder and postorder. -// The preorder list is ordered by start location, with outer nodes first. -// The postorder list is ordered by end location, with outer nodes last. -// We use the preorder list to assign each whole-line comment to the syntax -// immediately following it, and we use the postorder list to assign each -// end-of-line comment to the syntax immediately preceding it. - -// flattenAST returns the list of AST nodes, both in prefix order and in postfix -// order. -func flattenAST(root Node) (pre, post []Node) { - stack := []Node{} - Walk(root, func(n Node) bool { - if n != nil { - pre = append(pre, n) - stack = append(stack, n) - } else { - post = append(post, stack[len(stack)-1]) - stack = stack[:len(stack)-1] - } - return true - }) - return pre, post -} - -// assignComments attaches comments to nearby syntax. -func (p *parser) assignComments(n Node) { - // Leave early if there are no comments - if len(p.in.lineComments)+len(p.in.suffixComments) == 0 { - return - } - - pre, post := flattenAST(n) - - // Assign line comments to syntax immediately following. - line := p.in.lineComments - for _, x := range pre { - start, _ := x.Span() - - switch x.(type) { - case *File: - continue - } - - for len(line) > 0 && !start.isBefore(line[0].Start) { - x.AllocComments() - x.Comments().Before = append(x.Comments().Before, line[0]) - line = line[1:] - } - } - - // Remaining line comments go at end of file. - if len(line) > 0 { - n.AllocComments() - n.Comments().After = append(n.Comments().After, line...) - } - - // Assign suffix comments to syntax immediately before. - suffix := p.in.suffixComments - for i := len(post) - 1; i >= 0; i-- { - x := post[i] - - // Do not assign suffix comments to file - switch x.(type) { - case *File: - continue - } - - _, end := x.Span() - if len(suffix) > 0 && end.isBefore(suffix[len(suffix)-1].Start) { - x.AllocComments() - x.Comments().Suffix = append(x.Comments().Suffix, suffix[len(suffix)-1]) - suffix = suffix[:len(suffix)-1] - } - } -} diff --git a/src/vendor/go.starlark.net/syntax/quote.go b/src/vendor/go.starlark.net/syntax/quote.go deleted file mode 100644 index cc9a8d0ae..000000000 --- a/src/vendor/go.starlark.net/syntax/quote.go +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package syntax - -// Starlark quoted string utilities. - -import ( - "fmt" - "strconv" - "strings" -) - -// unesc maps single-letter chars following \ to their actual values. -var unesc = [256]byte{ - 'a': '\a', - 'b': '\b', - 'f': '\f', - 'n': '\n', - 'r': '\r', - 't': '\t', - 'v': '\v', - '\\': '\\', - '\'': '\'', - '"': '"', -} - -// esc maps escape-worthy bytes to the char that should follow \. 
-var esc = [256]byte{ - '\a': 'a', - '\b': 'b', - '\f': 'f', - '\n': 'n', - '\r': 'r', - '\t': 't', - '\v': 'v', - '\\': '\\', - '\'': '\'', - '"': '"', -} - -// notEsc is a list of characters that can follow a \ in a string value -// without having to escape the \. That is, since ( is in this list, we -// quote the Go string "foo\\(bar" as the Python literal "foo\(bar". -// This really does happen in BUILD files, especially in strings -// being used as shell arguments containing regular expressions. -const notEsc = " !#$%&()*+,-./:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ{|}~" - -// unquote unquotes the quoted string, returning the actual -// string value, whether the original was triple-quoted, and -// an error describing invalid input. -func unquote(quoted string) (s string, triple bool, err error) { - // Check for raw prefix: means don't interpret the inner \. - raw := false - if strings.HasPrefix(quoted, "r") { - raw = true - quoted = quoted[1:] - } - - if len(quoted) < 2 { - err = fmt.Errorf("string literal too short") - return - } - - if quoted[0] != '"' && quoted[0] != '\'' || quoted[0] != quoted[len(quoted)-1] { - err = fmt.Errorf("string literal has invalid quotes") - return - } - - // Check for triple quoted string. - quote := quoted[0] - if len(quoted) >= 6 && quoted[1] == quote && quoted[2] == quote && quoted[:3] == quoted[len(quoted)-3:] { - triple = true - quoted = quoted[3 : len(quoted)-3] - } else { - quoted = quoted[1 : len(quoted)-1] - } - - // Now quoted is the quoted data, but no quotes. - // If we're in raw mode or there are no escapes or - // carriage returns, we're done. - var unquoteChars string - if raw { - unquoteChars = "\r" - } else { - unquoteChars = "\\\r" - } - if !strings.ContainsAny(quoted, unquoteChars) { - s = quoted - return - } - - // Otherwise process quoted string. - // Each iteration processes one escape sequence along with the - // plain text leading up to it. - buf := new(strings.Builder) - for { - // Remove prefix before escape sequence. - i := strings.IndexAny(quoted, unquoteChars) - if i < 0 { - i = len(quoted) - } - buf.WriteString(quoted[:i]) - quoted = quoted[i:] - - if len(quoted) == 0 { - break - } - - // Process carriage return. - if quoted[0] == '\r' { - buf.WriteByte('\n') - if len(quoted) > 1 && quoted[1] == '\n' { - quoted = quoted[2:] - } else { - quoted = quoted[1:] - } - continue - } - - // Process escape sequence. - if len(quoted) == 1 { - err = fmt.Errorf(`truncated escape sequence \`) - return - } - - switch quoted[1] { - default: - // In Python, if \z (for some byte z) is not a known escape sequence - // then it appears as literal text in the string. - buf.WriteString(quoted[:2]) - quoted = quoted[2:] - - case '\n': - // Ignore the escape and the line break. - quoted = quoted[2:] - - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '\'', '"': - // One-char escape - buf.WriteByte(unesc[quoted[1]]) - quoted = quoted[2:] - - case '0', '1', '2', '3', '4', '5', '6', '7': - // Octal escape, up to 3 digits. - n := int(quoted[1] - '0') - quoted = quoted[2:] - for i := 1; i < 3; i++ { - if len(quoted) == 0 || quoted[0] < '0' || '7' < quoted[0] { - break - } - n = n*8 + int(quoted[0]-'0') - quoted = quoted[1:] - } - if n >= 256 { - // NOTE: Python silently discards the high bit, - // so that '\541' == '\141' == 'a'. - // Let's see if we can avoid doing that in BUILD files. - err = fmt.Errorf(`invalid escape sequence \%03o`, n) - return - } - buf.WriteByte(byte(n)) - - case 'x': - // Hexadecimal escape, exactly 2 digits. 
- if len(quoted) < 4 { - err = fmt.Errorf(`truncated escape sequence %s`, quoted) - return - } - n, err1 := strconv.ParseUint(quoted[2:4], 16, 0) - if err1 != nil { - err = fmt.Errorf(`invalid escape sequence %s`, quoted[:4]) - return - } - buf.WriteByte(byte(n)) - quoted = quoted[4:] - } - } - - s = buf.String() - return -} - -// indexByte returns the index of the first instance of b in s, or else -1. -func indexByte(s string, b byte) int { - for i := 0; i < len(s); i++ { - if s[i] == b { - return i - } - } - return -1 -} - -// hex is a list of the hexadecimal digits, for use in quoting. -// We always print lower-case hexadecimal. -const hex = "0123456789abcdef" - -// quote returns the quoted form of the string value "x". -// If triple is true, quote uses the triple-quoted form """x""". -func quote(unquoted string, triple bool) string { - q := `"` - if triple { - q = `"""` - } - - buf := new(strings.Builder) - buf.WriteString(q) - - for i := 0; i < len(unquoted); i++ { - c := unquoted[i] - if c == '"' && triple && (i+1 < len(unquoted) && unquoted[i+1] != '"' || i+2 < len(unquoted) && unquoted[i+2] != '"') { - // Can pass up to two quotes through, because they are followed by a non-quote byte. - buf.WriteByte(c) - if i+1 < len(unquoted) && unquoted[i+1] == '"' { - buf.WriteByte(c) - i++ - } - continue - } - if triple && c == '\n' { - // Can allow newline in triple-quoted string. - buf.WriteByte(c) - continue - } - if c == '\'' { - // Can allow ' since we always use ". - buf.WriteByte(c) - continue - } - if c == '\\' { - if i+1 < len(unquoted) && indexByte(notEsc, unquoted[i+1]) >= 0 { - // Can pass \ through when followed by a byte that - // known not to be a valid escape sequence and also - // that does not trigger an escape sequence of its own. - // Use this, because various BUILD files do. - buf.WriteByte('\\') - buf.WriteByte(unquoted[i+1]) - i++ - continue - } - } - if esc[c] != 0 { - buf.WriteByte('\\') - buf.WriteByte(esc[c]) - continue - } - if c < 0x20 || c >= 0x80 { - // BUILD files are supposed to be Latin-1, so escape all control and high bytes. - // I'd prefer to use \x here, but Blaze does not implement - // \x in quoted strings (b/7272572). - buf.WriteByte('\\') - buf.WriteByte(hex[c>>6]) // actually octal but reusing hex digits 0-7. - buf.WriteByte(hex[(c>>3)&7]) - buf.WriteByte(hex[c&7]) - /* - buf.WriteByte('\\') - buf.WriteByte('x') - buf.WriteByte(hex[c>>4]) - buf.WriteByte(hex[c&0xF]) - */ - continue - } - buf.WriteByte(c) - continue - } - - buf.WriteString(q) - return buf.String() -} diff --git a/src/vendor/go.starlark.net/syntax/scan.go b/src/vendor/go.starlark.net/syntax/scan.go deleted file mode 100644 index 51cf8855f..000000000 --- a/src/vendor/go.starlark.net/syntax/scan.go +++ /dev/null @@ -1,1089 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package syntax - -// A lexical scanner for Starlark. - -import ( - "fmt" - "io" - "io/ioutil" - "log" - "math/big" - "os" - "strconv" - "strings" - "unicode" - "unicode/utf8" -) - -// A Token represents a Starlark lexical token. 
-type Token int8 - -const ( - ILLEGAL Token = iota - EOF - - NEWLINE - INDENT - OUTDENT - - // Tokens with values - IDENT // x - INT // 123 - FLOAT // 1.23e45 - STRING // "foo" or 'foo' or '''foo''' or r'foo' or r"foo" - - // Punctuation - PLUS // + - MINUS // - - STAR // * - SLASH // / - SLASHSLASH // // - PERCENT // % - AMP // & - PIPE // | - CIRCUMFLEX // ^ - LTLT // << - GTGT // >> - TILDE // ~ - DOT // . - COMMA // , - EQ // = - SEMI // ; - COLON // : - LPAREN // ( - RPAREN // ) - LBRACK // [ - RBRACK // ] - LBRACE // { - RBRACE // } - LT // < - GT // > - GE // >= - LE // <= - EQL // == - NEQ // != - PLUS_EQ // += (keep order consistent with PLUS..GTGT) - MINUS_EQ // -= - STAR_EQ // *= - SLASH_EQ // /= - SLASHSLASH_EQ // //= - PERCENT_EQ // %= - AMP_EQ // &= - PIPE_EQ // |= - CIRCUMFLEX_EQ // ^= - LTLT_EQ // <<= - GTGT_EQ // >>= - STARSTAR // ** - - // Keywords - AND - BREAK - CONTINUE - DEF - ELIF - ELSE - FOR - IF - IN - LAMBDA - LOAD - NOT - NOT_IN // synthesized by parser from NOT IN - OR - PASS - RETURN - WHILE - - maxToken -) - -func (tok Token) String() string { return tokenNames[tok] } - -// GoString is like String but quotes punctuation tokens. -// Use Sprintf("%#v", tok) when constructing error messages. -func (tok Token) GoString() string { - if tok >= PLUS && tok <= STARSTAR { - return "'" + tokenNames[tok] + "'" - } - return tokenNames[tok] -} - -var tokenNames = [...]string{ - ILLEGAL: "illegal token", - EOF: "end of file", - NEWLINE: "newline", - INDENT: "indent", - OUTDENT: "outdent", - IDENT: "identifier", - INT: "int literal", - FLOAT: "float literal", - STRING: "string literal", - PLUS: "+", - MINUS: "-", - STAR: "*", - SLASH: "/", - SLASHSLASH: "//", - PERCENT: "%", - AMP: "&", - PIPE: "|", - CIRCUMFLEX: "^", - LTLT: "<<", - GTGT: ">>", - TILDE: "~", - DOT: ".", - COMMA: ",", - EQ: "=", - SEMI: ";", - COLON: ":", - LPAREN: "(", - RPAREN: ")", - LBRACK: "[", - RBRACK: "]", - LBRACE: "{", - RBRACE: "}", - LT: "<", - GT: ">", - GE: ">=", - LE: "<=", - EQL: "==", - NEQ: "!=", - PLUS_EQ: "+=", - MINUS_EQ: "-=", - STAR_EQ: "*=", - SLASH_EQ: "/=", - SLASHSLASH_EQ: "//=", - PERCENT_EQ: "%=", - AMP_EQ: "&=", - PIPE_EQ: "|=", - CIRCUMFLEX_EQ: "^=", - LTLT_EQ: "<<=", - GTGT_EQ: ">>=", - STARSTAR: "**", - AND: "and", - BREAK: "break", - CONTINUE: "continue", - DEF: "def", - ELIF: "elif", - ELSE: "else", - FOR: "for", - IF: "if", - IN: "in", - LAMBDA: "lambda", - LOAD: "load", - NOT: "not", - NOT_IN: "not in", - OR: "or", - PASS: "pass", - RETURN: "return", - WHILE: "while", -} - -// A Position describes the location of a rune of input. -type Position struct { - file *string // filename (indirect for compactness) - Line int32 // 1-based line number; 0 if line unknown - Col int32 // 1-based column (rune) number; 0 if column unknown -} - -// IsValid reports whether the position is valid. -func (p Position) IsValid() bool { return p.file != nil } - -// Filename returns the name of the file containing this position. -func (p Position) Filename() string { - if p.file != nil { - return *p.file - } - return "" -} - -// MakePosition returns position with the specified components. -func MakePosition(file *string, line, col int32) Position { return Position{file, line, col} } - -// add returns the position at the end of s, assuming it starts at p. 
-func (p Position) add(s string) Position { - if n := strings.Count(s, "\n"); n > 0 { - p.Line += int32(n) - s = s[strings.LastIndex(s, "\n")+1:] - p.Col = 1 - } - p.Col += int32(utf8.RuneCountInString(s)) - return p -} - -func (p Position) String() string { - file := p.Filename() - if p.Line > 0 { - if p.Col > 0 { - return fmt.Sprintf("%s:%d:%d", file, p.Line, p.Col) - } - return fmt.Sprintf("%s:%d", file, p.Line) - } - return file -} - -func (p Position) isBefore(q Position) bool { - if p.Line != q.Line { - return p.Line < q.Line - } - return p.Col < q.Col -} - -// An scanner represents a single input file being parsed. -type scanner struct { - rest []byte // rest of input (in REPL, a line of input) - token []byte // token being scanned - pos Position // current input position - depth int // nesting of [ ] { } ( ) - indentstk []int // stack of indentation levels - dents int // number of saved INDENT (>0) or OUTDENT (<0) tokens to return - lineStart bool // after NEWLINE; convert spaces to indentation tokens - keepComments bool // accumulate comments in slice - lineComments []Comment // list of full line comments (if keepComments) - suffixComments []Comment // list of suffix comments (if keepComments) - - readline func() ([]byte, error) // read next line of input (REPL only) -} - -func newScanner(filename string, src interface{}, keepComments bool) (*scanner, error) { - sc := &scanner{ - pos: Position{file: &filename, Line: 1, Col: 1}, - indentstk: make([]int, 1, 10), // []int{0} + spare capacity - lineStart: true, - keepComments: keepComments, - } - sc.readline, _ = src.(func() ([]byte, error)) // REPL only - if sc.readline == nil { - data, err := readSource(filename, src) - if err != nil { - return nil, err - } - sc.rest = data - } - return sc, nil -} - -func readSource(filename string, src interface{}) ([]byte, error) { - switch src := src.(type) { - case string: - return []byte(src), nil - case []byte: - return src, nil - case io.Reader: - data, err := ioutil.ReadAll(src) - if err != nil { - err = &os.PathError{Op: "read", Path: filename, Err: err} - return nil, err - } - return data, nil - case nil: - return ioutil.ReadFile(filename) - default: - return nil, fmt.Errorf("invalid source: %T", src) - } -} - -// An Error describes the nature and position of a scanner or parser error. -type Error struct { - Pos Position - Msg string -} - -func (e Error) Error() string { return e.Pos.String() + ": " + e.Msg } - -// errorf is called to report an error. -// errorf does not return: it panics. -func (sc *scanner) error(pos Position, s string) { - panic(Error{pos, s}) -} - -func (sc *scanner) errorf(pos Position, format string, args ...interface{}) { - sc.error(pos, fmt.Sprintf(format, args...)) -} - -func (sc *scanner) recover(err *error) { - // The scanner and parser panic both for routine errors like - // syntax errors and for programmer bugs like array index - // errors. Turn both into error returns. Catching bug panics - // is especially important when processing many files. - switch e := recover().(type) { - case nil: - // no panic - case Error: - *err = e - default: - *err = Error{sc.pos, fmt.Sprintf("internal error: %v", e)} - if debug { - log.Fatal(*err) - } - } -} - -// eof reports whether the input has reached end of file. -func (sc *scanner) eof() bool { - return len(sc.rest) == 0 && !sc.readLine() -} - -// readLine attempts to read another line of input. -// Precondition: len(sc.rest)==0. 
-func (sc *scanner) readLine() bool { - if sc.readline != nil { - var err error - sc.rest, err = sc.readline() - if err != nil { - sc.errorf(sc.pos, "%v", err) // EOF or ErrInterrupt - } - return len(sc.rest) > 0 - } - return false -} - -// peekRune returns the next rune in the input without consuming it. -// Newlines in Unix, DOS, or Mac format are treated as one rune, '\n'. -func (sc *scanner) peekRune() rune { - // TODO(adonovan): opt: measure and perhaps inline eof. - if sc.eof() { - return 0 - } - - // fast path: ASCII - if b := sc.rest[0]; b < utf8.RuneSelf { - if b == '\r' { - return '\n' - } - return rune(b) - } - - r, _ := utf8.DecodeRune(sc.rest) - return r -} - -// readRune consumes and returns the next rune in the input. -// Newlines in Unix, DOS, or Mac format are treated as one rune, '\n'. -func (sc *scanner) readRune() rune { - // eof() has been inlined here, both to avoid a call - // and to establish len(rest)>0 to avoid a bounds check. - if len(sc.rest) == 0 { - if !sc.readLine() { - sc.error(sc.pos, "internal scanner error: readRune at EOF") - } - // Redundant, but eliminates the bounds-check below. - if len(sc.rest) == 0 { - return 0 - } - } - - // fast path: ASCII - if b := sc.rest[0]; b < utf8.RuneSelf { - r := rune(b) - sc.rest = sc.rest[1:] - if r == '\r' { - if len(sc.rest) > 0 && sc.rest[0] == '\n' { - sc.rest = sc.rest[1:] - } - r = '\n' - } - if r == '\n' { - sc.pos.Line++ - sc.pos.Col = 1 - } else { - sc.pos.Col++ - } - return r - } - - r, size := utf8.DecodeRune(sc.rest) - sc.rest = sc.rest[size:] - sc.pos.Col++ - return r -} - -// tokenValue records the position and value associated with each token. -type tokenValue struct { - raw string // raw text of token - int int64 // decoded int - bigInt *big.Int // decoded integers > int64 - float float64 // decoded float - string string // decoded string - pos Position // start position of token -} - -// startToken marks the beginning of the next input token. -// It must be followed by a call to endToken once the token has -// been consumed using readRune. -func (sc *scanner) startToken(val *tokenValue) { - sc.token = sc.rest - val.raw = "" - val.pos = sc.pos -} - -// endToken marks the end of an input token. -// It records the actual token string in val.raw if the caller -// has not done that already. -func (sc *scanner) endToken(val *tokenValue) { - if val.raw == "" { - val.raw = string(sc.token[:len(sc.token)-len(sc.rest)]) - } -} - -// nextToken is called by the parser to obtain the next input token. -// It returns the token value and sets val to the data associated with -// the token. -// -// For all our input tokens, the associated data is val.pos (the -// position where the token begins), val.raw (the input string -// corresponding to the token). For string and int tokens, the string -// and int fields additionally contain the token's interpreted value. -func (sc *scanner) nextToken(val *tokenValue) Token { - - // The following distribution of tokens guides case ordering: - // - // COMMA 27 % - // STRING 23 % - // IDENT 15 % - // EQL 11 % - // LBRACK 5.5 % - // RBRACK 5.5 % - // NEWLINE 3 % - // LPAREN 2.9 % - // RPAREN 2.9 % - // INT 2 % - // others < 1 % - // - // Although NEWLINE tokens are infrequent, and lineStart is - // usually (~97%) false on entry, skipped newlines account for - // about 50% of all iterations of the 'start' loop. - -start: - var c rune - - // Deal with leading spaces and indentation. 
- blank := false - savedLineStart := sc.lineStart - if sc.lineStart { - sc.lineStart = false - col := 0 - for { - c = sc.peekRune() - if c == ' ' { - col++ - sc.readRune() - } else if c == '\t' { - const tab = 8 - col += int(tab - (sc.pos.Col-1)%tab) - sc.readRune() - } else { - break - } - } - - // The third clause matches EOF. - if c == '#' || c == '\n' || c == 0 { - blank = true - } - - // Compute indentation level for non-blank lines not - // inside an expression. This is not the common case. - if !blank && sc.depth == 0 { - cur := sc.indentstk[len(sc.indentstk)-1] - if col > cur { - // indent - sc.dents++ - sc.indentstk = append(sc.indentstk, col) - } else if col < cur { - // outdent(s) - for len(sc.indentstk) > 0 && col < sc.indentstk[len(sc.indentstk)-1] { - sc.dents-- - sc.indentstk = sc.indentstk[:len(sc.indentstk)-1] // pop - } - if col != sc.indentstk[len(sc.indentstk)-1] { - sc.error(sc.pos, "unindent does not match any outer indentation level") - } - } - } - } - - // Return saved indentation tokens. - if sc.dents != 0 { - sc.startToken(val) - sc.endToken(val) - if sc.dents < 0 { - sc.dents++ - return OUTDENT - } else { - sc.dents-- - return INDENT - } - } - - // start of line proper - c = sc.peekRune() - - // Skip spaces. - for c == ' ' || c == '\t' { - sc.readRune() - c = sc.peekRune() - } - - // comment - if c == '#' { - if sc.keepComments { - sc.startToken(val) - } - // Consume up to newline (included). - for c != 0 && c != '\n' { - sc.readRune() - c = sc.peekRune() - } - if sc.keepComments { - sc.endToken(val) - if blank { - sc.lineComments = append(sc.lineComments, Comment{val.pos, val.raw}) - } else { - sc.suffixComments = append(sc.suffixComments, Comment{val.pos, val.raw}) - } - } - } - - // newline - if c == '\n' { - sc.lineStart = true - - // Ignore newlines within expressions (common case). - if sc.depth > 0 { - sc.readRune() - goto start - } - - // Ignore blank lines, except in the REPL, - // where they emit OUTDENTs and NEWLINE. - if blank { - if sc.readline == nil { - sc.readRune() - goto start - } else if len(sc.indentstk) > 1 { - sc.dents = 1 - len(sc.indentstk) - sc.indentstk = sc.indentstk[:1] - goto start - } - } - - // At top-level (not in an expression). - sc.startToken(val) - sc.readRune() - val.raw = "\n" - return NEWLINE - } - - // end of file - if c == 0 { - // Emit OUTDENTs for unfinished indentation, - // preceded by a NEWLINE if we haven't just emitted one. 
- if len(sc.indentstk) > 1 { - if savedLineStart { - sc.dents = 1 - len(sc.indentstk) - sc.indentstk = sc.indentstk[:1] - goto start - } else { - sc.lineStart = true - sc.startToken(val) - val.raw = "\n" - return NEWLINE - } - } - - sc.startToken(val) - sc.endToken(val) - return EOF - } - - // line continuation - if c == '\\' { - sc.readRune() - if sc.peekRune() != '\n' { - sc.errorf(sc.pos, "stray backslash in program") - } - sc.readRune() - goto start - } - - // start of the next token - sc.startToken(val) - - // comma (common case) - if c == ',' { - sc.readRune() - sc.endToken(val) - return COMMA - } - - // string literal - if c == '"' || c == '\'' { - return sc.scanString(val, c) - } - - // identifier or keyword - if isIdentStart(c) { - // raw string literal - if c == 'r' && len(sc.rest) > 1 && (sc.rest[1] == '"' || sc.rest[1] == '\'') { - sc.readRune() - c = sc.peekRune() - return sc.scanString(val, c) - } - - for isIdent(c) { - sc.readRune() - c = sc.peekRune() - } - sc.endToken(val) - if k, ok := keywordToken[val.raw]; ok { - return k - } - - return IDENT - } - - // brackets - switch c { - case '[', '(', '{': - sc.depth++ - sc.readRune() - sc.endToken(val) - switch c { - case '[': - return LBRACK - case '(': - return LPAREN - case '{': - return LBRACE - } - panic("unreachable") - - case ']', ')', '}': - if sc.depth == 0 { - sc.errorf(sc.pos, "unexpected %q", c) - } else { - sc.depth-- - } - sc.readRune() - sc.endToken(val) - switch c { - case ']': - return RBRACK - case ')': - return RPAREN - case '}': - return RBRACE - } - panic("unreachable") - } - - // int or float literal, or period - if isdigit(c) || c == '.' { - return sc.scanNumber(val, c) - } - - // other punctuation - defer sc.endToken(val) - switch c { - case '=', '<', '>', '!', '+', '-', '%', '/', '&', '|', '^': // possibly followed by '=' - start := sc.pos - sc.readRune() - if sc.peekRune() == '=' { - sc.readRune() - switch c { - case '<': - return LE - case '>': - return GE - case '=': - return EQL - case '!': - return NEQ - case '+': - return PLUS_EQ - case '-': - return MINUS_EQ - case '/': - return SLASH_EQ - case '%': - return PERCENT_EQ - case '&': - return AMP_EQ - case '|': - return PIPE_EQ - case '^': - return CIRCUMFLEX_EQ - } - } - switch c { - case '=': - return EQ - case '<': - if sc.peekRune() == '<' { - sc.readRune() - if sc.peekRune() == '=' { - sc.readRune() - return LTLT_EQ - } else { - return LTLT - } - } - return LT - case '>': - if sc.peekRune() == '>' { - sc.readRune() - if sc.peekRune() == '=' { - sc.readRune() - return GTGT_EQ - } else { - return GTGT - } - } - return GT - case '!': - sc.error(start, "unexpected input character '!'") - case '+': - return PLUS - case '-': - return MINUS - case '/': - if sc.peekRune() == '/' { - sc.readRune() - if sc.peekRune() == '=' { - sc.readRune() - return SLASHSLASH_EQ - } else { - return SLASHSLASH - } - } - return SLASH - case '%': - return PERCENT - case '&': - return AMP - case '|': - return PIPE - case '^': - return CIRCUMFLEX - } - panic("unreachable") - - case ':', ';', '~': // single-char tokens (except comma) - sc.readRune() - switch c { - case ':': - return COLON - case ';': - return SEMI - case '~': - return TILDE - } - panic("unreachable") - - case '*': // possibly followed by '*' or '=' - sc.readRune() - switch sc.peekRune() { - case '*': - sc.readRune() - return STARSTAR - case '=': - sc.readRune() - return STAR_EQ - } - return STAR - } - - sc.errorf(sc.pos, "unexpected input character %#q", c) - panic("unreachable") -} - -func (sc *scanner) 
scanString(val *tokenValue, quote rune) Token { - start := sc.pos - triple := len(sc.rest) >= 3 && sc.rest[0] == byte(quote) && sc.rest[1] == byte(quote) && sc.rest[2] == byte(quote) - sc.readRune() - if !triple { - // Precondition: startToken was already called. - for { - if sc.eof() { - sc.error(val.pos, "unexpected EOF in string") - } - c := sc.readRune() - if c == quote { - break - } - if c == '\n' { - sc.error(val.pos, "unexpected newline in string") - } - if c == '\\' { - if sc.eof() { - sc.error(val.pos, "unexpected EOF in string") - } - sc.readRune() - } - } - sc.endToken(val) - } else { - // triple-quoted string literal - sc.readRune() - sc.readRune() - - // A triple-quoted string literal may span multiple - // gulps of REPL input; it is the only such token. - // Thus we must avoid {start,end}Token. - raw := new(strings.Builder) - - // Copy the prefix, e.g. r''' or """ (see startToken). - raw.Write(sc.token[:len(sc.token)-len(sc.rest)]) - - quoteCount := 0 - for { - if sc.eof() { - sc.error(val.pos, "unexpected EOF in string") - } - c := sc.readRune() - raw.WriteRune(c) - if c == quote { - quoteCount++ - if quoteCount == 3 { - break - } - } else { - quoteCount = 0 - } - if c == '\\' { - if sc.eof() { - sc.error(val.pos, "unexpected EOF in string") - } - c = sc.readRune() - raw.WriteRune(c) - } - } - val.raw = raw.String() - } - - s, _, err := unquote(val.raw) - if err != nil { - sc.error(start, err.Error()) - } - val.string = s - return STRING -} - -func (sc *scanner) scanNumber(val *tokenValue, c rune) Token { - // https://github.com/google/starlark-go/blob/master/doc/spec.md#lexical-elements - // - // Python features not supported: - // - integer literals of >64 bits of precision - // - 123L or 123l long suffix - // - traditional octal: 0755 - // https://docs.python.org/2/reference/lexical_analysis.html#integer-and-long-integer-literals - - start := sc.pos - fraction, exponent := false, false - - if c == '.' { - // dot or start of fraction - sc.readRune() - c = sc.peekRune() - if !isdigit(c) { - sc.endToken(val) - return DOT - } - fraction = true - } else if c == '0' { - // hex, octal, binary or float - sc.readRune() - c = sc.peekRune() - - if c == '.' { - fraction = true - } else if c == 'x' || c == 'X' { - // hex - sc.readRune() - c = sc.peekRune() - if !isxdigit(c) { - sc.error(start, "invalid hex literal") - } - for isxdigit(c) { - sc.readRune() - c = sc.peekRune() - } - } else if c == 'o' || c == 'O' { - // octal - sc.readRune() - c = sc.peekRune() - if !isodigit(c) { - sc.error(sc.pos, "invalid octal literal") - } - for isodigit(c) { - sc.readRune() - c = sc.peekRune() - } - } else if c == 'b' || c == 'B' { - // binary - sc.readRune() - c = sc.peekRune() - if !isbdigit(c) { - sc.error(sc.pos, "invalid binary literal") - } - for isbdigit(c) { - sc.readRune() - c = sc.peekRune() - } - } else { - // float (or obsolete octal "0755") - allzeros, octal := true, true - for isdigit(c) { - if c != '0' { - allzeros = false - } - if c > '7' { - octal = false - } - sc.readRune() - c = sc.peekRune() - } - if c == '.' { - fraction = true - } else if c == 'e' || c == 'E' { - exponent = true - } else if octal && !allzeros { - sc.endToken(val) - sc.errorf(sc.pos, "obsolete form of octal literal; use 0o%s", val.raw[1:]) - } - } - } else { - // decimal - for isdigit(c) { - sc.readRune() - c = sc.peekRune() - } - - if c == '.' { - fraction = true - } else if c == 'e' || c == 'E' { - exponent = true - } - } - - if fraction { - sc.readRune() // consume '.' 
- c = sc.peekRune() - for isdigit(c) { - sc.readRune() - c = sc.peekRune() - } - - if c == 'e' || c == 'E' { - exponent = true - } - } - - if exponent { - sc.readRune() // consume [eE] - c = sc.peekRune() - if c == '+' || c == '-' { - sc.readRune() - c = sc.peekRune() - if !isdigit(c) { - sc.error(sc.pos, "invalid float literal") - } - } - for isdigit(c) { - sc.readRune() - c = sc.peekRune() - } - } - - sc.endToken(val) - if fraction || exponent { - var err error - val.float, err = strconv.ParseFloat(val.raw, 64) - if err != nil { - sc.error(sc.pos, "invalid float literal") - } - return FLOAT - } else { - var err error - s := val.raw - val.bigInt = nil - if len(s) > 2 && s[0] == '0' && (s[1] == 'o' || s[1] == 'O') { - val.int, err = strconv.ParseInt(s[2:], 8, 64) - } else if len(s) > 2 && s[0] == '0' && (s[1] == 'b' || s[1] == 'B') { - val.int, err = strconv.ParseInt(s[2:], 2, 64) - } else { - val.int, err = strconv.ParseInt(s, 0, 64) - if err != nil { - num := new(big.Int) - var ok bool - val.bigInt, ok = num.SetString(s, 0) - if ok { - err = nil - } - } - } - if err != nil { - sc.error(start, "invalid int literal") - } - return INT - } -} - -// isIdent reports whether c is an identifier rune. -func isIdent(c rune) bool { - return isdigit(c) || isIdentStart(c) -} - -func isIdentStart(c rune) bool { - return 'a' <= c && c <= 'z' || - 'A' <= c && c <= 'Z' || - c == '_' || - unicode.IsLetter(c) -} - -func isdigit(c rune) bool { return '0' <= c && c <= '9' } -func isodigit(c rune) bool { return '0' <= c && c <= '7' } -func isxdigit(c rune) bool { return isdigit(c) || 'A' <= c && c <= 'F' || 'a' <= c && c <= 'f' } -func isbdigit(c rune) bool { return '0' == c || c == '1' } - -// keywordToken records the special tokens for -// strings that should not be treated as ordinary identifiers. -var keywordToken = map[string]Token{ - "and": AND, - "break": BREAK, - "continue": CONTINUE, - "def": DEF, - "elif": ELIF, - "else": ELSE, - "for": FOR, - "if": IF, - "in": IN, - "lambda": LAMBDA, - "load": LOAD, - "not": NOT, - "or": OR, - "pass": PASS, - "return": RETURN, - "while": WHILE, - - // reserved words: - "as": ILLEGAL, - // "assert": ILLEGAL, // heavily used by our tests - "class": ILLEGAL, - "del": ILLEGAL, - "except": ILLEGAL, - "finally": ILLEGAL, - "from": ILLEGAL, - "global": ILLEGAL, - "import": ILLEGAL, - "is": ILLEGAL, - "nonlocal": ILLEGAL, - "raise": ILLEGAL, - "try": ILLEGAL, - "with": ILLEGAL, - "yield": ILLEGAL, -} diff --git a/src/vendor/go.starlark.net/syntax/syntax.go b/src/vendor/go.starlark.net/syntax/syntax.go deleted file mode 100644 index b4817c1a2..000000000 --- a/src/vendor/go.starlark.net/syntax/syntax.go +++ /dev/null @@ -1,529 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package syntax provides a Starlark parser and abstract syntax tree. -package syntax // import "go.starlark.net/syntax" - -// A Node is a node in a Starlark syntax tree. -type Node interface { - // Span returns the start and end position of the expression. - Span() (start, end Position) - - // Comments returns the comments associated with this node. - // It returns nil if RetainComments was not specified during parsing, - // or if AllocComments was not called. - Comments() *Comments - - // AllocComments allocates a new Comments node if there was none. - // This makes possible to add new comments using Comments() method. 
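Editorial aside, not part of the patch: the scanNumber routine in the deleted Starlark scanner above parses INT literals with strconv.ParseInt and falls back to math/big when the value overflows int64. A minimal standalone sketch of that fallback; parseIntLiteral is a hypothetical helper name, not from the vendored code.

package main

import (
	"fmt"
	"math/big"
	"strconv"
)

// parseIntLiteral returns an int64 for small literals and a *big.Int for
// literals that do not fit in 64 bits, mirroring the scanner's fallback.
func parseIntLiteral(s string) (int64, *big.Int, error) {
	if i, err := strconv.ParseInt(s, 0, 64); err == nil {
		return i, nil, nil // fits in 64 bits
	}
	if n, ok := new(big.Int).SetString(s, 0); ok {
		return 0, n, nil // arbitrary-precision fallback
	}
	return 0, nil, fmt.Errorf("invalid int literal %q", s)
}

func main() {
	for _, s := range []string{"0o755", "12345678901234567890123"} {
		i, n, err := parseIntLiteral(s)
		fmt.Println(i, n, err)
	}
}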
- AllocComments() -} - -// A Comment represents a single # comment. -type Comment struct { - Start Position - Text string // without trailing newline -} - -// Comments collects the comments associated with an expression. -type Comments struct { - Before []Comment // whole-line comments before this expression - Suffix []Comment // end-of-line comments after this expression (up to 1) - - // For top-level expressions only, After lists whole-line - // comments following the expression. - After []Comment -} - -// A commentsRef is a possibly-nil reference to a set of comments. -// A commentsRef is embedded in each type of syntax node, -// and provides its Comments and AllocComments methods. -type commentsRef struct{ ref *Comments } - -// Comments returns the comments associated with a syntax node, -// or nil if AllocComments has not yet been called. -func (cr commentsRef) Comments() *Comments { return cr.ref } - -// AllocComments enables comments to be associated with a syntax node. -func (cr *commentsRef) AllocComments() { - if cr.ref == nil { - cr.ref = new(Comments) - } -} - -// Start returns the start position of the expression. -func Start(n Node) Position { - start, _ := n.Span() - return start -} - -// End returns the end position of the expression. -func End(n Node) Position { - _, end := n.Span() - return end -} - -// A File represents a Starlark file. -type File struct { - commentsRef - Path string - Stmts []Stmt - - Module interface{} // a *resolve.Module, set by resolver -} - -func (x *File) Span() (start, end Position) { - if len(x.Stmts) == 0 { - return - } - start, _ = x.Stmts[0].Span() - _, end = x.Stmts[len(x.Stmts)-1].Span() - return start, end -} - -// A Stmt is a Starlark statement. -type Stmt interface { - Node - stmt() -} - -func (*AssignStmt) stmt() {} -func (*BranchStmt) stmt() {} -func (*DefStmt) stmt() {} -func (*ExprStmt) stmt() {} -func (*ForStmt) stmt() {} -func (*WhileStmt) stmt() {} -func (*IfStmt) stmt() {} -func (*LoadStmt) stmt() {} -func (*ReturnStmt) stmt() {} - -// An AssignStmt represents an assignment: -// x = 0 -// x, y = y, x -// x += 1 -type AssignStmt struct { - commentsRef - OpPos Position - Op Token // = EQ | {PLUS,MINUS,STAR,PERCENT}_EQ - LHS Expr - RHS Expr -} - -func (x *AssignStmt) Span() (start, end Position) { - start, _ = x.LHS.Span() - _, end = x.RHS.Span() - return -} - -// A DefStmt represents a function definition. -type DefStmt struct { - commentsRef - Def Position - Name *Ident - Params []Expr // param = ident | ident=expr | * | *ident | **ident - Body []Stmt - - Function interface{} // a *resolve.Function, set by resolver -} - -func (x *DefStmt) Span() (start, end Position) { - _, end = x.Body[len(x.Body)-1].Span() - return x.Def, end -} - -// An ExprStmt is an expression evaluated for side effects. -type ExprStmt struct { - commentsRef - X Expr -} - -func (x *ExprStmt) Span() (start, end Position) { - return x.X.Span() -} - -// An IfStmt is a conditional: If Cond: True; else: False. -// 'elseif' is desugared into a chain of IfStmts. -type IfStmt struct { - commentsRef - If Position // IF or ELIF - Cond Expr - True []Stmt - ElsePos Position // ELSE or ELIF - False []Stmt // optional -} - -func (x *IfStmt) Span() (start, end Position) { - body := x.False - if body == nil { - body = x.True - } - _, end = body[len(body)-1].Span() - return x.If, end -} - -// A LoadStmt loads another module and binds names from it: -// load(Module, "x", y="foo"). 
-// -// The AST is slightly unfaithful to the concrete syntax here because -// Starlark's load statement, so that it can be implemented in Python, -// binds some names (like y above) with an identifier and some (like x) -// without. For consistency we create fake identifiers for all the -// strings. -type LoadStmt struct { - commentsRef - Load Position - Module *Literal // a string - From []*Ident // name defined in loading module - To []*Ident // name in loaded module - Rparen Position -} - -func (x *LoadStmt) Span() (start, end Position) { - return x.Load, x.Rparen -} - -// ModuleName returns the name of the module loaded by this statement. -func (x *LoadStmt) ModuleName() string { return x.Module.Value.(string) } - -// A BranchStmt changes the flow of control: break, continue, pass. -type BranchStmt struct { - commentsRef - Token Token // = BREAK | CONTINUE | PASS - TokenPos Position -} - -func (x *BranchStmt) Span() (start, end Position) { - return x.TokenPos, x.TokenPos.add(x.Token.String()) -} - -// A ReturnStmt returns from a function. -type ReturnStmt struct { - commentsRef - Return Position - Result Expr // may be nil -} - -func (x *ReturnStmt) Span() (start, end Position) { - if x.Result == nil { - return x.Return, x.Return.add("return") - } - _, end = x.Result.Span() - return x.Return, end -} - -// An Expr is a Starlark expression. -type Expr interface { - Node - expr() -} - -func (*BinaryExpr) expr() {} -func (*CallExpr) expr() {} -func (*Comprehension) expr() {} -func (*CondExpr) expr() {} -func (*DictEntry) expr() {} -func (*DictExpr) expr() {} -func (*DotExpr) expr() {} -func (*Ident) expr() {} -func (*IndexExpr) expr() {} -func (*LambdaExpr) expr() {} -func (*ListExpr) expr() {} -func (*Literal) expr() {} -func (*ParenExpr) expr() {} -func (*SliceExpr) expr() {} -func (*TupleExpr) expr() {} -func (*UnaryExpr) expr() {} - -// An Ident represents an identifier. -type Ident struct { - commentsRef - NamePos Position - Name string - - Binding interface{} // a *resolver.Binding, set by resolver -} - -func (x *Ident) Span() (start, end Position) { - return x.NamePos, x.NamePos.add(x.Name) -} - -// A Literal represents a literal string or number. -type Literal struct { - commentsRef - Token Token // = STRING | INT - TokenPos Position - Raw string // uninterpreted text - Value interface{} // = string | int64 | *big.Int -} - -func (x *Literal) Span() (start, end Position) { - return x.TokenPos, x.TokenPos.add(x.Raw) -} - -// A ParenExpr represents a parenthesized expression: (X). -type ParenExpr struct { - commentsRef - Lparen Position - X Expr - Rparen Position -} - -func (x *ParenExpr) Span() (start, end Position) { - return x.Lparen, x.Rparen.add(")") -} - -// A CallExpr represents a function call expression: Fn(Args). -type CallExpr struct { - commentsRef - Fn Expr - Lparen Position - Args []Expr // arg = expr | ident=expr | *expr | **expr - Rparen Position -} - -func (x *CallExpr) Span() (start, end Position) { - start, _ = x.Fn.Span() - return start, x.Rparen.add(")") -} - -// A DotExpr represents a field or method selector: X.Name. -type DotExpr struct { - commentsRef - X Expr - Dot Position - NamePos Position - Name *Ident -} - -func (x *DotExpr) Span() (start, end Position) { - start, _ = x.X.Span() - _, end = x.Name.Span() - return -} - -// A Comprehension represents a list or dict comprehension: -// [Body for ... if ...] or {Body for ... if ...} -type Comprehension struct { - commentsRef - Curly bool // {x:y for ...} or {x for ...}, not [x for ...] 
- Lbrack Position - Body Expr - Clauses []Node // = *ForClause | *IfClause - Rbrack Position -} - -func (x *Comprehension) Span() (start, end Position) { - return x.Lbrack, x.Rbrack.add("]") -} - -// A ForStmt represents a loop: for Vars in X: Body. -type ForStmt struct { - commentsRef - For Position - Vars Expr // name, or tuple of names - X Expr - Body []Stmt -} - -func (x *ForStmt) Span() (start, end Position) { - _, end = x.Body[len(x.Body)-1].Span() - return x.For, end -} - -// A WhileStmt represents a while loop: while X: Body. -type WhileStmt struct { - commentsRef - While Position - Cond Expr - Body []Stmt -} - -func (x *WhileStmt) Span() (start, end Position) { - _, end = x.Body[len(x.Body)-1].Span() - return x.While, end -} - -// A ForClause represents a for clause in a list comprehension: for Vars in X. -type ForClause struct { - commentsRef - For Position - Vars Expr // name, or tuple of names - In Position - X Expr -} - -func (x *ForClause) Span() (start, end Position) { - _, end = x.X.Span() - return x.For, end -} - -// An IfClause represents an if clause in a list comprehension: if Cond. -type IfClause struct { - commentsRef - If Position - Cond Expr -} - -func (x *IfClause) Span() (start, end Position) { - _, end = x.Cond.Span() - return x.If, end -} - -// A DictExpr represents a dictionary literal: { List }. -type DictExpr struct { - commentsRef - Lbrace Position - List []Expr // all *DictEntrys - Rbrace Position -} - -func (x *DictExpr) Span() (start, end Position) { - return x.Lbrace, x.Rbrace.add("}") -} - -// A DictEntry represents a dictionary entry: Key: Value. -// Used only within a DictExpr. -type DictEntry struct { - commentsRef - Key Expr - Colon Position - Value Expr -} - -func (x *DictEntry) Span() (start, end Position) { - start, _ = x.Key.Span() - _, end = x.Value.Span() - return start, end -} - -// A LambdaExpr represents an inline function abstraction. -// -// Although they may be added in future, lambda expressions are not -// currently part of the Starlark spec, so their use is controlled by the -// resolver.AllowLambda flag. -type LambdaExpr struct { - commentsRef - Lambda Position - Params []Expr // param = ident | ident=expr | * | *ident | **ident - Body Expr - - Function interface{} // a *resolve.Function, set by resolver -} - -func (x *LambdaExpr) Span() (start, end Position) { - _, end = x.Body.Span() - return x.Lambda, end -} - -// A ListExpr represents a list literal: [ List ]. -type ListExpr struct { - commentsRef - Lbrack Position - List []Expr - Rbrack Position -} - -func (x *ListExpr) Span() (start, end Position) { - return x.Lbrack, x.Rbrack.add("]") -} - -// CondExpr represents the conditional: X if COND else ELSE. -type CondExpr struct { - commentsRef - If Position - Cond Expr - True Expr - ElsePos Position - False Expr -} - -func (x *CondExpr) Span() (start, end Position) { - start, _ = x.True.Span() - _, end = x.False.Span() - return start, end -} - -// A TupleExpr represents a tuple literal: (List). -type TupleExpr struct { - commentsRef - Lparen Position // optional (e.g. in x, y = 0, 1), but required if List is empty - List []Expr - Rparen Position -} - -func (x *TupleExpr) Span() (start, end Position) { - if x.Lparen.IsValid() { - return x.Lparen, x.Rparen - } else { - return Start(x.List[0]), End(x.List[len(x.List)-1]) - } -} - -// A UnaryExpr represents a unary expression: Op X. -// -// As a special case, UnaryOp{Op:Star} may also represent -// the star parameter in def f(*args) or def f(*, x). 
-type UnaryExpr struct { - commentsRef - OpPos Position - Op Token - X Expr // may be nil if Op==STAR -} - -func (x *UnaryExpr) Span() (start, end Position) { - if x.X != nil { - _, end = x.X.Span() - } else { - end = x.OpPos.add("*") - } - return x.OpPos, end -} - -// A BinaryExpr represents a binary expression: X Op Y. -// -// As a special case, BinaryExpr{Op:EQ} may also -// represent a named argument in a call f(k=v) -// or a named parameter in a function declaration -// def f(param=default). -type BinaryExpr struct { - commentsRef - X Expr - OpPos Position - Op Token - Y Expr -} - -func (x *BinaryExpr) Span() (start, end Position) { - start, _ = x.X.Span() - _, end = x.Y.Span() - return start, end -} - -// A SliceExpr represents a slice or substring expression: X[Lo:Hi:Step]. -type SliceExpr struct { - commentsRef - X Expr - Lbrack Position - Lo, Hi, Step Expr // all optional - Rbrack Position -} - -func (x *SliceExpr) Span() (start, end Position) { - start, _ = x.X.Span() - return start, x.Rbrack -} - -// An IndexExpr represents an index expression: X[Y]. -type IndexExpr struct { - commentsRef - X Expr - Lbrack Position - Y Expr - Rbrack Position -} - -func (x *IndexExpr) Span() (start, end Position) { - start, _ = x.X.Span() - return start, x.Rbrack -} diff --git a/src/vendor/go.starlark.net/syntax/walk.go b/src/vendor/go.starlark.net/syntax/walk.go deleted file mode 100644 index 1491149c6..000000000 --- a/src/vendor/go.starlark.net/syntax/walk.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2017 The Bazel Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package syntax - -// Walk traverses a syntax tree in depth-first order. -// It starts by calling f(n); n must not be nil. -// If f returns true, Walk calls itself -// recursively for each non-nil child of n. -// Walk then calls f(nil). -func Walk(n Node, f func(Node) bool) { - if n == nil { - panic("nil") - } - if !f(n) { - return - } - - // TODO(adonovan): opt: order cases using profile data. 
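Aside on the Walk function removed below: per its doc comment it calls f(n), descends into children while f returns true, and finally calls f(nil). A minimal usage sketch assuming the go.starlark.net/syntax API as vendored above (Parse, Walk, Ident), counting identifiers in a parsed file:

package main

import (
	"fmt"
	"log"

	"go.starlark.net/syntax"
)

func main() {
	src := "x = 1\nprint(x + y)\n"
	f, err := syntax.Parse("example.star", src, 0)
	if err != nil {
		log.Fatal(err)
	}
	count := 0
	syntax.Walk(f, func(n syntax.Node) bool {
		if _, ok := n.(*syntax.Ident); ok { // safe even for the final f(nil) call
			count++
		}
		return true // keep descending into children
	})
	fmt.Println("identifiers:", count) // x, print, x, y -> 4
}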
- switch n := n.(type) { - case *File: - walkStmts(n.Stmts, f) - - case *ExprStmt: - Walk(n.X, f) - - case *BranchStmt: - // no-op - - case *IfStmt: - Walk(n.Cond, f) - walkStmts(n.True, f) - walkStmts(n.False, f) - - case *AssignStmt: - Walk(n.LHS, f) - Walk(n.RHS, f) - - case *DefStmt: - Walk(n.Name, f) - for _, param := range n.Params { - Walk(param, f) - } - walkStmts(n.Body, f) - - case *ForStmt: - Walk(n.Vars, f) - Walk(n.X, f) - walkStmts(n.Body, f) - - case *ReturnStmt: - if n.Result != nil { - Walk(n.Result, f) - } - - case *LoadStmt: - Walk(n.Module, f) - for _, from := range n.From { - Walk(from, f) - } - for _, to := range n.To { - Walk(to, f) - } - - case *Ident, *Literal: - // no-op - - case *ListExpr: - for _, x := range n.List { - Walk(x, f) - } - - case *ParenExpr: - Walk(n.X, f) - - case *CondExpr: - Walk(n.Cond, f) - Walk(n.True, f) - Walk(n.False, f) - - case *IndexExpr: - Walk(n.X, f) - Walk(n.Y, f) - - case *DictEntry: - Walk(n.Key, f) - Walk(n.Value, f) - - case *SliceExpr: - Walk(n.X, f) - if n.Lo != nil { - Walk(n.Lo, f) - } - if n.Hi != nil { - Walk(n.Hi, f) - } - if n.Step != nil { - Walk(n.Step, f) - } - - case *Comprehension: - Walk(n.Body, f) - for _, clause := range n.Clauses { - Walk(clause, f) - } - - case *IfClause: - Walk(n.Cond, f) - - case *ForClause: - Walk(n.Vars, f) - Walk(n.X, f) - - case *TupleExpr: - for _, x := range n.List { - Walk(x, f) - } - - case *DictExpr: - for _, entry := range n.List { - entry := entry.(*DictEntry) - Walk(entry.Key, f) - Walk(entry.Value, f) - } - - case *UnaryExpr: - if n.X != nil { - Walk(n.X, f) - } - - case *BinaryExpr: - Walk(n.X, f) - Walk(n.Y, f) - - case *DotExpr: - Walk(n.X, f) - Walk(n.Name, f) - - case *CallExpr: - Walk(n.Fn, f) - for _, arg := range n.Args { - Walk(arg, f) - } - - case *LambdaExpr: - for _, param := range n.Params { - Walk(param, f) - } - Walk(n.Body, f) - - default: - panic(n) - } - - f(nil) -} - -func walkStmts(stmts []Stmt, f func(Node) bool) { - for _, stmt := range stmts { - Walk(stmt, f) - } -} diff --git a/src/vendor/golang.org/x/crypto/cast5/cast5.go b/src/vendor/golang.org/x/crypto/cast5/cast5.go deleted file mode 100644 index ddcbeb6f2..000000000 --- a/src/vendor/golang.org/x/crypto/cast5/cast5.go +++ /dev/null @@ -1,533 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cast5 implements CAST5, as defined in RFC 2144. -// -// CAST5 is a legacy cipher and its short block size makes it vulnerable to -// birthday bound attacks (see https://sweet32.info). It should only be used -// where compatibility with legacy systems, not security, is the goal. -// -// Deprecated: any new system should use AES (from crypto/aes, if necessary in -// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from -// golang.org/x/crypto/chacha20poly1305). 
-package cast5 // import "golang.org/x/crypto/cast5" - -import "errors" - -const BlockSize = 8 -const KeySize = 16 - -type Cipher struct { - masking [16]uint32 - rotate [16]uint8 -} - -func NewCipher(key []byte) (c *Cipher, err error) { - if len(key) != KeySize { - return nil, errors.New("CAST5: keys must be 16 bytes") - } - - c = new(Cipher) - c.keySchedule(key) - return -} - -func (c *Cipher) BlockSize() int { - return BlockSize -} - -func (c *Cipher) Encrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - - l, r = r, l^f1(r, c.masking[0], c.rotate[0]) - l, r = r, l^f2(r, c.masking[1], c.rotate[1]) - l, r = r, l^f3(r, c.masking[2], c.rotate[2]) - l, r = r, l^f1(r, c.masking[3], c.rotate[3]) - - l, r = r, l^f2(r, c.masking[4], c.rotate[4]) - l, r = r, l^f3(r, c.masking[5], c.rotate[5]) - l, r = r, l^f1(r, c.masking[6], c.rotate[6]) - l, r = r, l^f2(r, c.masking[7], c.rotate[7]) - - l, r = r, l^f3(r, c.masking[8], c.rotate[8]) - l, r = r, l^f1(r, c.masking[9], c.rotate[9]) - l, r = r, l^f2(r, c.masking[10], c.rotate[10]) - l, r = r, l^f3(r, c.masking[11], c.rotate[11]) - - l, r = r, l^f1(r, c.masking[12], c.rotate[12]) - l, r = r, l^f2(r, c.masking[13], c.rotate[13]) - l, r = r, l^f3(r, c.masking[14], c.rotate[14]) - l, r = r, l^f1(r, c.masking[15], c.rotate[15]) - - dst[0] = uint8(r >> 24) - dst[1] = uint8(r >> 16) - dst[2] = uint8(r >> 8) - dst[3] = uint8(r) - dst[4] = uint8(l >> 24) - dst[5] = uint8(l >> 16) - dst[6] = uint8(l >> 8) - dst[7] = uint8(l) -} - -func (c *Cipher) Decrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - - l, r = r, l^f1(r, c.masking[15], c.rotate[15]) - l, r = r, l^f3(r, c.masking[14], c.rotate[14]) - l, r = r, l^f2(r, c.masking[13], c.rotate[13]) - l, r = r, l^f1(r, c.masking[12], c.rotate[12]) - - l, r = r, l^f3(r, c.masking[11], c.rotate[11]) - l, r = r, l^f2(r, c.masking[10], c.rotate[10]) - l, r = r, l^f1(r, c.masking[9], c.rotate[9]) - l, r = r, l^f3(r, c.masking[8], c.rotate[8]) - - l, r = r, l^f2(r, c.masking[7], c.rotate[7]) - l, r = r, l^f1(r, c.masking[6], c.rotate[6]) - l, r = r, l^f3(r, c.masking[5], c.rotate[5]) - l, r = r, l^f2(r, c.masking[4], c.rotate[4]) - - l, r = r, l^f1(r, c.masking[3], c.rotate[3]) - l, r = r, l^f3(r, c.masking[2], c.rotate[2]) - l, r = r, l^f2(r, c.masking[1], c.rotate[1]) - l, r = r, l^f1(r, c.masking[0], c.rotate[0]) - - dst[0] = uint8(r >> 24) - dst[1] = uint8(r >> 16) - dst[2] = uint8(r >> 8) - dst[3] = uint8(r) - dst[4] = uint8(l >> 24) - dst[5] = uint8(l >> 16) - dst[6] = uint8(l >> 8) - dst[7] = uint8(l) -} - -type keyScheduleA [4][7]uint8 -type keyScheduleB [4][5]uint8 - -// keyScheduleRound contains the magic values for a round of the key schedule. -// The keyScheduleA deals with the lines like: -// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8] -// Conceptually, both x and z are in the same array, x first. The first -// element describes which word of this array gets written to and the -// second, which word gets read. So, for the line above, it's "4, 0", because -// it's writing to the first word of z, which, being after x, is word 4, and -// reading from the first word of x: word 0. -// -// Next are the indexes into the S-boxes. Now the array is treated as bytes. So -// "xD" is 0xd. 
The first byte of z is written as "16 + 0", just to be clear -// that it's z that we're indexing. -// -// keyScheduleB deals with lines like: -// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2] -// "K1" is ignored because key words are always written in order. So the five -// elements are the S-box indexes. They use the same form as in keyScheduleA, -// above. - -type keyScheduleRound struct{} -type keySchedule []keyScheduleRound - -var schedule = []struct { - a keyScheduleA - b keyScheduleB -}{ - { - keyScheduleA{ - {4, 0, 0xd, 0xf, 0xc, 0xe, 0x8}, - {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, - {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, - {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, - }, - keyScheduleB{ - {16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2}, - {16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6}, - {16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9}, - {16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc}, - }, - }, - { - keyScheduleA{ - {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, - {1, 4, 0, 2, 1, 3, 16 + 2}, - {2, 5, 7, 6, 5, 4, 16 + 1}, - {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, - }, - keyScheduleB{ - {3, 2, 0xc, 0xd, 8}, - {1, 0, 0xe, 0xf, 0xd}, - {7, 6, 8, 9, 3}, - {5, 4, 0xa, 0xb, 7}, - }, - }, - { - keyScheduleA{ - {4, 0, 0xd, 0xf, 0xc, 0xe, 8}, - {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, - {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, - {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, - }, - keyScheduleB{ - {16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9}, - {16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc}, - {16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2}, - {16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6}, - }, - }, - { - keyScheduleA{ - {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, - {1, 4, 0, 2, 1, 3, 16 + 2}, - {2, 5, 7, 6, 5, 4, 16 + 1}, - {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, - }, - keyScheduleB{ - {8, 9, 7, 6, 3}, - {0xa, 0xb, 5, 4, 7}, - {0xc, 0xd, 3, 2, 8}, - {0xe, 0xf, 1, 0, 0xd}, - }, - }, -} - -func (c *Cipher) keySchedule(in []byte) { - var t [8]uint32 - var k [32]uint32 - - for i := 0; i < 4; i++ { - j := i * 4 - t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3]) - } - - x := []byte{6, 7, 4, 5} - ki := 0 - - for half := 0; half < 2; half++ { - for _, round := range schedule { - for j := 0; j < 4; j++ { - var a [7]uint8 - copy(a[:], round.a[j][:]) - w := t[a[1]] - w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff] - w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff] - w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff] - w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff] - w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff] - t[a[0]] = w - } - - for j := 0; j < 4; j++ { - var b [5]uint8 - copy(b[:], round.b[j][:]) - w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff] - w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff] - w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff] - w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff] - w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff] - k[ki] = w - ki++ - } - } - } - - for i := 0; i < 16; i++ { - c.masking[i] = k[i] - c.rotate[i] = uint8(k[16+i] & 0x1f) - } -} - -// These are the three 'f' functions. See RFC 2144, section 2.2. 
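Aside from the diff: the exported surface of the deleted cast5 package is just NewCipher, BlockSize, Encrypt, and Decrypt over 8-byte blocks with a 16-byte key. A single-block round trip, for illustration only (the package comment above marks CAST5 as deprecated):

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/crypto/cast5"
)

func main() {
	key := []byte("0123456789abcdef") // KeySize = 16 bytes
	c, err := cast5.NewCipher(key)
	if err != nil {
		log.Fatal(err)
	}
	src := []byte("8bytes!!") // exactly one BlockSize (8 byte) block
	dst := make([]byte, cast5.BlockSize)
	c.Encrypt(dst, src)

	back := make([]byte, cast5.BlockSize)
	c.Decrypt(back, dst)
	fmt.Println(bytes.Equal(src, back)) // true
}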
-func f1(d, m uint32, r uint8) uint32 { - t := m + d - I := (t << r) | (t >> (32 - r)) - return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff] -} - -func f2(d, m uint32, r uint8) uint32 { - t := m ^ d - I := (t << r) | (t >> (32 - r)) - return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff] -} - -func f3(d, m uint32, r uint8) uint32 { - t := m - d - I := (t << r) | (t >> (32 - r)) - return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff] -} - -var sBox = [8][256]uint32{ - { - 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949, - 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e, - 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d, - 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0, - 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7, - 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935, - 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d, - 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50, - 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe, - 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3, - 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167, - 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291, - 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779, - 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2, - 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511, - 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d, - 0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5, - 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324, - 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c, - 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc, - 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d, - 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96, - 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a, - 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d, - 0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd, - 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6, - 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9, - 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872, - 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c, - 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e, - 0xbd91e046, 0x9a56456e, 
0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9, - 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf, - }, - { - 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651, - 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3, - 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb, - 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806, - 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b, - 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359, - 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b, - 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c, - 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34, - 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb, - 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd, - 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860, - 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b, - 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304, - 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b, - 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf, - 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c, - 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13, - 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f, - 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6, - 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6, - 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58, - 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906, - 0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d, - 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6, - 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4, - 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6, - 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f, - 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249, - 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa, - 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9, - 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1, - }, - { - 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90, - 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5, - 0x11107d9f, 0x07647db9, 
0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e, - 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240, - 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5, - 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b, - 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71, - 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04, - 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82, - 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15, - 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2, - 0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176, - 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148, - 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc, - 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341, - 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e, - 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51, - 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f, - 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a, - 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b, - 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b, - 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5, - 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45, - 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536, - 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc, - 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0, - 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69, - 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2, - 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49, - 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d, - 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a, - 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783, - }, - { - 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1, - 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf, - 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15, - 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121, - 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25, - 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5, - 0x2649abdf, 0xaea0c7f5, 0x36338cc1, 
0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb, - 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5, - 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d, - 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6, - 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23, - 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003, - 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6, - 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119, - 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24, - 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a, - 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79, - 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df, - 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26, - 0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab, - 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7, - 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417, - 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2, - 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2, - 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a, - 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919, - 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef, - 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876, - 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab, - 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04, - 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282, - 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2, - }, - { - 0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f, - 0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a, - 0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff, - 0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02, - 0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a, - 0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7, - 0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9, - 0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981, - 0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774, - 0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655, - 0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 
0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2, - 0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910, - 0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1, - 0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da, - 0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049, - 0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f, - 0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba, - 0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be, - 0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3, - 0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840, - 0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4, - 0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2, - 0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7, - 0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5, - 0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e, - 0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e, - 0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801, - 0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad, - 0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0, - 0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20, - 0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8, - 0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4, - }, - { - 0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac, - 0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138, - 0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367, - 0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98, - 0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072, - 0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3, - 0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd, - 0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8, - 0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9, - 0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54, - 0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387, - 0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc, - 0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf, - 0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf, - 0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 
0xa8dc8af0, 0x7345c106, 0xf41e232f, - 0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289, - 0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950, - 0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f, - 0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b, - 0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be, - 0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13, - 0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976, - 0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0, - 0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891, - 0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da, - 0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc, - 0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084, - 0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25, - 0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121, - 0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5, - 0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd, - 0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f, - }, - { - 0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f, - 0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de, - 0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43, - 0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19, - 0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2, - 0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516, - 0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88, - 0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816, - 0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756, - 0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a, - 0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264, - 0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688, - 0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28, - 0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3, - 0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7, - 0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06, - 0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033, - 0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a, - 0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 
0x488dcf25, 0x36c9d566, - 0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509, - 0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962, - 0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e, - 0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c, - 0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c, - 0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285, - 0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301, - 0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be, - 0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767, - 0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647, - 0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914, - 0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c, - 0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3, - }, - { - 0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5, - 0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc, - 0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd, - 0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d, - 0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2, - 0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862, - 0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc, - 0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c, - 0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e, - 0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039, - 0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8, - 0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42, - 0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5, - 0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472, - 0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225, - 0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c, - 0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb, - 0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054, - 0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70, - 0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc, - 0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c, - 0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3, - 0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 
0x2eda7fa4, - 0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101, - 0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f, - 0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e, - 0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a, - 0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c, - 0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384, - 0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c, - 0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82, - 0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e, - }, -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/src/vendor/golang.org/x/crypto/openpgp/armor/armor.go deleted file mode 100644 index be342ad47..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/armor/armor.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is -// very similar to PEM except that it has an additional CRC checksum. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. -package armor // import "golang.org/x/crypto/openpgp/armor" - -import ( - "bufio" - "bytes" - "encoding/base64" - "golang.org/x/crypto/openpgp/errors" - "io" -) - -// A Block represents an OpenPGP armored structure. -// -// The encoded form is: -// -// -----BEGIN Type----- -// Headers -// -// base64-encoded Bytes -// '=' base64 encoded checksum -// -----END Type----- -// -// where Headers is a possibly empty sequence of Key: Value lines. -// -// Since the armored data can be very large, this package presents a streaming -// interface. -type Block struct { - Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE"). - Header map[string]string // Optional headers. - Body io.Reader // A Reader from which the contents can be read - lReader lineReader - oReader openpgpReader -} - -var ArmorCorrupt error = errors.StructuralError("armor invalid") - -const crc24Init = 0xb704ce -const crc24Poly = 0x1864cfb -const crc24Mask = 0xffffff - -// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 -func crc24(crc uint32, d []byte) uint32 { - for _, b := range d { - crc ^= uint32(b) << 16 - for i := 0; i < 8; i++ { - crc <<= 1 - if crc&0x1000000 != 0 { - crc ^= crc24Poly - } - } - } - return crc -} - -var armorStart = []byte("-----BEGIN ") -var armorEnd = []byte("-----END ") -var armorEndOfLine = []byte("-----") - -// lineReader wraps a line based reader. It watches for the end of an armor -// block and records the expected CRC value. 
-type lineReader struct { - in *bufio.Reader - buf []byte - eof bool - crc uint32 - crcSet bool -} - -func (l *lineReader) Read(p []byte) (n int, err error) { - if l.eof { - return 0, io.EOF - } - - if len(l.buf) > 0 { - n = copy(p, l.buf) - l.buf = l.buf[n:] - return - } - - line, isPrefix, err := l.in.ReadLine() - if err != nil { - return - } - if isPrefix { - return 0, ArmorCorrupt - } - - if bytes.HasPrefix(line, armorEnd) { - l.eof = true - return 0, io.EOF - } - - if len(line) == 5 && line[0] == '=' { - // This is the checksum line - var expectedBytes [3]byte - var m int - m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) - if m != 3 || err != nil { - return - } - l.crc = uint32(expectedBytes[0])<<16 | - uint32(expectedBytes[1])<<8 | - uint32(expectedBytes[2]) - - line, _, err = l.in.ReadLine() - if err != nil && err != io.EOF { - return - } - if !bytes.HasPrefix(line, armorEnd) { - return 0, ArmorCorrupt - } - - l.eof = true - l.crcSet = true - return 0, io.EOF - } - - if len(line) > 96 { - return 0, ArmorCorrupt - } - - n = copy(p, line) - bytesToSave := len(line) - n - if bytesToSave > 0 { - if cap(l.buf) < bytesToSave { - l.buf = make([]byte, 0, bytesToSave) - } - l.buf = l.buf[0:bytesToSave] - copy(l.buf, line[n:]) - } - - return -} - -// openpgpReader passes Read calls to the underlying base64 decoder, but keeps -// a running CRC of the resulting data and checks the CRC against the value -// found by the lineReader at EOF. -type openpgpReader struct { - lReader *lineReader - b64Reader io.Reader - currentCRC uint32 -} - -func (r *openpgpReader) Read(p []byte) (n int, err error) { - n, err = r.b64Reader.Read(p) - r.currentCRC = crc24(r.currentCRC, p[:n]) - - if err == io.EOF && r.lReader.crcSet && r.lReader.crc != uint32(r.currentCRC&crc24Mask) { - return 0, ArmorCorrupt - } - - return -} - -// Decode reads a PGP armored block from the given Reader. It will ignore -// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The -// given Reader is not usable after calling this function: an arbitrary amount -// of data may have been read past the end of the block. 
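Editorial aside: the '=' line parsed above carries a base64-encoded 24-bit CRC of the armor body (RFC 4880, section 6.1). A standalone sketch of producing that checksum line, restating the crc24 routine from the deleted armor.go:

package main

import (
	"encoding/base64"
	"fmt"
)

// crc24 is the OpenPGP checksum from RFC 4880, section 6.1, as in armor.go.
func crc24(crc uint32, d []byte) uint32 {
	for _, b := range d {
		crc ^= uint32(b) << 16
		for i := 0; i < 8; i++ {
			crc <<= 1
			if crc&0x1000000 != 0 {
				crc ^= 0x1864cfb // crc24Poly
			}
		}
	}
	return crc
}

func main() {
	body := []byte("hello, armor body")
	sum := crc24(0xb704ce, body) & 0xffffff // crc24Init, crc24Mask

	// The armor trailer encodes the 3 checksum bytes in base64 after '='.
	b := []byte{byte(sum >> 16), byte(sum >> 8), byte(sum)}
	fmt.Println("=" + base64.StdEncoding.EncodeToString(b))
}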
-func Decode(in io.Reader) (p *Block, err error) { - r := bufio.NewReaderSize(in, 100) - var line []byte - ignoreNext := false - -TryNextBlock: - p = nil - - // Skip leading garbage - for { - ignoreThis := ignoreNext - line, ignoreNext, err = r.ReadLine() - if err != nil { - return - } - if ignoreNext || ignoreThis { - continue - } - line = bytes.TrimSpace(line) - if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) { - break - } - } - - p = new(Block) - p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)]) - p.Header = make(map[string]string) - nextIsContinuation := false - var lastKey string - - // Read headers - for { - isContinuation := nextIsContinuation - line, nextIsContinuation, err = r.ReadLine() - if err != nil { - p = nil - return - } - if isContinuation { - p.Header[lastKey] += string(line) - continue - } - line = bytes.TrimSpace(line) - if len(line) == 0 { - break - } - - i := bytes.Index(line, []byte(": ")) - if i == -1 { - goto TryNextBlock - } - lastKey = string(line[:i]) - p.Header[lastKey] = string(line[i+2:]) - } - - p.lReader.in = r - p.oReader.currentCRC = crc24Init - p.oReader.lReader = &p.lReader - p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) - p.Body = &p.oReader - - return -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/armor/encode.go b/src/vendor/golang.org/x/crypto/openpgp/armor/encode.go deleted file mode 100644 index 5b6e16c19..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/armor/encode.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package armor - -import ( - "encoding/base64" - "io" -) - -var armorHeaderSep = []byte(": ") -var blockEnd = []byte("\n=") -var newline = []byte("\n") -var armorEndOfLineOut = []byte("-----\n") - -// writeSlices writes its arguments to the given Writer. -func writeSlices(out io.Writer, slices ...[]byte) (err error) { - for _, s := range slices { - _, err = out.Write(s) - if err != nil { - return err - } - } - return -} - -// lineBreaker breaks data across several lines, all of the same byte length -// (except possibly the last). Lines are broken with a single '\n'. -type lineBreaker struct { - lineLength int - line []byte - used int - out io.Writer - haveWritten bool -} - -func newLineBreaker(out io.Writer, lineLength int) *lineBreaker { - return &lineBreaker{ - lineLength: lineLength, - line: make([]byte, lineLength), - used: 0, - out: out, - } -} - -func (l *lineBreaker) Write(b []byte) (n int, err error) { - n = len(b) - - if n == 0 { - return - } - - if l.used == 0 && l.haveWritten { - _, err = l.out.Write([]byte{'\n'}) - if err != nil { - return - } - } - - if l.used+len(b) < l.lineLength { - l.used += copy(l.line[l.used:], b) - return - } - - l.haveWritten = true - _, err = l.out.Write(l.line[0:l.used]) - if err != nil { - return - } - excess := l.lineLength - l.used - l.used = 0 - - _, err = l.out.Write(b[0:excess]) - if err != nil { - return - } - - _, err = l.Write(b[excess:]) - return -} - -func (l *lineBreaker) Close() (err error) { - if l.used > 0 { - _, err = l.out.Write(l.line[0:l.used]) - if err != nil { - return - } - } - - return -} - -// encoding keeps track of a running CRC24 over the data which has been written -// to it and outputs a OpenPGP checksum when closed, followed by an armor -// trailer. 
-// -// It's built into a stack of io.Writers: -// -// encoding -> base64 encoder -> lineBreaker -> out -type encoding struct { - out io.Writer - breaker *lineBreaker - b64 io.WriteCloser - crc uint32 - blockType []byte -} - -func (e *encoding) Write(data []byte) (n int, err error) { - e.crc = crc24(e.crc, data) - return e.b64.Write(data) -} - -func (e *encoding) Close() (err error) { - err = e.b64.Close() - if err != nil { - return - } - e.breaker.Close() - - var checksumBytes [3]byte - checksumBytes[0] = byte(e.crc >> 16) - checksumBytes[1] = byte(e.crc >> 8) - checksumBytes[2] = byte(e.crc) - - var b64ChecksumBytes [4]byte - base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) - - return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) -} - -// Encode returns a WriteCloser which will encode the data written to it in -// OpenPGP armor. -func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { - bType := []byte(blockType) - err = writeSlices(out, armorStart, bType, armorEndOfLineOut) - if err != nil { - return - } - - for k, v := range headers { - err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) - if err != nil { - return - } - } - - _, err = out.Write(newline) - if err != nil { - return - } - - e := &encoding{ - out: out, - breaker: newLineBreaker(out, 64), - crc: crc24Init, - blockType: bType, - } - e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) - return e, nil -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/canonical_text.go b/src/vendor/golang.org/x/crypto/openpgp/canonical_text.go deleted file mode 100644 index e601e389f..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/canonical_text.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import "hash" - -// NewCanonicalTextHash reformats text written to it into the canonical -// form and then applies the hash h. See RFC 4880, section 5.2.1. -func NewCanonicalTextHash(h hash.Hash) hash.Hash { - return &canonicalTextHash{h, 0} -} - -type canonicalTextHash struct { - h hash.Hash - s int -} - -var newline = []byte{'\r', '\n'} - -func (cth *canonicalTextHash) Write(buf []byte) (int, error) { - start := 0 - - for i, c := range buf { - switch cth.s { - case 0: - if c == '\r' { - cth.s = 1 - } else if c == '\n' { - cth.h.Write(buf[start:i]) - cth.h.Write(newline) - start = i + 1 - } - case 1: - cth.s = 0 - } - } - - cth.h.Write(buf[start:]) - return len(buf), nil -} - -func (cth *canonicalTextHash) Sum(in []byte) []byte { - return cth.h.Sum(in) -} - -func (cth *canonicalTextHash) Reset() { - cth.h.Reset() - cth.s = 0 -} - -func (cth *canonicalTextHash) Size() int { - return cth.h.Size() -} - -func (cth *canonicalTextHash) BlockSize() int { - return cth.h.BlockSize() -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go b/src/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go deleted file mode 100644 index 644b2e078..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go +++ /dev/null @@ -1,424 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package clearsign generates and processes OpenPGP, clear-signed data. See -// RFC 4880, section 7. 
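For orientation, a minimal, illustrative sketch (not part of the vendor diff) of how the armor package deleted above is typically driven; it uses only the Encode and Decode entry points shown in the removed files:

package main

import (
	"bytes"
	"fmt"
	"io"

	"golang.org/x/crypto/openpgp/armor"
)

func main() {
	var buf bytes.Buffer

	// Encode returns a WriteCloser; Close appends the CRC24 checksum line
	// and the armor trailer.
	w, err := armor.Encode(&buf, "PGP MESSAGE", map[string]string{"Version": "example"})
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello armor")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// Decode skips leading garbage and exposes the payload via Block.Body.
	block, err := armor.Decode(&buf)
	if err != nil {
		panic(err)
	}
	body, err := io.ReadAll(block.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(block.Type, string(body)) // PGP MESSAGE hello armor
}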
-// -// Clearsigned messages are cryptographically signed, but the contents of the -// message are kept in plaintext so that it can be read without special tools. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. -package clearsign // import "golang.org/x/crypto/openpgp/clearsign" - -import ( - "bufio" - "bytes" - "crypto" - "fmt" - "hash" - "io" - "net/textproto" - "strconv" - "strings" - - "golang.org/x/crypto/openpgp/armor" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/packet" -) - -// A Block represents a clearsigned message. A signature on a Block can -// be checked by passing Bytes into openpgp.CheckDetachedSignature. -type Block struct { - Headers textproto.MIMEHeader // Optional unverified Hash headers - Plaintext []byte // The original message text - Bytes []byte // The signed message - ArmoredSignature *armor.Block // The signature block -} - -// start is the marker which denotes the beginning of a clearsigned message. -var start = []byte("\n-----BEGIN PGP SIGNED MESSAGE-----") - -// dashEscape is prefixed to any lines that begin with a hyphen so that they -// can't be confused with endText. -var dashEscape = []byte("- ") - -// endText is a marker which denotes the end of the message and the start of -// an armored signature. -var endText = []byte("-----BEGIN PGP SIGNATURE-----") - -// end is a marker which denotes the end of the armored signature. -var end = []byte("\n-----END PGP SIGNATURE-----") - -var crlf = []byte("\r\n") -var lf = byte('\n') - -// getLine returns the first \r\n or \n delineated line from the given byte -// array. The line does not include the \r\n or \n. The remainder of the byte -// array (also not including the new line bytes) is also returned and this will -// always be smaller than the original argument. -func getLine(data []byte) (line, rest []byte) { - i := bytes.Index(data, []byte{'\n'}) - var j int - if i < 0 { - i = len(data) - j = i - } else { - j = i + 1 - if i > 0 && data[i-1] == '\r' { - i-- - } - } - return data[0:i], data[j:] -} - -// Decode finds the first clearsigned message in data and returns it, as well as -// the suffix of data which remains after the message. Any prefix data is -// discarded. -// -// If no message is found, or if the message is invalid, Decode returns nil and -// the whole data slice. The only allowed header type is Hash, and it is not -// verified against the signature hash. -func Decode(data []byte) (b *Block, rest []byte) { - // start begins with a newline. However, at the very beginning of - // the byte array, we'll accept the start string without it. - rest = data - if bytes.HasPrefix(data, start[1:]) { - rest = rest[len(start)-1:] - } else if i := bytes.Index(data, start); i >= 0 { - rest = rest[i+len(start):] - } else { - return nil, data - } - - // Consume the start line and check it does not have a suffix. - suffix, rest := getLine(rest) - if len(suffix) != 0 { - return nil, data - } - - var line []byte - b = &Block{ - Headers: make(textproto.MIMEHeader), - } - - // Next come a series of header lines. - for { - // This loop terminates because getLine's second result is - // always smaller than its argument. 
- if len(rest) == 0 { - return nil, data - } - // An empty line marks the end of the headers. - if line, rest = getLine(rest); len(line) == 0 { - break - } - - // Reject headers with control or Unicode characters. - if i := bytes.IndexFunc(line, func(r rune) bool { - return r < 0x20 || r > 0x7e - }); i != -1 { - return nil, data - } - - i := bytes.Index(line, []byte{':'}) - if i == -1 { - return nil, data - } - - key, val := string(line[0:i]), string(line[i+1:]) - key = strings.TrimSpace(key) - if key != "Hash" { - return nil, data - } - val = strings.TrimSpace(val) - b.Headers.Add(key, val) - } - - firstLine := true - for { - start := rest - - line, rest = getLine(rest) - if len(line) == 0 && len(rest) == 0 { - // No armored data was found, so this isn't a complete message. - return nil, data - } - if bytes.Equal(line, endText) { - // Back up to the start of the line because armor expects to see the - // header line. - rest = start - break - } - - // The final CRLF isn't included in the hash so we don't write it until - // we've seen the next line. - if firstLine { - firstLine = false - } else { - b.Bytes = append(b.Bytes, crlf...) - } - - if bytes.HasPrefix(line, dashEscape) { - line = line[2:] - } - line = bytes.TrimRight(line, " \t") - b.Bytes = append(b.Bytes, line...) - - b.Plaintext = append(b.Plaintext, line...) - b.Plaintext = append(b.Plaintext, lf) - } - - // We want to find the extent of the armored data (including any newlines at - // the end). - i := bytes.Index(rest, end) - if i == -1 { - return nil, data - } - i += len(end) - for i < len(rest) && (rest[i] == '\r' || rest[i] == '\n') { - i++ - } - armored := rest[:i] - rest = rest[i:] - - var err error - b.ArmoredSignature, err = armor.Decode(bytes.NewBuffer(armored)) - if err != nil { - return nil, data - } - - return b, rest -} - -// A dashEscaper is an io.WriteCloser which processes the body of a clear-signed -// message. The clear-signed message is written to buffered and a hash, suitable -// for signing, is maintained in h. -// -// When closed, an armored signature is created and written to complete the -// message. -type dashEscaper struct { - buffered *bufio.Writer - hashers []hash.Hash // one per key in privateKeys - hashType crypto.Hash - toHash io.Writer // writes to all the hashes in hashers - - atBeginningOfLine bool - isFirstLine bool - - whitespace []byte - byteBuf []byte // a one byte buffer to save allocations - - privateKeys []*packet.PrivateKey - config *packet.Config -} - -func (d *dashEscaper) Write(data []byte) (n int, err error) { - for _, b := range data { - d.byteBuf[0] = b - - if d.atBeginningOfLine { - // The final CRLF isn't included in the hash so we have to wait - // until this point (the start of the next line) before writing it. - if !d.isFirstLine { - d.toHash.Write(crlf) - } - d.isFirstLine = false - } - - // Any whitespace at the end of the line has to be removed so we - // buffer it until we find out whether there's more on this line. - if b == ' ' || b == '\t' || b == '\r' { - d.whitespace = append(d.whitespace, b) - d.atBeginningOfLine = false - continue - } - - if d.atBeginningOfLine { - // At the beginning of a line, hyphens have to be escaped. - if b == '-' { - // The signature isn't calculated over the dash-escaped text so - // the escape is only written to buffered. - if _, err = d.buffered.Write(dashEscape); err != nil { - return - } - d.toHash.Write(d.byteBuf) - d.atBeginningOfLine = false - } else if b == '\n' { - // Nothing to do because we delay writing CRLF to the hash. 
- } else { - d.toHash.Write(d.byteBuf) - d.atBeginningOfLine = false - } - if err = d.buffered.WriteByte(b); err != nil { - return - } - } else { - if b == '\n' { - // We got a raw \n. Drop any trailing whitespace and write a - // CRLF. - d.whitespace = d.whitespace[:0] - // We delay writing CRLF to the hash until the start of the - // next line. - if err = d.buffered.WriteByte(b); err != nil { - return - } - d.atBeginningOfLine = true - } else { - // Any buffered whitespace wasn't at the end of the line so - // we need to write it out. - if len(d.whitespace) > 0 { - d.toHash.Write(d.whitespace) - if _, err = d.buffered.Write(d.whitespace); err != nil { - return - } - d.whitespace = d.whitespace[:0] - } - d.toHash.Write(d.byteBuf) - if err = d.buffered.WriteByte(b); err != nil { - return - } - } - } - } - - n = len(data) - return -} - -func (d *dashEscaper) Close() (err error) { - if !d.atBeginningOfLine { - if err = d.buffered.WriteByte(lf); err != nil { - return - } - } - - out, err := armor.Encode(d.buffered, "PGP SIGNATURE", nil) - if err != nil { - return - } - - t := d.config.Now() - for i, k := range d.privateKeys { - sig := new(packet.Signature) - sig.SigType = packet.SigTypeText - sig.PubKeyAlgo = k.PubKeyAlgo - sig.Hash = d.hashType - sig.CreationTime = t - sig.IssuerKeyId = &k.KeyId - - if err = sig.Sign(d.hashers[i], k, d.config); err != nil { - return - } - if err = sig.Serialize(out); err != nil { - return - } - } - - if err = out.Close(); err != nil { - return - } - if err = d.buffered.Flush(); err != nil { - return - } - return -} - -// Encode returns a WriteCloser which will clear-sign a message with privateKey -// and write it to w. If config is nil, sensible defaults are used. -func Encode(w io.Writer, privateKey *packet.PrivateKey, config *packet.Config) (plaintext io.WriteCloser, err error) { - return EncodeMulti(w, []*packet.PrivateKey{privateKey}, config) -} - -// EncodeMulti returns a WriteCloser which will clear-sign a message with all the -// private keys indicated and write it to w. If config is nil, sensible defaults -// are used. -func EncodeMulti(w io.Writer, privateKeys []*packet.PrivateKey, config *packet.Config) (plaintext io.WriteCloser, err error) { - for _, k := range privateKeys { - if k.Encrypted { - return nil, errors.InvalidArgumentError(fmt.Sprintf("signing key %s is encrypted", k.KeyIdString())) - } - } - - hashType := config.Hash() - name := nameOfHash(hashType) - if len(name) == 0 { - return nil, errors.UnsupportedError("unknown hash type: " + strconv.Itoa(int(hashType))) - } - - if !hashType.Available() { - return nil, errors.UnsupportedError("unsupported hash type: " + strconv.Itoa(int(hashType))) - } - var hashers []hash.Hash - var ws []io.Writer - for range privateKeys { - h := hashType.New() - hashers = append(hashers, h) - ws = append(ws, h) - } - toHash := io.MultiWriter(ws...) - - buffered := bufio.NewWriter(w) - // start has a \n at the beginning that we don't want here. 
- if _, err = buffered.Write(start[1:]); err != nil { - return - } - if err = buffered.WriteByte(lf); err != nil { - return - } - if _, err = buffered.WriteString("Hash: "); err != nil { - return - } - if _, err = buffered.WriteString(name); err != nil { - return - } - if err = buffered.WriteByte(lf); err != nil { - return - } - if err = buffered.WriteByte(lf); err != nil { - return - } - - plaintext = &dashEscaper{ - buffered: buffered, - hashers: hashers, - hashType: hashType, - toHash: toHash, - - atBeginningOfLine: true, - isFirstLine: true, - - byteBuf: make([]byte, 1), - - privateKeys: privateKeys, - config: config, - } - - return -} - -// nameOfHash returns the OpenPGP name for the given hash, or the empty string -// if the name isn't known. See RFC 4880, section 9.4. -func nameOfHash(h crypto.Hash) string { - switch h { - case crypto.MD5: - return "MD5" - case crypto.SHA1: - return "SHA1" - case crypto.RIPEMD160: - return "RIPEMD160" - case crypto.SHA224: - return "SHA224" - case crypto.SHA256: - return "SHA256" - case crypto.SHA384: - return "SHA384" - case crypto.SHA512: - return "SHA512" - } - return "" -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go b/src/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go deleted file mode 100644 index 743b35a12..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package elgamal implements ElGamal encryption, suitable for OpenPGP, -// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on -// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31, -// n. 4, 1985, pp. 469-472. -// -// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it -// unsuitable for other protocols. RSA should be used in preference in any -// case. -// -// Deprecated: this package was only provided to support ElGamal encryption in -// OpenPGP. The golang.org/x/crypto/openpgp package is now deprecated (see -// https://golang.org/issue/44226), and ElGamal in the OpenPGP ecosystem has -// compatibility and security issues (see https://eprint.iacr.org/2021/923). -// Moreover, this package doesn't protect against side-channel attacks. -package elgamal // import "golang.org/x/crypto/openpgp/elgamal" - -import ( - "crypto/rand" - "crypto/subtle" - "errors" - "io" - "math/big" -) - -// PublicKey represents an ElGamal public key. -type PublicKey struct { - G, P, Y *big.Int -} - -// PrivateKey represents an ElGamal private key. -type PrivateKey struct { - PublicKey - X *big.Int -} - -// Encrypt encrypts the given message to the given public key. The result is a -// pair of integers. Errors can result from reading random, or because msg is -// too large to be encrypted to the public key. 
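The clearsign Block documented above is verified by handing Block.Bytes and the armored signature to openpgp.CheckDetachedSignature, as its doc comment notes. A hedged round-trip sketch (illustrative only; it assumes the vendored openpgp, clearsign, and packet trees and blank-imports crypto/sha256 so the default hash is registered):

package main

import (
	"bytes"
	_ "crypto/sha256" // registers SHA-256, the default hash used for the signatures
	"fmt"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/clearsign"
)

func main() {
	// A throwaway 2048-bit signing key; generation takes a moment.
	entity, err := openpgp.NewEntity("Example", "", "example@example.com", nil)
	if err != nil {
		panic(err)
	}

	// Clear-sign a short message.
	var signed bytes.Buffer
	w, err := clearsign.Encode(&signed, entity.PrivateKey, nil)
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello clearsign\n")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// Decode the block and check the signature over Bytes.
	block, _ := clearsign.Decode(signed.Bytes())
	if block == nil {
		panic("no clearsigned block found")
	}
	keyring := openpgp.EntityList{entity}
	if _, err := openpgp.CheckDetachedSignature(keyring, bytes.NewReader(block.Bytes), block.ArmoredSignature.Body); err != nil {
		panic(err)
	}
	fmt.Printf("verified: %s", block.Plaintext)
}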
-func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) { - pLen := (pub.P.BitLen() + 7) / 8 - if len(msg) > pLen-11 { - err = errors.New("elgamal: message too long") - return - } - - // EM = 0x02 || PS || 0x00 || M - em := make([]byte, pLen-1) - em[0] = 2 - ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):] - err = nonZeroRandomBytes(ps, random) - if err != nil { - return - } - em[len(em)-len(msg)-1] = 0 - copy(mm, msg) - - m := new(big.Int).SetBytes(em) - - k, err := rand.Int(random, pub.P) - if err != nil { - return - } - - c1 = new(big.Int).Exp(pub.G, k, pub.P) - s := new(big.Int).Exp(pub.Y, k, pub.P) - c2 = s.Mul(s, m) - c2.Mod(c2, pub.P) - - return -} - -// Decrypt takes two integers, resulting from an ElGamal encryption, and -// returns the plaintext of the message. An error can result only if the -// ciphertext is invalid. Users should keep in mind that this is a padding -// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can -// be used to break the cryptosystem. See “Chosen Ciphertext Attacks -// Against Protocols Based on the RSA Encryption Standard PKCS #1”, Daniel -// Bleichenbacher, Advances in Cryptology (Crypto '98), -func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) { - s := new(big.Int).Exp(c1, priv.X, priv.P) - if s.ModInverse(s, priv.P) == nil { - return nil, errors.New("elgamal: invalid private key") - } - s.Mul(s, c2) - s.Mod(s, priv.P) - em := s.Bytes() - - firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2) - - // The remainder of the plaintext must be a string of non-zero random - // octets, followed by a 0, followed by the message. - // lookingForIndex: 1 iff we are still looking for the zero. - // index: the offset of the first zero byte. - var lookingForIndex, index int - lookingForIndex = 1 - - for i := 1; i < len(em); i++ { - equals0 := subtle.ConstantTimeByteEq(em[i], 0) - index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index) - lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex) - } - - if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 { - return nil, errors.New("elgamal: decryption error") - } - return em[index+1:], nil -} - -// nonZeroRandomBytes fills the given slice with non-zero random octets. -func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) { - _, err = io.ReadFull(rand, s) - if err != nil { - return - } - - for i := 0; i < len(s); i++ { - for s[i] == 0 { - _, err = io.ReadFull(rand, s[i:i+1]) - if err != nil { - return - } - } - } - - return -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/errors/errors.go b/src/vendor/golang.org/x/crypto/openpgp/errors/errors.go deleted file mode 100644 index 1d7a0ea05..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/errors/errors.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package errors contains common error types for the OpenPGP packages. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226.
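The Encrypt/Decrypt pair in the elgamal file removed above round-trips as sketched below. The 256-bit prime and generator 2 are toy assumptions chosen only so the example runs; real OpenPGP ElGamal keys carry much larger, standardized group parameters parsed from key packets:

package main

import (
	"crypto/rand"
	"fmt"
	"math/big"

	"golang.org/x/crypto/openpgp/elgamal"
)

func main() {
	// Toy parameters: a random 256-bit prime with generator 2, purely to
	// make the Encrypt/Decrypt round trip runnable.
	p, err := rand.Prime(rand.Reader, 256)
	if err != nil {
		panic(err)
	}
	g := big.NewInt(2)
	x, err := rand.Int(rand.Reader, new(big.Int).Sub(p, big.NewInt(2)))
	if err != nil {
		panic(err)
	}
	x.Add(x, big.NewInt(1)) // 1 <= x <= p-2

	priv := &elgamal.PrivateKey{
		PublicKey: elgamal.PublicKey{G: g, P: p, Y: new(big.Int).Exp(g, x, p)},
		X:         x,
	}

	c1, c2, err := elgamal.Encrypt(rand.Reader, &priv.PublicKey, []byte("hello"))
	if err != nil {
		panic(err)
	}
	msg, err := elgamal.Decrypt(priv, c1, c2)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(msg)) // hello
}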
-package errors // import "golang.org/x/crypto/openpgp/errors" - -import ( - "strconv" -) - -// A StructuralError is returned when OpenPGP data is found to be syntactically -// invalid. -type StructuralError string - -func (s StructuralError) Error() string { - return "openpgp: invalid data: " + string(s) -} - -// UnsupportedError indicates that, although the OpenPGP data is valid, it -// makes use of currently unimplemented features. -type UnsupportedError string - -func (s UnsupportedError) Error() string { - return "openpgp: unsupported feature: " + string(s) -} - -// InvalidArgumentError indicates that the caller is in error and passed an -// incorrect value. -type InvalidArgumentError string - -func (i InvalidArgumentError) Error() string { - return "openpgp: invalid argument: " + string(i) -} - -// SignatureError indicates that a syntactically valid signature failed to -// validate. -type SignatureError string - -func (b SignatureError) Error() string { - return "openpgp: invalid signature: " + string(b) -} - -type keyIncorrectError int - -func (ki keyIncorrectError) Error() string { - return "openpgp: incorrect key" -} - -var ErrKeyIncorrect error = keyIncorrectError(0) - -type unknownIssuerError int - -func (unknownIssuerError) Error() string { - return "openpgp: signature made by unknown entity" -} - -var ErrUnknownIssuer error = unknownIssuerError(0) - -type keyRevokedError int - -func (keyRevokedError) Error() string { - return "openpgp: signature made by revoked key" -} - -var ErrKeyRevoked error = keyRevokedError(0) - -type UnknownPacketTypeError uint8 - -func (upte UnknownPacketTypeError) Error() string { - return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/keys.go b/src/vendor/golang.org/x/crypto/openpgp/keys.go deleted file mode 100644 index faa2fb369..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/keys.go +++ /dev/null @@ -1,693 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import ( - "crypto/rsa" - "io" - "time" - - "golang.org/x/crypto/openpgp/armor" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/packet" -) - -// PublicKeyType is the armor type for a PGP public key. -var PublicKeyType = "PGP PUBLIC KEY BLOCK" - -// PrivateKeyType is the armor type for a PGP private key. -var PrivateKeyType = "PGP PRIVATE KEY BLOCK" - -// An Entity represents the components of an OpenPGP key: a primary public key -// (which must be a signing key), one or more identities claimed by that key, -// and zero or more subkeys, which may be encryption keys. -type Entity struct { - PrimaryKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Identities map[string]*Identity // indexed by Identity.Name - Revocations []*packet.Signature - Subkeys []Subkey -} - -// An Identity represents an identity claimed by an Entity and zero or more -// assertions by other entities about that claim. -type Identity struct { - Name string // by convention, has the form "Full Name (comment) " - UserId *packet.UserId - SelfSignature *packet.Signature - Signatures []*packet.Signature -} - -// A Subkey is an additional public key in an Entity. Subkeys can be used for -// encryption. -type Subkey struct { - PublicKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Sig *packet.Signature -} - -// A Key identifies a specific public key in an Entity. 
This is either the -// Entity's primary key or a subkey. -type Key struct { - Entity *Entity - PublicKey *packet.PublicKey - PrivateKey *packet.PrivateKey - SelfSignature *packet.Signature -} - -// A KeyRing provides access to public and private keys. -type KeyRing interface { - // KeysById returns the set of keys that have the given key id. - KeysById(id uint64) []Key - // KeysByIdAndUsage returns the set of keys with the given id - // that also meet the key usage given by requiredUsage. - // The requiredUsage is expressed as the bitwise-OR of - // packet.KeyFlag* values. - KeysByIdUsage(id uint64, requiredUsage byte) []Key - // DecryptionKeys returns all private keys that are valid for - // decryption. - DecryptionKeys() []Key -} - -// primaryIdentity returns the Identity marked as primary or the first identity -// if none are so marked. -func (e *Entity) primaryIdentity() *Identity { - var firstIdentity *Identity - for _, ident := range e.Identities { - if firstIdentity == nil { - firstIdentity = ident - } - if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - return ident - } - } - return firstIdentity -} - -// encryptionKey returns the best candidate Key for encrypting a message to the -// given Entity. -func (e *Entity) encryptionKey(now time.Time) (Key, bool) { - candidateSubkey := -1 - - // Iterate the keys to find the newest key - var maxTime time.Time - for i, subkey := range e.Subkeys { - if subkey.Sig.FlagsValid && - subkey.Sig.FlagEncryptCommunications && - subkey.PublicKey.PubKeyAlgo.CanEncrypt() && - !subkey.Sig.KeyExpired(now) && - (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { - candidateSubkey = i - maxTime = subkey.Sig.CreationTime - } - } - - if candidateSubkey != -1 { - subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true - } - - // If we don't have any candidate subkeys for encryption and - // the primary key doesn't have any usage metadata then we - // assume that the primary key is ok. Or, if the primary key is - // marked as ok to encrypt to, then we can obviously use it. - i := e.primaryIdentity() - if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications && - e.PrimaryKey.PubKeyAlgo.CanEncrypt() && - !i.SelfSignature.KeyExpired(now) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true - } - - // This Entity appears to be signing only. - return Key{}, false -} - -// signingKey return the best candidate Key for signing a message with this -// Entity. -func (e *Entity) signingKey(now time.Time) (Key, bool) { - candidateSubkey := -1 - - for i, subkey := range e.Subkeys { - if subkey.Sig.FlagsValid && - subkey.Sig.FlagSign && - subkey.PublicKey.PubKeyAlgo.CanSign() && - !subkey.Sig.KeyExpired(now) { - candidateSubkey = i - break - } - } - - if candidateSubkey != -1 { - subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true - } - - // If we have no candidate subkey then we assume that it's ok to sign - // with the primary key. - i := e.primaryIdentity() - if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign && - !i.SelfSignature.KeyExpired(now) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true - } - - return Key{}, false -} - -// An EntityList contains one or more Entities. -type EntityList []*Entity - -// KeysById returns the set of keys that have the given key id. 
-func (el EntityList) KeysById(id uint64) (keys []Key) { - for _, e := range el { - if e.PrimaryKey.KeyId == id { - var selfSig *packet.Signature - for _, ident := range e.Identities { - if selfSig == nil { - selfSig = ident.SelfSignature - } else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - selfSig = ident.SelfSignature - break - } - } - keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig}) - } - - for _, subKey := range e.Subkeys { - if subKey.PublicKey.KeyId == id { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) - } - } - } - return -} - -// KeysByIdAndUsage returns the set of keys with the given id that also meet -// the key usage given by requiredUsage. The requiredUsage is expressed as -// the bitwise-OR of packet.KeyFlag* values. -func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { - for _, key := range el.KeysById(id) { - if len(key.Entity.Revocations) > 0 { - continue - } - - if key.SelfSignature.RevocationReason != nil { - continue - } - - if key.SelfSignature.FlagsValid && requiredUsage != 0 { - var usage byte - if key.SelfSignature.FlagCertify { - usage |= packet.KeyFlagCertify - } - if key.SelfSignature.FlagSign { - usage |= packet.KeyFlagSign - } - if key.SelfSignature.FlagEncryptCommunications { - usage |= packet.KeyFlagEncryptCommunications - } - if key.SelfSignature.FlagEncryptStorage { - usage |= packet.KeyFlagEncryptStorage - } - if usage&requiredUsage != requiredUsage { - continue - } - } - - keys = append(keys, key) - } - return -} - -// DecryptionKeys returns all private keys that are valid for decryption. -func (el EntityList) DecryptionKeys() (keys []Key) { - for _, e := range el { - for _, subKey := range e.Subkeys { - if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) - } - } - } - return -} - -// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. -func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { - block, err := armor.Decode(r) - if err == io.EOF { - return nil, errors.InvalidArgumentError("no armored data found") - } - if err != nil { - return nil, err - } - if block.Type != PublicKeyType && block.Type != PrivateKeyType { - return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) - } - - return ReadKeyRing(block.Body) -} - -// ReadKeyRing reads one or more public/private keys. Unsupported keys are -// ignored as long as at least a single valid key is found. 
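ReadArmoredKeyRing and EntityList.KeysById, both part of the keys file removed above, are typically combined as in this illustrative sketch (not part of the diff); the entity is generated on the fly so the example stays self-contained:

package main

import (
	"bytes"
	_ "crypto/sha256" // registers the default self-signature hash
	"fmt"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/armor"
)

func main() {
	// A throwaway entity; the default 2048-bit RSA generation takes a moment.
	entity, err := openpgp.NewEntity("Example", "", "example@example.com", nil)
	if err != nil {
		panic(err)
	}

	// Export the public part as an armored keyring...
	var armored bytes.Buffer
	w, err := armor.Encode(&armored, openpgp.PublicKeyType, nil)
	if err != nil {
		panic(err)
	}
	if err := entity.Serialize(w); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// ...and read it back through ReadArmoredKeyRing.
	keyring, err := openpgp.ReadArmoredKeyRing(&armored)
	if err != nil {
		panic(err)
	}
	// KeysById resolves a key id to the primary key or a subkey.
	for _, k := range keyring.KeysById(entity.PrimaryKey.KeyId) {
		fmt.Printf("found key %X\n", k.PublicKey.KeyId)
	}
}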
-func ReadKeyRing(r io.Reader) (el EntityList, err error) { - packets := packet.NewReader(r) - var lastUnsupportedError error - - for { - var e *Entity - e, err = ReadEntity(packets) - if err != nil { - // TODO: warn about skipped unsupported/unreadable keys - if _, ok := err.(errors.UnsupportedError); ok { - lastUnsupportedError = err - err = readToNextPublicKey(packets) - } else if _, ok := err.(errors.StructuralError); ok { - // Skip unreadable, badly-formatted keys - lastUnsupportedError = err - err = readToNextPublicKey(packets) - } - if err == io.EOF { - err = nil - break - } - if err != nil { - el = nil - break - } - } else { - el = append(el, e) - } - } - - if len(el) == 0 && err == nil { - err = lastUnsupportedError - } - return -} - -// readToNextPublicKey reads packets until the start of the entity and leaves -// the first packet of the new entity in the Reader. -func readToNextPublicKey(packets *packet.Reader) (err error) { - var p packet.Packet - for { - p, err = packets.Next() - if err == io.EOF { - return - } else if err != nil { - if _, ok := err.(errors.UnsupportedError); ok { - err = nil - continue - } - return - } - - if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { - packets.Unread(p) - return - } - } -} - -// ReadEntity reads an entity (public key, identities, subkeys etc) from the -// given Reader. -func ReadEntity(packets *packet.Reader) (*Entity, error) { - e := new(Entity) - e.Identities = make(map[string]*Identity) - - p, err := packets.Next() - if err != nil { - return nil, err - } - - var ok bool - if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { - if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { - packets.Unread(p) - return nil, errors.StructuralError("first packet was not a public/private key") - } - e.PrimaryKey = &e.PrivateKey.PublicKey - } - - if !e.PrimaryKey.PubKeyAlgo.CanSign() { - return nil, errors.StructuralError("primary key cannot be used for signatures") - } - - var revocations []*packet.Signature -EachPacket: - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return nil, err - } - - switch pkt := p.(type) { - case *packet.UserId: - if err := addUserID(e, packets, pkt); err != nil { - return nil, err - } - case *packet.Signature: - if pkt.SigType == packet.SigTypeKeyRevocation { - revocations = append(revocations, pkt) - } else if pkt.SigType == packet.SigTypeDirectSignature { - // TODO: RFC4880 5.2.1 permits signatures - // directly on keys (eg. to bind additional - // revocation keys). - } - // Else, ignoring the signature as it does not follow anything - // we would know to attach it to. - case *packet.PrivateKey: - if pkt.IsSubkey == false { - packets.Unread(p) - break EachPacket - } - err = addSubkey(e, packets, &pkt.PublicKey, pkt) - if err != nil { - return nil, err - } - case *packet.PublicKey: - if pkt.IsSubkey == false { - packets.Unread(p) - break EachPacket - } - err = addSubkey(e, packets, pkt, nil) - if err != nil { - return nil, err - } - default: - // we ignore unknown packets - } - } - - if len(e.Identities) == 0 { - return nil, errors.StructuralError("entity without any identities") - } - - for _, revocation := range revocations { - err = e.PrimaryKey.VerifyRevocationSignature(revocation) - if err == nil { - e.Revocations = append(e.Revocations, revocation) - } else { - // TODO: RFC 4880 5.2.3.15 defines revocation keys. 
- return nil, errors.StructuralError("revocation signature signed by alternate key") - } - } - - return e, nil -} - -func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error { - // Make a new Identity object, that we might wind up throwing away. - // We'll only add it if we get a valid self-signature over this - // userID. - identity := new(Identity) - identity.Name = pkt.Id - identity.UserId = pkt - - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return err - } - - sig, ok := p.(*packet.Signature) - if !ok { - packets.Unread(p) - break - } - - if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId { - if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { - return errors.StructuralError("user ID self-signature invalid: " + err.Error()) - } - identity.SelfSignature = sig - e.Identities[pkt.Id] = identity - } else { - identity.Signatures = append(identity.Signatures, sig) - } - } - - return nil -} - -func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error { - var subKey Subkey - subKey.PublicKey = pub - subKey.PrivateKey = priv - - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return errors.StructuralError("subkey signature invalid: " + err.Error()) - } - - sig, ok := p.(*packet.Signature) - if !ok { - packets.Unread(p) - break - } - - if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation { - return errors.StructuralError("subkey signature with wrong type") - } - - if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil { - return errors.StructuralError("subkey signature invalid: " + err.Error()) - } - - switch sig.SigType { - case packet.SigTypeSubkeyRevocation: - subKey.Sig = sig - case packet.SigTypeSubkeyBinding: - - if shouldReplaceSubkeySig(subKey.Sig, sig) { - subKey.Sig = sig - } - } - } - - if subKey.Sig == nil { - return errors.StructuralError("subkey packet not followed by signature") - } - - e.Subkeys = append(e.Subkeys, subKey) - - return nil -} - -func shouldReplaceSubkeySig(existingSig, potentialNewSig *packet.Signature) bool { - if potentialNewSig == nil { - return false - } - - if existingSig == nil { - return true - } - - if existingSig.SigType == packet.SigTypeSubkeyRevocation { - return false // never override a revocation signature - } - - return potentialNewSig.CreationTime.After(existingSig.CreationTime) -} - -const defaultRSAKeyBits = 2048 - -// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a -// single identity composed of the given full name, comment and email, any of -// which may be empty but must not contain any of "()<>\x00". -// If config is nil, sensible defaults will be used. 
-func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) { - creationTime := config.Now() - - bits := defaultRSAKeyBits - if config != nil && config.RSABits != 0 { - bits = config.RSABits - } - - uid := packet.NewUserId(name, comment, email) - if uid == nil { - return nil, errors.InvalidArgumentError("user id field contained invalid characters") - } - signingPriv, err := rsa.GenerateKey(config.Random(), bits) - if err != nil { - return nil, err - } - encryptingPriv, err := rsa.GenerateKey(config.Random(), bits) - if err != nil { - return nil, err - } - - e := &Entity{ - PrimaryKey: packet.NewRSAPublicKey(creationTime, &signingPriv.PublicKey), - PrivateKey: packet.NewRSAPrivateKey(creationTime, signingPriv), - Identities: make(map[string]*Identity), - } - isPrimaryId := true - e.Identities[uid.Id] = &Identity{ - Name: uid.Id, - UserId: uid, - SelfSignature: &packet.Signature{ - CreationTime: creationTime, - SigType: packet.SigTypePositiveCert, - PubKeyAlgo: packet.PubKeyAlgoRSA, - Hash: config.Hash(), - IsPrimaryId: &isPrimaryId, - FlagsValid: true, - FlagSign: true, - FlagCertify: true, - IssuerKeyId: &e.PrimaryKey.KeyId, - }, - } - err = e.Identities[uid.Id].SelfSignature.SignUserId(uid.Id, e.PrimaryKey, e.PrivateKey, config) - if err != nil { - return nil, err - } - - // If the user passes in a DefaultHash via packet.Config, - // set the PreferredHash for the SelfSignature. - if config != nil && config.DefaultHash != 0 { - e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)} - } - - // Likewise for DefaultCipher. - if config != nil && config.DefaultCipher != 0 { - e.Identities[uid.Id].SelfSignature.PreferredSymmetric = []uint8{uint8(config.DefaultCipher)} - } - - e.Subkeys = make([]Subkey, 1) - e.Subkeys[0] = Subkey{ - PublicKey: packet.NewRSAPublicKey(creationTime, &encryptingPriv.PublicKey), - PrivateKey: packet.NewRSAPrivateKey(creationTime, encryptingPriv), - Sig: &packet.Signature{ - CreationTime: creationTime, - SigType: packet.SigTypeSubkeyBinding, - PubKeyAlgo: packet.PubKeyAlgoRSA, - Hash: config.Hash(), - FlagsValid: true, - FlagEncryptStorage: true, - FlagEncryptCommunications: true, - IssuerKeyId: &e.PrimaryKey.KeyId, - }, - } - e.Subkeys[0].PublicKey.IsSubkey = true - e.Subkeys[0].PrivateKey.IsSubkey = true - err = e.Subkeys[0].Sig.SignKey(e.Subkeys[0].PublicKey, e.PrivateKey, config) - if err != nil { - return nil, err - } - return e, nil -} - -// SerializePrivate serializes an Entity, including private key material, but -// excluding signatures from other entities, to the given Writer. -// Identities and subkeys are re-signed in case they changed since NewEntry. -// If config is nil, sensible defaults will be used. 
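NewEntity (above) and SerializePrivate (whose body follows) are commonly paired: generate a key, persist the private material, and read it back with ReadKeyRing. A hedged sketch; the 1024-bit key size and the explicit packet.Config fields are only illustrative choices to keep the example fast:

package main

import (
	"bytes"
	"crypto"
	_ "crypto/sha256" // registers the hash named in DefaultHash
	"fmt"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	// Non-default knobs go through packet.Config; RSABits and DefaultHash
	// are the fields consulted by NewEntity above.
	cfg := &packet.Config{
		RSABits:     1024, // small key purely to keep the example quick
		DefaultHash: crypto.SHA256,
	}
	entity, err := openpgp.NewEntity("Example", "test key", "example@example.com", cfg)
	if err != nil {
		panic(err)
	}

	// SerializePrivate re-signs identities and subkeys and writes the
	// private key material; ReadKeyRing can parse it back.
	var buf bytes.Buffer
	if err := entity.SerializePrivate(&buf, cfg); err != nil {
		panic(err)
	}
	keyring, err := openpgp.ReadKeyRing(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d entity(ies), primary key %X\n", len(keyring), keyring[0].PrimaryKey.KeyId)
}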
-func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { - err = e.PrivateKey.Serialize(w) - if err != nil { - return - } - for _, ident := range e.Identities { - err = ident.UserId.Serialize(w) - if err != nil { - return - } - err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) - if err != nil { - return - } - err = ident.SelfSignature.Serialize(w) - if err != nil { - return - } - } - for _, subkey := range e.Subkeys { - err = subkey.PrivateKey.Serialize(w) - if err != nil { - return - } - err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) - if err != nil { - return - } - err = subkey.Sig.Serialize(w) - if err != nil { - return - } - } - return nil -} - -// Serialize writes the public part of the given Entity to w, including -// signatures from other entities. No private key material will be output. -func (e *Entity) Serialize(w io.Writer) error { - err := e.PrimaryKey.Serialize(w) - if err != nil { - return err - } - for _, ident := range e.Identities { - err = ident.UserId.Serialize(w) - if err != nil { - return err - } - err = ident.SelfSignature.Serialize(w) - if err != nil { - return err - } - for _, sig := range ident.Signatures { - err = sig.Serialize(w) - if err != nil { - return err - } - } - } - for _, subkey := range e.Subkeys { - err = subkey.PublicKey.Serialize(w) - if err != nil { - return err - } - err = subkey.Sig.Serialize(w) - if err != nil { - return err - } - } - return nil -} - -// SignIdentity adds a signature to e, from signer, attesting that identity is -// associated with e. The provided identity must already be an element of -// e.Identities and the private key of signer must have been decrypted if -// necessary. -// If config is nil, sensible defaults will be used. -func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error { - if signer.PrivateKey == nil { - return errors.InvalidArgumentError("signing Entity must have a private key") - } - if signer.PrivateKey.Encrypted { - return errors.InvalidArgumentError("signing Entity's private key must be decrypted") - } - ident, ok := e.Identities[identity] - if !ok { - return errors.InvalidArgumentError("given identity string not found in Entity") - } - - sig := &packet.Signature{ - SigType: packet.SigTypeGenericCert, - PubKeyAlgo: signer.PrivateKey.PubKeyAlgo, - Hash: config.Hash(), - CreationTime: config.Now(), - IssuerKeyId: &signer.PrivateKey.KeyId, - } - if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil { - return err - } - ident.Signatures = append(ident.Signatures, sig) - return nil -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/compressed.go b/src/vendor/golang.org/x/crypto/openpgp/packet/compressed.go deleted file mode 100644 index e8f0b5caa..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/packet/compressed.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "compress/bzip2" - "compress/flate" - "compress/zlib" - "golang.org/x/crypto/openpgp/errors" - "io" - "strconv" -) - -// Compressed represents a compressed OpenPGP packet. The decompressed contents -// will contain more OpenPGP packets. See RFC 4880, section 5.6. 
-type Compressed struct { - Body io.Reader -} - -const ( - NoCompression = flate.NoCompression - BestSpeed = flate.BestSpeed - BestCompression = flate.BestCompression - DefaultCompression = flate.DefaultCompression -) - -// CompressionConfig contains compressor configuration settings. -type CompressionConfig struct { - // Level is the compression level to use. It must be set to - // between -1 and 9, with -1 causing the compressor to use the - // default compression level, 0 causing the compressor to use - // no compression and 1 to 9 representing increasing (better, - // slower) compression levels. If Level is less than -1 or - // more then 9, a non-nil error will be returned during - // encryption. See the constants above for convenient common - // settings for Level. - Level int -} - -func (c *Compressed) parse(r io.Reader) error { - var buf [1]byte - _, err := readFull(r, buf[:]) - if err != nil { - return err - } - - switch buf[0] { - case 1: - c.Body = flate.NewReader(r) - case 2: - c.Body, err = zlib.NewReader(r) - case 3: - c.Body = bzip2.NewReader(r) - default: - err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0]))) - } - - return err -} - -// compressedWriterCloser represents the serialized compression stream -// header and the compressor. Its Close() method ensures that both the -// compressor and serialized stream header are closed. Its Write() -// method writes to the compressor. -type compressedWriteCloser struct { - sh io.Closer // Stream Header - c io.WriteCloser // Compressor -} - -func (cwc compressedWriteCloser) Write(p []byte) (int, error) { - return cwc.c.Write(p) -} - -func (cwc compressedWriteCloser) Close() (err error) { - err = cwc.c.Close() - if err != nil { - return err - } - - return cwc.sh.Close() -} - -// SerializeCompressed serializes a compressed data packet to w and -// returns a WriteCloser to which the literal data packets themselves -// can be written and which MUST be closed on completion. If cc is -// nil, sensible defaults will be used to configure the compression -// algorithm. -func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) { - compressed, err := serializeStreamHeader(w, packetTypeCompressed) - if err != nil { - return - } - - _, err = compressed.Write([]byte{uint8(algo)}) - if err != nil { - return - } - - level := DefaultCompression - if cc != nil { - level = cc.Level - } - - var compressor io.WriteCloser - switch algo { - case CompressionZIP: - compressor, err = flate.NewWriter(compressed, level) - case CompressionZLIB: - compressor, err = zlib.NewWriterLevel(compressed, level) - default: - s := strconv.Itoa(int(algo)) - err = errors.UnsupportedError("Unsupported compression algorithm: " + s) - } - if err != nil { - return - } - - literaldata = compressedWriteCloser{compressed, compressor} - - return -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/config.go b/src/vendor/golang.org/x/crypto/openpgp/packet/config.go deleted file mode 100644 index c76eecc96..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/packet/config.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "crypto/rand" - "io" - "time" -) - -// Config collects a number of parameters along with sensible defaults. 
-// A nil *Config is valid and results in all default values. -type Config struct { - // Rand provides the source of entropy. - // If nil, the crypto/rand Reader is used. - Rand io.Reader - // DefaultHash is the default hash function to be used. - // If zero, SHA-256 is used. - DefaultHash crypto.Hash - // DefaultCipher is the cipher to be used. - // If zero, AES-128 is used. - DefaultCipher CipherFunction - // Time returns the current time as the number of seconds since the - // epoch. If Time is nil, time.Now is used. - Time func() time.Time - // DefaultCompressionAlgo is the compression algorithm to be - // applied to the plaintext before encryption. If zero, no - // compression is done. - DefaultCompressionAlgo CompressionAlgo - // CompressionConfig configures the compression settings. - CompressionConfig *CompressionConfig - // S2KCount is only used for symmetric encryption. It - // determines the strength of the passphrase stretching when - // the said passphrase is hashed to produce a key. S2KCount - // should be between 1024 and 65011712, inclusive. If Config - // is nil or S2KCount is 0, the value 65536 used. Not all - // values in the above range can be represented. S2KCount will - // be rounded up to the next representable value if it cannot - // be encoded exactly. When set, it is strongly encrouraged to - // use a value that is at least 65536. See RFC 4880 Section - // 3.7.1.3. - S2KCount int - // RSABits is the number of bits in new RSA keys made with NewEntity. - // If zero, then 2048 bit keys are created. - RSABits int -} - -func (c *Config) Random() io.Reader { - if c == nil || c.Rand == nil { - return rand.Reader - } - return c.Rand -} - -func (c *Config) Hash() crypto.Hash { - if c == nil || uint(c.DefaultHash) == 0 { - return crypto.SHA256 - } - return c.DefaultHash -} - -func (c *Config) Cipher() CipherFunction { - if c == nil || uint8(c.DefaultCipher) == 0 { - return CipherAES128 - } - return c.DefaultCipher -} - -func (c *Config) Now() time.Time { - if c == nil || c.Time == nil { - return time.Now() - } - return c.Time() -} - -func (c *Config) Compression() CompressionAlgo { - if c == nil { - return CompressionNone - } - return c.DefaultCompressionAlgo -} - -func (c *Config) PasswordHashIterations() int { - if c == nil || c.S2KCount == 0 { - return 0 - } - return c.S2KCount -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go b/src/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go deleted file mode 100644 index 6d7639722..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "crypto/rsa" - "encoding/binary" - "io" - "math/big" - "strconv" - - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" -) - -const encryptedKeyVersion = 3 - -// EncryptedKey represents a public-key encrypted session key. See RFC 4880, -// section 5.1. 
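The config file removed above is explicit that a nil *Config is valid; every accessor falls back to a default, which a tiny illustrative check makes concrete:

package main

import (
	"fmt"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	// A nil *Config is usable: each accessor supplies its default.
	var cfg *packet.Config
	fmt.Println(cfg.Hash())                           // SHA-256
	fmt.Println(cfg.Cipher() == packet.CipherAES128)  // true
	fmt.Println(cfg.Now())                            // current time
	fmt.Println(cfg.PasswordHashIterations())         // 0, i.e. use the built-in S2K count
}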
-type EncryptedKey struct { - KeyId uint64 - Algo PublicKeyAlgorithm - CipherFunc CipherFunction // only valid after a successful Decrypt - Key []byte // only valid after a successful Decrypt - - encryptedMPI1, encryptedMPI2 parsedMPI -} - -func (e *EncryptedKey) parse(r io.Reader) (err error) { - var buf [10]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != encryptedKeyVersion { - return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0]))) - } - e.KeyId = binary.BigEndian.Uint64(buf[1:9]) - e.Algo = PublicKeyAlgorithm(buf[9]) - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) - if err != nil { - return - } - case PubKeyAlgoElGamal: - e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) - if err != nil { - return - } - e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r) - if err != nil { - return - } - } - _, err = consumeAll(r) - return -} - -func checksumKeyMaterial(key []byte) uint16 { - var checksum uint16 - for _, v := range key { - checksum += uint16(v) - } - return checksum -} - -// Decrypt decrypts an encrypted session key with the given private key. The -// private key must have been decrypted first. -// If config is nil, sensible defaults will be used. -func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { - var err error - var b []byte - - // TODO(agl): use session key decryption routines here to avoid - // padding oracle attacks. - switch priv.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - // Supports both *rsa.PrivateKey and crypto.Decrypter - k := priv.PrivateKey.(crypto.Decrypter) - b, err = k.Decrypt(config.Random(), padToKeySize(k.Public().(*rsa.PublicKey), e.encryptedMPI1.bytes), nil) - case PubKeyAlgoElGamal: - c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes) - c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes) - b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2) - default: - err = errors.InvalidArgumentError("cannot decrypted encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) - } - - if err != nil { - return err - } - - e.CipherFunc = CipherFunction(b[0]) - e.Key = b[1 : len(b)-2] - expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1]) - checksum := checksumKeyMaterial(e.Key) - if checksum != expectedChecksum { - return errors.StructuralError("EncryptedKey checksum incorrect") - } - - return nil -} - -// Serialize writes the encrypted key packet, e, to w. 
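EncryptedKey.Decrypt (above) pairs with SerializeEncryptedKey (immediately below): one encrypts a session key to a public key, the other recovers it. An illustrative round-trip sketch, assuming the vendored packet package's RSA helpers and its generic packet.Read parser:

package main

import (
	"bytes"
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	// An RSA key pair wrapped in the packet types used by the deleted code.
	rsaPriv, err := rsa.GenerateKey(rand.Reader, 1024)
	if err != nil {
		panic(err)
	}
	now := time.Now()
	pub := packet.NewRSAPublicKey(now, &rsaPriv.PublicKey)
	priv := packet.NewRSAPrivateKey(now, rsaPriv)

	// Encrypt a random session key to the public key...
	sessionKey := make([]byte, 16)
	if _, err := rand.Read(sessionKey); err != nil {
		panic(err)
	}
	var buf bytes.Buffer
	if err := packet.SerializeEncryptedKey(&buf, pub, packet.CipherAES128, sessionKey, nil); err != nil {
		panic(err)
	}

	// ...then parse the packet back and decrypt it with the private key.
	p, err := packet.Read(&buf)
	if err != nil {
		panic(err)
	}
	ek := p.(*packet.EncryptedKey)
	if err := ek.Decrypt(priv, nil); err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(ek.Key, sessionKey)) // true
}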
-func (e *EncryptedKey) Serialize(w io.Writer) error { - var mpiLen int - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - mpiLen = 2 + len(e.encryptedMPI1.bytes) - case PubKeyAlgoElGamal: - mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes) - default: - return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) - } - - serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen) - - w.Write([]byte{encryptedKeyVersion}) - binary.Write(w, binary.BigEndian, e.KeyId) - w.Write([]byte{byte(e.Algo)}) - - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - writeMPIs(w, e.encryptedMPI1) - case PubKeyAlgoElGamal: - writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2) - default: - panic("internal error") - } - - return nil -} - -// SerializeEncryptedKey serializes an encrypted key packet to w that contains -// key, encrypted to pub. -// If config is nil, sensible defaults will be used. -func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { - var buf [10]byte - buf[0] = encryptedKeyVersion - binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) - buf[9] = byte(pub.PubKeyAlgo) - - keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) - keyBlock[0] = byte(cipherFunc) - copy(keyBlock[1:], key) - checksum := checksumKeyMaterial(key) - keyBlock[1+len(key)] = byte(checksum >> 8) - keyBlock[1+len(key)+1] = byte(checksum) - - switch pub.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) - case PubKeyAlgoElGamal: - return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) - case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: - return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) - } - - return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) -} - -func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error { - cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) - if err != nil { - return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) - } - - packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText) - - err = serializeHeader(w, packetTypeEncryptedKey, packetLen) - if err != nil { - return err - } - _, err = w.Write(header[:]) - if err != nil { - return err - } - return writeMPI(w, 8*uint16(len(cipherText)), cipherText) -} - -func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error { - c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) - if err != nil { - return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) - } - - packetLen := 10 /* header length */ - packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 - packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 - - err = serializeHeader(w, packetTypeEncryptedKey, packetLen) - if err != nil { - return err - } - _, err = w.Write(header[:]) - if err != nil { - return err - } - err = writeBig(w, c1) - if err != nil { - return err - } - return writeBig(w, c2) -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/literal.go b/src/vendor/golang.org/x/crypto/openpgp/packet/literal.go deleted file 
mode 100644 index 1a9ec6e51..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/packet/literal.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "encoding/binary" - "io" -) - -// LiteralData represents an encrypted file. See RFC 4880, section 5.9. -type LiteralData struct { - IsBinary bool - FileName string - Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined. - Body io.Reader -} - -// ForEyesOnly returns whether the contents of the LiteralData have been marked -// as especially sensitive. -func (l *LiteralData) ForEyesOnly() bool { - return l.FileName == "_CONSOLE" -} - -func (l *LiteralData) parse(r io.Reader) (err error) { - var buf [256]byte - - _, err = readFull(r, buf[:2]) - if err != nil { - return - } - - l.IsBinary = buf[0] == 'b' - fileNameLen := int(buf[1]) - - _, err = readFull(r, buf[:fileNameLen]) - if err != nil { - return - } - - l.FileName = string(buf[:fileNameLen]) - - _, err = readFull(r, buf[:4]) - if err != nil { - return - } - - l.Time = binary.BigEndian.Uint32(buf[:4]) - l.Body = r - return -} - -// SerializeLiteral serializes a literal data packet to w and returns a -// WriteCloser to which the data itself can be written and which MUST be closed -// on completion. The fileName is truncated to 255 bytes. -func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) { - var buf [4]byte - buf[0] = 't' - if isBinary { - buf[0] = 'b' - } - if len(fileName) > 255 { - fileName = fileName[:255] - } - buf[1] = byte(len(fileName)) - - inner, err := serializeStreamHeader(w, packetTypeLiteralData) - if err != nil { - return - } - - _, err = inner.Write(buf[:2]) - if err != nil { - return - } - _, err = inner.Write([]byte(fileName)) - if err != nil { - return - } - binary.BigEndian.PutUint32(buf[:], time) - _, err = inner.Write(buf[:]) - if err != nil { - return - } - - plaintext = inner - return -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go b/src/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go deleted file mode 100644 index ce2a33a54..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9 - -package packet - -import ( - "crypto/cipher" -) - -type ocfbEncrypter struct { - b cipher.Block - fre []byte - outUsed int -} - -// An OCFBResyncOption determines if the "resynchronization step" of OCFB is -// performed. -type OCFBResyncOption bool - -const ( - OCFBResync OCFBResyncOption = true - OCFBNoResync OCFBResyncOption = false -) - -// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's -// cipher feedback mode using the given cipher.Block, and an initial amount of -// ciphertext. randData must be random bytes and be the same length as the -// cipher.Block's block size. Resync determines if the "resynchronization step" -// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on -// this point. 
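The literal-data file removed above is symmetric: SerializeLiteral writes a packet that packet.Read later hands back as a *LiteralData. A small illustrative sketch; the nopCloser wrapper is an assumption added only to satisfy the io.WriteCloser parameter:

package main

import (
	"bytes"
	"fmt"
	"io"

	"golang.org/x/crypto/openpgp/packet"
)

// nopCloser adapts a bytes.Buffer to the io.WriteCloser that
// SerializeLiteral expects.
type nopCloser struct{ *bytes.Buffer }

func (nopCloser) Close() error { return nil }

func main() {
	buf := &bytes.Buffer{}

	// Write one literal data packet...
	w, err := packet.SerializeLiteral(nopCloser{buf}, true, "hello.txt", 0)
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("literal body")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// ...and read it back.
	p, err := packet.Read(buf)
	if err != nil {
		panic(err)
	}
	lit := p.(*packet.LiteralData)
	body, err := io.ReadAll(lit.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(lit.FileName, lit.IsBinary, string(body)) // hello.txt true literal body
}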
-func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) { - blockSize := block.BlockSize() - if len(randData) != blockSize { - return nil, nil - } - - x := &ocfbEncrypter{ - b: block, - fre: make([]byte, blockSize), - outUsed: 0, - } - prefix := make([]byte, blockSize+2) - - block.Encrypt(x.fre, x.fre) - for i := 0; i < blockSize; i++ { - prefix[i] = randData[i] ^ x.fre[i] - } - - block.Encrypt(x.fre, prefix[:blockSize]) - prefix[blockSize] = x.fre[0] ^ randData[blockSize-2] - prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1] - - if resync { - block.Encrypt(x.fre, prefix[2:]) - } else { - x.fre[0] = prefix[blockSize] - x.fre[1] = prefix[blockSize+1] - x.outUsed = 2 - } - return x, prefix -} - -func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) { - for i := 0; i < len(src); i++ { - if x.outUsed == len(x.fre) { - x.b.Encrypt(x.fre, x.fre) - x.outUsed = 0 - } - - x.fre[x.outUsed] ^= src[i] - dst[i] = x.fre[x.outUsed] - x.outUsed++ - } -} - -type ocfbDecrypter struct { - b cipher.Block - fre []byte - outUsed int -} - -// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's -// cipher feedback mode using the given cipher.Block. Prefix must be the first -// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's -// block size. If an incorrect key is detected then nil is returned. On -// successful exit, blockSize+2 bytes of decrypted data are written into -// prefix. Resync determines if the "resynchronization step" from RFC 4880, -// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point. -func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream { - blockSize := block.BlockSize() - if len(prefix) != blockSize+2 { - return nil - } - - x := &ocfbDecrypter{ - b: block, - fre: make([]byte, blockSize), - outUsed: 0, - } - prefixCopy := make([]byte, len(prefix)) - copy(prefixCopy, prefix) - - block.Encrypt(x.fre, x.fre) - for i := 0; i < blockSize; i++ { - prefixCopy[i] ^= x.fre[i] - } - - block.Encrypt(x.fre, prefix[:blockSize]) - prefixCopy[blockSize] ^= x.fre[0] - prefixCopy[blockSize+1] ^= x.fre[1] - - if prefixCopy[blockSize-2] != prefixCopy[blockSize] || - prefixCopy[blockSize-1] != prefixCopy[blockSize+1] { - return nil - } - - if resync { - block.Encrypt(x.fre, prefix[2:]) - } else { - x.fre[0] = prefix[blockSize] - x.fre[1] = prefix[blockSize+1] - x.outUsed = 2 - } - copy(prefix, prefixCopy) - return x -} - -func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) { - for i := 0; i < len(src); i++ { - if x.outUsed == len(x.fre) { - x.b.Encrypt(x.fre, x.fre) - x.outUsed = 0 - } - - c := src[i] - dst[i] = x.fre[x.outUsed] ^ src[i] - x.fre[x.outUsed] = c - x.outUsed++ - } -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go b/src/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go deleted file mode 100644 index 171350339..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "encoding/binary" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" - "io" - "strconv" -) - -// OnePassSignature represents a one-pass signature packet. See RFC 4880, -// section 5.4. 
-type OnePassSignature struct { - SigType SignatureType - Hash crypto.Hash - PubKeyAlgo PublicKeyAlgorithm - KeyId uint64 - IsLast bool -} - -const onePassSignatureVersion = 3 - -func (ops *OnePassSignature) parse(r io.Reader) (err error) { - var buf [13]byte - - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != onePassSignatureVersion { - err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) - } - - var ok bool - ops.Hash, ok = s2k.HashIdToHash(buf[2]) - if !ok { - return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) - } - - ops.SigType = SignatureType(buf[1]) - ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) - ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) - ops.IsLast = buf[12] != 0 - return -} - -// Serialize marshals the given OnePassSignature to w. -func (ops *OnePassSignature) Serialize(w io.Writer) error { - var buf [13]byte - buf[0] = onePassSignatureVersion - buf[1] = uint8(ops.SigType) - var ok bool - buf[2], ok = s2k.HashToHashId(ops.Hash) - if !ok { - return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) - } - buf[3] = uint8(ops.PubKeyAlgo) - binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) - if ops.IsLast { - buf[12] = 1 - } - - if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { - return err - } - _, err := w.Write(buf[:]) - return err -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/opaque.go b/src/vendor/golang.org/x/crypto/openpgp/packet/opaque.go deleted file mode 100644 index 398447731..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/packet/opaque.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "io" - - "golang.org/x/crypto/openpgp/errors" -) - -// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is -// useful for splitting and storing the original packet contents separately, -// handling unsupported packet types or accessing parts of the packet not yet -// implemented by this package. -type OpaquePacket struct { - // Packet type - Tag uint8 - // Reason why the packet was parsed opaquely - Reason error - // Binary contents of the packet data - Contents []byte -} - -func (op *OpaquePacket) parse(r io.Reader) (err error) { - op.Contents, err = io.ReadAll(r) - return -} - -// Serialize marshals the packet to a writer in its original form, including -// the packet header. -func (op *OpaquePacket) Serialize(w io.Writer) (err error) { - err = serializeHeader(w, packetType(op.Tag), len(op.Contents)) - if err == nil { - _, err = w.Write(op.Contents) - } - return -} - -// Parse attempts to parse the opaque contents into a structure supported by -// this package. If the packet is not known then the result will be another -// OpaquePacket. -func (op *OpaquePacket) Parse() (p Packet, err error) { - hdr := bytes.NewBuffer(nil) - err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents)) - if err != nil { - op.Reason = err - return op, err - } - p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents))) - if err != nil { - op.Reason = err - p = op - } - return -} - -// OpaqueReader reads OpaquePackets from an io.Reader. -type OpaqueReader struct { - r io.Reader -} - -func NewOpaqueReader(r io.Reader) *OpaqueReader { - return &OpaqueReader{r: r} -} - -// Read the next OpaquePacket. 
-func (or *OpaqueReader) Next() (op *OpaquePacket, err error) { - tag, _, contents, err := readHeader(or.r) - if err != nil { - return - } - op = &OpaquePacket{Tag: uint8(tag), Reason: err} - err = op.parse(contents) - if err != nil { - consumeAll(contents) - } - return -} - -// OpaqueSubpacket represents an unparsed OpenPGP subpacket, -// as found in signature and user attribute packets. -type OpaqueSubpacket struct { - SubType uint8 - Contents []byte -} - -// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from -// their byte representation. -func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) { - var ( - subHeaderLen int - subPacket *OpaqueSubpacket - ) - for len(contents) > 0 { - subHeaderLen, subPacket, err = nextSubpacket(contents) - if err != nil { - break - } - result = append(result, subPacket) - contents = contents[subHeaderLen+len(subPacket.Contents):] - } - return -} - -func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) { - // RFC 4880, section 5.2.3.1 - var subLen uint32 - if len(contents) < 1 { - goto Truncated - } - subPacket = &OpaqueSubpacket{} - switch { - case contents[0] < 192: - subHeaderLen = 2 // 1 length byte, 1 subtype byte - if len(contents) < subHeaderLen { - goto Truncated - } - subLen = uint32(contents[0]) - contents = contents[1:] - case contents[0] < 255: - subHeaderLen = 3 // 2 length bytes, 1 subtype - if len(contents) < subHeaderLen { - goto Truncated - } - subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192 - contents = contents[2:] - default: - subHeaderLen = 6 // 5 length bytes, 1 subtype - if len(contents) < subHeaderLen { - goto Truncated - } - subLen = uint32(contents[1])<<24 | - uint32(contents[2])<<16 | - uint32(contents[3])<<8 | - uint32(contents[4]) - contents = contents[5:] - } - if subLen > uint32(len(contents)) || subLen == 0 { - goto Truncated - } - subPacket.SubType = contents[0] - subPacket.Contents = contents[1:subLen] - return -Truncated: - err = errors.StructuralError("subpacket truncated") - return -} - -func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) { - buf := make([]byte, 6) - n := serializeSubpacketLength(buf, len(osp.Contents)+1) - buf[n] = osp.SubType - if _, err = w.Write(buf[:n+1]); err != nil { - return - } - _, err = w.Write(osp.Contents) - return -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/src/vendor/golang.org/x/crypto/openpgp/packet/packet.go deleted file mode 100644 index 0a19794a8..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/packet/packet.go +++ /dev/null @@ -1,590 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package packet implements parsing and serialization of OpenPGP packets, as -// specified in RFC 4880. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. 
-package packet // import "golang.org/x/crypto/openpgp/packet" - -import ( - "bufio" - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/rsa" - "io" - "math/big" - "math/bits" - - "golang.org/x/crypto/cast5" - "golang.org/x/crypto/openpgp/errors" -) - -// readFull is the same as io.ReadFull except that reading zero bytes returns -// ErrUnexpectedEOF rather than EOF. -func readFull(r io.Reader, buf []byte) (n int, err error) { - n, err = io.ReadFull(r, buf) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. -func readLength(r io.Reader) (length int64, isPartial bool, err error) { - var buf [4]byte - _, err = readFull(r, buf[:1]) - if err != nil { - return - } - switch { - case buf[0] < 192: - length = int64(buf[0]) - case buf[0] < 224: - length = int64(buf[0]-192) << 8 - _, err = readFull(r, buf[0:1]) - if err != nil { - return - } - length += int64(buf[0]) + 192 - case buf[0] < 255: - length = int64(1) << (buf[0] & 0x1f) - isPartial = true - default: - _, err = readFull(r, buf[0:4]) - if err != nil { - return - } - length = int64(buf[0])<<24 | - int64(buf[1])<<16 | - int64(buf[2])<<8 | - int64(buf[3]) - } - return -} - -// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. -// The continuation lengths are parsed and removed from the stream and EOF is -// returned at the end of the packet. See RFC 4880, section 4.2.2.4. -type partialLengthReader struct { - r io.Reader - remaining int64 - isPartial bool -} - -func (r *partialLengthReader) Read(p []byte) (n int, err error) { - for r.remaining == 0 { - if !r.isPartial { - return 0, io.EOF - } - r.remaining, r.isPartial, err = readLength(r.r) - if err != nil { - return 0, err - } - } - - toRead := int64(len(p)) - if toRead > r.remaining { - toRead = r.remaining - } - - n, err = r.r.Read(p[:int(toRead)]) - r.remaining -= int64(n) - if n < int(toRead) && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// partialLengthWriter writes a stream of data using OpenPGP partial lengths. -// See RFC 4880, section 4.2.2.4. -type partialLengthWriter struct { - w io.WriteCloser - lengthByte [1]byte - sentFirst bool - buf []byte -} - -// RFC 4880 4.2.2.4: the first partial length MUST be at least 512 octets long. -const minFirstPartialWrite = 512 - -func (w *partialLengthWriter) Write(p []byte) (n int, err error) { - off := 0 - if !w.sentFirst { - if len(w.buf) > 0 || len(p) < minFirstPartialWrite { - off = len(w.buf) - w.buf = append(w.buf, p...) - if len(w.buf) < minFirstPartialWrite { - return len(p), nil - } - p = w.buf - w.buf = nil - } - w.sentFirst = true - } - - power := uint8(30) - for len(p) > 0 { - l := 1 << power - if len(p) < l { - power = uint8(bits.Len32(uint32(len(p)))) - 1 - l = 1 << power - } - w.lengthByte[0] = 224 + power - _, err = w.w.Write(w.lengthByte[:]) - if err == nil { - var m int - m, err = w.w.Write(p[:l]) - n += m - } - if err != nil { - if n < off { - return 0, err - } - return n - off, err - } - p = p[l:] - } - return n - off, nil -} - -func (w *partialLengthWriter) Close() error { - if len(w.buf) > 0 { - // In this case we can't send a 512 byte packet. - // Just send what we have. 
- p := w.buf - w.sentFirst = true - w.buf = nil - if _, err := w.Write(p); err != nil { - return err - } - } - - w.lengthByte[0] = 0 - _, err := w.w.Write(w.lengthByte[:]) - if err != nil { - return err - } - return w.w.Close() -} - -// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the -// underlying Reader returns EOF before the limit has been reached. -type spanReader struct { - r io.Reader - n int64 -} - -func (l *spanReader) Read(p []byte) (n int, err error) { - if l.n <= 0 { - return 0, io.EOF - } - if int64(len(p)) > l.n { - p = p[0:l.n] - } - n, err = l.r.Read(p) - l.n -= int64(n) - if l.n > 0 && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// readHeader parses a packet header and returns an io.Reader which will return -// the contents of the packet. See RFC 4880, section 4.2. -func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) { - var buf [4]byte - _, err = io.ReadFull(r, buf[:1]) - if err != nil { - return - } - if buf[0]&0x80 == 0 { - err = errors.StructuralError("tag byte does not have MSB set") - return - } - if buf[0]&0x40 == 0 { - // Old format packet - tag = packetType((buf[0] & 0x3f) >> 2) - lengthType := buf[0] & 3 - if lengthType == 3 { - length = -1 - contents = r - return - } - lengthBytes := 1 << lengthType - _, err = readFull(r, buf[0:lengthBytes]) - if err != nil { - return - } - for i := 0; i < lengthBytes; i++ { - length <<= 8 - length |= int64(buf[i]) - } - contents = &spanReader{r, length} - return - } - - // New format packet - tag = packetType(buf[0] & 0x3f) - length, isPartial, err := readLength(r) - if err != nil { - return - } - if isPartial { - contents = &partialLengthReader{ - remaining: length, - isPartial: true, - r: r, - } - length = -1 - } else { - contents = &spanReader{r, length} - } - return -} - -// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section -// 4.2. -func serializeHeader(w io.Writer, ptype packetType, length int) (err error) { - var buf [6]byte - var n int - - buf[0] = 0x80 | 0x40 | byte(ptype) - if length < 192 { - buf[1] = byte(length) - n = 2 - } else if length < 8384 { - length -= 192 - buf[1] = 192 + byte(length>>8) - buf[2] = byte(length) - n = 3 - } else { - buf[1] = 255 - buf[2] = byte(length >> 24) - buf[3] = byte(length >> 16) - buf[4] = byte(length >> 8) - buf[5] = byte(length) - n = 6 - } - - _, err = w.Write(buf[:n]) - return -} - -// serializeStreamHeader writes an OpenPGP packet header to w where the -// length of the packet is unknown. It returns a io.WriteCloser which can be -// used to write the contents of the packet. See RFC 4880, section 4.2. -func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) { - var buf [1]byte - buf[0] = 0x80 | 0x40 | byte(ptype) - _, err = w.Write(buf[:]) - if err != nil { - return - } - out = &partialLengthWriter{w: w} - return -} - -// Packet represents an OpenPGP packet. Users are expected to try casting -// instances of this interface to specific packet types. -type Packet interface { - parse(io.Reader) error -} - -// consumeAll reads from the given Reader until error, returning the number of -// bytes read. -func consumeAll(r io.Reader) (n int64, err error) { - var m int - var buf [1024]byte - - for { - m, err = r.Read(buf[:]) - n += int64(m) - if err == io.EOF { - err = nil - return - } - if err != nil { - return - } - } -} - -// packetType represents the numeric ids of the different OpenPGP packet types. 
See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2 -type packetType uint8 - -const ( - packetTypeEncryptedKey packetType = 1 - packetTypeSignature packetType = 2 - packetTypeSymmetricKeyEncrypted packetType = 3 - packetTypeOnePassSignature packetType = 4 - packetTypePrivateKey packetType = 5 - packetTypePublicKey packetType = 6 - packetTypePrivateSubkey packetType = 7 - packetTypeCompressed packetType = 8 - packetTypeSymmetricallyEncrypted packetType = 9 - packetTypeLiteralData packetType = 11 - packetTypeUserId packetType = 13 - packetTypePublicSubkey packetType = 14 - packetTypeUserAttribute packetType = 17 - packetTypeSymmetricallyEncryptedMDC packetType = 18 -) - -// peekVersion detects the version of a public key packet about to -// be read. A bufio.Reader at the original position of the io.Reader -// is returned. -func peekVersion(r io.Reader) (bufr *bufio.Reader, ver byte, err error) { - bufr = bufio.NewReader(r) - var verBuf []byte - if verBuf, err = bufr.Peek(1); err != nil { - return - } - ver = verBuf[0] - return -} - -// Read reads a single OpenPGP packet from the given io.Reader. If there is an -// error parsing a packet, the whole packet is consumed from the input. -func Read(r io.Reader) (p Packet, err error) { - tag, _, contents, err := readHeader(r) - if err != nil { - return - } - - switch tag { - case packetTypeEncryptedKey: - p = new(EncryptedKey) - case packetTypeSignature: - var version byte - // Detect signature version - if contents, version, err = peekVersion(contents); err != nil { - return - } - if version < 4 { - p = new(SignatureV3) - } else { - p = new(Signature) - } - case packetTypeSymmetricKeyEncrypted: - p = new(SymmetricKeyEncrypted) - case packetTypeOnePassSignature: - p = new(OnePassSignature) - case packetTypePrivateKey, packetTypePrivateSubkey: - pk := new(PrivateKey) - if tag == packetTypePrivateSubkey { - pk.IsSubkey = true - } - p = pk - case packetTypePublicKey, packetTypePublicSubkey: - var version byte - if contents, version, err = peekVersion(contents); err != nil { - return - } - isSubkey := tag == packetTypePublicSubkey - if version < 4 { - p = &PublicKeyV3{IsSubkey: isSubkey} - } else { - p = &PublicKey{IsSubkey: isSubkey} - } - case packetTypeCompressed: - p = new(Compressed) - case packetTypeSymmetricallyEncrypted: - p = new(SymmetricallyEncrypted) - case packetTypeLiteralData: - p = new(LiteralData) - case packetTypeUserId: - p = new(UserId) - case packetTypeUserAttribute: - p = new(UserAttribute) - case packetTypeSymmetricallyEncryptedMDC: - se := new(SymmetricallyEncrypted) - se.MDC = true - p = se - default: - err = errors.UnknownPacketTypeError(tag) - } - if p != nil { - err = p.parse(contents) - } - if err != nil { - consumeAll(contents) - } - return -} - -// SignatureType represents the different semantic meanings of an OpenPGP -// signature. See RFC 4880, section 5.2.1. -type SignatureType uint8 - -const ( - SigTypeBinary SignatureType = 0 - SigTypeText = 1 - SigTypeGenericCert = 0x10 - SigTypePersonaCert = 0x11 - SigTypeCasualCert = 0x12 - SigTypePositiveCert = 0x13 - SigTypeSubkeyBinding = 0x18 - SigTypePrimaryKeyBinding = 0x19 - SigTypeDirectSignature = 0x1F - SigTypeKeyRevocation = 0x20 - SigTypeSubkeyRevocation = 0x28 -) - -// PublicKeyAlgorithm represents the different public key system specified for -// OpenPGP. 
See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 -type PublicKeyAlgorithm uint8 - -const ( - PubKeyAlgoRSA PublicKeyAlgorithm = 1 - PubKeyAlgoElGamal PublicKeyAlgorithm = 16 - PubKeyAlgoDSA PublicKeyAlgorithm = 17 - // RFC 6637, Section 5. - PubKeyAlgoECDH PublicKeyAlgorithm = 18 - PubKeyAlgoECDSA PublicKeyAlgorithm = 19 - - // Deprecated in RFC 4880, Section 13.5. Use key flags instead. - PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 - PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3 -) - -// CanEncrypt returns true if it's possible to encrypt a message to a public -// key of the given type. -func (pka PublicKeyAlgorithm) CanEncrypt() bool { - switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal: - return true - } - return false -} - -// CanSign returns true if it's possible for a public key of the given type to -// sign a message. -func (pka PublicKeyAlgorithm) CanSign() bool { - switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: - return true - } - return false -} - -// CipherFunction represents the different block ciphers specified for OpenPGP. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 -type CipherFunction uint8 - -const ( - Cipher3DES CipherFunction = 2 - CipherCAST5 CipherFunction = 3 - CipherAES128 CipherFunction = 7 - CipherAES192 CipherFunction = 8 - CipherAES256 CipherFunction = 9 -) - -// KeySize returns the key size, in bytes, of cipher. -func (cipher CipherFunction) KeySize() int { - switch cipher { - case Cipher3DES: - return 24 - case CipherCAST5: - return cast5.KeySize - case CipherAES128: - return 16 - case CipherAES192: - return 24 - case CipherAES256: - return 32 - } - return 0 -} - -// blockSize returns the block size, in bytes, of cipher. -func (cipher CipherFunction) blockSize() int { - switch cipher { - case Cipher3DES: - return des.BlockSize - case CipherCAST5: - return 8 - case CipherAES128, CipherAES192, CipherAES256: - return 16 - } - return 0 -} - -// new returns a fresh instance of the given cipher. -func (cipher CipherFunction) new(key []byte) (block cipher.Block) { - switch cipher { - case Cipher3DES: - block, _ = des.NewTripleDESCipher(key) - case CipherCAST5: - block, _ = cast5.NewCipher(key) - case CipherAES128, CipherAES192, CipherAES256: - block, _ = aes.NewCipher(key) - } - return -} - -// readMPI reads a big integer from r. The bit length returned is the bit -// length that was specified in r. This is preserved so that the integer can be -// reserialized exactly. -func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) { - var buf [2]byte - _, err = readFull(r, buf[0:]) - if err != nil { - return - } - bitLength = uint16(buf[0])<<8 | uint16(buf[1]) - numBytes := (int(bitLength) + 7) / 8 - mpi = make([]byte, numBytes) - _, err = readFull(r, mpi) - // According to RFC 4880 3.2. we should check that the MPI has no leading - // zeroes (at least when not an encrypted MPI?), but this implementation - // does generate leading zeroes, so we keep accepting them. - return -} - -// writeMPI serializes a big integer to w. -func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) { - // Note that we can produce leading zeroes, in violation of RFC 4880 3.2. - // Implementations seem to be tolerant of them, and stripping them would - // make it complex to guarantee matching re-serialization. 
- _, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)}) - if err == nil { - _, err = w.Write(mpiBytes) - } - return -} - -// writeBig serializes a *big.Int to w. -func writeBig(w io.Writer, i *big.Int) error { - return writeMPI(w, uint16(i.BitLen()), i.Bytes()) -} - -// padToKeySize left-pads a MPI with zeroes to match the length of the -// specified RSA public. -func padToKeySize(pub *rsa.PublicKey, b []byte) []byte { - k := (pub.N.BitLen() + 7) / 8 - if len(b) >= k { - return b - } - bb := make([]byte, k) - copy(bb[len(bb)-len(b):], b) - return bb -} - -// CompressionAlgo Represents the different compression algorithms -// supported by OpenPGP (except for BZIP2, which is not currently -// supported). See Section 9.3 of RFC 4880. -type CompressionAlgo uint8 - -const ( - CompressionNone CompressionAlgo = 0 - CompressionZIP CompressionAlgo = 1 - CompressionZLIB CompressionAlgo = 2 -) diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/private_key.go b/src/vendor/golang.org/x/crypto/openpgp/packet/private_key.go deleted file mode 100644 index 192aac376..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/packet/private_key.go +++ /dev/null @@ -1,384 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/cipher" - "crypto/dsa" - "crypto/ecdsa" - "crypto/rsa" - "crypto/sha1" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -// PrivateKey represents a possibly encrypted private key. See RFC 4880, -// section 5.5.3. -type PrivateKey struct { - PublicKey - Encrypted bool // if true then the private key is unavailable until Decrypt has been called. - encryptedData []byte - cipher CipherFunction - s2k func(out, in []byte) - PrivateKey interface{} // An *{rsa|dsa|ecdsa}.PrivateKey or crypto.Signer/crypto.Decrypter (Decryptor RSA only). - sha1Checksum bool - iv []byte -} - -func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that -// implements RSA or ECDSA. -func NewSignerPrivateKey(creationTime time.Time, signer crypto.Signer) *PrivateKey { - pk := new(PrivateKey) - // In general, the public Keys should be used as pointers. We still - // type-switch on the values, for backwards-compatibility. 
- switch pubkey := signer.Public().(type) { - case *rsa.PublicKey: - pk.PublicKey = *NewRSAPublicKey(creationTime, pubkey) - case rsa.PublicKey: - pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey) - case *ecdsa.PublicKey: - pk.PublicKey = *NewECDSAPublicKey(creationTime, pubkey) - case ecdsa.PublicKey: - pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey) - default: - panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey") - } - pk.PrivateKey = signer - return pk -} - -func (pk *PrivateKey) parse(r io.Reader) (err error) { - err = (&pk.PublicKey).parse(r) - if err != nil { - return - } - var buf [1]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - - s2kType := buf[0] - - switch s2kType { - case 0: - pk.s2k = nil - pk.Encrypted = false - case 254, 255: - _, err = readFull(r, buf[:]) - if err != nil { - return - } - pk.cipher = CipherFunction(buf[0]) - pk.Encrypted = true - pk.s2k, err = s2k.Parse(r) - if err != nil { - return - } - if s2kType == 254 { - pk.sha1Checksum = true - } - default: - return errors.UnsupportedError("deprecated s2k function in private key") - } - - if pk.Encrypted { - blockSize := pk.cipher.blockSize() - if blockSize == 0 { - return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) - } - pk.iv = make([]byte, blockSize) - _, err = readFull(r, pk.iv) - if err != nil { - return - } - } - - pk.encryptedData, err = io.ReadAll(r) - if err != nil { - return - } - - if !pk.Encrypted { - return pk.parsePrivateKey(pk.encryptedData) - } - - return -} - -func mod64kHash(d []byte) uint16 { - var h uint16 - for _, b := range d { - h += uint16(b) - } - return h -} - -func (pk *PrivateKey) Serialize(w io.Writer) (err error) { - // TODO(agl): support encrypted private keys - buf := bytes.NewBuffer(nil) - err = pk.PublicKey.serializeWithoutHeaders(buf) - if err != nil { - return - } - buf.WriteByte(0 /* no encryption */) - - privateKeyBuf := bytes.NewBuffer(nil) - - switch priv := pk.PrivateKey.(type) { - case *rsa.PrivateKey: - err = serializeRSAPrivateKey(privateKeyBuf, priv) - case *dsa.PrivateKey: - err = serializeDSAPrivateKey(privateKeyBuf, priv) - case *elgamal.PrivateKey: - err = serializeElGamalPrivateKey(privateKeyBuf, priv) - case *ecdsa.PrivateKey: - err = serializeECDSAPrivateKey(privateKeyBuf, priv) - default: - err = errors.InvalidArgumentError("unknown private key type") - } - if err != nil { - return - } - - ptype := packetTypePrivateKey - contents := buf.Bytes() - privateKeyBytes := privateKeyBuf.Bytes() - if pk.IsSubkey { - ptype = packetTypePrivateSubkey - } - err = serializeHeader(w, ptype, len(contents)+len(privateKeyBytes)+2) - if err != nil { - return - } - _, err = w.Write(contents) - if err != nil { - return - } - _, err = w.Write(privateKeyBytes) - if err != nil { - return - } - - checksum := mod64kHash(privateKeyBytes) - var checksumBytes [2]byte - checksumBytes[0] = byte(checksum >> 8) - checksumBytes[1] = byte(checksum) - _, err = w.Write(checksumBytes[:]) - - return -} - -func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error { - err := writeBig(w, priv.D) - if err != nil { - return err - } - err = writeBig(w, priv.Primes[1]) - if err != nil { - return err - } - err = writeBig(w, priv.Primes[0]) - if err != nil { - return err - } - return writeBig(w, priv.Precomputed.Qinv) -} - -func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error { - return writeBig(w, priv.X) -} - -func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) 
error { - return writeBig(w, priv.X) -} - -func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { - return writeBig(w, priv.D) -} - -// Decrypt decrypts an encrypted private key using a passphrase. -func (pk *PrivateKey) Decrypt(passphrase []byte) error { - if !pk.Encrypted { - return nil - } - - key := make([]byte, pk.cipher.KeySize()) - pk.s2k(key, passphrase) - block := pk.cipher.new(key) - cfb := cipher.NewCFBDecrypter(block, pk.iv) - - data := make([]byte, len(pk.encryptedData)) - cfb.XORKeyStream(data, pk.encryptedData) - - if pk.sha1Checksum { - if len(data) < sha1.Size { - return errors.StructuralError("truncated private key data") - } - h := sha1.New() - h.Write(data[:len(data)-sha1.Size]) - sum := h.Sum(nil) - if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { - return errors.StructuralError("private key checksum failure") - } - data = data[:len(data)-sha1.Size] - } else { - if len(data) < 2 { - return errors.StructuralError("truncated private key data") - } - var sum uint16 - for i := 0; i < len(data)-2; i++ { - sum += uint16(data[i]) - } - if data[len(data)-2] != uint8(sum>>8) || - data[len(data)-1] != uint8(sum) { - return errors.StructuralError("private key checksum failure") - } - data = data[:len(data)-2] - } - - return pk.parsePrivateKey(data) -} - -func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { - switch pk.PublicKey.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: - return pk.parseRSAPrivateKey(data) - case PubKeyAlgoDSA: - return pk.parseDSAPrivateKey(data) - case PubKeyAlgoElGamal: - return pk.parseElGamalPrivateKey(data) - case PubKeyAlgoECDSA: - return pk.parseECDSAPrivateKey(data) - } - panic("impossible") -} - -func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { - rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) - rsaPriv := new(rsa.PrivateKey) - rsaPriv.PublicKey = *rsaPub - - buf := bytes.NewBuffer(data) - d, _, err := readMPI(buf) - if err != nil { - return - } - p, _, err := readMPI(buf) - if err != nil { - return - } - q, _, err := readMPI(buf) - if err != nil { - return - } - - rsaPriv.D = new(big.Int).SetBytes(d) - rsaPriv.Primes = make([]*big.Int, 2) - rsaPriv.Primes[0] = new(big.Int).SetBytes(p) - rsaPriv.Primes[1] = new(big.Int).SetBytes(q) - if err := rsaPriv.Validate(); err != nil { - return err - } - rsaPriv.Precompute() - pk.PrivateKey = rsaPriv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) { - dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) - dsaPriv := new(dsa.PrivateKey) - dsaPriv.PublicKey = *dsaPub - - buf := bytes.NewBuffer(data) - x, _, err := readMPI(buf) - if err != nil { - return - } - - dsaPriv.X = new(big.Int).SetBytes(x) - pk.PrivateKey = dsaPriv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { - pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey) - priv := new(elgamal.PrivateKey) - priv.PublicKey = *pub - - buf := bytes.NewBuffer(data) - x, _, err := readMPI(buf) - if err != nil { - return - } - - priv.X = new(big.Int).SetBytes(x) - pk.PrivateKey = priv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { - ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) - - buf := bytes.NewBuffer(data) - d, _, err := readMPI(buf) - if err != nil { - return - } - - pk.PrivateKey = 
&ecdsa.PrivateKey{ - PublicKey: *ecdsaPub, - D: new(big.Int).SetBytes(d), - } - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/public_key.go b/src/vendor/golang.org/x/crypto/openpgp/packet/public_key.go deleted file mode 100644 index fcd5f5251..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/packet/public_key.go +++ /dev/null @@ -1,753 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" - "encoding/binary" - "fmt" - "hash" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" -) - -var ( - // NIST curve P-256 - oidCurveP256 []byte = []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07} - // NIST curve P-384 - oidCurveP384 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x22} - // NIST curve P-521 - oidCurveP521 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x23} -) - -const maxOIDLength = 8 - -// ecdsaKey stores the algorithm-specific fields for ECDSA keys. -// as defined in RFC 6637, Section 9. -type ecdsaKey struct { - // oid contains the OID byte sequence identifying the elliptic curve used - oid []byte - // p contains the elliptic curve point that represents the public key - p parsedMPI -} - -// parseOID reads the OID for the curve as defined in RFC 6637, Section 9. -func parseOID(r io.Reader) (oid []byte, err error) { - buf := make([]byte, maxOIDLength) - if _, err = readFull(r, buf[:1]); err != nil { - return - } - oidLen := buf[0] - if int(oidLen) > len(buf) { - err = errors.UnsupportedError("invalid oid length: " + strconv.Itoa(int(oidLen))) - return - } - oid = buf[:oidLen] - _, err = readFull(r, oid) - return -} - -func (f *ecdsaKey) parse(r io.Reader) (err error) { - if f.oid, err = parseOID(r); err != nil { - return err - } - f.p.bytes, f.p.bitLength, err = readMPI(r) - return -} - -func (f *ecdsaKey) serialize(w io.Writer) (err error) { - buf := make([]byte, maxOIDLength+1) - buf[0] = byte(len(f.oid)) - copy(buf[1:], f.oid) - if _, err = w.Write(buf[:len(f.oid)+1]); err != nil { - return - } - return writeMPIs(w, f.p) -} - -func (f *ecdsaKey) newECDSA() (*ecdsa.PublicKey, error) { - var c elliptic.Curve - if bytes.Equal(f.oid, oidCurveP256) { - c = elliptic.P256() - } else if bytes.Equal(f.oid, oidCurveP384) { - c = elliptic.P384() - } else if bytes.Equal(f.oid, oidCurveP521) { - c = elliptic.P521() - } else { - return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid)) - } - x, y := elliptic.Unmarshal(c, f.p.bytes) - if x == nil { - return nil, errors.UnsupportedError("failed to parse EC point") - } - return &ecdsa.PublicKey{Curve: c, X: x, Y: y}, nil -} - -func (f *ecdsaKey) byteLen() int { - return 1 + len(f.oid) + 2 + len(f.p.bytes) -} - -type kdfHashFunction byte -type kdfAlgorithm byte - -// ecdhKdf stores key derivation function parameters -// used for ECDH encryption. See RFC 6637, Section 9. 
-type ecdhKdf struct { - KdfHash kdfHashFunction - KdfAlgo kdfAlgorithm -} - -func (f *ecdhKdf) parse(r io.Reader) (err error) { - buf := make([]byte, 1) - if _, err = readFull(r, buf); err != nil { - return - } - kdfLen := int(buf[0]) - if kdfLen < 3 { - return errors.UnsupportedError("Unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) - } - buf = make([]byte, kdfLen) - if _, err = readFull(r, buf); err != nil { - return - } - reserved := int(buf[0]) - f.KdfHash = kdfHashFunction(buf[1]) - f.KdfAlgo = kdfAlgorithm(buf[2]) - if reserved != 0x01 { - return errors.UnsupportedError("Unsupported KDF reserved field: " + strconv.Itoa(reserved)) - } - return -} - -func (f *ecdhKdf) serialize(w io.Writer) (err error) { - buf := make([]byte, 4) - // See RFC 6637, Section 9, Algorithm-Specific Fields for ECDH keys. - buf[0] = byte(0x03) // Length of the following fields - buf[1] = byte(0x01) // Reserved for future extensions, must be 1 for now - buf[2] = byte(f.KdfHash) - buf[3] = byte(f.KdfAlgo) - _, err = w.Write(buf[:]) - return -} - -func (f *ecdhKdf) byteLen() int { - return 4 -} - -// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. -type PublicKey struct { - CreationTime time.Time - PubKeyAlgo PublicKeyAlgorithm - PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey - Fingerprint [20]byte - KeyId uint64 - IsSubkey bool - - n, e, p, q, g, y parsedMPI - - // RFC 6637 fields - ec *ecdsaKey - ecdh *ecdhKdf -} - -// signingKey provides a convenient abstraction over signature verification -// for v3 and v4 public keys. -type signingKey interface { - SerializeSignaturePrefix(io.Writer) - serializeWithoutHeaders(io.Writer) error -} - -func fromBig(n *big.Int) parsedMPI { - return parsedMPI{ - bytes: n.Bytes(), - bitLength: uint16(n.BitLen()), - } -} - -// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. -func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoRSA, - PublicKey: pub, - n: fromBig(pub.N), - e: fromBig(big.NewInt(int64(pub.E))), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. -func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoDSA, - PublicKey: pub, - p: fromBig(pub.P), - q: fromBig(pub.Q), - g: fromBig(pub.G), - y: fromBig(pub.Y), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. 
-func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoElGamal, - PublicKey: pub, - p: fromBig(pub.P), - g: fromBig(pub.G), - y: fromBig(pub.Y), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoECDSA, - PublicKey: pub, - ec: new(ecdsaKey), - } - - switch pub.Curve { - case elliptic.P256(): - pk.ec.oid = oidCurveP256 - case elliptic.P384(): - pk.ec.oid = oidCurveP384 - case elliptic.P521(): - pk.ec.oid = oidCurveP521 - default: - panic("unknown elliptic curve") - } - - pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) - - // The bit length is 3 (for the 0x04 specifying an uncompressed key) - // plus two field elements (for x and y), which are rounded up to the - // nearest byte. See https://tools.ietf.org/html/rfc6637#section-6 - fieldBytes := (pub.Curve.Params().BitSize + 7) & ^7 - pk.ec.p.bitLength = uint16(3 + fieldBytes + fieldBytes) - - pk.setFingerPrintAndKeyId() - return pk -} - -func (pk *PublicKey) parse(r io.Reader) (err error) { - // RFC 4880, section 5.5.2 - var buf [6]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != 4 { - return errors.UnsupportedError("public key version") - } - pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) - pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - err = pk.parseRSA(r) - case PubKeyAlgoDSA: - err = pk.parseDSA(r) - case PubKeyAlgoElGamal: - err = pk.parseElGamal(r) - case PubKeyAlgoECDSA: - pk.ec = new(ecdsaKey) - if err = pk.ec.parse(r); err != nil { - return err - } - pk.PublicKey, err = pk.ec.newECDSA() - case PubKeyAlgoECDH: - pk.ec = new(ecdsaKey) - if err = pk.ec.parse(r); err != nil { - return - } - pk.ecdh = new(ecdhKdf) - if err = pk.ecdh.parse(r); err != nil { - return - } - // The ECDH key is stored in an ecdsa.PublicKey for convenience. - pk.PublicKey, err = pk.ec.newECDSA() - default: - err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) - } - if err != nil { - return - } - - pk.setFingerPrintAndKeyId() - return -} - -func (pk *PublicKey) setFingerPrintAndKeyId() { - // RFC 4880, section 12.2 - fingerPrint := sha1.New() - pk.SerializeSignaturePrefix(fingerPrint) - pk.serializeWithoutHeaders(fingerPrint) - copy(pk.Fingerprint[:], fingerPrint.Sum(nil)) - pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) -} - -// parseRSA parses RSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. -func (pk *PublicKey) parseRSA(r io.Reader) (err error) { - pk.n.bytes, pk.n.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.e.bytes, pk.e.bitLength, err = readMPI(r) - if err != nil { - return - } - - if len(pk.e.bytes) > 3 { - err = errors.UnsupportedError("large public exponent") - return - } - rsa := &rsa.PublicKey{ - N: new(big.Int).SetBytes(pk.n.bytes), - E: 0, - } - for i := 0; i < len(pk.e.bytes); i++ { - rsa.E <<= 8 - rsa.E |= int(pk.e.bytes[i]) - } - pk.PublicKey = rsa - return -} - -// parseDSA parses DSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. 
-func (pk *PublicKey) parseDSA(r io.Reader) (err error) { - pk.p.bytes, pk.p.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.q.bytes, pk.q.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.g.bytes, pk.g.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.y.bytes, pk.y.bitLength, err = readMPI(r) - if err != nil { - return - } - - dsa := new(dsa.PublicKey) - dsa.P = new(big.Int).SetBytes(pk.p.bytes) - dsa.Q = new(big.Int).SetBytes(pk.q.bytes) - dsa.G = new(big.Int).SetBytes(pk.g.bytes) - dsa.Y = new(big.Int).SetBytes(pk.y.bytes) - pk.PublicKey = dsa - return -} - -// parseElGamal parses ElGamal public key material from the given Reader. See -// RFC 4880, section 5.5.2. -func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { - pk.p.bytes, pk.p.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.g.bytes, pk.g.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.y.bytes, pk.y.bitLength, err = readMPI(r) - if err != nil { - return - } - - elgamal := new(elgamal.PublicKey) - elgamal.P = new(big.Int).SetBytes(pk.p.bytes) - elgamal.G = new(big.Int).SetBytes(pk.g.bytes) - elgamal.Y = new(big.Int).SetBytes(pk.y.bytes) - pk.PublicKey = elgamal - return -} - -// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. -// The prefix is used when calculating a signature over this public key. See -// RFC 4880, section 5.2.4. -func (pk *PublicKey) SerializeSignaturePrefix(h io.Writer) { - var pLength uint16 - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - pLength += 2 + uint16(len(pk.n.bytes)) - pLength += 2 + uint16(len(pk.e.bytes)) - case PubKeyAlgoDSA: - pLength += 2 + uint16(len(pk.p.bytes)) - pLength += 2 + uint16(len(pk.q.bytes)) - pLength += 2 + uint16(len(pk.g.bytes)) - pLength += 2 + uint16(len(pk.y.bytes)) - case PubKeyAlgoElGamal: - pLength += 2 + uint16(len(pk.p.bytes)) - pLength += 2 + uint16(len(pk.g.bytes)) - pLength += 2 + uint16(len(pk.y.bytes)) - case PubKeyAlgoECDSA: - pLength += uint16(pk.ec.byteLen()) - case PubKeyAlgoECDH: - pLength += uint16(pk.ec.byteLen()) - pLength += uint16(pk.ecdh.byteLen()) - default: - panic("unknown public key algorithm") - } - pLength += 6 - h.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) - return -} - -func (pk *PublicKey) Serialize(w io.Writer) (err error) { - length := 6 // 6 byte header - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - length += 2 + len(pk.n.bytes) - length += 2 + len(pk.e.bytes) - case PubKeyAlgoDSA: - length += 2 + len(pk.p.bytes) - length += 2 + len(pk.q.bytes) - length += 2 + len(pk.g.bytes) - length += 2 + len(pk.y.bytes) - case PubKeyAlgoElGamal: - length += 2 + len(pk.p.bytes) - length += 2 + len(pk.g.bytes) - length += 2 + len(pk.y.bytes) - case PubKeyAlgoECDSA: - length += pk.ec.byteLen() - case PubKeyAlgoECDH: - length += pk.ec.byteLen() - length += pk.ecdh.byteLen() - default: - panic("unknown public key algorithm") - } - - packetType := packetTypePublicKey - if pk.IsSubkey { - packetType = packetTypePublicSubkey - } - err = serializeHeader(w, packetType, length) - if err != nil { - return - } - return pk.serializeWithoutHeaders(w) -} - -// serializeWithoutHeaders marshals the PublicKey to w in the form of an -// OpenPGP public key packet, not including the packet header. 
-func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { - var buf [6]byte - buf[0] = 4 - t := uint32(pk.CreationTime.Unix()) - buf[1] = byte(t >> 24) - buf[2] = byte(t >> 16) - buf[3] = byte(t >> 8) - buf[4] = byte(t) - buf[5] = byte(pk.PubKeyAlgo) - - _, err = w.Write(buf[:]) - if err != nil { - return - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - return writeMPIs(w, pk.n, pk.e) - case PubKeyAlgoDSA: - return writeMPIs(w, pk.p, pk.q, pk.g, pk.y) - case PubKeyAlgoElGamal: - return writeMPIs(w, pk.p, pk.g, pk.y) - case PubKeyAlgoECDSA: - return pk.ec.serialize(w) - case PubKeyAlgoECDH: - if err = pk.ec.serialize(w); err != nil { - return - } - return pk.ecdh.serialize(w) - } - return errors.InvalidArgumentError("bad public-key algorithm") -} - -// CanSign returns true iff this public key can generate signatures -func (pk *PublicKey) CanSign() bool { - return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal -} - -// VerifySignature returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. -func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - signed.Write(sig.HashSuffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) - err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)) - if err != nil { - return errors.SignatureError("RSA verification failure") - } - return nil - case PubKeyAlgoDSA: - dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 - if len(hashBytes) > subgroupSize { - hashBytes = hashBytes[:subgroupSize] - } - if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { - return errors.SignatureError("DSA verification failure") - } - return nil - case PubKeyAlgoECDSA: - ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) - if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.bytes), new(big.Int).SetBytes(sig.ECDSASigS.bytes)) { - return errors.SignatureError("ECDSA verification failure") - } - return nil - default: - return errors.SignatureError("Unsupported public key algorithm used in signature") - } -} - -// VerifySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. 
-func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - suffix := make([]byte, 5) - suffix[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) - signed.Write(suffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - rsaPublicKey := pk.PublicKey.(*rsa.PublicKey) - if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)); err != nil { - return errors.SignatureError("RSA verification failure") - } - return - case PubKeyAlgoDSA: - dsaPublicKey := pk.PublicKey.(*dsa.PublicKey) - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 - if len(hashBytes) > subgroupSize { - hashBytes = hashBytes[:subgroupSize] - } - if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { - return errors.SignatureError("DSA verification failure") - } - return nil - default: - panic("shouldn't happen") - } -} - -// keySignatureHash returns a Hash of the message that needs to be signed for -// pk to assert a subkey relationship to signed. -func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - signed.SerializeSignaturePrefix(h) - signed.serializeWithoutHeaders(h) - return -} - -// VerifyKeySignature returns nil iff sig is a valid signature, made by this -// public key, of signed. -func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { - h, err := keySignatureHash(pk, signed, sig.Hash) - if err != nil { - return err - } - if err = pk.VerifySignature(h, sig); err != nil { - return err - } - - if sig.FlagSign { - // Signing subkeys must be cross-signed. See - // https://www.gnupg.org/faq/subkey-cross-certify.html. - if sig.EmbeddedSignature == nil { - return errors.StructuralError("signing subkey is missing cross-signature") - } - // Verify the cross-signature. This is calculated over the same - // data as the main signature, so we cannot just recursively - // call signed.VerifyKeySignature(...) - if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { - return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) - } - if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { - return errors.StructuralError("error while verifying cross-signature: " + err.Error()) - } - } - - return nil -} - -func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - return -} - -// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this -// public key. 
-func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { - h, err := keyRevocationHash(pk, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignature(h, sig) -} - -// userIdSignatureHash returns a Hash of the message that needs to be signed -// to assert that pk is a valid key for id. -func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - var buf [5]byte - buf[0] = 0xb4 - buf[1] = byte(len(id) >> 24) - buf[2] = byte(len(id) >> 16) - buf[3] = byte(len(id) >> 8) - buf[4] = byte(len(id)) - h.Write(buf[:]) - h.Write([]byte(id)) - - return -} - -// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { - h, err := userIdSignatureHash(id, pub, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignature(h, sig) -} - -// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKey) VerifyUserIdSignatureV3(id string, pub *PublicKey, sig *SignatureV3) (err error) { - h, err := userIdSignatureV3Hash(id, pub, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// KeyIdString returns the public key's fingerprint in capital hex -// (e.g. "6C7EE1B8621CC013"). -func (pk *PublicKey) KeyIdString() string { - return fmt.Sprintf("%X", pk.Fingerprint[12:20]) -} - -// KeyIdShortString returns the short form of public key's fingerprint -// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). -func (pk *PublicKey) KeyIdShortString() string { - return fmt.Sprintf("%X", pk.Fingerprint[16:20]) -} - -// A parsedMPI is used to store the contents of a big integer, along with the -// bit length that was specified in the original input. This allows the MPI to -// be reserialized exactly. -type parsedMPI struct { - bytes []byte - bitLength uint16 -} - -// writeMPIs is a utility function for serializing several big integers to the -// given Writer. -func writeMPIs(w io.Writer, mpis ...parsedMPI) (err error) { - for _, mpi := range mpis { - err = writeMPI(w, mpi.bitLength, mpi.bytes) - if err != nil { - return - } - } - return -} - -// BitLength returns the bit length for the given public key. -func (pk *PublicKey) BitLength() (bitLength uint16, err error) { - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - bitLength = pk.n.bitLength - case PubKeyAlgoDSA: - bitLength = pk.p.bitLength - case PubKeyAlgoElGamal: - bitLength = pk.p.bitLength - default: - err = errors.InvalidArgumentError("bad public-key algorithm") - } - return -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go b/src/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go deleted file mode 100644 index 5daf7b6cf..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package packet - -import ( - "crypto" - "crypto/md5" - "crypto/rsa" - "encoding/binary" - "fmt" - "hash" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/errors" -) - -// PublicKeyV3 represents older, version 3 public keys. These keys are less secure and -// should not be used for signing or encrypting. They are supported here only for -// parsing version 3 key material and validating signatures. -// See RFC 4880, section 5.5.2. -type PublicKeyV3 struct { - CreationTime time.Time - DaysToExpire uint16 - PubKeyAlgo PublicKeyAlgorithm - PublicKey *rsa.PublicKey - Fingerprint [16]byte - KeyId uint64 - IsSubkey bool - - n, e parsedMPI -} - -// newRSAPublicKeyV3 returns a PublicKey that wraps the given rsa.PublicKey. -// Included here for testing purposes only. RFC 4880, section 5.5.2: -// "an implementation MUST NOT generate a V3 key, but MAY accept it." -func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 { - pk := &PublicKeyV3{ - CreationTime: creationTime, - PublicKey: pub, - n: fromBig(pub.N), - e: fromBig(big.NewInt(int64(pub.E))), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -func (pk *PublicKeyV3) parse(r io.Reader) (err error) { - // RFC 4880, section 5.5.2 - var buf [8]byte - if _, err = readFull(r, buf[:]); err != nil { - return - } - if buf[0] < 2 || buf[0] > 3 { - return errors.UnsupportedError("public key version") - } - pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) - pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7]) - pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7]) - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - err = pk.parseRSA(r) - default: - err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) - } - if err != nil { - return - } - - pk.setFingerPrintAndKeyId() - return -} - -func (pk *PublicKeyV3) setFingerPrintAndKeyId() { - // RFC 4880, section 12.2 - fingerPrint := md5.New() - fingerPrint.Write(pk.n.bytes) - fingerPrint.Write(pk.e.bytes) - fingerPrint.Sum(pk.Fingerprint[:0]) - pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:]) -} - -// parseRSA parses RSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. -func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) { - if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil { - return - } - if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil { - return - } - - // RFC 4880 Section 12.2 requires the low 8 bytes of the - // modulus to form the key id. - if len(pk.n.bytes) < 8 { - return errors.StructuralError("v3 public key modulus is too short") - } - if len(pk.e.bytes) > 3 { - err = errors.UnsupportedError("large public exponent") - return - } - rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)} - for i := 0; i < len(pk.e.bytes); i++ { - rsa.E <<= 8 - rsa.E |= int(pk.e.bytes[i]) - } - pk.PublicKey = rsa - return -} - -// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. -// The prefix is used when calculating a signature over this public key. See -// RFC 4880, section 5.2.4. 
-func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) { - var pLength uint16 - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - pLength += 2 + uint16(len(pk.n.bytes)) - pLength += 2 + uint16(len(pk.e.bytes)) - default: - panic("unknown public key algorithm") - } - pLength += 6 - w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) - return -} - -func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) { - length := 8 // 8 byte header - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - length += 2 + len(pk.n.bytes) - length += 2 + len(pk.e.bytes) - default: - panic("unknown public key algorithm") - } - - packetType := packetTypePublicKey - if pk.IsSubkey { - packetType = packetTypePublicSubkey - } - if err = serializeHeader(w, packetType, length); err != nil { - return - } - return pk.serializeWithoutHeaders(w) -} - -// serializeWithoutHeaders marshals the PublicKey to w in the form of an -// OpenPGP public key packet, not including the packet header. -func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) { - var buf [8]byte - // Version 3 - buf[0] = 3 - // Creation time - t := uint32(pk.CreationTime.Unix()) - buf[1] = byte(t >> 24) - buf[2] = byte(t >> 16) - buf[3] = byte(t >> 8) - buf[4] = byte(t) - // Days to expire - buf[5] = byte(pk.DaysToExpire >> 8) - buf[6] = byte(pk.DaysToExpire) - // Public key algorithm - buf[7] = byte(pk.PubKeyAlgo) - - if _, err = w.Write(buf[:]); err != nil { - return - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - return writeMPIs(w, pk.n, pk.e) - } - return errors.InvalidArgumentError("bad public-key algorithm") -} - -// CanSign returns true iff this public key can generate signatures -func (pk *PublicKeyV3) CanSign() bool { - return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly -} - -// VerifySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. -func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - suffix := make([]byte, 5) - suffix[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) - signed.Write(suffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil { - return errors.SignatureError("RSA verification failure") - } - return - default: - // V3 public keys only support RSA. - panic("shouldn't happen") - } -} - -// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) { - h, err := userIdSignatureV3Hash(id, pk, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of signed. 
-func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) { - h, err := keySignatureHash(pk, signed, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// userIdSignatureV3Hash returns a Hash of the message that needs to be signed -// to assert that pk is a valid key for id. -func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) { - if !hfn.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hfn.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - h.Write([]byte(id)) - - return -} - -// KeyIdString returns the public key's fingerprint in capital hex -// (e.g. "6C7EE1B8621CC013"). -func (pk *PublicKeyV3) KeyIdString() string { - return fmt.Sprintf("%X", pk.KeyId) -} - -// KeyIdShortString returns the short form of public key's fingerprint -// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). -func (pk *PublicKeyV3) KeyIdShortString() string { - return fmt.Sprintf("%X", pk.KeyId&0xFFFFFFFF) -} - -// BitLength returns the bit length for the given public key. -func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) { - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - bitLength = pk.n.bitLength - default: - err = errors.InvalidArgumentError("bad public-key algorithm") - } - return -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/reader.go b/src/vendor/golang.org/x/crypto/openpgp/packet/reader.go deleted file mode 100644 index 34bc7c613..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/packet/reader.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "golang.org/x/crypto/openpgp/errors" - "io" -) - -// Reader reads packets from an io.Reader and allows packets to be 'unread' so -// that they result from the next call to Next. -type Reader struct { - q []Packet - readers []io.Reader -} - -// New io.Readers are pushed when a compressed or encrypted packet is processed -// and recursively treated as a new source of packets. However, a carefully -// crafted packet can trigger an infinite recursive sequence of packets. See -// http://mumble.net/~campbell/misc/pgp-quine -// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 -// This constant limits the number of recursive packets that may be pushed. -const maxReaders = 32 - -// Next returns the most recently unread Packet, or reads another packet from -// the top-most io.Reader. Unknown packet types are skipped. -func (r *Reader) Next() (p Packet, err error) { - if len(r.q) > 0 { - p = r.q[len(r.q)-1] - r.q = r.q[:len(r.q)-1] - return - } - - for len(r.readers) > 0 { - p, err = Read(r.readers[len(r.readers)-1]) - if err == nil { - return - } - if err == io.EOF { - r.readers = r.readers[:len(r.readers)-1] - continue - } - if _, ok := err.(errors.UnknownPacketTypeError); !ok { - return nil, err - } - } - - return nil, io.EOF -} - -// Push causes the Reader to start reading from a new io.Reader. When an EOF -// error is seen from the new io.Reader, it is popped and the Reader continues -// to read from the next most recent io.Reader. Push returns a StructuralError -// if pushing the reader would exceed the maximum recursion level, otherwise it -// returns nil. 
-func (r *Reader) Push(reader io.Reader) (err error) { - if len(r.readers) >= maxReaders { - return errors.StructuralError("too many layers of packets") - } - r.readers = append(r.readers, reader) - return nil -} - -// Unread causes the given Packet to be returned from the next call to Next. -func (r *Reader) Unread(p Packet) { - r.q = append(r.q, p) -} - -func NewReader(r io.Reader) *Reader { - return &Reader{ - q: nil, - readers: []io.Reader{r}, - } -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/signature.go b/src/vendor/golang.org/x/crypto/openpgp/packet/signature.go deleted file mode 100644 index b2a24a532..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/packet/signature.go +++ /dev/null @@ -1,731 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "encoding/asn1" - "encoding/binary" - "hash" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -const ( - // See RFC 4880, section 5.2.3.21 for details. - KeyFlagCertify = 1 << iota - KeyFlagSign - KeyFlagEncryptCommunications - KeyFlagEncryptStorage -) - -// Signature represents a signature. See RFC 4880, section 5.2. -type Signature struct { - SigType SignatureType - PubKeyAlgo PublicKeyAlgorithm - Hash crypto.Hash - - // HashSuffix is extra data that is hashed in after the signed data. - HashSuffix []byte - // HashTag contains the first two bytes of the hash for fast rejection - // of bad signed data. - HashTag [2]byte - CreationTime time.Time - - RSASignature parsedMPI - DSASigR, DSASigS parsedMPI - ECDSASigR, ECDSASigS parsedMPI - - // rawSubpackets contains the unparsed subpackets, in order. - rawSubpackets []outputSubpacket - - // The following are optional so are nil when not included in the - // signature. - - SigLifetimeSecs, KeyLifetimeSecs *uint32 - PreferredSymmetric, PreferredHash, PreferredCompression []uint8 - IssuerKeyId *uint64 - IsPrimaryId *bool - - // FlagsValid is set if any flags were given. See RFC 4880, section - // 5.2.3.21 for details. - FlagsValid bool - FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool - - // RevocationReason is set if this signature has been revoked. - // See RFC 4880, section 5.2.3.23 for details. - RevocationReason *uint8 - RevocationReasonText string - - // MDC is set if this signature has a feature packet that indicates - // support for MDC subpackets. - MDC bool - - // EmbeddedSignature, if non-nil, is a signature of the parent key, by - // this key. This prevents an attacker from claiming another's signing - // subkey as their own. 
- EmbeddedSignature *Signature - - outSubpackets []outputSubpacket -} - -func (sig *Signature) parse(r io.Reader) (err error) { - // RFC 4880, section 5.2.3 - var buf [5]byte - _, err = readFull(r, buf[:1]) - if err != nil { - return - } - if buf[0] != 4 { - err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) - return - } - - _, err = readFull(r, buf[:5]) - if err != nil { - return - } - sig.SigType = SignatureType(buf[0]) - sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: - default: - err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) - return - } - - var ok bool - sig.Hash, ok = s2k.HashIdToHash(buf[2]) - if !ok { - return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) - } - - hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4]) - l := 6 + hashedSubpacketsLength - sig.HashSuffix = make([]byte, l+6) - sig.HashSuffix[0] = 4 - copy(sig.HashSuffix[1:], buf[:5]) - hashedSubpackets := sig.HashSuffix[6:l] - _, err = readFull(r, hashedSubpackets) - if err != nil { - return - } - // See RFC 4880, section 5.2.4 - trailer := sig.HashSuffix[l:] - trailer[0] = 4 - trailer[1] = 0xff - trailer[2] = uint8(l >> 24) - trailer[3] = uint8(l >> 16) - trailer[4] = uint8(l >> 8) - trailer[5] = uint8(l) - - err = parseSignatureSubpackets(sig, hashedSubpackets, true) - if err != nil { - return - } - - _, err = readFull(r, buf[:2]) - if err != nil { - return - } - unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1]) - unhashedSubpackets := make([]byte, unhashedSubpacketsLength) - _, err = readFull(r, unhashedSubpackets) - if err != nil { - return - } - err = parseSignatureSubpackets(sig, unhashedSubpackets, false) - if err != nil { - return - } - - _, err = readFull(r, sig.HashTag[:2]) - if err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) - case PubKeyAlgoDSA: - sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r) - if err == nil { - sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) - } - case PubKeyAlgoECDSA: - sig.ECDSASigR.bytes, sig.ECDSASigR.bitLength, err = readMPI(r) - if err == nil { - sig.ECDSASigS.bytes, sig.ECDSASigS.bitLength, err = readMPI(r) - } - default: - panic("unreachable") - } - return -} - -// parseSignatureSubpackets parses subpackets of the main signature packet. See -// RFC 4880, section 5.2.3.1. 
-func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { - for len(subpackets) > 0 { - subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) - if err != nil { - return - } - } - - if sig.CreationTime.IsZero() { - err = errors.StructuralError("no creation time in signature") - } - - return -} - -type signatureSubpacketType uint8 - -const ( - creationTimeSubpacket signatureSubpacketType = 2 - signatureExpirationSubpacket signatureSubpacketType = 3 - keyExpirationSubpacket signatureSubpacketType = 9 - prefSymmetricAlgosSubpacket signatureSubpacketType = 11 - issuerSubpacket signatureSubpacketType = 16 - prefHashAlgosSubpacket signatureSubpacketType = 21 - prefCompressionSubpacket signatureSubpacketType = 22 - primaryUserIdSubpacket signatureSubpacketType = 25 - keyFlagsSubpacket signatureSubpacketType = 27 - reasonForRevocationSubpacket signatureSubpacketType = 29 - featuresSubpacket signatureSubpacketType = 30 - embeddedSignatureSubpacket signatureSubpacketType = 32 -) - -// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. -func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { - // RFC 4880, section 5.2.3.1 - var ( - length uint32 - packetType signatureSubpacketType - isCritical bool - ) - switch { - case subpacket[0] < 192: - length = uint32(subpacket[0]) - subpacket = subpacket[1:] - case subpacket[0] < 255: - if len(subpacket) < 2 { - goto Truncated - } - length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 - subpacket = subpacket[2:] - default: - if len(subpacket) < 5 { - goto Truncated - } - length = uint32(subpacket[1])<<24 | - uint32(subpacket[2])<<16 | - uint32(subpacket[3])<<8 | - uint32(subpacket[4]) - subpacket = subpacket[5:] - } - if length > uint32(len(subpacket)) { - goto Truncated - } - rest = subpacket[length:] - subpacket = subpacket[:length] - if len(subpacket) == 0 { - err = errors.StructuralError("zero length signature subpacket") - return - } - packetType = signatureSubpacketType(subpacket[0] & 0x7f) - isCritical = subpacket[0]&0x80 == 0x80 - subpacket = subpacket[1:] - sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) - switch packetType { - case creationTimeSubpacket: - if !isHashed { - err = errors.StructuralError("signature creation time in non-hashed area") - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("signature creation time not four bytes") - return - } - t := binary.BigEndian.Uint32(subpacket) - sig.CreationTime = time.Unix(int64(t), 0) - case signatureExpirationSubpacket: - // Signature expiration time, section 5.2.3.10 - if !isHashed { - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("expiration subpacket with bad length") - return - } - sig.SigLifetimeSecs = new(uint32) - *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) - case keyExpirationSubpacket: - // Key expiration time, section 5.2.3.6 - if !isHashed { - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("key expiration subpacket with bad length") - return - } - sig.KeyLifetimeSecs = new(uint32) - *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) - case prefSymmetricAlgosSubpacket: - // Preferred symmetric algorithms, section 5.2.3.7 - if !isHashed { - return - } - sig.PreferredSymmetric = make([]byte, len(subpacket)) - copy(sig.PreferredSymmetric, subpacket) - case issuerSubpacket: - // Issuer, section 5.2.3.5 - if 
len(subpacket) != 8 { - err = errors.StructuralError("issuer subpacket with bad length") - return - } - sig.IssuerKeyId = new(uint64) - *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) - case prefHashAlgosSubpacket: - // Preferred hash algorithms, section 5.2.3.8 - if !isHashed { - return - } - sig.PreferredHash = make([]byte, len(subpacket)) - copy(sig.PreferredHash, subpacket) - case prefCompressionSubpacket: - // Preferred compression algorithms, section 5.2.3.9 - if !isHashed { - return - } - sig.PreferredCompression = make([]byte, len(subpacket)) - copy(sig.PreferredCompression, subpacket) - case primaryUserIdSubpacket: - // Primary User ID, section 5.2.3.19 - if !isHashed { - return - } - if len(subpacket) != 1 { - err = errors.StructuralError("primary user id subpacket with bad length") - return - } - sig.IsPrimaryId = new(bool) - if subpacket[0] > 0 { - *sig.IsPrimaryId = true - } - case keyFlagsSubpacket: - // Key flags, section 5.2.3.21 - if !isHashed { - return - } - if len(subpacket) == 0 { - err = errors.StructuralError("empty key flags subpacket") - return - } - sig.FlagsValid = true - if subpacket[0]&KeyFlagCertify != 0 { - sig.FlagCertify = true - } - if subpacket[0]&KeyFlagSign != 0 { - sig.FlagSign = true - } - if subpacket[0]&KeyFlagEncryptCommunications != 0 { - sig.FlagEncryptCommunications = true - } - if subpacket[0]&KeyFlagEncryptStorage != 0 { - sig.FlagEncryptStorage = true - } - case reasonForRevocationSubpacket: - // Reason For Revocation, section 5.2.3.23 - if !isHashed { - return - } - if len(subpacket) == 0 { - err = errors.StructuralError("empty revocation reason subpacket") - return - } - sig.RevocationReason = new(uint8) - *sig.RevocationReason = subpacket[0] - sig.RevocationReasonText = string(subpacket[1:]) - case featuresSubpacket: - // Features subpacket, section 5.2.3.24 specifies a very general - // mechanism for OpenPGP implementations to signal support for new - // features. In practice, the subpacket is used exclusively to - // indicate support for MDC-protected encryption. - sig.MDC = len(subpacket) >= 1 && subpacket[0]&1 == 1 - case embeddedSignatureSubpacket: - // Only usage is in signatures that cross-certify - // signing subkeys. section 5.2.3.26 describes the - // format, with its usage described in section 11.1 - if sig.EmbeddedSignature != nil { - err = errors.StructuralError("Cannot have multiple embedded signatures") - return - } - sig.EmbeddedSignature = new(Signature) - // Embedded signatures are required to be v4 signatures see - // section 12.1. However, we only parse v4 signatures in this - // file anyway. - if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { - return nil, err - } - if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { - return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) - } - default: - if isCritical { - err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) - return - } - } - return - -Truncated: - err = errors.StructuralError("signature subpacket truncated") - return -} - -// subpacketLengthLength returns the length, in bytes, of an encoded length value. -func subpacketLengthLength(length int) int { - if length < 192 { - return 1 - } - if length < 16320 { - return 2 - } - return 5 -} - -// serializeSubpacketLength marshals the given length into to. -func serializeSubpacketLength(to []byte, length int) int { - // RFC 4880, Section 4.2.2. 
- if length < 192 { - to[0] = byte(length) - return 1 - } - if length < 16320 { - length -= 192 - to[0] = byte((length >> 8) + 192) - to[1] = byte(length) - return 2 - } - to[0] = 255 - to[1] = byte(length >> 24) - to[2] = byte(length >> 16) - to[3] = byte(length >> 8) - to[4] = byte(length) - return 5 -} - -// subpacketsLength returns the serialized length, in bytes, of the given -// subpackets. -func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { - for _, subpacket := range subpackets { - if subpacket.hashed == hashed { - length += subpacketLengthLength(len(subpacket.contents) + 1) - length += 1 // type byte - length += len(subpacket.contents) - } - } - return -} - -// serializeSubpackets marshals the given subpackets into to. -func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { - for _, subpacket := range subpackets { - if subpacket.hashed == hashed { - n := serializeSubpacketLength(to, len(subpacket.contents)+1) - to[n] = byte(subpacket.subpacketType) - to = to[1+n:] - n = copy(to, subpacket.contents) - to = to[n:] - } - } - return -} - -// KeyExpired returns whether sig is a self-signature of a key that has -// expired. -func (sig *Signature) KeyExpired(currentTime time.Time) bool { - if sig.KeyLifetimeSecs == nil { - return false - } - expiry := sig.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) - return currentTime.After(expiry) -} - -// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. -func (sig *Signature) buildHashSuffix() (err error) { - hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) - - var ok bool - l := 6 + hashedSubpacketsLen - sig.HashSuffix = make([]byte, l+6) - sig.HashSuffix[0] = 4 - sig.HashSuffix[1] = uint8(sig.SigType) - sig.HashSuffix[2] = uint8(sig.PubKeyAlgo) - sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash) - if !ok { - sig.HashSuffix = nil - return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) - } - sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8) - sig.HashSuffix[5] = byte(hashedSubpacketsLen) - serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true) - trailer := sig.HashSuffix[l:] - trailer[0] = 4 - trailer[1] = 0xff - trailer[2] = byte(l >> 24) - trailer[3] = byte(l >> 16) - trailer[4] = byte(l >> 8) - trailer[5] = byte(l) - return -} - -func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { - err = sig.buildHashSuffix() - if err != nil { - return - } - - h.Write(sig.HashSuffix) - digest = h.Sum(nil) - copy(sig.HashTag[:], digest) - return -} - -// Sign signs a message with a private key. The hash, h, must contain -// the hash of the message to be signed and will be mutated by this function. -// On success, the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. 
-func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) { - sig.outSubpackets = sig.buildSubpackets() - digest, err := sig.signPrepareHash(h) - if err != nil { - return - } - - switch priv.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - // supports both *rsa.PrivateKey and crypto.Signer - sig.RSASignature.bytes, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) - sig.RSASignature.bitLength = uint16(8 * len(sig.RSASignature.bytes)) - case PubKeyAlgoDSA: - dsaPriv := priv.PrivateKey.(*dsa.PrivateKey) - - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8 - if len(digest) > subgroupSize { - digest = digest[:subgroupSize] - } - r, s, err := dsa.Sign(config.Random(), dsaPriv, digest) - if err == nil { - sig.DSASigR.bytes = r.Bytes() - sig.DSASigR.bitLength = uint16(8 * len(sig.DSASigR.bytes)) - sig.DSASigS.bytes = s.Bytes() - sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes)) - } - case PubKeyAlgoECDSA: - var r, s *big.Int - if pk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok { - // direct support, avoid asn1 wrapping/unwrapping - r, s, err = ecdsa.Sign(config.Random(), pk, digest) - } else { - var b []byte - b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) - if err == nil { - r, s, err = unwrapECDSASig(b) - } - } - if err == nil { - sig.ECDSASigR = fromBig(r) - sig.ECDSASigS = fromBig(s) - } - default: - err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) - } - - return -} - -// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA -// signature. -func unwrapECDSASig(b []byte) (r, s *big.Int, err error) { - var ecsdaSig struct { - R, S *big.Int - } - _, err = asn1.Unmarshal(b, &ecsdaSig) - if err != nil { - return - } - return ecsdaSig.R, ecsdaSig.S, nil -} - -// SignUserId computes a signature from priv, asserting that pub is a valid -// key for the identity id. On success, the signature is stored in sig. Call -// Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error { - h, err := userIdSignatureHash(id, pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, priv, config) -} - -// SignKey computes a signature from priv, asserting that pub is a subkey. On -// success, the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error { - h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, priv, config) -} - -// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been -// called first. 
-func (sig *Signature) Serialize(w io.Writer) (err error) { - if len(sig.outSubpackets) == 0 { - sig.outSubpackets = sig.rawSubpackets - } - if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil && sig.ECDSASigR.bytes == nil { - return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") - } - - sigLength := 0 - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sigLength = 2 + len(sig.RSASignature.bytes) - case PubKeyAlgoDSA: - sigLength = 2 + len(sig.DSASigR.bytes) - sigLength += 2 + len(sig.DSASigS.bytes) - case PubKeyAlgoECDSA: - sigLength = 2 + len(sig.ECDSASigR.bytes) - sigLength += 2 + len(sig.ECDSASigS.bytes) - default: - panic("impossible") - } - - unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) - length := len(sig.HashSuffix) - 6 /* trailer not included */ + - 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + - 2 /* hash tag */ + sigLength - err = serializeHeader(w, packetTypeSignature, length) - if err != nil { - return - } - - _, err = w.Write(sig.HashSuffix[:len(sig.HashSuffix)-6]) - if err != nil { - return - } - - unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) - unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) - unhashedSubpackets[1] = byte(unhashedSubpacketsLen) - serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) - - _, err = w.Write(unhashedSubpackets) - if err != nil { - return - } - _, err = w.Write(sig.HashTag[:]) - if err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - err = writeMPIs(w, sig.RSASignature) - case PubKeyAlgoDSA: - err = writeMPIs(w, sig.DSASigR, sig.DSASigS) - case PubKeyAlgoECDSA: - err = writeMPIs(w, sig.ECDSASigR, sig.ECDSASigS) - default: - panic("impossible") - } - return -} - -// outputSubpacket represents a subpacket to be marshaled. -type outputSubpacket struct { - hashed bool // true if this subpacket is in the hashed area. - subpacketType signatureSubpacketType - isCritical bool - contents []byte -} - -func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) { - creationTime := make([]byte, 4) - binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) - subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) - - if sig.IssuerKeyId != nil { - keyId := make([]byte, 8) - binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) - subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId}) - } - - if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { - sigLifetime := make([]byte, 4) - binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) - subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) - } - - // Key flags may only appear in self-signatures or certification signatures. 
- - if sig.FlagsValid { - var flags byte - if sig.FlagCertify { - flags |= KeyFlagCertify - } - if sig.FlagSign { - flags |= KeyFlagSign - } - if sig.FlagEncryptCommunications { - flags |= KeyFlagEncryptCommunications - } - if sig.FlagEncryptStorage { - flags |= KeyFlagEncryptStorage - } - subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) - } - - // The following subpackets may only appear in self-signatures - - if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { - keyLifetime := make([]byte, 4) - binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) - subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) - } - - if sig.IsPrimaryId != nil && *sig.IsPrimaryId { - subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) - } - - if len(sig.PreferredSymmetric) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) - } - - if len(sig.PreferredHash) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) - } - - if len(sig.PreferredCompression) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) - } - - return -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go b/src/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go deleted file mode 100644 index 6edff8893..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "encoding/binary" - "fmt" - "io" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -// SignatureV3 represents older version 3 signatures. These signatures are less secure -// than version 4 and should not be used to create new signatures. They are included -// here for backwards compatibility to read and validate with older key material. -// See RFC 4880, section 5.2.2. -type SignatureV3 struct { - SigType SignatureType - CreationTime time.Time - IssuerKeyId uint64 - PubKeyAlgo PublicKeyAlgorithm - Hash crypto.Hash - HashTag [2]byte - - RSASignature parsedMPI - DSASigR, DSASigS parsedMPI -} - -func (sig *SignatureV3) parse(r io.Reader) (err error) { - // RFC 4880, section 5.2.2 - var buf [8]byte - if _, err = readFull(r, buf[:1]); err != nil { - return - } - if buf[0] < 2 || buf[0] > 3 { - err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) - return - } - if _, err = readFull(r, buf[:1]); err != nil { - return - } - if buf[0] != 5 { - err = errors.UnsupportedError( - "invalid hashed material length " + strconv.Itoa(int(buf[0]))) - return - } - - // Read hashed material: signature type + creation time - if _, err = readFull(r, buf[:5]); err != nil { - return - } - sig.SigType = SignatureType(buf[0]) - t := binary.BigEndian.Uint32(buf[1:5]) - sig.CreationTime = time.Unix(int64(t), 0) - - // Eight-octet Key ID of signer. 
- if _, err = readFull(r, buf[:8]); err != nil { - return - } - sig.IssuerKeyId = binary.BigEndian.Uint64(buf[:]) - - // Public-key and hash algorithm - if _, err = readFull(r, buf[:2]); err != nil { - return - } - sig.PubKeyAlgo = PublicKeyAlgorithm(buf[0]) - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA: - default: - err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) - return - } - var ok bool - if sig.Hash, ok = s2k.HashIdToHash(buf[1]); !ok { - return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) - } - - // Two-octet field holding left 16 bits of signed hash value. - if _, err = readFull(r, sig.HashTag[:2]); err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) - case PubKeyAlgoDSA: - if sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r); err != nil { - return - } - sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) - default: - panic("unreachable") - } - return -} - -// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been -// called first. -func (sig *SignatureV3) Serialize(w io.Writer) (err error) { - buf := make([]byte, 8) - - // Write the sig type and creation time - buf[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(buf[1:5], uint32(sig.CreationTime.Unix())) - if _, err = w.Write(buf[:5]); err != nil { - return - } - - // Write the issuer long key ID - binary.BigEndian.PutUint64(buf[:8], sig.IssuerKeyId) - if _, err = w.Write(buf[:8]); err != nil { - return - } - - // Write public key algorithm, hash ID, and hash value - buf[0] = byte(sig.PubKeyAlgo) - hashId, ok := s2k.HashToHashId(sig.Hash) - if !ok { - return errors.UnsupportedError(fmt.Sprintf("hash function %v", sig.Hash)) - } - buf[1] = hashId - copy(buf[2:4], sig.HashTag[:]) - if _, err = w.Write(buf[:4]); err != nil { - return - } - - if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil { - return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - err = writeMPIs(w, sig.RSASignature) - case PubKeyAlgoDSA: - err = writeMPIs(w, sig.DSASigR, sig.DSASigS) - default: - panic("impossible") - } - return -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go b/src/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go deleted file mode 100644 index 744c2d2c4..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto/cipher" - "io" - "strconv" - - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -// This is the largest session key that we'll support. Since no 512-bit cipher -// has even been seriously used, this is comfortably large. -const maxSessionKeySizeInBytes = 64 - -// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC -// 4880, section 5.3. 
-type SymmetricKeyEncrypted struct { - CipherFunc CipherFunction - s2k func(out, in []byte) - encryptedKey []byte -} - -const symmetricKeyEncryptedVersion = 4 - -func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { - // RFC 4880, section 5.3. - var buf [2]byte - if _, err := readFull(r, buf[:]); err != nil { - return err - } - if buf[0] != symmetricKeyEncryptedVersion { - return errors.UnsupportedError("SymmetricKeyEncrypted version") - } - ske.CipherFunc = CipherFunction(buf[1]) - - if ske.CipherFunc.KeySize() == 0 { - return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1]))) - } - - var err error - ske.s2k, err = s2k.Parse(r) - if err != nil { - return err - } - - encryptedKey := make([]byte, maxSessionKeySizeInBytes) - // The session key may follow. We just have to try and read to find - // out. If it exists then we limit it to maxSessionKeySizeInBytes. - n, err := readFull(r, encryptedKey) - if err != nil && err != io.ErrUnexpectedEOF { - return err - } - - if n != 0 { - if n == maxSessionKeySizeInBytes { - return errors.UnsupportedError("oversized encrypted session key") - } - ske.encryptedKey = encryptedKey[:n] - } - - return nil -} - -// Decrypt attempts to decrypt an encrypted session key and returns the key and -// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data -// packet. -func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) { - key := make([]byte, ske.CipherFunc.KeySize()) - ske.s2k(key, passphrase) - - if len(ske.encryptedKey) == 0 { - return key, ske.CipherFunc, nil - } - - // the IV is all zeros - iv := make([]byte, ske.CipherFunc.blockSize()) - c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) - plaintextKey := make([]byte, len(ske.encryptedKey)) - c.XORKeyStream(plaintextKey, ske.encryptedKey) - cipherFunc := CipherFunction(plaintextKey[0]) - if cipherFunc.blockSize() == 0 { - return nil, ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) - } - plaintextKey = plaintextKey[1:] - if l, cipherKeySize := len(plaintextKey), cipherFunc.KeySize(); l != cipherFunc.KeySize() { - return nil, cipherFunc, errors.StructuralError("length of decrypted key (" + strconv.Itoa(l) + ") " + - "not equal to cipher keysize (" + strconv.Itoa(cipherKeySize) + ")") - } - return plaintextKey, cipherFunc, nil -} - -// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. The -// packet contains a random session key, encrypted by a key derived from the -// given passphrase. The session key is returned and must be passed to -// SerializeSymmetricallyEncrypted. -// If config is nil, sensible defaults will be used. -func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) { - cipherFunc := config.Cipher() - keySize := cipherFunc.KeySize() - if keySize == 0 { - return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) - } - - s2kBuf := new(bytes.Buffer) - keyEncryptingKey := make([]byte, keySize) - // s2k.Serialize salts and stretches the passphrase, and writes the - // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf. 
- err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()}) - if err != nil { - return - } - s2kBytes := s2kBuf.Bytes() - - packetLength := 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize - err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) - if err != nil { - return - } - - var buf [2]byte - buf[0] = symmetricKeyEncryptedVersion - buf[1] = byte(cipherFunc) - _, err = w.Write(buf[:]) - if err != nil { - return - } - _, err = w.Write(s2kBytes) - if err != nil { - return - } - - sessionKey := make([]byte, keySize) - _, err = io.ReadFull(config.Random(), sessionKey) - if err != nil { - return - } - iv := make([]byte, cipherFunc.blockSize()) - c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) - encryptedCipherAndKey := make([]byte, keySize+1) - c.XORKeyStream(encryptedCipherAndKey, buf[1:]) - c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) - _, err = w.Write(encryptedCipherAndKey) - if err != nil { - return - } - - key = sessionKey - return -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go b/src/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go deleted file mode 100644 index 1a1a62964..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto/cipher" - "crypto/sha1" - "crypto/subtle" - "golang.org/x/crypto/openpgp/errors" - "hash" - "io" - "strconv" -) - -// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The -// encrypted contents will consist of more OpenPGP packets. See RFC 4880, -// sections 5.7 and 5.13. -type SymmetricallyEncrypted struct { - MDC bool // true iff this is a type 18 packet and thus has an embedded MAC. - contents io.Reader - prefix []byte -} - -const symmetricallyEncryptedVersion = 1 - -func (se *SymmetricallyEncrypted) parse(r io.Reader) error { - if se.MDC { - // See RFC 4880, section 5.13. - var buf [1]byte - _, err := readFull(r, buf[:]) - if err != nil { - return err - } - if buf[0] != symmetricallyEncryptedVersion { - return errors.UnsupportedError("unknown SymmetricallyEncrypted version") - } - } - se.contents = r - return nil -} - -// Decrypt returns a ReadCloser, from which the decrypted contents of the -// packet can be read. An incorrect key can, with high probability, be detected -// immediately and this will result in a KeyIncorrect error being returned. -func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { - keySize := c.KeySize() - if keySize == 0 { - return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c))) - } - if len(key) != keySize { - return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") - } - - if se.prefix == nil { - se.prefix = make([]byte, c.blockSize()+2) - _, err := readFull(se.contents, se.prefix) - if err != nil { - return nil, err - } - } else if len(se.prefix) != c.blockSize()+2 { - return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths") - } - - ocfbResync := OCFBResync - if se.MDC { - // MDC packets use a different form of OCFB mode. 
- ocfbResync = OCFBNoResync - } - - s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync) - if s == nil { - return nil, errors.ErrKeyIncorrect - } - - plaintext := cipher.StreamReader{S: s, R: se.contents} - - if se.MDC { - // MDC packets have an embedded hash that we need to check. - h := sha1.New() - h.Write(se.prefix) - return &seMDCReader{in: plaintext, h: h}, nil - } - - // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser. - return seReader{plaintext}, nil -} - -// seReader wraps an io.Reader with a no-op Close method. -type seReader struct { - in io.Reader -} - -func (ser seReader) Read(buf []byte) (int, error) { - return ser.in.Read(buf) -} - -func (ser seReader) Close() error { - return nil -} - -const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size - -// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold -// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an -// MDC packet containing a hash of the previous contents which is checked -// against the running hash. See RFC 4880, section 5.13. -type seMDCReader struct { - in io.Reader - h hash.Hash - trailer [mdcTrailerSize]byte - scratch [mdcTrailerSize]byte - trailerUsed int - error bool - eof bool -} - -func (ser *seMDCReader) Read(buf []byte) (n int, err error) { - if ser.error { - err = io.ErrUnexpectedEOF - return - } - if ser.eof { - err = io.EOF - return - } - - // If we haven't yet filled the trailer buffer then we must do that - // first. - for ser.trailerUsed < mdcTrailerSize { - n, err = ser.in.Read(ser.trailer[ser.trailerUsed:]) - ser.trailerUsed += n - if err == io.EOF { - if ser.trailerUsed != mdcTrailerSize { - n = 0 - err = io.ErrUnexpectedEOF - ser.error = true - return - } - ser.eof = true - n = 0 - return - } - - if err != nil { - n = 0 - return - } - } - - // If it's a short read then we read into a temporary buffer and shift - // the data into the caller's buffer. - if len(buf) <= mdcTrailerSize { - n, err = readFull(ser.in, ser.scratch[:len(buf)]) - copy(buf, ser.trailer[:n]) - ser.h.Write(buf[:n]) - copy(ser.trailer[:], ser.trailer[n:]) - copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:]) - if n < len(buf) { - ser.eof = true - err = io.EOF - } - return - } - - n, err = ser.in.Read(buf[mdcTrailerSize:]) - copy(buf, ser.trailer[:]) - ser.h.Write(buf[:n]) - copy(ser.trailer[:], buf[n:]) - - if err == io.EOF { - ser.eof = true - } - return -} - -// This is a new-format packet tag byte for a type 19 (MDC) packet. -const mdcPacketTagByte = byte(0x80) | 0x40 | 19 - -func (ser *seMDCReader) Close() error { - if ser.error { - return errors.SignatureError("error during reading") - } - - for !ser.eof { - // We haven't seen EOF so we need to read to the end - var buf [1024]byte - _, err := ser.Read(buf[:]) - if err == io.EOF { - break - } - if err != nil { - return errors.SignatureError("error during reading") - } - } - - if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { - return errors.SignatureError("MDC packet not found") - } - ser.h.Write(ser.trailer[:2]) - - final := ser.h.Sum(nil) - if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 { - return errors.SignatureError("hash mismatch") - } - return nil -} - -// An seMDCWriter writes through to an io.WriteCloser while maintains a running -// hash of the data written. On close, it emits an MDC packet containing the -// running hash. 
-type seMDCWriter struct { - w io.WriteCloser - h hash.Hash -} - -func (w *seMDCWriter) Write(buf []byte) (n int, err error) { - w.h.Write(buf) - return w.w.Write(buf) -} - -func (w *seMDCWriter) Close() (err error) { - var buf [mdcTrailerSize]byte - - buf[0] = mdcPacketTagByte - buf[1] = sha1.Size - w.h.Write(buf[:2]) - digest := w.h.Sum(nil) - copy(buf[2:], digest) - - _, err = w.w.Write(buf[:]) - if err != nil { - return - } - return w.w.Close() -} - -// noOpCloser is like an io.NopCloser, but for an io.Writer. -type noOpCloser struct { - w io.Writer -} - -func (c noOpCloser) Write(data []byte) (n int, err error) { - return c.w.Write(data) -} - -func (c noOpCloser) Close() error { - return nil -} - -// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet -// to w and returns a WriteCloser to which the to-be-encrypted packets can be -// written. -// If config is nil, sensible defaults will be used. -func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (contents io.WriteCloser, err error) { - if c.KeySize() != len(key) { - return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length") - } - writeCloser := noOpCloser{w} - ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC) - if err != nil { - return - } - - _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion}) - if err != nil { - return - } - - block := c.new(key) - blockSize := block.BlockSize() - iv := make([]byte, blockSize) - _, err = config.Random().Read(iv) - if err != nil { - return - } - s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) - _, err = ciphertext.Write(prefix) - if err != nil { - return - } - plaintext := cipher.StreamWriter{S: s, W: ciphertext} - - h := sha1.New() - h.Write(iv) - h.Write(iv[blockSize-2:]) - contents = &seMDCWriter{w: plaintext, h: h} - return -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go b/src/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go deleted file mode 100644 index ff7ef5307..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "image" - "image/jpeg" - "io" -) - -const UserAttrImageSubpacket = 1 - -// UserAttribute is capable of storing other types of data about a user -// beyond name, email and a text comment. In practice, user attributes are typically used -// to store a signed thumbnail photo JPEG image of the user. -// See RFC 4880, section 5.12. -type UserAttribute struct { - Contents []*OpaqueSubpacket -} - -// NewUserAttributePhoto creates a user attribute packet -// containing the given images. -func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) { - uat = new(UserAttribute) - for _, photo := range photos { - var buf bytes.Buffer - // RFC 4880, Section 5.12.1. - data := []byte{ - 0x10, 0x00, // Little-endian image header length (16 bytes) - 0x01, // Image header version 1 - 0x01, // JPEG - 0, 0, 0, 0, // 12 reserved octets, must be all zero. 
- 0, 0, 0, 0, - 0, 0, 0, 0} - if _, err = buf.Write(data); err != nil { - return - } - if err = jpeg.Encode(&buf, photo, nil); err != nil { - return - } - uat.Contents = append(uat.Contents, &OpaqueSubpacket{ - SubType: UserAttrImageSubpacket, - Contents: buf.Bytes()}) - } - return -} - -// NewUserAttribute creates a new user attribute packet containing the given subpackets. -func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute { - return &UserAttribute{Contents: contents} -} - -func (uat *UserAttribute) parse(r io.Reader) (err error) { - // RFC 4880, section 5.13 - b, err := io.ReadAll(r) - if err != nil { - return - } - uat.Contents, err = OpaqueSubpackets(b) - return -} - -// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including -// header. -func (uat *UserAttribute) Serialize(w io.Writer) (err error) { - var buf bytes.Buffer - for _, sp := range uat.Contents { - sp.Serialize(&buf) - } - if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil { - return err - } - _, err = w.Write(buf.Bytes()) - return -} - -// ImageData returns zero or more byte slices, each containing -// JPEG File Interchange Format (JFIF), for each photo in the -// user attribute packet. -func (uat *UserAttribute) ImageData() (imageData [][]byte) { - for _, sp := range uat.Contents { - if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 { - imageData = append(imageData, sp.Contents[16:]) - } - } - return -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/userid.go b/src/vendor/golang.org/x/crypto/openpgp/packet/userid.go deleted file mode 100644 index 359a462eb..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/packet/userid.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "io" - "strings" -) - -// UserId contains text that is intended to represent the name and email -// address of the key holder. See RFC 4880, section 5.11. By convention, this -// takes the form "Full Name (Comment) " -type UserId struct { - Id string // By convention, this takes the form "Full Name (Comment) " which is split out in the fields below. - - Name, Comment, Email string -} - -func hasInvalidCharacters(s string) bool { - for _, c := range s { - switch c { - case '(', ')', '<', '>', 0: - return true - } - } - return false -} - -// NewUserId returns a UserId or nil if any of the arguments contain invalid -// characters. The invalid characters are '\x00', '(', ')', '<' and '>' -func NewUserId(name, comment, email string) *UserId { - // RFC 4880 doesn't deal with the structure of userid strings; the - // name, comment and email form is just a convention. However, there's - // no convention about escaping the metacharacters and GPG just refuses - // to create user ids where, say, the name contains a '('. We mirror - // this behaviour. 
- - if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) { - return nil - } - - uid := new(UserId) - uid.Name, uid.Comment, uid.Email = name, comment, email - uid.Id = name - if len(comment) > 0 { - if len(uid.Id) > 0 { - uid.Id += " " - } - uid.Id += "(" - uid.Id += comment - uid.Id += ")" - } - if len(email) > 0 { - if len(uid.Id) > 0 { - uid.Id += " " - } - uid.Id += "<" - uid.Id += email - uid.Id += ">" - } - return uid -} - -func (uid *UserId) parse(r io.Reader) (err error) { - // RFC 4880, section 5.11 - b, err := io.ReadAll(r) - if err != nil { - return - } - uid.Id = string(b) - uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id) - return -} - -// Serialize marshals uid to w in the form of an OpenPGP packet, including -// header. -func (uid *UserId) Serialize(w io.Writer) error { - err := serializeHeader(w, packetTypeUserId, len(uid.Id)) - if err != nil { - return err - } - _, err = w.Write([]byte(uid.Id)) - return err -} - -// parseUserId extracts the name, comment and email from a user id string that -// is formatted as "Full Name (Comment) ". -func parseUserId(id string) (name, comment, email string) { - var n, c, e struct { - start, end int - } - var state int - - for offset, rune := range id { - switch state { - case 0: - // Entering name - n.start = offset - state = 1 - fallthrough - case 1: - // In name - if rune == '(' { - state = 2 - n.end = offset - } else if rune == '<' { - state = 5 - n.end = offset - } - case 2: - // Entering comment - c.start = offset - state = 3 - fallthrough - case 3: - // In comment - if rune == ')' { - state = 4 - c.end = offset - } - case 4: - // Between comment and email - if rune == '<' { - state = 5 - } - case 5: - // Entering email - e.start = offset - state = 6 - fallthrough - case 6: - // In email - if rune == '>' { - state = 7 - e.end = offset - } - default: - // After email - } - } - switch state { - case 1: - // ended in the name - n.end = len(id) - case 3: - // ended in comment - c.end = len(id) - case 6: - // ended in email - e.end = len(id) - } - - name = strings.TrimSpace(id[n.start:n.end]) - comment = strings.TrimSpace(id[c.start:c.end]) - email = strings.TrimSpace(id[e.start:e.end]) - return -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/read.go b/src/vendor/golang.org/x/crypto/openpgp/read.go deleted file mode 100644 index 48a893146..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/read.go +++ /dev/null @@ -1,448 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package openpgp implements high level operations on OpenPGP messages. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. -package openpgp // import "golang.org/x/crypto/openpgp" - -import ( - "crypto" - _ "crypto/sha256" - "hash" - "io" - "strconv" - - "golang.org/x/crypto/openpgp/armor" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/packet" -) - -// SignatureType is the armor type for a PGP signature. -var SignatureType = "PGP SIGNATURE" - -// readArmored reads an armored block with the given type. 
-func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) { - block, err := armor.Decode(r) - if err != nil { - return - } - - if block.Type != expectedType { - return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type) - } - - return block.Body, nil -} - -// MessageDetails contains the result of parsing an OpenPGP encrypted and/or -// signed message. -type MessageDetails struct { - IsEncrypted bool // true if the message was encrypted. - EncryptedToKeyIds []uint64 // the list of recipient key ids. - IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. - DecryptedWith Key // the private key used to decrypt the message, if any. - IsSigned bool // true if the message is signed. - SignedByKeyId uint64 // the key id of the signer, if any. - SignedBy *Key // the key of the signer, if available. - LiteralData *packet.LiteralData // the metadata of the contents - UnverifiedBody io.Reader // the contents of the message. - - // If IsSigned is true and SignedBy is non-zero then the signature will - // be verified as UnverifiedBody is read. The signature cannot be - // checked until the whole of UnverifiedBody is read so UnverifiedBody - // must be consumed until EOF before the data can be trusted. Even if a - // message isn't signed (or the signer is unknown) the data may contain - // an authentication code that is only checked once UnverifiedBody has - // been consumed. Once EOF has been seen, the following fields are - // valid. (An authentication code failure is reported as a - // SignatureError error when reading from UnverifiedBody.) - SignatureError error // nil if the signature is good. - Signature *packet.Signature // the signature packet itself, if v4 (default) - SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature - - decrypted io.ReadCloser -} - -// A PromptFunction is used as a callback by functions that may need to decrypt -// a private key, or prompt for a passphrase. It is called with a list of -// acceptable, encrypted private keys and a boolean that indicates whether a -// passphrase is usable. It should either decrypt a private key or return a -// passphrase to try. If the decrypted private key or given passphrase isn't -// correct, the function will be called again, forever. Any error returned will -// be passed up. -type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) - -// A keyEnvelopePair is used to store a private key with the envelope that -// contains a symmetric key, encrypted with that key. -type keyEnvelopePair struct { - key Key - encryptedKey *packet.EncryptedKey -} - -// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. -// The given KeyRing should contain both public keys (for signature -// verification) and, possibly encrypted, private keys for decrypting. -// If config is nil, sensible defaults will be used. -func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { - var p packet.Packet - - var symKeys []*packet.SymmetricKeyEncrypted - var pubKeys []keyEnvelopePair - var se *packet.SymmetricallyEncrypted - - packets := packet.NewReader(r) - md = new(MessageDetails) - md.IsEncrypted = true - - // The message, if encrypted, starts with a number of packets - // containing an encrypted decryption key. The decryption key is either - // encrypted to a public key, or with a passphrase. This loop - // collects these packets. 
-ParsePackets: - for { - p, err = packets.Next() - if err != nil { - return nil, err - } - switch p := p.(type) { - case *packet.SymmetricKeyEncrypted: - // This packet contains the decryption key encrypted with a passphrase. - md.IsSymmetricallyEncrypted = true - symKeys = append(symKeys, p) - case *packet.EncryptedKey: - // This packet contains the decryption key encrypted to a public key. - md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) - switch p.Algo { - case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal: - break - default: - continue - } - var keys []Key - if p.KeyId == 0 { - keys = keyring.DecryptionKeys() - } else { - keys = keyring.KeysById(p.KeyId) - } - for _, k := range keys { - pubKeys = append(pubKeys, keyEnvelopePair{k, p}) - } - case *packet.SymmetricallyEncrypted: - se = p - break ParsePackets - case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: - // This message isn't encrypted. - if len(symKeys) != 0 || len(pubKeys) != 0 { - return nil, errors.StructuralError("key material not followed by encrypted message") - } - packets.Unread(p) - return readSignedMessage(packets, nil, keyring) - } - } - - var candidates []Key - var decrypted io.ReadCloser - - // Now that we have the list of encrypted keys we need to decrypt at - // least one of them or, if we cannot, we need to call the prompt - // function so that it can decrypt a key or give us a passphrase. -FindKey: - for { - // See if any of the keys already have a private key available - candidates = candidates[:0] - candidateFingerprints := make(map[string]bool) - - for _, pk := range pubKeys { - if pk.key.PrivateKey == nil { - continue - } - if !pk.key.PrivateKey.Encrypted { - if len(pk.encryptedKey.Key) == 0 { - pk.encryptedKey.Decrypt(pk.key.PrivateKey, config) - } - if len(pk.encryptedKey.Key) == 0 { - continue - } - decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key) - if err != nil && err != errors.ErrKeyIncorrect { - return nil, err - } - if decrypted != nil { - md.DecryptedWith = pk.key - break FindKey - } - } else { - fpr := string(pk.key.PublicKey.Fingerprint[:]) - if v := candidateFingerprints[fpr]; v { - continue - } - candidates = append(candidates, pk.key) - candidateFingerprints[fpr] = true - } - } - - if len(candidates) == 0 && len(symKeys) == 0 { - return nil, errors.ErrKeyIncorrect - } - - if prompt == nil { - return nil, errors.ErrKeyIncorrect - } - - passphrase, err := prompt(candidates, len(symKeys) != 0) - if err != nil { - return nil, err - } - - // Try the symmetric passphrase first - if len(symKeys) != 0 && passphrase != nil { - for _, s := range symKeys { - key, cipherFunc, err := s.Decrypt(passphrase) - if err == nil { - decrypted, err = se.Decrypt(cipherFunc, key) - if err != nil && err != errors.ErrKeyIncorrect { - return nil, err - } - if decrypted != nil { - break FindKey - } - } - - } - } - } - - md.decrypted = decrypted - if err := packets.Push(decrypted); err != nil { - return nil, err - } - return readSignedMessage(packets, md, keyring) -} - -// readSignedMessage reads a possibly signed message if mdin is non-zero then -// that structure is updated and returned. Otherwise a fresh MessageDetails is -// used. 
-func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) { - if mdin == nil { - mdin = new(MessageDetails) - } - md = mdin - - var p packet.Packet - var h hash.Hash - var wrappedHash hash.Hash -FindLiteralData: - for { - p, err = packets.Next() - if err != nil { - return nil, err - } - switch p := p.(type) { - case *packet.Compressed: - if err := packets.Push(p.Body); err != nil { - return nil, err - } - case *packet.OnePassSignature: - if !p.IsLast { - return nil, errors.UnsupportedError("nested signatures") - } - - h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) - if err != nil { - md = nil - return - } - - md.IsSigned = true - md.SignedByKeyId = p.KeyId - keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) - if len(keys) > 0 { - md.SignedBy = &keys[0] - } - case *packet.LiteralData: - md.LiteralData = p - break FindLiteralData - } - } - - if md.SignedBy != nil { - md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md} - } else if md.decrypted != nil { - md.UnverifiedBody = checkReader{md} - } else { - md.UnverifiedBody = md.LiteralData.Body - } - - return md, nil -} - -// hashForSignature returns a pair of hashes that can be used to verify a -// signature. The signature may specify that the contents of the signed message -// should be preprocessed (i.e. to normalize line endings). Thus this function -// returns two hashes. The second should be used to hash the message itself and -// performs any needed preprocessing. -func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { - if !hashId.Available() { - return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId))) - } - h := hashId.New() - - switch sigType { - case packet.SigTypeBinary: - return h, h, nil - case packet.SigTypeText: - return h, NewCanonicalTextHash(h), nil - } - - return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) -} - -// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF -// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger -// MDC checks. -type checkReader struct { - md *MessageDetails -} - -func (cr checkReader) Read(buf []byte) (n int, err error) { - n, err = cr.md.LiteralData.Body.Read(buf) - if err == io.EOF { - mdcErr := cr.md.decrypted.Close() - if mdcErr != nil { - err = mdcErr - } - } - return -} - -// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes -// the data as it is read. When it sees an EOF from the underlying io.Reader -// it parses and checks a trailing Signature packet and triggers any MDC checks. 
-type signatureCheckReader struct { - packets *packet.Reader - h, wrappedHash hash.Hash - md *MessageDetails -} - -func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) { - n, err = scr.md.LiteralData.Body.Read(buf) - scr.wrappedHash.Write(buf[:n]) - if err == io.EOF { - var p packet.Packet - p, scr.md.SignatureError = scr.packets.Next() - if scr.md.SignatureError != nil { - return - } - - var ok bool - if scr.md.Signature, ok = p.(*packet.Signature); ok { - scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) - } else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok { - scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3) - } else { - scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature") - return - } - - // The SymmetricallyEncrypted packet, if any, might have an - // unsigned hash of its own. In order to check this we need to - // close that Reader. - if scr.md.decrypted != nil { - mdcErr := scr.md.decrypted.Close() - if mdcErr != nil { - err = mdcErr - } - } - } - return -} - -// CheckDetachedSignature takes a signed file and a detached signature and -// returns the signer if the signature is valid. If the signer isn't known, -// ErrUnknownIssuer is returned. -func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { - var issuerKeyId uint64 - var hashFunc crypto.Hash - var sigType packet.SignatureType - var keys []Key - var p packet.Packet - - packets := packet.NewReader(signature) - for { - p, err = packets.Next() - if err == io.EOF { - return nil, errors.ErrUnknownIssuer - } - if err != nil { - return nil, err - } - - switch sig := p.(type) { - case *packet.Signature: - if sig.IssuerKeyId == nil { - return nil, errors.StructuralError("signature doesn't have an issuer") - } - issuerKeyId = *sig.IssuerKeyId - hashFunc = sig.Hash - sigType = sig.SigType - case *packet.SignatureV3: - issuerKeyId = sig.IssuerKeyId - hashFunc = sig.Hash - sigType = sig.SigType - default: - return nil, errors.StructuralError("non signature packet found") - } - - keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) - if len(keys) > 0 { - break - } - } - - if len(keys) == 0 { - panic("unreachable") - } - - h, wrappedHash, err := hashForSignature(hashFunc, sigType) - if err != nil { - return nil, err - } - - if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF { - return nil, err - } - - for _, key := range keys { - switch sig := p.(type) { - case *packet.Signature: - err = key.PublicKey.VerifySignature(h, sig) - case *packet.SignatureV3: - err = key.PublicKey.VerifySignatureV3(h, sig) - default: - panic("unreachable") - } - - if err == nil { - return key.Entity, nil - } - } - - return nil, err -} - -// CheckArmoredDetachedSignature performs the same actions as -// CheckDetachedSignature but expects the signature to be armored. -func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { - body, err := readArmored(signature, SignatureType) - if err != nil { - return - } - - return CheckDetachedSignature(keyring, signed, body) -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go b/src/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go deleted file mode 100644 index f53244a1c..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package s2k implements the various OpenPGP string-to-key transforms as -// specified in RFC 4800 section 3.7.1. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. -package s2k // import "golang.org/x/crypto/openpgp/s2k" - -import ( - "crypto" - "hash" - "io" - "strconv" - - "golang.org/x/crypto/openpgp/errors" -) - -// Config collects configuration parameters for s2k key-stretching -// transformatioms. A nil *Config is valid and results in all default -// values. Currently, Config is used only by the Serialize function in -// this package. -type Config struct { - // Hash is the default hash function to be used. If - // nil, SHA1 is used. - Hash crypto.Hash - // S2KCount is only used for symmetric encryption. It - // determines the strength of the passphrase stretching when - // the said passphrase is hashed to produce a key. S2KCount - // should be between 1024 and 65011712, inclusive. If Config - // is nil or S2KCount is 0, the value 65536 used. Not all - // values in the above range can be represented. S2KCount will - // be rounded up to the next representable value if it cannot - // be encoded exactly. When set, it is strongly encrouraged to - // use a value that is at least 65536. See RFC 4880 Section - // 3.7.1.3. - S2KCount int -} - -func (c *Config) hash() crypto.Hash { - if c == nil || uint(c.Hash) == 0 { - // SHA1 is the historical default in this package. - return crypto.SHA1 - } - - return c.Hash -} - -func (c *Config) encodedCount() uint8 { - if c == nil || c.S2KCount == 0 { - return 96 // The common case. Correspoding to 65536 - } - - i := c.S2KCount - switch { - // Behave like GPG. Should we make 65536 the lowest value used? - case i < 1024: - i = 1024 - case i > 65011712: - i = 65011712 - } - - return encodeCount(i) -} - -// encodeCount converts an iterative "count" in the range 1024 to -// 65011712, inclusive, to an encoded count. The return value is the -// octet that is actually stored in the GPG file. encodeCount panics -// if i is not in the above range (encodedCount above takes care to -// pass i in the correct range). See RFC 4880 Section 3.7.7.1. -func encodeCount(i int) uint8 { - if i < 1024 || i > 65011712 { - panic("count arg i outside the required range") - } - - for encoded := 0; encoded < 256; encoded++ { - count := decodeCount(uint8(encoded)) - if count >= i { - return uint8(encoded) - } - } - - return 255 -} - -// decodeCount returns the s2k mode 3 iterative "count" corresponding to -// the encoded octet c. -func decodeCount(c uint8) int { - return (16 + int(c&15)) << (uint32(c>>4) + 6) -} - -// Simple writes to out the result of computing the Simple S2K function (RFC -// 4880, section 3.7.1.1) using the given hash and input passphrase. -func Simple(out []byte, h hash.Hash, in []byte) { - Salted(out, h, in, nil) -} - -var zero [1]byte - -// Salted writes to out the result of computing the Salted S2K function (RFC -// 4880, section 3.7.1.2) using the given hash, input passphrase and salt. 
-func Salted(out []byte, h hash.Hash, in []byte, salt []byte) { - done := 0 - var digest []byte - - for i := 0; done < len(out); i++ { - h.Reset() - for j := 0; j < i; j++ { - h.Write(zero[:]) - } - h.Write(salt) - h.Write(in) - digest = h.Sum(digest[:0]) - n := copy(out[done:], digest) - done += n - } -} - -// Iterated writes to out the result of computing the Iterated and Salted S2K -// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase, -// salt and iteration count. -func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) { - combined := make([]byte, len(in)+len(salt)) - copy(combined, salt) - copy(combined[len(salt):], in) - - if count < len(combined) { - count = len(combined) - } - - done := 0 - var digest []byte - for i := 0; done < len(out); i++ { - h.Reset() - for j := 0; j < i; j++ { - h.Write(zero[:]) - } - written := 0 - for written < count { - if written+len(combined) > count { - todo := count - written - h.Write(combined[:todo]) - written = count - } else { - h.Write(combined) - written += len(combined) - } - } - digest = h.Sum(digest[:0]) - n := copy(out[done:], digest) - done += n - } -} - -// Parse reads a binary specification for a string-to-key transformation from r -// and returns a function which performs that transform. -func Parse(r io.Reader) (f func(out, in []byte), err error) { - var buf [9]byte - - _, err = io.ReadFull(r, buf[:2]) - if err != nil { - return - } - - hash, ok := HashIdToHash(buf[1]) - if !ok { - return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1]))) - } - if !hash.Available() { - return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hash))) - } - h := hash.New() - - switch buf[0] { - case 0: - f := func(out, in []byte) { - Simple(out, h, in) - } - return f, nil - case 1: - _, err = io.ReadFull(r, buf[:8]) - if err != nil { - return - } - f := func(out, in []byte) { - Salted(out, h, in, buf[:8]) - } - return f, nil - case 3: - _, err = io.ReadFull(r, buf[:9]) - if err != nil { - return - } - count := decodeCount(buf[8]) - f := func(out, in []byte) { - Iterated(out, h, in, buf[:8], count) - } - return f, nil - } - - return nil, errors.UnsupportedError("S2K function") -} - -// Serialize salts and stretches the given passphrase and writes the -// resulting key into key. It also serializes an S2K descriptor to -// w. The key stretching can be configured with c, which may be -// nil. In that case, sensible defaults will be used. -func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error { - var buf [11]byte - buf[0] = 3 /* iterated and salted */ - buf[1], _ = HashToHashId(c.hash()) - salt := buf[2:10] - if _, err := io.ReadFull(rand, salt); err != nil { - return err - } - encodedCount := c.encodedCount() - count := decodeCount(encodedCount) - buf[10] = encodedCount - if _, err := w.Write(buf[:]); err != nil { - return err - } - - Iterated(key, c.hash().New(), passphrase, salt, count) - return nil -} - -// hashToHashIdMapping contains pairs relating OpenPGP's hash identifier with -// Go's crypto.Hash type. See RFC 4880, section 9.4. 
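For orientation, a minimal sketch of deriving a key with the `s2k.Serialize` helper removed above (mode 3, iterated and salted). The passphrase, key length, and `S2KCount` value are illustrative assumptions, not values used anywhere in this repository.

```go
package main

import (
	"bytes"
	"crypto"
	"crypto/rand"
	_ "crypto/sha256" // register SHA-256 so Config.Hash is usable
	"fmt"

	"golang.org/x/crypto/openpgp/s2k"
)

func main() {
	// Stretch a passphrase into a 32-byte key and write the matching
	// S2K descriptor (mode byte, hash id, 8-byte salt, count octet) to buf.
	key := make([]byte, 32)
	var buf bytes.Buffer
	cfg := &s2k.Config{Hash: crypto.SHA256, S2KCount: 65536}
	if err := s2k.Serialize(&buf, key, rand.Reader, []byte("correct horse"), cfg); err != nil {
		panic(err)
	}
	fmt.Printf("descriptor: %d bytes, key prefix: %x\n", buf.Len(), key[:4])
}
```

Note that the encoded count octet 96 decodes to 65536 iterations via `decodeCount` above, which is why 65536 is the default when no count is configured.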
-var hashToHashIdMapping = []struct { - id byte - hash crypto.Hash - name string -}{ - {1, crypto.MD5, "MD5"}, - {2, crypto.SHA1, "SHA1"}, - {3, crypto.RIPEMD160, "RIPEMD160"}, - {8, crypto.SHA256, "SHA256"}, - {9, crypto.SHA384, "SHA384"}, - {10, crypto.SHA512, "SHA512"}, - {11, crypto.SHA224, "SHA224"}, -} - -// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP -// hash id. -func HashIdToHash(id byte) (h crypto.Hash, ok bool) { - for _, m := range hashToHashIdMapping { - if m.id == id { - return m.hash, true - } - } - return 0, false -} - -// HashIdToString returns the name of the hash function corresponding to the -// given OpenPGP hash id. -func HashIdToString(id byte) (name string, ok bool) { - for _, m := range hashToHashIdMapping { - if m.id == id { - return m.name, true - } - } - - return "", false -} - -// HashToHashId returns an OpenPGP hash id which corresponds the given Hash. -func HashToHashId(h crypto.Hash) (id byte, ok bool) { - for _, m := range hashToHashIdMapping { - if m.hash == h { - return m.id, true - } - } - return 0, false -} diff --git a/src/vendor/golang.org/x/crypto/openpgp/write.go b/src/vendor/golang.org/x/crypto/openpgp/write.go deleted file mode 100644 index b89d48b81..000000000 --- a/src/vendor/golang.org/x/crypto/openpgp/write.go +++ /dev/null @@ -1,418 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import ( - "crypto" - "hash" - "io" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/armor" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/packet" - "golang.org/x/crypto/openpgp/s2k" -) - -// DetachSign signs message with the private key from signer (which must -// already have been decrypted) and writes the signature to w. -// If config is nil, sensible defaults will be used. -func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return detachSign(w, signer, message, packet.SigTypeBinary, config) -} - -// ArmoredDetachSign signs message with the private key from signer (which -// must already have been decrypted) and writes an armored signature to w. -// If config is nil, sensible defaults will be used. -func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) { - return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config) -} - -// DetachSignText signs message (after canonicalising the line endings) with -// the private key from signer (which must already have been decrypted) and -// writes the signature to w. -// If config is nil, sensible defaults will be used. -func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return detachSign(w, signer, message, packet.SigTypeText, config) -} - -// ArmoredDetachSignText signs message (after canonicalising the line endings) -// with the private key from signer (which must already have been decrypted) -// and writes an armored signature to w. -// If config is nil, sensible defaults will be used. 
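A minimal sketch of the detached-signature helpers whose declarations appear above (`ArmoredDetachSign` and `CheckArmoredDetachedSignature`). The wrapper function names are illustrative assumptions; the sketch presumes a signing entity whose private key has already been decrypted.

```go
package example

import (
	"io"

	"golang.org/x/crypto/openpgp"
)

// signDetached writes an ASCII-armored detached signature for message to w,
// using a signer whose private key is present and already decrypted.
func signDetached(w io.Writer, signer *openpgp.Entity, message io.Reader) error {
	return openpgp.ArmoredDetachSign(w, signer, message, nil)
}

// verifyDetached checks an armored detached signature against the signed data
// and returns the matching entity from the keyring on success.
func verifyDetached(keyring openpgp.EntityList, signed, sig io.Reader) (*openpgp.Entity, error) {
	return openpgp.CheckArmoredDetachedSignature(keyring, signed, sig)
}
```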
-func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return armoredDetachSign(w, signer, message, packet.SigTypeText, config) -} - -func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { - out, err := armor.Encode(w, SignatureType, nil) - if err != nil { - return - } - err = detachSign(out, signer, message, sigType, config) - if err != nil { - return - } - return out.Close() -} - -func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { - if signer.PrivateKey == nil { - return errors.InvalidArgumentError("signing key doesn't have a private key") - } - if signer.PrivateKey.Encrypted { - return errors.InvalidArgumentError("signing key is encrypted") - } - - sig := new(packet.Signature) - sig.SigType = sigType - sig.PubKeyAlgo = signer.PrivateKey.PubKeyAlgo - sig.Hash = config.Hash() - sig.CreationTime = config.Now() - sig.IssuerKeyId = &signer.PrivateKey.KeyId - - h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) - if err != nil { - return - } - io.Copy(wrappedHash, message) - - err = sig.Sign(h, signer.PrivateKey, config) - if err != nil { - return - } - - return sig.Serialize(w) -} - -// FileHints contains metadata about encrypted files. This metadata is, itself, -// encrypted. -type FileHints struct { - // IsBinary can be set to hint that the contents are binary data. - IsBinary bool - // FileName hints at the name of the file that should be written. It's - // truncated to 255 bytes if longer. It may be empty to suggest that the - // file should not be written to disk. It may be equal to "_CONSOLE" to - // suggest the data should not be written to disk. - FileName string - // ModTime contains the modification time of the file, or the zero time if not applicable. - ModTime time.Time -} - -// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. -// The resulting WriteCloser must be closed after the contents of the file have -// been written. -// If config is nil, sensible defaults will be used. -func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - if hints == nil { - hints = &FileHints{} - } - - key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config) - if err != nil { - return - } - w, err := packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config) - if err != nil { - return - } - - literaldata := w - if algo := config.Compression(); algo != packet.CompressionNone { - var compConfig *packet.CompressionConfig - if config != nil { - compConfig = config.CompressionConfig - } - literaldata, err = packet.SerializeCompressed(w, algo, compConfig) - if err != nil { - return - } - } - - var epochSeconds uint32 - if !hints.ModTime.IsZero() { - epochSeconds = uint32(hints.ModTime.Unix()) - } - return packet.SerializeLiteral(literaldata, hints.IsBinary, hints.FileName, epochSeconds) -} - -// intersectPreferences mutates and returns a prefix of a that contains only -// the values in the intersection of a and b. The order of a is preserved. 
-func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) { - var j int - for _, v := range a { - for _, v2 := range b { - if v == v2 { - a[j] = v - j++ - break - } - } - } - - return a[:j] -} - -func hashToHashId(h crypto.Hash) uint8 { - v, ok := s2k.HashToHashId(h) - if !ok { - panic("tried to convert unknown hash") - } - return v -} - -// writeAndSign writes the data as a payload package and, optionally, signs -// it. hints contains optional information, that is also encrypted, -// that aids the recipients in processing the message. The resulting -// WriteCloser must be closed after the contents of the file have been -// written. If config is nil, sensible defaults will be used. -func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - var signer *packet.PrivateKey - if signed != nil { - signKey, ok := signed.signingKey(config.Now()) - if !ok { - return nil, errors.InvalidArgumentError("no valid signing keys") - } - signer = signKey.PrivateKey - if signer == nil { - return nil, errors.InvalidArgumentError("no private key in signing key") - } - if signer.Encrypted { - return nil, errors.InvalidArgumentError("signing key must be decrypted") - } - } - - var hash crypto.Hash - for _, hashId := range candidateHashes { - if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() { - hash = h - break - } - } - - // If the hash specified by config is a candidate, we'll use that. - if configuredHash := config.Hash(); configuredHash.Available() { - for _, hashId := range candidateHashes { - if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash { - hash = h - break - } - } - } - - if hash == 0 { - hashId := candidateHashes[0] - name, ok := s2k.HashIdToString(hashId) - if !ok { - name = "#" + strconv.Itoa(int(hashId)) - } - return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)") - } - - if signer != nil { - ops := &packet.OnePassSignature{ - SigType: packet.SigTypeBinary, - Hash: hash, - PubKeyAlgo: signer.PubKeyAlgo, - KeyId: signer.KeyId, - IsLast: true, - } - if err := ops.Serialize(payload); err != nil { - return nil, err - } - } - - if hints == nil { - hints = &FileHints{} - } - - w := payload - if signer != nil { - // If we need to write a signature packet after the literal - // data then we need to stop literalData from closing - // encryptedData. - w = noOpCloser{w} - - } - var epochSeconds uint32 - if !hints.ModTime.IsZero() { - epochSeconds = uint32(hints.ModTime.Unix()) - } - literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds) - if err != nil { - return nil, err - } - - if signer != nil { - return signatureWriter{payload, literalData, hash, hash.New(), signer, config}, nil - } - return literalData, nil -} - -// Encrypt encrypts a message to a number of recipients and, optionally, signs -// it. hints contains optional information, that is also encrypted, that aids -// the recipients in processing the message. The resulting WriteCloser must -// be closed after the contents of the file have been written. -// If config is nil, sensible defaults will be used. 
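A minimal sketch of calling the `Encrypt` entry point documented above, encrypting to a single recipient without signing and with a nil config (package defaults). The helper name and single-recipient setup are illustrative assumptions.

```go
package example

import (
	"bytes"

	"golang.org/x/crypto/openpgp"
)

// encryptTo encrypts plaintext to one recipient, unsigned, using defaults.
func encryptTo(recipient *openpgp.Entity, plaintext []byte) ([]byte, error) {
	var buf bytes.Buffer
	w, err := openpgp.Encrypt(&buf, []*openpgp.Entity{recipient}, nil, nil, nil)
	if err != nil {
		return nil, err
	}
	if _, err := w.Write(plaintext); err != nil {
		return nil, err
	}
	// Close flushes the literal-data and MDC packets; the ciphertext is not
	// complete until Close returns.
	if err := w.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
```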
-func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - if len(to) == 0 { - return nil, errors.InvalidArgumentError("no encryption recipient provided") - } - - // These are the possible ciphers that we'll use for the message. - candidateCiphers := []uint8{ - uint8(packet.CipherAES128), - uint8(packet.CipherAES256), - uint8(packet.CipherCAST5), - } - // These are the possible hash functions that we'll use for the signature. - candidateHashes := []uint8{ - hashToHashId(crypto.SHA256), - hashToHashId(crypto.SHA384), - hashToHashId(crypto.SHA512), - hashToHashId(crypto.SHA1), - hashToHashId(crypto.RIPEMD160), - } - // In the event that a recipient doesn't specify any supported ciphers - // or hash functions, these are the ones that we assume that every - // implementation supports. - defaultCiphers := candidateCiphers[len(candidateCiphers)-1:] - defaultHashes := candidateHashes[len(candidateHashes)-1:] - - encryptKeys := make([]Key, len(to)) - for i := range to { - var ok bool - encryptKeys[i], ok = to[i].encryptionKey(config.Now()) - if !ok { - return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys") - } - - sig := to[i].primaryIdentity().SelfSignature - - preferredSymmetric := sig.PreferredSymmetric - if len(preferredSymmetric) == 0 { - preferredSymmetric = defaultCiphers - } - preferredHashes := sig.PreferredHash - if len(preferredHashes) == 0 { - preferredHashes = defaultHashes - } - candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric) - candidateHashes = intersectPreferences(candidateHashes, preferredHashes) - } - - if len(candidateCiphers) == 0 || len(candidateHashes) == 0 { - return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms") - } - - cipher := packet.CipherFunction(candidateCiphers[0]) - // If the cipher specified by config is a candidate, we'll use that. - configuredCipher := config.Cipher() - for _, c := range candidateCiphers { - cipherFunc := packet.CipherFunction(c) - if cipherFunc == configuredCipher { - cipher = cipherFunc - break - } - } - - symKey := make([]byte, cipher.KeySize()) - if _, err := io.ReadFull(config.Random(), symKey); err != nil { - return nil, err - } - - for _, key := range encryptKeys { - if err := packet.SerializeEncryptedKey(ciphertext, key.PublicKey, cipher, symKey, config); err != nil { - return nil, err - } - } - - payload, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config) - if err != nil { - return - } - - return writeAndSign(payload, candidateHashes, signed, hints, config) -} - -// Sign signs a message. The resulting WriteCloser must be closed after the -// contents of the file have been written. hints contains optional information -// that aids the recipients in processing the message. -// If config is nil, sensible defaults will be used. -func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) { - if signed == nil { - return nil, errors.InvalidArgumentError("no signer provided") - } - - // These are the possible hash functions that we'll use for the signature. 
- candidateHashes := []uint8{ - hashToHashId(crypto.SHA256), - hashToHashId(crypto.SHA384), - hashToHashId(crypto.SHA512), - hashToHashId(crypto.SHA1), - hashToHashId(crypto.RIPEMD160), - } - defaultHashes := candidateHashes[len(candidateHashes)-1:] - preferredHashes := signed.primaryIdentity().SelfSignature.PreferredHash - if len(preferredHashes) == 0 { - preferredHashes = defaultHashes - } - candidateHashes = intersectPreferences(candidateHashes, preferredHashes) - return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, config) -} - -// signatureWriter hashes the contents of a message while passing it along to -// literalData. When closed, it closes literalData, writes a signature packet -// to encryptedData and then also closes encryptedData. -type signatureWriter struct { - encryptedData io.WriteCloser - literalData io.WriteCloser - hashType crypto.Hash - h hash.Hash - signer *packet.PrivateKey - config *packet.Config -} - -func (s signatureWriter) Write(data []byte) (int, error) { - s.h.Write(data) - return s.literalData.Write(data) -} - -func (s signatureWriter) Close() error { - sig := &packet.Signature{ - SigType: packet.SigTypeBinary, - PubKeyAlgo: s.signer.PubKeyAlgo, - Hash: s.hashType, - CreationTime: s.config.Now(), - IssuerKeyId: &s.signer.KeyId, - } - - if err := sig.Sign(s.h, s.signer, s.config); err != nil { - return err - } - if err := s.literalData.Close(); err != nil { - return err - } - if err := sig.Serialize(s.encryptedData); err != nil { - return err - } - return s.encryptedData.Close() -} - -// noOpCloser is like an io.NopCloser, but for an io.Writer. -// TODO: we have two of these in OpenPGP packages alone. This probably needs -// to be promoted somewhere more common. -type noOpCloser struct { - w io.Writer -} - -func (c noOpCloser) Write(data []byte) (n int, err error) { - return c.w.Write(data) -} - -func (c noOpCloser) Close() error { - return nil -} diff --git a/src/vendor/golang.org/x/sync/LICENSE b/src/vendor/golang.org/x/sync/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/src/vendor/golang.org/x/sync/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/vendor/golang.org/x/sync/PATENTS b/src/vendor/golang.org/x/sync/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/src/vendor/golang.org/x/sync/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/src/vendor/golang.org/x/sync/errgroup/errgroup.go b/src/vendor/golang.org/x/sync/errgroup/errgroup.go deleted file mode 100644 index 4c0850a45..000000000 --- a/src/vendor/golang.org/x/sync/errgroup/errgroup.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package errgroup provides synchronization, error propagation, and Context -// cancelation for groups of goroutines working on subtasks of a common task. -package errgroup - -import ( - "context" - "fmt" - "sync" -) - -type token struct{} - -// A Group is a collection of goroutines working on subtasks that are part of -// the same overall task. -// -// A zero Group is valid, has no limit on the number of active goroutines, -// and does not cancel on error. -type Group struct { - cancel func() - - wg sync.WaitGroup - - sem chan token - - errOnce sync.Once - err error -} - -func (g *Group) done() { - if g.sem != nil { - <-g.sem - } - g.wg.Done() -} - -// WithContext returns a new Group and an associated Context derived from ctx. -// -// The derived Context is canceled the first time a function passed to Go -// returns a non-nil error or the first time Wait returns, whichever occurs -// first. 
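A minimal usage sketch of the errgroup API whose package documentation appears above: one goroutine per subtask, with the derived context canceled on the first failure. The `fetchAll` helper and the use of net/http are illustrative assumptions, not code from this repository.

```go
package example

import (
	"context"
	"net/http"

	"golang.org/x/sync/errgroup"
)

// fetchAll issues one GET per URL and returns the first error, if any.
func fetchAll(ctx context.Context, urls []string) error {
	g, ctx := errgroup.WithContext(ctx)
	for _, url := range urls {
		url := url // capture loop variable for the closure (pre-Go 1.22)
		g.Go(func() error {
			req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
			if err != nil {
				return err
			}
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				return err
			}
			return resp.Body.Close()
		})
	}
	// Wait blocks for all goroutines and returns the first non-nil error;
	// the derived ctx is canceled as soon as any subtask fails.
	return g.Wait()
}
```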
-func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := context.WithCancel(ctx) - return &Group{cancel: cancel}, ctx -} - -// Wait blocks until all function calls from the Go method have returned, then -// returns the first non-nil error (if any) from them. -func (g *Group) Wait() error { - g.wg.Wait() - if g.cancel != nil { - g.cancel() - } - return g.err -} - -// Go calls the given function in a new goroutine. -// It blocks until the new goroutine can be added without the number of -// active goroutines in the group exceeding the configured limit. -// -// The first call to return a non-nil error cancels the group; its error will be -// returned by Wait. -func (g *Group) Go(f func() error) { - if g.sem != nil { - g.sem <- token{} - } - - g.wg.Add(1) - go func() { - defer g.done() - - if err := f(); err != nil { - g.errOnce.Do(func() { - g.err = err - if g.cancel != nil { - g.cancel() - } - }) - } - }() -} - -// TryGo calls the given function in a new goroutine only if the number of -// active goroutines in the group is currently below the configured limit. -// -// The return value reports whether the goroutine was started. -func (g *Group) TryGo(f func() error) bool { - if g.sem != nil { - select { - case g.sem <- token{}: - // Note: this allows barging iff channels in general allow barging. - default: - return false - } - } - - g.wg.Add(1) - go func() { - defer g.done() - - if err := f(); err != nil { - g.errOnce.Do(func() { - g.err = err - if g.cancel != nil { - g.cancel() - } - }) - } - }() - return true -} - -// SetLimit limits the number of active goroutines in this group to at most n. -// A negative value indicates no limit. -// -// Any subsequent call to the Go method will block until it can add an active -// goroutine without exceeding the configured limit. -// -// The limit must not be modified while any goroutines in the group are active. -func (g *Group) SetLimit(n int) { - if n < 0 { - g.sem = nil - return - } - if len(g.sem) != 0 { - panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem))) - } - g.sem = make(chan token, n) -} diff --git a/src/vendor/golang.org/x/sync/semaphore/semaphore.go b/src/vendor/golang.org/x/sync/semaphore/semaphore.go deleted file mode 100644 index 30f632c57..000000000 --- a/src/vendor/golang.org/x/sync/semaphore/semaphore.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package semaphore provides a weighted semaphore implementation. -package semaphore // import "golang.org/x/sync/semaphore" - -import ( - "container/list" - "context" - "sync" -) - -type waiter struct { - n int64 - ready chan<- struct{} // Closed when semaphore acquired. -} - -// NewWeighted creates a new weighted semaphore with the given -// maximum combined weight for concurrent access. -func NewWeighted(n int64) *Weighted { - w := &Weighted{size: n} - return w -} - -// Weighted provides a way to bound concurrent access to a resource. -// The callers can request access with a given weight. -type Weighted struct { - size int64 - cur int64 - mu sync.Mutex - waiters list.List -} - -// Acquire acquires the semaphore with a weight of n, blocking until resources -// are available or ctx is done. On success, returns nil. On failure, returns -// ctx.Err() and leaves the semaphore unchanged. -// -// If ctx is already done, Acquire may still succeed without blocking. 
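A minimal sketch of the weighted-semaphore pattern described in the `Acquire` documentation above, bounding concurrency to a fixed number of in-flight work items. The `process` helper and the limit of four are illustrative assumptions.

```go
package example

import (
	"context"

	"golang.org/x/sync/semaphore"
)

// process runs at most four work items concurrently.
func process(ctx context.Context, items []string) error {
	const maxInFlight = 4
	sem := semaphore.NewWeighted(maxInFlight)
	for _, it := range items {
		// Acquire blocks until a slot is free or ctx is done.
		if err := sem.Acquire(ctx, 1); err != nil {
			return err
		}
		go func(it string) {
			defer sem.Release(1)
			_ = it // do the actual work here
		}(it)
	}
	// Acquiring the full weight waits for all outstanding goroutines.
	return sem.Acquire(ctx, maxInFlight)
}
```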
-func (s *Weighted) Acquire(ctx context.Context, n int64) error { - s.mu.Lock() - if s.size-s.cur >= n && s.waiters.Len() == 0 { - s.cur += n - s.mu.Unlock() - return nil - } - - if n > s.size { - // Don't make other Acquire calls block on one that's doomed to fail. - s.mu.Unlock() - <-ctx.Done() - return ctx.Err() - } - - ready := make(chan struct{}) - w := waiter{n: n, ready: ready} - elem := s.waiters.PushBack(w) - s.mu.Unlock() - - select { - case <-ctx.Done(): - err := ctx.Err() - s.mu.Lock() - select { - case <-ready: - // Acquired the semaphore after we were canceled. Rather than trying to - // fix up the queue, just pretend we didn't notice the cancelation. - err = nil - default: - isFront := s.waiters.Front() == elem - s.waiters.Remove(elem) - // If we're at the front and there're extra tokens left, notify other waiters. - if isFront && s.size > s.cur { - s.notifyWaiters() - } - } - s.mu.Unlock() - return err - - case <-ready: - return nil - } -} - -// TryAcquire acquires the semaphore with a weight of n without blocking. -// On success, returns true. On failure, returns false and leaves the semaphore unchanged. -func (s *Weighted) TryAcquire(n int64) bool { - s.mu.Lock() - success := s.size-s.cur >= n && s.waiters.Len() == 0 - if success { - s.cur += n - } - s.mu.Unlock() - return success -} - -// Release releases the semaphore with a weight of n. -func (s *Weighted) Release(n int64) { - s.mu.Lock() - s.cur -= n - if s.cur < 0 { - s.mu.Unlock() - panic("semaphore: released more than held") - } - s.notifyWaiters() - s.mu.Unlock() -} - -func (s *Weighted) notifyWaiters() { - for { - next := s.waiters.Front() - if next == nil { - break // No more waiters blocked. - } - - w := next.Value.(waiter) - if s.size-s.cur < w.n { - // Not enough tokens for the next waiter. We could keep going (to try to - // find a waiter with a smaller request), but under load that could cause - // starvation for large requests; instead, we leave all remaining waiters - // blocked. - // - // Consider a semaphore used as a read-write lock, with N tokens, N - // readers, and one writer. Each reader can Acquire(1) to obtain a read - // lock. The writer can Acquire(N) to obtain a write lock, excluding all - // of the readers. If we allow the readers to jump ahead in the queue, - // the writer will starve — there is always one token available for every - // reader. - break - } - - s.cur += w.n - s.waiters.Remove(next) - close(w.ready) - } -} diff --git a/src/vendor/golang.org/x/sys/execabs/execabs.go b/src/vendor/golang.org/x/sys/execabs/execabs.go deleted file mode 100644 index b981cfbb4..000000000 --- a/src/vendor/golang.org/x/sys/execabs/execabs.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package execabs is a drop-in replacement for os/exec -// that requires PATH lookups to find absolute paths. -// That is, execabs.Command("cmd") runs the same PATH lookup -// as exec.Command("cmd"), but if the result is a path -// which is relative, the Run and Start methods will report -// an error instead of running the executable. -// -// See https://blog.golang.org/path-security for more information -// about when it may be necessary or appropriate to use this package. -package execabs - -import ( - "context" - "fmt" - "os/exec" - "path/filepath" - "reflect" - "unsafe" -) - -// ErrNotFound is the error resulting if a path search failed to find an executable file. 
-// It is an alias for exec.ErrNotFound. -var ErrNotFound = exec.ErrNotFound - -// Cmd represents an external command being prepared or run. -// It is an alias for exec.Cmd. -type Cmd = exec.Cmd - -// Error is returned by LookPath when it fails to classify a file as an executable. -// It is an alias for exec.Error. -type Error = exec.Error - -// An ExitError reports an unsuccessful exit by a command. -// It is an alias for exec.ExitError. -type ExitError = exec.ExitError - -func relError(file, path string) error { - return fmt.Errorf("%s resolves to executable in current directory (.%c%s)", file, filepath.Separator, path) -} - -// LookPath searches for an executable named file in the directories -// named by the PATH environment variable. If file contains a slash, -// it is tried directly and the PATH is not consulted. The result will be -// an absolute path. -// -// LookPath differs from exec.LookPath in its handling of PATH lookups, -// which are used for file names without slashes. If exec.LookPath's -// PATH lookup would have returned an executable from the current directory, -// LookPath instead returns an error. -func LookPath(file string) (string, error) { - path, err := exec.LookPath(file) - if err != nil && !isGo119ErrDot(err) { - return "", err - } - if filepath.Base(file) == file && !filepath.IsAbs(path) { - return "", relError(file, path) - } - return path, nil -} - -func fixCmd(name string, cmd *exec.Cmd) { - if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) { - // exec.Command was called with a bare binary name and - // exec.LookPath returned a path which is not absolute. - // Set cmd.lookPathErr and clear cmd.Path so that it - // cannot be run. - lookPathErr := (*error)(unsafe.Pointer(reflect.ValueOf(cmd).Elem().FieldByName("lookPathErr").Addr().Pointer())) - if *lookPathErr == nil { - *lookPathErr = relError(name, cmd.Path) - } - cmd.Path = "" - } -} - -// CommandContext is like Command but includes a context. -// -// The provided context is used to kill the process (by calling os.Process.Kill) -// if the context becomes done before the command completes on its own. -func CommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd { - cmd := exec.CommandContext(ctx, name, arg...) - fixCmd(name, cmd) - return cmd - -} - -// Command returns the Cmd struct to execute the named program with the given arguments. -// See exec.Command for most details. -// -// Command differs from exec.Command in its handling of PATH lookups, -// which are used when the program name contains no slashes. -// If exec.Command would have returned an exec.Cmd configured to run an -// executable from the current directory, Command instead -// returns an exec.Cmd that will return an error from Start or Run. -func Command(name string, arg ...string) *exec.Cmd { - cmd := exec.Command(name, arg...) - fixCmd(name, cmd) - return cmd -} diff --git a/src/vendor/golang.org/x/sys/execabs/execabs_go118.go b/src/vendor/golang.org/x/sys/execabs/execabs_go118.go deleted file mode 100644 index 6ab5f5089..000000000 --- a/src/vendor/golang.org/x/sys/execabs/execabs_go118.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.19 -// +build !go1.19 - -package execabs - -func isGo119ErrDot(err error) bool { - return false -} diff --git a/src/vendor/golang.org/x/sys/execabs/execabs_go119.go b/src/vendor/golang.org/x/sys/execabs/execabs_go119.go deleted file mode 100644 index 1e7a9ada0..000000000 --- a/src/vendor/golang.org/x/sys/execabs/execabs_go119.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.19 -// +build go1.19 - -package execabs - -import "strings" - -func isGo119ErrDot(err error) bool { - // TODO: return errors.Is(err, exec.ErrDot) - return strings.Contains(err.Error(), "current directory") -} diff --git a/src/vendor/golang.org/x/text/encoding/encoding.go b/src/vendor/golang.org/x/text/encoding/encoding.go deleted file mode 100644 index a0bd7cd4d..000000000 --- a/src/vendor/golang.org/x/text/encoding/encoding.go +++ /dev/null @@ -1,335 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package encoding defines an interface for character encodings, such as Shift -// JIS and Windows 1252, that can convert to and from UTF-8. -// -// Encoding implementations are provided in other packages, such as -// golang.org/x/text/encoding/charmap and -// golang.org/x/text/encoding/japanese. -package encoding // import "golang.org/x/text/encoding" - -import ( - "errors" - "io" - "strconv" - "unicode/utf8" - - "golang.org/x/text/encoding/internal/identifier" - "golang.org/x/text/transform" -) - -// TODO: -// - There seems to be some inconsistency in when decoders return errors -// and when not. Also documentation seems to suggest they shouldn't return -// errors at all (except for UTF-16). -// - Encoders seem to rely on or at least benefit from the input being in NFC -// normal form. Perhaps add an example how users could prepare their output. - -// Encoding is a character set encoding that can be transformed to and from -// UTF-8. -type Encoding interface { - // NewDecoder returns a Decoder. - NewDecoder() *Decoder - - // NewEncoder returns an Encoder. - NewEncoder() *Encoder -} - -// A Decoder converts bytes to UTF-8. It implements transform.Transformer. -// -// Transforming source bytes that are not of that encoding will not result in an -// error per se. Each byte that cannot be transcoded will be represented in the -// output by the UTF-8 encoding of '\uFFFD', the replacement rune. -type Decoder struct { - transform.Transformer - - // This forces external creators of Decoders to use names in struct - // initializers, allowing for future extendibility without having to break - // code. - _ struct{} -} - -// Bytes converts the given encoded bytes to UTF-8. It returns the converted -// bytes or nil, err if any error occurred. -func (d *Decoder) Bytes(b []byte) ([]byte, error) { - b, _, err := transform.Bytes(d, b) - if err != nil { - return nil, err - } - return b, nil -} - -// String converts the given encoded string to UTF-8. It returns the converted -// string or "", err if any error occurred. -func (d *Decoder) String(s string) (string, error) { - s, _, err := transform.String(d, s) - if err != nil { - return "", err - } - return s, nil -} - -// Reader wraps another Reader to decode its bytes. -// -// The Decoder may not be used for any other operation as long as the returned -// Reader is in use. 
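A minimal sketch of the `Decoder` API described above, converting legacy-encoded bytes to UTF-8. It assumes the sibling package golang.org/x/text/encoding/charmap (not shown in this diff) as the source of a concrete `Encoding`; the helper name is illustrative.

```go
package example

import (
	"golang.org/x/text/encoding/charmap"
)

// windows1252ToUTF8 converts Windows-1252 bytes to a UTF-8 string. Bytes that
// cannot be decoded are replaced with '\uFFFD' rather than causing an error.
func windows1252ToUTF8(b []byte) (string, error) {
	out, err := charmap.Windows1252.NewDecoder().Bytes(b)
	if err != nil {
		return "", err
	}
	return string(out), nil
}
```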
-func (d *Decoder) Reader(r io.Reader) io.Reader { - return transform.NewReader(r, d) -} - -// An Encoder converts bytes from UTF-8. It implements transform.Transformer. -// -// Each rune that cannot be transcoded will result in an error. In this case, -// the transform will consume all source byte up to, not including the offending -// rune. Transforming source bytes that are not valid UTF-8 will be replaced by -// `\uFFFD`. To return early with an error instead, use transform.Chain to -// preprocess the data with a UTF8Validator. -type Encoder struct { - transform.Transformer - - // This forces external creators of Encoders to use names in struct - // initializers, allowing for future extendibility without having to break - // code. - _ struct{} -} - -// Bytes converts bytes from UTF-8. It returns the converted bytes or nil, err if -// any error occurred. -func (e *Encoder) Bytes(b []byte) ([]byte, error) { - b, _, err := transform.Bytes(e, b) - if err != nil { - return nil, err - } - return b, nil -} - -// String converts a string from UTF-8. It returns the converted string or -// "", err if any error occurred. -func (e *Encoder) String(s string) (string, error) { - s, _, err := transform.String(e, s) - if err != nil { - return "", err - } - return s, nil -} - -// Writer wraps another Writer to encode its UTF-8 output. -// -// The Encoder may not be used for any other operation as long as the returned -// Writer is in use. -func (e *Encoder) Writer(w io.Writer) io.Writer { - return transform.NewWriter(w, e) -} - -// ASCIISub is the ASCII substitute character, as recommended by -// https://unicode.org/reports/tr36/#Text_Comparison -const ASCIISub = '\x1a' - -// Nop is the nop encoding. Its transformed bytes are the same as the source -// bytes; it does not replace invalid UTF-8 sequences. -var Nop Encoding = nop{} - -type nop struct{} - -func (nop) NewDecoder() *Decoder { - return &Decoder{Transformer: transform.Nop} -} -func (nop) NewEncoder() *Encoder { - return &Encoder{Transformer: transform.Nop} -} - -// Replacement is the replacement encoding. Decoding from the replacement -// encoding yields a single '\uFFFD' replacement rune. Encoding from UTF-8 to -// the replacement encoding yields the same as the source bytes except that -// invalid UTF-8 is converted to '\uFFFD'. -// -// It is defined at http://encoding.spec.whatwg.org/#replacement -var Replacement Encoding = replacement{} - -type replacement struct{} - -func (replacement) NewDecoder() *Decoder { - return &Decoder{Transformer: replacementDecoder{}} -} - -func (replacement) NewEncoder() *Encoder { - return &Encoder{Transformer: replacementEncoder{}} -} - -func (replacement) ID() (mib identifier.MIB, other string) { - return identifier.Replacement, "" -} - -type replacementDecoder struct{ transform.NopResetter } - -func (replacementDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - if len(dst) < 3 { - return 0, 0, transform.ErrShortDst - } - if atEOF { - const fffd = "\ufffd" - dst[0] = fffd[0] - dst[1] = fffd[1] - dst[2] = fffd[2] - nDst = 3 - } - return nDst, len(src), nil -} - -type replacementEncoder struct{ transform.NopResetter } - -func (replacementEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - r, size := rune(0), 0 - - for ; nSrc < len(src); nSrc += size { - r = rune(src[nSrc]) - - // Decode a 1-byte rune. - if r < utf8.RuneSelf { - size = 1 - - } else { - // Decode a multi-byte rune. 
- r, size = utf8.DecodeRune(src[nSrc:]) - if size == 1 { - // All valid runes of size 1 (those below utf8.RuneSelf) were - // handled above. We have invalid UTF-8 or we haven't seen the - // full character yet. - if !atEOF && !utf8.FullRune(src[nSrc:]) { - err = transform.ErrShortSrc - break - } - r = '\ufffd' - } - } - - if nDst+utf8.RuneLen(r) > len(dst) { - err = transform.ErrShortDst - break - } - nDst += utf8.EncodeRune(dst[nDst:], r) - } - return nDst, nSrc, err -} - -// HTMLEscapeUnsupported wraps encoders to replace source runes outside the -// repertoire of the destination encoding with HTML escape sequences. -// -// This wrapper exists to comply to URL and HTML forms requiring a -// non-terminating legacy encoder. The produced sequences may lead to data -// loss as they are indistinguishable from legitimate input. To avoid this -// issue, use UTF-8 encodings whenever possible. -func HTMLEscapeUnsupported(e *Encoder) *Encoder { - return &Encoder{Transformer: &errorHandler{e, errorToHTML}} -} - -// ReplaceUnsupported wraps encoders to replace source runes outside the -// repertoire of the destination encoding with an encoding-specific -// replacement. -// -// This wrapper is only provided for backwards compatibility and legacy -// handling. Its use is strongly discouraged. Use UTF-8 whenever possible. -func ReplaceUnsupported(e *Encoder) *Encoder { - return &Encoder{Transformer: &errorHandler{e, errorToReplacement}} -} - -type errorHandler struct { - *Encoder - handler func(dst []byte, r rune, err repertoireError) (n int, ok bool) -} - -// TODO: consider making this error public in some form. -type repertoireError interface { - Replacement() byte -} - -func (h errorHandler) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - nDst, nSrc, err = h.Transformer.Transform(dst, src, atEOF) - for err != nil { - rerr, ok := err.(repertoireError) - if !ok { - return nDst, nSrc, err - } - r, sz := utf8.DecodeRune(src[nSrc:]) - n, ok := h.handler(dst[nDst:], r, rerr) - if !ok { - return nDst, nSrc, transform.ErrShortDst - } - err = nil - nDst += n - if nSrc += sz; nSrc < len(src) { - var dn, sn int - dn, sn, err = h.Transformer.Transform(dst[nDst:], src[nSrc:], atEOF) - nDst += dn - nSrc += sn - } - } - return nDst, nSrc, err -} - -func errorToHTML(dst []byte, r rune, err repertoireError) (n int, ok bool) { - buf := [8]byte{} - b := strconv.AppendUint(buf[:0], uint64(r), 10) - if n = len(b) + len("&#;"); n >= len(dst) { - return 0, false - } - dst[0] = '&' - dst[1] = '#' - dst[copy(dst[2:], b)+2] = ';' - return n, true -} - -func errorToReplacement(dst []byte, r rune, err repertoireError) (n int, ok bool) { - if len(dst) == 0 { - return 0, false - } - dst[0] = err.Replacement() - return 1, true -} - -// ErrInvalidUTF8 means that a transformer encountered invalid UTF-8. -var ErrInvalidUTF8 = errors.New("encoding: invalid UTF-8") - -// UTF8Validator is a transformer that returns ErrInvalidUTF8 on the first -// input byte that is not valid UTF-8. -var UTF8Validator transform.Transformer = utf8Validator{} - -type utf8Validator struct{ transform.NopResetter } - -func (utf8Validator) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - n := len(src) - if n > len(dst) { - n = len(dst) - } - for i := 0; i < n; { - if c := src[i]; c < utf8.RuneSelf { - dst[i] = c - i++ - continue - } - _, size := utf8.DecodeRune(src[i:]) - if size == 1 { - // All valid runes of size 1 (those below utf8.RuneSelf) were - // handled above. 
We have invalid UTF-8 or we haven't seen the - // full character yet. - err = ErrInvalidUTF8 - if !atEOF && !utf8.FullRune(src[i:]) { - err = transform.ErrShortSrc - } - return i, i, err - } - if i+size > len(dst) { - return i, i, transform.ErrShortDst - } - for ; size > 0; size-- { - dst[i] = src[i] - i++ - } - } - if len(src) > len(dst) { - err = transform.ErrShortDst - } - return n, n, err -} diff --git a/src/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go b/src/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go deleted file mode 100644 index 5c9b85c28..000000000 --- a/src/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:generate go run gen.go - -// Package identifier defines the contract between implementations of Encoding -// and Index by defining identifiers that uniquely identify standardized coded -// character sets (CCS) and character encoding schemes (CES), which we will -// together refer to as encodings, for which Encoding implementations provide -// converters to and from UTF-8. This package is typically only of concern to -// implementers of Indexes and Encodings. -// -// One part of the identifier is the MIB code, which is defined by IANA and -// uniquely identifies a CCS or CES. Each code is associated with data that -// references authorities, official documentation as well as aliases and MIME -// names. -// -// Not all CESs are covered by the IANA registry. The "other" string that is -// returned by ID can be used to identify other character sets or versions of -// existing ones. -// -// It is recommended that each package that provides a set of Encodings provide -// the All and Common variables to reference all supported encodings and -// commonly used subset. This allows Index implementations to include all -// available encodings without explicitly referencing or knowing about them. -package identifier - -// Note: this package is internal, but could be made public if there is a need -// for writing third-party Indexes and Encodings. - -// References: -// - http://source.icu-project.org/repos/icu/icu/trunk/source/data/mappings/convrtrs.txt -// - http://www.iana.org/assignments/character-sets/character-sets.xhtml -// - http://www.iana.org/assignments/ianacharset-mib/ianacharset-mib -// - http://www.ietf.org/rfc/rfc2978.txt -// - https://www.unicode.org/reports/tr22/ -// - http://www.w3.org/TR/encoding/ -// - https://encoding.spec.whatwg.org/ -// - https://encoding.spec.whatwg.org/encodings.json -// - https://tools.ietf.org/html/rfc6657#section-5 - -// Interface can be implemented by Encodings to define the CCS or CES for which -// it implements conversions. -type Interface interface { - // ID returns an encoding identifier. Exactly one of the mib and other - // values should be non-zero. - // - // In the usual case it is only necessary to indicate the MIB code. The - // other string can be used to specify encodings for which there is no MIB, - // such as "x-mac-dingbat". - // - // The other string may only contain the characters a-z, A-Z, 0-9, - and _. - ID() (mib MIB, other string) - - // NOTE: the restrictions on the encoding are to allow extending the syntax - // with additional information such as versions, vendors and other variants. -} - -// A MIB identifies an encoding. 
It is derived from the IANA MIB codes and adds -// some identifiers for some encodings that are not covered by the IANA -// standard. -// -// See http://www.iana.org/assignments/ianacharset-mib. -type MIB uint16 - -// These additional MIB types are not defined in IANA. They are added because -// they are common and defined within the text repo. -const ( - // Unofficial marks the start of encodings not registered by IANA. - Unofficial MIB = 10000 + iota - - // Replacement is the WhatWG replacement encoding. - Replacement - - // XUserDefined is the code for x-user-defined. - XUserDefined - - // MacintoshCyrillic is the code for x-mac-cyrillic. - MacintoshCyrillic -) diff --git a/src/vendor/golang.org/x/text/encoding/internal/identifier/mib.go b/src/vendor/golang.org/x/text/encoding/internal/identifier/mib.go deleted file mode 100644 index 351fb86e2..000000000 --- a/src/vendor/golang.org/x/text/encoding/internal/identifier/mib.go +++ /dev/null @@ -1,1627 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -package identifier - -const ( - // ASCII is the MIB identifier with IANA name US-ASCII (MIME: US-ASCII). - // - // ANSI X3.4-1986 - // Reference: RFC2046 - ASCII MIB = 3 - - // ISOLatin1 is the MIB identifier with IANA name ISO_8859-1:1987 (MIME: ISO-8859-1). - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISOLatin1 MIB = 4 - - // ISOLatin2 is the MIB identifier with IANA name ISO_8859-2:1987 (MIME: ISO-8859-2). - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISOLatin2 MIB = 5 - - // ISOLatin3 is the MIB identifier with IANA name ISO_8859-3:1988 (MIME: ISO-8859-3). - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISOLatin3 MIB = 6 - - // ISOLatin4 is the MIB identifier with IANA name ISO_8859-4:1988 (MIME: ISO-8859-4). - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISOLatin4 MIB = 7 - - // ISOLatinCyrillic is the MIB identifier with IANA name ISO_8859-5:1988 (MIME: ISO-8859-5). - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISOLatinCyrillic MIB = 8 - - // ISOLatinArabic is the MIB identifier with IANA name ISO_8859-6:1987 (MIME: ISO-8859-6). - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISOLatinArabic MIB = 9 - - // ISOLatinGreek is the MIB identifier with IANA name ISO_8859-7:1987 (MIME: ISO-8859-7). - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1947 - // Reference: RFC1345 - ISOLatinGreek MIB = 10 - - // ISOLatinHebrew is the MIB identifier with IANA name ISO_8859-8:1988 (MIME: ISO-8859-8). - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISOLatinHebrew MIB = 11 - - // ISOLatin5 is the MIB identifier with IANA name ISO_8859-9:1989 (MIME: ISO-8859-9). 
- // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISOLatin5 MIB = 12 - - // ISOLatin6 is the MIB identifier with IANA name ISO-8859-10 (MIME: ISO-8859-10). - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISOLatin6 MIB = 13 - - // ISOTextComm is the MIB identifier with IANA name ISO_6937-2-add. - // - // ISO-IR: International Register of Escape Sequences and ISO 6937-2:1983 - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISOTextComm MIB = 14 - - // HalfWidthKatakana is the MIB identifier with IANA name JIS_X0201. - // - // JIS X 0201-1976. One byte only, this is equivalent to - // JIS/Roman (similar to ASCII) plus eight-bit half-width - // Katakana - // Reference: RFC1345 - HalfWidthKatakana MIB = 15 - - // JISEncoding is the MIB identifier with IANA name JIS_Encoding. - // - // JIS X 0202-1991. Uses ISO 2022 escape sequences to - // shift code sets as documented in JIS X 0202-1991. - JISEncoding MIB = 16 - - // ShiftJIS is the MIB identifier with IANA name Shift_JIS (MIME: Shift_JIS). - // - // This charset is an extension of csHalfWidthKatakana by - // adding graphic characters in JIS X 0208. The CCS's are - // JIS X0201:1997 and JIS X0208:1997. The - // complete definition is shown in Appendix 1 of JIS - // X0208:1997. - // This charset can be used for the top-level media type "text". - ShiftJIS MIB = 17 - - // EUCPkdFmtJapanese is the MIB identifier with IANA name Extended_UNIX_Code_Packed_Format_for_Japanese (MIME: EUC-JP). - // - // Standardized by OSF, UNIX International, and UNIX Systems - // Laboratories Pacific. Uses ISO 2022 rules to select - // code set 0: US-ASCII (a single 7-bit byte set) - // code set 1: JIS X0208-1990 (a double 8-bit byte set) - // restricted to A0-FF in both bytes - // code set 2: Half Width Katakana (a single 7-bit byte set) - // requiring SS2 as the character prefix - // code set 3: JIS X0212-1990 (a double 7-bit byte set) - // restricted to A0-FF in both bytes - // requiring SS3 as the character prefix - EUCPkdFmtJapanese MIB = 18 - - // EUCFixWidJapanese is the MIB identifier with IANA name Extended_UNIX_Code_Fixed_Width_for_Japanese. - // - // Used in Japan. Each character is 2 octets. - // code set 0: US-ASCII (a single 7-bit byte set) - // 1st byte = 00 - // 2nd byte = 20-7E - // code set 1: JIS X0208-1990 (a double 7-bit byte set) - // restricted to A0-FF in both bytes - // code set 2: Half Width Katakana (a single 7-bit byte set) - // 1st byte = 00 - // 2nd byte = A0-FF - // code set 3: JIS X0212-1990 (a double 7-bit byte set) - // restricted to A0-FF in - // the first byte - // and 21-7E in the second byte - EUCFixWidJapanese MIB = 19 - - // ISO4UnitedKingdom is the MIB identifier with IANA name BS_4730. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO4UnitedKingdom MIB = 20 - - // ISO11SwedishForNames is the MIB identifier with IANA name SEN_850200_C. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO11SwedishForNames MIB = 21 - - // ISO15Italian is the MIB identifier with IANA name IT. 
- // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO15Italian MIB = 22 - - // ISO17Spanish is the MIB identifier with IANA name ES. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO17Spanish MIB = 23 - - // ISO21German is the MIB identifier with IANA name DIN_66003. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO21German MIB = 24 - - // ISO60Norwegian1 is the MIB identifier with IANA name NS_4551-1. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO60Norwegian1 MIB = 25 - - // ISO69French is the MIB identifier with IANA name NF_Z_62-010. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO69French MIB = 26 - - // ISO10646UTF1 is the MIB identifier with IANA name ISO-10646-UTF-1. - // - // Universal Transfer Format (1), this is the multibyte - // encoding, that subsets ASCII-7. It does not have byte - // ordering issues. - ISO10646UTF1 MIB = 27 - - // ISO646basic1983 is the MIB identifier with IANA name ISO_646.basic:1983. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO646basic1983 MIB = 28 - - // INVARIANT is the MIB identifier with IANA name INVARIANT. - // - // Reference: RFC1345 - INVARIANT MIB = 29 - - // ISO2IntlRefVersion is the MIB identifier with IANA name ISO_646.irv:1983. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO2IntlRefVersion MIB = 30 - - // NATSSEFI is the MIB identifier with IANA name NATS-SEFI. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - NATSSEFI MIB = 31 - - // NATSSEFIADD is the MIB identifier with IANA name NATS-SEFI-ADD. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - NATSSEFIADD MIB = 32 - - // NATSDANO is the MIB identifier with IANA name NATS-DANO. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - NATSDANO MIB = 33 - - // NATSDANOADD is the MIB identifier with IANA name NATS-DANO-ADD. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - NATSDANOADD MIB = 34 - - // ISO10Swedish is the MIB identifier with IANA name SEN_850200_B. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO10Swedish MIB = 35 - - // KSC56011987 is the MIB identifier with IANA name KS_C_5601-1987. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. 
- // Reference: RFC1345 - KSC56011987 MIB = 36 - - // ISO2022KR is the MIB identifier with IANA name ISO-2022-KR (MIME: ISO-2022-KR). - // - // rfc1557 (see also KS_C_5601-1987) - // Reference: RFC1557 - ISO2022KR MIB = 37 - - // EUCKR is the MIB identifier with IANA name EUC-KR (MIME: EUC-KR). - // - // rfc1557 (see also KS_C_5861-1992) - // Reference: RFC1557 - EUCKR MIB = 38 - - // ISO2022JP is the MIB identifier with IANA name ISO-2022-JP (MIME: ISO-2022-JP). - // - // rfc1468 (see also rfc2237 ) - // Reference: RFC1468 - ISO2022JP MIB = 39 - - // ISO2022JP2 is the MIB identifier with IANA name ISO-2022-JP-2 (MIME: ISO-2022-JP-2). - // - // rfc1554 - // Reference: RFC1554 - ISO2022JP2 MIB = 40 - - // ISO13JISC6220jp is the MIB identifier with IANA name JIS_C6220-1969-jp. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO13JISC6220jp MIB = 41 - - // ISO14JISC6220ro is the MIB identifier with IANA name JIS_C6220-1969-ro. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO14JISC6220ro MIB = 42 - - // ISO16Portuguese is the MIB identifier with IANA name PT. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO16Portuguese MIB = 43 - - // ISO18Greek7Old is the MIB identifier with IANA name greek7-old. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO18Greek7Old MIB = 44 - - // ISO19LatinGreek is the MIB identifier with IANA name latin-greek. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO19LatinGreek MIB = 45 - - // ISO25French is the MIB identifier with IANA name NF_Z_62-010_(1973). - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO25French MIB = 46 - - // ISO27LatinGreek1 is the MIB identifier with IANA name Latin-greek-1. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO27LatinGreek1 MIB = 47 - - // ISO5427Cyrillic is the MIB identifier with IANA name ISO_5427. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO5427Cyrillic MIB = 48 - - // ISO42JISC62261978 is the MIB identifier with IANA name JIS_C6226-1978. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO42JISC62261978 MIB = 49 - - // ISO47BSViewdata is the MIB identifier with IANA name BS_viewdata. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO47BSViewdata MIB = 50 - - // ISO49INIS is the MIB identifier with IANA name INIS. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO49INIS MIB = 51 - - // ISO50INIS8 is the MIB identifier with IANA name INIS-8. 
- // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO50INIS8 MIB = 52 - - // ISO51INISCyrillic is the MIB identifier with IANA name INIS-cyrillic. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO51INISCyrillic MIB = 53 - - // ISO54271981 is the MIB identifier with IANA name ISO_5427:1981. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO54271981 MIB = 54 - - // ISO5428Greek is the MIB identifier with IANA name ISO_5428:1980. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO5428Greek MIB = 55 - - // ISO57GB1988 is the MIB identifier with IANA name GB_1988-80. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO57GB1988 MIB = 56 - - // ISO58GB231280 is the MIB identifier with IANA name GB_2312-80. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO58GB231280 MIB = 57 - - // ISO61Norwegian2 is the MIB identifier with IANA name NS_4551-2. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO61Norwegian2 MIB = 58 - - // ISO70VideotexSupp1 is the MIB identifier with IANA name videotex-suppl. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO70VideotexSupp1 MIB = 59 - - // ISO84Portuguese2 is the MIB identifier with IANA name PT2. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO84Portuguese2 MIB = 60 - - // ISO85Spanish2 is the MIB identifier with IANA name ES2. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO85Spanish2 MIB = 61 - - // ISO86Hungarian is the MIB identifier with IANA name MSZ_7795.3. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO86Hungarian MIB = 62 - - // ISO87JISX0208 is the MIB identifier with IANA name JIS_C6226-1983. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO87JISX0208 MIB = 63 - - // ISO88Greek7 is the MIB identifier with IANA name greek7. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO88Greek7 MIB = 64 - - // ISO89ASMO449 is the MIB identifier with IANA name ASMO_449. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO89ASMO449 MIB = 65 - - // ISO90 is the MIB identifier with IANA name iso-ir-90. 
- // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO90 MIB = 66 - - // ISO91JISC62291984a is the MIB identifier with IANA name JIS_C6229-1984-a. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO91JISC62291984a MIB = 67 - - // ISO92JISC62991984b is the MIB identifier with IANA name JIS_C6229-1984-b. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO92JISC62991984b MIB = 68 - - // ISO93JIS62291984badd is the MIB identifier with IANA name JIS_C6229-1984-b-add. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO93JIS62291984badd MIB = 69 - - // ISO94JIS62291984hand is the MIB identifier with IANA name JIS_C6229-1984-hand. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO94JIS62291984hand MIB = 70 - - // ISO95JIS62291984handadd is the MIB identifier with IANA name JIS_C6229-1984-hand-add. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO95JIS62291984handadd MIB = 71 - - // ISO96JISC62291984kana is the MIB identifier with IANA name JIS_C6229-1984-kana. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO96JISC62291984kana MIB = 72 - - // ISO2033 is the MIB identifier with IANA name ISO_2033-1983. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO2033 MIB = 73 - - // ISO99NAPLPS is the MIB identifier with IANA name ANSI_X3.110-1983. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO99NAPLPS MIB = 74 - - // ISO102T617bit is the MIB identifier with IANA name T.61-7bit. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO102T617bit MIB = 75 - - // ISO103T618bit is the MIB identifier with IANA name T.61-8bit. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO103T618bit MIB = 76 - - // ISO111ECMACyrillic is the MIB identifier with IANA name ECMA-cyrillic. - // - // ISO registry - ISO111ECMACyrillic MIB = 77 - - // ISO121Canadian1 is the MIB identifier with IANA name CSA_Z243.4-1985-1. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO121Canadian1 MIB = 78 - - // ISO122Canadian2 is the MIB identifier with IANA name CSA_Z243.4-1985-2. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO122Canadian2 MIB = 79 - - // ISO123CSAZ24341985gr is the MIB identifier with IANA name CSA_Z243.4-1985-gr. 
- // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO123CSAZ24341985gr MIB = 80 - - // ISO88596E is the MIB identifier with IANA name ISO_8859-6-E (MIME: ISO-8859-6-E). - // - // rfc1556 - // Reference: RFC1556 - ISO88596E MIB = 81 - - // ISO88596I is the MIB identifier with IANA name ISO_8859-6-I (MIME: ISO-8859-6-I). - // - // rfc1556 - // Reference: RFC1556 - ISO88596I MIB = 82 - - // ISO128T101G2 is the MIB identifier with IANA name T.101-G2. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO128T101G2 MIB = 83 - - // ISO88598E is the MIB identifier with IANA name ISO_8859-8-E (MIME: ISO-8859-8-E). - // - // rfc1556 - // Reference: RFC1556 - ISO88598E MIB = 84 - - // ISO88598I is the MIB identifier with IANA name ISO_8859-8-I (MIME: ISO-8859-8-I). - // - // rfc1556 - // Reference: RFC1556 - ISO88598I MIB = 85 - - // ISO139CSN369103 is the MIB identifier with IANA name CSN_369103. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO139CSN369103 MIB = 86 - - // ISO141JUSIB1002 is the MIB identifier with IANA name JUS_I.B1.002. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO141JUSIB1002 MIB = 87 - - // ISO143IECP271 is the MIB identifier with IANA name IEC_P27-1. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO143IECP271 MIB = 88 - - // ISO146Serbian is the MIB identifier with IANA name JUS_I.B1.003-serb. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO146Serbian MIB = 89 - - // ISO147Macedonian is the MIB identifier with IANA name JUS_I.B1.003-mac. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO147Macedonian MIB = 90 - - // ISO150GreekCCITT is the MIB identifier with IANA name greek-ccitt. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO150GreekCCITT MIB = 91 - - // ISO151Cuba is the MIB identifier with IANA name NC_NC00-10:81. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO151Cuba MIB = 92 - - // ISO6937Add is the MIB identifier with IANA name ISO_6937-2-25. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO6937Add MIB = 93 - - // ISO153GOST1976874 is the MIB identifier with IANA name GOST_19768-74. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO153GOST1976874 MIB = 94 - - // ISO8859Supp is the MIB identifier with IANA name ISO_8859-supp. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. 
- // Reference: RFC1345 - ISO8859Supp MIB = 95 - - // ISO10367Box is the MIB identifier with IANA name ISO_10367-box. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO10367Box MIB = 96 - - // ISO158Lap is the MIB identifier with IANA name latin-lap. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO158Lap MIB = 97 - - // ISO159JISX02121990 is the MIB identifier with IANA name JIS_X0212-1990. - // - // ISO-IR: International Register of Escape Sequences - // Note: The current registration authority is IPSJ/ITSCJ, Japan. - // Reference: RFC1345 - ISO159JISX02121990 MIB = 98 - - // ISO646Danish is the MIB identifier with IANA name DS_2089. - // - // Danish Standard, DS 2089, February 1974 - // Reference: RFC1345 - ISO646Danish MIB = 99 - - // USDK is the MIB identifier with IANA name us-dk. - // - // Reference: RFC1345 - USDK MIB = 100 - - // DKUS is the MIB identifier with IANA name dk-us. - // - // Reference: RFC1345 - DKUS MIB = 101 - - // KSC5636 is the MIB identifier with IANA name KSC5636. - // - // Reference: RFC1345 - KSC5636 MIB = 102 - - // Unicode11UTF7 is the MIB identifier with IANA name UNICODE-1-1-UTF-7. - // - // rfc1642 - // Reference: RFC1642 - Unicode11UTF7 MIB = 103 - - // ISO2022CN is the MIB identifier with IANA name ISO-2022-CN. - // - // rfc1922 - // Reference: RFC1922 - ISO2022CN MIB = 104 - - // ISO2022CNEXT is the MIB identifier with IANA name ISO-2022-CN-EXT. - // - // rfc1922 - // Reference: RFC1922 - ISO2022CNEXT MIB = 105 - - // UTF8 is the MIB identifier with IANA name UTF-8. - // - // rfc3629 - // Reference: RFC3629 - UTF8 MIB = 106 - - // ISO885913 is the MIB identifier with IANA name ISO-8859-13. - // - // ISO See https://www.iana.org/assignments/charset-reg/ISO-8859-13 https://www.iana.org/assignments/charset-reg/ISO-8859-13 - ISO885913 MIB = 109 - - // ISO885914 is the MIB identifier with IANA name ISO-8859-14. - // - // ISO See https://www.iana.org/assignments/charset-reg/ISO-8859-14 - ISO885914 MIB = 110 - - // ISO885915 is the MIB identifier with IANA name ISO-8859-15. - // - // ISO - // Please see: https://www.iana.org/assignments/charset-reg/ISO-8859-15 - ISO885915 MIB = 111 - - // ISO885916 is the MIB identifier with IANA name ISO-8859-16. - // - // ISO - ISO885916 MIB = 112 - - // GBK is the MIB identifier with IANA name GBK. - // - // Chinese IT Standardization Technical Committee - // Please see: https://www.iana.org/assignments/charset-reg/GBK - GBK MIB = 113 - - // GB18030 is the MIB identifier with IANA name GB18030. - // - // Chinese IT Standardization Technical Committee - // Please see: https://www.iana.org/assignments/charset-reg/GB18030 - GB18030 MIB = 114 - - // OSDEBCDICDF0415 is the MIB identifier with IANA name OSD_EBCDIC_DF04_15. - // - // Fujitsu-Siemens standard mainframe EBCDIC encoding - // Please see: https://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF04-15 - OSDEBCDICDF0415 MIB = 115 - - // OSDEBCDICDF03IRV is the MIB identifier with IANA name OSD_EBCDIC_DF03_IRV. - // - // Fujitsu-Siemens standard mainframe EBCDIC encoding - // Please see: https://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF03-IRV - OSDEBCDICDF03IRV MIB = 116 - - // OSDEBCDICDF041 is the MIB identifier with IANA name OSD_EBCDIC_DF04_1. 
- // - // Fujitsu-Siemens standard mainframe EBCDIC encoding - // Please see: https://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF04-1 - OSDEBCDICDF041 MIB = 117 - - // ISO115481 is the MIB identifier with IANA name ISO-11548-1. - // - // See https://www.iana.org/assignments/charset-reg/ISO-11548-1 - ISO115481 MIB = 118 - - // KZ1048 is the MIB identifier with IANA name KZ-1048. - // - // See https://www.iana.org/assignments/charset-reg/KZ-1048 - KZ1048 MIB = 119 - - // Unicode is the MIB identifier with IANA name ISO-10646-UCS-2. - // - // the 2-octet Basic Multilingual Plane, aka Unicode - // this needs to specify network byte order: the standard - // does not specify (it is a 16-bit integer space) - Unicode MIB = 1000 - - // UCS4 is the MIB identifier with IANA name ISO-10646-UCS-4. - // - // the full code space. (same comment about byte order, - // these are 31-bit numbers. - UCS4 MIB = 1001 - - // UnicodeASCII is the MIB identifier with IANA name ISO-10646-UCS-Basic. - // - // ASCII subset of Unicode. Basic Latin = collection 1 - // See ISO 10646, Appendix A - UnicodeASCII MIB = 1002 - - // UnicodeLatin1 is the MIB identifier with IANA name ISO-10646-Unicode-Latin1. - // - // ISO Latin-1 subset of Unicode. Basic Latin and Latin-1 - // Supplement = collections 1 and 2. See ISO 10646, - // Appendix A. See rfc1815 . - UnicodeLatin1 MIB = 1003 - - // UnicodeJapanese is the MIB identifier with IANA name ISO-10646-J-1. - // - // ISO 10646 Japanese, see rfc1815 . - UnicodeJapanese MIB = 1004 - - // UnicodeIBM1261 is the MIB identifier with IANA name ISO-Unicode-IBM-1261. - // - // IBM Latin-2, -3, -5, Extended Presentation Set, GCSGID: 1261 - UnicodeIBM1261 MIB = 1005 - - // UnicodeIBM1268 is the MIB identifier with IANA name ISO-Unicode-IBM-1268. - // - // IBM Latin-4 Extended Presentation Set, GCSGID: 1268 - UnicodeIBM1268 MIB = 1006 - - // UnicodeIBM1276 is the MIB identifier with IANA name ISO-Unicode-IBM-1276. - // - // IBM Cyrillic Greek Extended Presentation Set, GCSGID: 1276 - UnicodeIBM1276 MIB = 1007 - - // UnicodeIBM1264 is the MIB identifier with IANA name ISO-Unicode-IBM-1264. - // - // IBM Arabic Presentation Set, GCSGID: 1264 - UnicodeIBM1264 MIB = 1008 - - // UnicodeIBM1265 is the MIB identifier with IANA name ISO-Unicode-IBM-1265. - // - // IBM Hebrew Presentation Set, GCSGID: 1265 - UnicodeIBM1265 MIB = 1009 - - // Unicode11 is the MIB identifier with IANA name UNICODE-1-1. - // - // rfc1641 - // Reference: RFC1641 - Unicode11 MIB = 1010 - - // SCSU is the MIB identifier with IANA name SCSU. - // - // SCSU See https://www.iana.org/assignments/charset-reg/SCSU - SCSU MIB = 1011 - - // UTF7 is the MIB identifier with IANA name UTF-7. - // - // rfc2152 - // Reference: RFC2152 - UTF7 MIB = 1012 - - // UTF16BE is the MIB identifier with IANA name UTF-16BE. - // - // rfc2781 - // Reference: RFC2781 - UTF16BE MIB = 1013 - - // UTF16LE is the MIB identifier with IANA name UTF-16LE. - // - // rfc2781 - // Reference: RFC2781 - UTF16LE MIB = 1014 - - // UTF16 is the MIB identifier with IANA name UTF-16. - // - // rfc2781 - // Reference: RFC2781 - UTF16 MIB = 1015 - - // CESU8 is the MIB identifier with IANA name CESU-8. - // - // https://www.unicode.org/reports/tr26 - CESU8 MIB = 1016 - - // UTF32 is the MIB identifier with IANA name UTF-32. - // - // https://www.unicode.org/reports/tr19/ - UTF32 MIB = 1017 - - // UTF32BE is the MIB identifier with IANA name UTF-32BE. 
- // - // https://www.unicode.org/reports/tr19/ - UTF32BE MIB = 1018 - - // UTF32LE is the MIB identifier with IANA name UTF-32LE. - // - // https://www.unicode.org/reports/tr19/ - UTF32LE MIB = 1019 - - // BOCU1 is the MIB identifier with IANA name BOCU-1. - // - // https://www.unicode.org/notes/tn6/ - BOCU1 MIB = 1020 - - // UTF7IMAP is the MIB identifier with IANA name UTF-7-IMAP. - // - // Note: This charset is used to encode Unicode in IMAP mailbox names; - // see section 5.1.3 of rfc3501 . It should never be used - // outside this context. A name has been assigned so that charset processing - // implementations can refer to it in a consistent way. - UTF7IMAP MIB = 1021 - - // Windows30Latin1 is the MIB identifier with IANA name ISO-8859-1-Windows-3.0-Latin-1. - // - // Extended ISO 8859-1 Latin-1 for Windows 3.0. - // PCL Symbol Set id: 9U - Windows30Latin1 MIB = 2000 - - // Windows31Latin1 is the MIB identifier with IANA name ISO-8859-1-Windows-3.1-Latin-1. - // - // Extended ISO 8859-1 Latin-1 for Windows 3.1. - // PCL Symbol Set id: 19U - Windows31Latin1 MIB = 2001 - - // Windows31Latin2 is the MIB identifier with IANA name ISO-8859-2-Windows-Latin-2. - // - // Extended ISO 8859-2. Latin-2 for Windows 3.1. - // PCL Symbol Set id: 9E - Windows31Latin2 MIB = 2002 - - // Windows31Latin5 is the MIB identifier with IANA name ISO-8859-9-Windows-Latin-5. - // - // Extended ISO 8859-9. Latin-5 for Windows 3.1 - // PCL Symbol Set id: 5T - Windows31Latin5 MIB = 2003 - - // HPRoman8 is the MIB identifier with IANA name hp-roman8. - // - // LaserJet IIP Printer User's Manual, - // HP part no 33471-90901, Hewlet-Packard, June 1989. - // Reference: RFC1345 - HPRoman8 MIB = 2004 - - // AdobeStandardEncoding is the MIB identifier with IANA name Adobe-Standard-Encoding. - // - // PostScript Language Reference Manual - // PCL Symbol Set id: 10J - AdobeStandardEncoding MIB = 2005 - - // VenturaUS is the MIB identifier with IANA name Ventura-US. - // - // Ventura US. ASCII plus characters typically used in - // publishing, like pilcrow, copyright, registered, trade mark, - // section, dagger, and double dagger in the range A0 (hex) - // to FF (hex). - // PCL Symbol Set id: 14J - VenturaUS MIB = 2006 - - // VenturaInternational is the MIB identifier with IANA name Ventura-International. - // - // Ventura International. ASCII plus coded characters similar - // to Roman8. - // PCL Symbol Set id: 13J - VenturaInternational MIB = 2007 - - // DECMCS is the MIB identifier with IANA name DEC-MCS. - // - // VAX/VMS User's Manual, - // Order Number: AI-Y517A-TE, April 1986. - // Reference: RFC1345 - DECMCS MIB = 2008 - - // PC850Multilingual is the MIB identifier with IANA name IBM850. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - PC850Multilingual MIB = 2009 - - // PC8DanishNorwegian is the MIB identifier with IANA name PC8-Danish-Norwegian. - // - // PC Danish Norwegian - // 8-bit PC set for Danish Norwegian - // PCL Symbol Set id: 11U - PC8DanishNorwegian MIB = 2012 - - // PC862LatinHebrew is the MIB identifier with IANA name IBM862. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - PC862LatinHebrew MIB = 2013 - - // PC8Turkish is the MIB identifier with IANA name PC8-Turkish. - // - // PC Latin Turkish. PCL Symbol Set id: 9T - PC8Turkish MIB = 2014 - - // IBMSymbols is the MIB identifier with IANA name IBM-Symbols. - // - // Presentation Set, CPGID: 259 - IBMSymbols MIB = 2015 - - // IBMThai is the MIB identifier with IANA name IBM-Thai. 
- // - // Presentation Set, CPGID: 838 - IBMThai MIB = 2016 - - // HPLegal is the MIB identifier with IANA name HP-Legal. - // - // PCL 5 Comparison Guide, Hewlett-Packard, - // HP part number 5961-0510, October 1992 - // PCL Symbol Set id: 1U - HPLegal MIB = 2017 - - // HPPiFont is the MIB identifier with IANA name HP-Pi-font. - // - // PCL 5 Comparison Guide, Hewlett-Packard, - // HP part number 5961-0510, October 1992 - // PCL Symbol Set id: 15U - HPPiFont MIB = 2018 - - // HPMath8 is the MIB identifier with IANA name HP-Math8. - // - // PCL 5 Comparison Guide, Hewlett-Packard, - // HP part number 5961-0510, October 1992 - // PCL Symbol Set id: 8M - HPMath8 MIB = 2019 - - // HPPSMath is the MIB identifier with IANA name Adobe-Symbol-Encoding. - // - // PostScript Language Reference Manual - // PCL Symbol Set id: 5M - HPPSMath MIB = 2020 - - // HPDesktop is the MIB identifier with IANA name HP-DeskTop. - // - // PCL 5 Comparison Guide, Hewlett-Packard, - // HP part number 5961-0510, October 1992 - // PCL Symbol Set id: 7J - HPDesktop MIB = 2021 - - // VenturaMath is the MIB identifier with IANA name Ventura-Math. - // - // PCL 5 Comparison Guide, Hewlett-Packard, - // HP part number 5961-0510, October 1992 - // PCL Symbol Set id: 6M - VenturaMath MIB = 2022 - - // MicrosoftPublishing is the MIB identifier with IANA name Microsoft-Publishing. - // - // PCL 5 Comparison Guide, Hewlett-Packard, - // HP part number 5961-0510, October 1992 - // PCL Symbol Set id: 6J - MicrosoftPublishing MIB = 2023 - - // Windows31J is the MIB identifier with IANA name Windows-31J. - // - // Windows Japanese. A further extension of Shift_JIS - // to include NEC special characters (Row 13), NEC - // selection of IBM extensions (Rows 89 to 92), and IBM - // extensions (Rows 115 to 119). The CCS's are - // JIS X0201:1997, JIS X0208:1997, and these extensions. - // This charset can be used for the top-level media type "text", - // but it is of limited or specialized use (see rfc2278 ). - // PCL Symbol Set id: 19K - Windows31J MIB = 2024 - - // GB2312 is the MIB identifier with IANA name GB2312 (MIME: GB2312). - // - // Chinese for People's Republic of China (PRC) mixed one byte, - // two byte set: - // 20-7E = one byte ASCII - // A1-FE = two byte PRC Kanji - // See GB 2312-80 - // PCL Symbol Set Id: 18C - GB2312 MIB = 2025 - - // Big5 is the MIB identifier with IANA name Big5 (MIME: Big5). - // - // Chinese for Taiwan Multi-byte set. - // PCL Symbol Set Id: 18T - Big5 MIB = 2026 - - // Macintosh is the MIB identifier with IANA name macintosh. - // - // The Unicode Standard ver1.0, ISBN 0-201-56788-1, Oct 1991 - // Reference: RFC1345 - Macintosh MIB = 2027 - - // IBM037 is the MIB identifier with IANA name IBM037. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM037 MIB = 2028 - - // IBM038 is the MIB identifier with IANA name IBM038. - // - // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 - // Reference: RFC1345 - IBM038 MIB = 2029 - - // IBM273 is the MIB identifier with IANA name IBM273. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM273 MIB = 2030 - - // IBM274 is the MIB identifier with IANA name IBM274. - // - // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 - // Reference: RFC1345 - IBM274 MIB = 2031 - - // IBM275 is the MIB identifier with IANA name IBM275. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM275 MIB = 2032 - - // IBM277 is the MIB identifier with IANA name IBM277. 
- // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM277 MIB = 2033 - - // IBM278 is the MIB identifier with IANA name IBM278. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM278 MIB = 2034 - - // IBM280 is the MIB identifier with IANA name IBM280. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM280 MIB = 2035 - - // IBM281 is the MIB identifier with IANA name IBM281. - // - // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 - // Reference: RFC1345 - IBM281 MIB = 2036 - - // IBM284 is the MIB identifier with IANA name IBM284. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM284 MIB = 2037 - - // IBM285 is the MIB identifier with IANA name IBM285. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM285 MIB = 2038 - - // IBM290 is the MIB identifier with IANA name IBM290. - // - // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 - // Reference: RFC1345 - IBM290 MIB = 2039 - - // IBM297 is the MIB identifier with IANA name IBM297. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM297 MIB = 2040 - - // IBM420 is the MIB identifier with IANA name IBM420. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990, - // IBM NLS RM p 11-11 - // Reference: RFC1345 - IBM420 MIB = 2041 - - // IBM423 is the MIB identifier with IANA name IBM423. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM423 MIB = 2042 - - // IBM424 is the MIB identifier with IANA name IBM424. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM424 MIB = 2043 - - // PC8CodePage437 is the MIB identifier with IANA name IBM437. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - PC8CodePage437 MIB = 2011 - - // IBM500 is the MIB identifier with IANA name IBM500. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM500 MIB = 2044 - - // IBM851 is the MIB identifier with IANA name IBM851. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM851 MIB = 2045 - - // PCp852 is the MIB identifier with IANA name IBM852. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - PCp852 MIB = 2010 - - // IBM855 is the MIB identifier with IANA name IBM855. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM855 MIB = 2046 - - // IBM857 is the MIB identifier with IANA name IBM857. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM857 MIB = 2047 - - // IBM860 is the MIB identifier with IANA name IBM860. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM860 MIB = 2048 - - // IBM861 is the MIB identifier with IANA name IBM861. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM861 MIB = 2049 - - // IBM863 is the MIB identifier with IANA name IBM863. - // - // IBM Keyboard layouts and code pages, PN 07G4586 June 1991 - // Reference: RFC1345 - IBM863 MIB = 2050 - - // IBM864 is the MIB identifier with IANA name IBM864. - // - // IBM Keyboard layouts and code pages, PN 07G4586 June 1991 - // Reference: RFC1345 - IBM864 MIB = 2051 - - // IBM865 is the MIB identifier with IANA name IBM865. - // - // IBM DOS 3.3 Ref (Abridged), 94X9575 (Feb 1987) - // Reference: RFC1345 - IBM865 MIB = 2052 - - // IBM868 is the MIB identifier with IANA name IBM868. 
- // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM868 MIB = 2053 - - // IBM869 is the MIB identifier with IANA name IBM869. - // - // IBM Keyboard layouts and code pages, PN 07G4586 June 1991 - // Reference: RFC1345 - IBM869 MIB = 2054 - - // IBM870 is the MIB identifier with IANA name IBM870. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM870 MIB = 2055 - - // IBM871 is the MIB identifier with IANA name IBM871. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM871 MIB = 2056 - - // IBM880 is the MIB identifier with IANA name IBM880. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM880 MIB = 2057 - - // IBM891 is the MIB identifier with IANA name IBM891. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM891 MIB = 2058 - - // IBM903 is the MIB identifier with IANA name IBM903. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM903 MIB = 2059 - - // IBBM904 is the MIB identifier with IANA name IBM904. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBBM904 MIB = 2060 - - // IBM905 is the MIB identifier with IANA name IBM905. - // - // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 - // Reference: RFC1345 - IBM905 MIB = 2061 - - // IBM918 is the MIB identifier with IANA name IBM918. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM918 MIB = 2062 - - // IBM1026 is the MIB identifier with IANA name IBM1026. - // - // IBM NLS RM Vol2 SE09-8002-01, March 1990 - // Reference: RFC1345 - IBM1026 MIB = 2063 - - // IBMEBCDICATDE is the MIB identifier with IANA name EBCDIC-AT-DE. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - IBMEBCDICATDE MIB = 2064 - - // EBCDICATDEA is the MIB identifier with IANA name EBCDIC-AT-DE-A. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICATDEA MIB = 2065 - - // EBCDICCAFR is the MIB identifier with IANA name EBCDIC-CA-FR. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICCAFR MIB = 2066 - - // EBCDICDKNO is the MIB identifier with IANA name EBCDIC-DK-NO. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICDKNO MIB = 2067 - - // EBCDICDKNOA is the MIB identifier with IANA name EBCDIC-DK-NO-A. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICDKNOA MIB = 2068 - - // EBCDICFISE is the MIB identifier with IANA name EBCDIC-FI-SE. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICFISE MIB = 2069 - - // EBCDICFISEA is the MIB identifier with IANA name EBCDIC-FI-SE-A. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICFISEA MIB = 2070 - - // EBCDICFR is the MIB identifier with IANA name EBCDIC-FR. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICFR MIB = 2071 - - // EBCDICIT is the MIB identifier with IANA name EBCDIC-IT. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICIT MIB = 2072 - - // EBCDICPT is the MIB identifier with IANA name EBCDIC-PT. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICPT MIB = 2073 - - // EBCDICES is the MIB identifier with IANA name EBCDIC-ES. 
- // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICES MIB = 2074 - - // EBCDICESA is the MIB identifier with IANA name EBCDIC-ES-A. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICESA MIB = 2075 - - // EBCDICESS is the MIB identifier with IANA name EBCDIC-ES-S. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICESS MIB = 2076 - - // EBCDICUK is the MIB identifier with IANA name EBCDIC-UK. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICUK MIB = 2077 - - // EBCDICUS is the MIB identifier with IANA name EBCDIC-US. - // - // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 - // Reference: RFC1345 - EBCDICUS MIB = 2078 - - // Unknown8BiT is the MIB identifier with IANA name UNKNOWN-8BIT. - // - // Reference: RFC1428 - Unknown8BiT MIB = 2079 - - // Mnemonic is the MIB identifier with IANA name MNEMONIC. - // - // rfc1345 , also known as "mnemonic+ascii+38" - // Reference: RFC1345 - Mnemonic MIB = 2080 - - // Mnem is the MIB identifier with IANA name MNEM. - // - // rfc1345 , also known as "mnemonic+ascii+8200" - // Reference: RFC1345 - Mnem MIB = 2081 - - // VISCII is the MIB identifier with IANA name VISCII. - // - // rfc1456 - // Reference: RFC1456 - VISCII MIB = 2082 - - // VIQR is the MIB identifier with IANA name VIQR. - // - // rfc1456 - // Reference: RFC1456 - VIQR MIB = 2083 - - // KOI8R is the MIB identifier with IANA name KOI8-R (MIME: KOI8-R). - // - // rfc1489 , based on GOST-19768-74, ISO-6937/8, - // INIS-Cyrillic, ISO-5427. - // Reference: RFC1489 - KOI8R MIB = 2084 - - // HZGB2312 is the MIB identifier with IANA name HZ-GB-2312. - // - // rfc1842 , rfc1843 rfc1843 rfc1842 - HZGB2312 MIB = 2085 - - // IBM866 is the MIB identifier with IANA name IBM866. - // - // IBM NLDG Volume 2 (SE09-8002-03) August 1994 - IBM866 MIB = 2086 - - // PC775Baltic is the MIB identifier with IANA name IBM775. - // - // HP PCL 5 Comparison Guide (P/N 5021-0329) pp B-13, 1996 - PC775Baltic MIB = 2087 - - // KOI8U is the MIB identifier with IANA name KOI8-U. - // - // rfc2319 - // Reference: RFC2319 - KOI8U MIB = 2088 - - // IBM00858 is the MIB identifier with IANA name IBM00858. - // - // IBM See https://www.iana.org/assignments/charset-reg/IBM00858 - IBM00858 MIB = 2089 - - // IBM00924 is the MIB identifier with IANA name IBM00924. - // - // IBM See https://www.iana.org/assignments/charset-reg/IBM00924 - IBM00924 MIB = 2090 - - // IBM01140 is the MIB identifier with IANA name IBM01140. - // - // IBM See https://www.iana.org/assignments/charset-reg/IBM01140 - IBM01140 MIB = 2091 - - // IBM01141 is the MIB identifier with IANA name IBM01141. - // - // IBM See https://www.iana.org/assignments/charset-reg/IBM01141 - IBM01141 MIB = 2092 - - // IBM01142 is the MIB identifier with IANA name IBM01142. - // - // IBM See https://www.iana.org/assignments/charset-reg/IBM01142 - IBM01142 MIB = 2093 - - // IBM01143 is the MIB identifier with IANA name IBM01143. - // - // IBM See https://www.iana.org/assignments/charset-reg/IBM01143 - IBM01143 MIB = 2094 - - // IBM01144 is the MIB identifier with IANA name IBM01144. - // - // IBM See https://www.iana.org/assignments/charset-reg/IBM01144 - IBM01144 MIB = 2095 - - // IBM01145 is the MIB identifier with IANA name IBM01145. - // - // IBM See https://www.iana.org/assignments/charset-reg/IBM01145 - IBM01145 MIB = 2096 - - // IBM01146 is the MIB identifier with IANA name IBM01146. 
- // - // IBM See https://www.iana.org/assignments/charset-reg/IBM01146 - IBM01146 MIB = 2097 - - // IBM01147 is the MIB identifier with IANA name IBM01147. - // - // IBM See https://www.iana.org/assignments/charset-reg/IBM01147 - IBM01147 MIB = 2098 - - // IBM01148 is the MIB identifier with IANA name IBM01148. - // - // IBM See https://www.iana.org/assignments/charset-reg/IBM01148 - IBM01148 MIB = 2099 - - // IBM01149 is the MIB identifier with IANA name IBM01149. - // - // IBM See https://www.iana.org/assignments/charset-reg/IBM01149 - IBM01149 MIB = 2100 - - // Big5HKSCS is the MIB identifier with IANA name Big5-HKSCS. - // - // See https://www.iana.org/assignments/charset-reg/Big5-HKSCS - Big5HKSCS MIB = 2101 - - // IBM1047 is the MIB identifier with IANA name IBM1047. - // - // IBM1047 (EBCDIC Latin 1/Open Systems) https://www-1.ibm.com/servers/eserver/iseries/software/globalization/pdf/cp01047z.pdf - IBM1047 MIB = 2102 - - // PTCP154 is the MIB identifier with IANA name PTCP154. - // - // See https://www.iana.org/assignments/charset-reg/PTCP154 - PTCP154 MIB = 2103 - - // Amiga1251 is the MIB identifier with IANA name Amiga-1251. - // - // See https://www.amiga.ultranet.ru/Amiga-1251.html - Amiga1251 MIB = 2104 - - // KOI7switched is the MIB identifier with IANA name KOI7-switched. - // - // See https://www.iana.org/assignments/charset-reg/KOI7-switched - KOI7switched MIB = 2105 - - // BRF is the MIB identifier with IANA name BRF. - // - // See https://www.iana.org/assignments/charset-reg/BRF - BRF MIB = 2106 - - // TSCII is the MIB identifier with IANA name TSCII. - // - // See https://www.iana.org/assignments/charset-reg/TSCII - TSCII MIB = 2107 - - // CP51932 is the MIB identifier with IANA name CP51932. - // - // See https://www.iana.org/assignments/charset-reg/CP51932 - CP51932 MIB = 2108 - - // Windows874 is the MIB identifier with IANA name windows-874. - // - // See https://www.iana.org/assignments/charset-reg/windows-874 - Windows874 MIB = 2109 - - // Windows1250 is the MIB identifier with IANA name windows-1250. - // - // Microsoft https://www.iana.org/assignments/charset-reg/windows-1250 - Windows1250 MIB = 2250 - - // Windows1251 is the MIB identifier with IANA name windows-1251. - // - // Microsoft https://www.iana.org/assignments/charset-reg/windows-1251 - Windows1251 MIB = 2251 - - // Windows1252 is the MIB identifier with IANA name windows-1252. - // - // Microsoft https://www.iana.org/assignments/charset-reg/windows-1252 - Windows1252 MIB = 2252 - - // Windows1253 is the MIB identifier with IANA name windows-1253. - // - // Microsoft https://www.iana.org/assignments/charset-reg/windows-1253 - Windows1253 MIB = 2253 - - // Windows1254 is the MIB identifier with IANA name windows-1254. - // - // Microsoft https://www.iana.org/assignments/charset-reg/windows-1254 - Windows1254 MIB = 2254 - - // Windows1255 is the MIB identifier with IANA name windows-1255. - // - // Microsoft https://www.iana.org/assignments/charset-reg/windows-1255 - Windows1255 MIB = 2255 - - // Windows1256 is the MIB identifier with IANA name windows-1256. - // - // Microsoft https://www.iana.org/assignments/charset-reg/windows-1256 - Windows1256 MIB = 2256 - - // Windows1257 is the MIB identifier with IANA name windows-1257. - // - // Microsoft https://www.iana.org/assignments/charset-reg/windows-1257 - Windows1257 MIB = 2257 - - // Windows1258 is the MIB identifier with IANA name windows-1258. 
- // - // Microsoft https://www.iana.org/assignments/charset-reg/windows-1258 - Windows1258 MIB = 2258 - - // TIS620 is the MIB identifier with IANA name TIS-620. - // - // Thai Industrial Standards Institute (TISI) - TIS620 MIB = 2259 - - // CP50220 is the MIB identifier with IANA name CP50220. - // - // See https://www.iana.org/assignments/charset-reg/CP50220 - CP50220 MIB = 2260 -) diff --git a/src/vendor/golang.org/x/text/encoding/internal/internal.go b/src/vendor/golang.org/x/text/encoding/internal/internal.go deleted file mode 100644 index 75a5fd165..000000000 --- a/src/vendor/golang.org/x/text/encoding/internal/internal.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package internal contains code that is shared among encoding implementations. -package internal - -import ( - "golang.org/x/text/encoding" - "golang.org/x/text/encoding/internal/identifier" - "golang.org/x/text/transform" -) - -// Encoding is an implementation of the Encoding interface that adds the String -// and ID methods to an existing encoding. -type Encoding struct { - encoding.Encoding - Name string - MIB identifier.MIB -} - -// _ verifies that Encoding implements identifier.Interface. -var _ identifier.Interface = (*Encoding)(nil) - -func (e *Encoding) String() string { - return e.Name -} - -func (e *Encoding) ID() (mib identifier.MIB, other string) { - return e.MIB, "" -} - -// SimpleEncoding is an Encoding that combines two Transformers. -type SimpleEncoding struct { - Decoder transform.Transformer - Encoder transform.Transformer -} - -func (e *SimpleEncoding) NewDecoder() *encoding.Decoder { - return &encoding.Decoder{Transformer: e.Decoder} -} - -func (e *SimpleEncoding) NewEncoder() *encoding.Encoder { - return &encoding.Encoder{Transformer: e.Encoder} -} - -// FuncEncoding is an Encoding that combines two functions returning a new -// Transformer. -type FuncEncoding struct { - Decoder func() transform.Transformer - Encoder func() transform.Transformer -} - -func (e FuncEncoding) NewDecoder() *encoding.Decoder { - return &encoding.Decoder{Transformer: e.Decoder()} -} - -func (e FuncEncoding) NewEncoder() *encoding.Encoder { - return &encoding.Encoder{Transformer: e.Encoder()} -} - -// A RepertoireError indicates a rune is not in the repertoire of a destination -// encoding. It is associated with an encoding-specific suggested replacement -// byte. -type RepertoireError byte - -// Error implements the error interrface. -func (r RepertoireError) Error() string { - return "encoding: rune not supported by encoding." -} - -// Replacement returns the replacement string associated with this error. -func (r RepertoireError) Replacement() byte { return byte(r) } - -var ErrASCIIReplacement = RepertoireError(encoding.ASCIISub) diff --git a/src/vendor/golang.org/x/text/encoding/unicode/override.go b/src/vendor/golang.org/x/text/encoding/unicode/override.go deleted file mode 100644 index 35d62fcc9..000000000 --- a/src/vendor/golang.org/x/text/encoding/unicode/override.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package unicode - -import ( - "golang.org/x/text/transform" -) - -// BOMOverride returns a new decoder transformer that is identical to fallback, -// except that the presence of a Byte Order Mark at the start of the input -// causes it to switch to the corresponding Unicode decoding. It will only -// consider BOMs for UTF-8, UTF-16BE, and UTF-16LE. -// -// This differs from using ExpectBOM by allowing a BOM to switch to UTF-8, not -// just UTF-16 variants, and allowing falling back to any encoding scheme. -// -// This technique is recommended by the W3C for use in HTML 5: "For -// compatibility with deployed content, the byte order mark (also known as BOM) -// is considered more authoritative than anything else." -// http://www.w3.org/TR/encoding/#specification-hooks -// -// Using BOMOverride is mostly intended for use cases where the first characters -// of a fallback encoding are known to not be a BOM, for example, for valid HTML -// and most encodings. -func BOMOverride(fallback transform.Transformer) transform.Transformer { - // TODO: possibly allow a variadic argument of unicode encodings to allow - // specifying details of which fallbacks are supported as well as - // specifying the details of the implementations. This would also allow for - // support for UTF-32, which should not be supported by default. - return &bomOverride{fallback: fallback} -} - -type bomOverride struct { - fallback transform.Transformer - current transform.Transformer -} - -func (d *bomOverride) Reset() { - d.current = nil - d.fallback.Reset() -} - -var ( - // TODO: we could use decode functions here, instead of allocating a new - // decoder on every NewDecoder as IgnoreBOM decoders can be stateless. - utf16le = UTF16(LittleEndian, IgnoreBOM) - utf16be = UTF16(BigEndian, IgnoreBOM) -) - -const utf8BOM = "\ufeff" - -func (d *bomOverride) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - if d.current != nil { - return d.current.Transform(dst, src, atEOF) - } - if len(src) < 3 && !atEOF { - return 0, 0, transform.ErrShortSrc - } - d.current = d.fallback - bomSize := 0 - if len(src) >= 2 { - if src[0] == 0xFF && src[1] == 0xFE { - d.current = utf16le.NewDecoder() - bomSize = 2 - } else if src[0] == 0xFE && src[1] == 0xFF { - d.current = utf16be.NewDecoder() - bomSize = 2 - } else if len(src) >= 3 && - src[0] == utf8BOM[0] && - src[1] == utf8BOM[1] && - src[2] == utf8BOM[2] { - d.current = transform.Nop - bomSize = 3 - } - } - if bomSize < len(src) { - nDst, nSrc, err = d.current.Transform(dst, src[bomSize:], atEOF) - } - return nDst, nSrc + bomSize, err -} diff --git a/src/vendor/golang.org/x/text/encoding/unicode/unicode.go b/src/vendor/golang.org/x/text/encoding/unicode/unicode.go deleted file mode 100644 index dd99ad14d..000000000 --- a/src/vendor/golang.org/x/text/encoding/unicode/unicode.go +++ /dev/null @@ -1,512 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package unicode provides Unicode encodings such as UTF-16. 
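
For context, the BOMOverride transformer removed above is normally wired in through the transform package; a minimal, self-contained sketch of that usage (assuming golang.org/x/text remains available to the module as a regular dependency rather than a vendored copy):

    package main

    import (
        "fmt"
        "io"
        "strings"

        "golang.org/x/text/encoding/unicode"
        "golang.org/x/text/transform"
    )

    func main() {
        // Fall back to plain UTF-8; a leading UTF-8 or UTF-16 BOM switches the
        // decoder and is consumed in the process.
        dec := unicode.BOMOverride(unicode.UTF8.NewDecoder())
        r := transform.NewReader(strings.NewReader("\ufeffhello"), dec)
        out, err := io.ReadAll(r)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%q\n", out) // "hello" - the BOM has been stripped
    }
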
-package unicode // import "golang.org/x/text/encoding/unicode" - -import ( - "bytes" - "errors" - "unicode/utf16" - "unicode/utf8" - - "golang.org/x/text/encoding" - "golang.org/x/text/encoding/internal" - "golang.org/x/text/encoding/internal/identifier" - "golang.org/x/text/internal/utf8internal" - "golang.org/x/text/runes" - "golang.org/x/text/transform" -) - -// TODO: I think the Transformers really should return errors on unmatched -// surrogate pairs and odd numbers of bytes. This is not required by RFC 2781, -// which leaves it open, but is suggested by WhatWG. It will allow for all error -// modes as defined by WhatWG: fatal, HTML and Replacement. This would require -// the introduction of some kind of error type for conveying the erroneous code -// point. - -// UTF8 is the UTF-8 encoding. It neither removes nor adds byte order marks. -var UTF8 encoding.Encoding = utf8enc - -// UTF8BOM is an UTF-8 encoding where the decoder strips a leading byte order -// mark while the encoder adds one. -// -// Some editors add a byte order mark as a signature to UTF-8 files. Although -// the byte order mark is not useful for detecting byte order in UTF-8, it is -// sometimes used as a convention to mark UTF-8-encoded files. This relies on -// the observation that the UTF-8 byte order mark is either an illegal or at -// least very unlikely sequence in any other character encoding. -var UTF8BOM encoding.Encoding = utf8bomEncoding{} - -type utf8bomEncoding struct{} - -func (utf8bomEncoding) String() string { - return "UTF-8-BOM" -} - -func (utf8bomEncoding) ID() (identifier.MIB, string) { - return identifier.Unofficial, "x-utf8bom" -} - -func (utf8bomEncoding) NewEncoder() *encoding.Encoder { - return &encoding.Encoder{ - Transformer: &utf8bomEncoder{t: runes.ReplaceIllFormed()}, - } -} - -func (utf8bomEncoding) NewDecoder() *encoding.Decoder { - return &encoding.Decoder{Transformer: &utf8bomDecoder{}} -} - -var utf8enc = &internal.Encoding{ - &internal.SimpleEncoding{utf8Decoder{}, runes.ReplaceIllFormed()}, - "UTF-8", - identifier.UTF8, -} - -type utf8bomDecoder struct { - checked bool -} - -func (t *utf8bomDecoder) Reset() { - t.checked = false -} - -func (t *utf8bomDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - if !t.checked { - if !atEOF && len(src) < len(utf8BOM) { - if len(src) == 0 { - return 0, 0, nil - } - return 0, 0, transform.ErrShortSrc - } - if bytes.HasPrefix(src, []byte(utf8BOM)) { - nSrc += len(utf8BOM) - src = src[len(utf8BOM):] - } - t.checked = true - } - nDst, n, err := utf8Decoder.Transform(utf8Decoder{}, dst[nDst:], src, atEOF) - nSrc += n - return nDst, nSrc, err -} - -type utf8bomEncoder struct { - written bool - t transform.Transformer -} - -func (t *utf8bomEncoder) Reset() { - t.written = false - t.t.Reset() -} - -func (t *utf8bomEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - if !t.written { - if len(dst) < len(utf8BOM) { - return nDst, 0, transform.ErrShortDst - } - nDst = copy(dst, utf8BOM) - t.written = true - } - n, nSrc, err := utf8Decoder.Transform(utf8Decoder{}, dst[nDst:], src, atEOF) - nDst += n - return nDst, nSrc, err -} - -type utf8Decoder struct{ transform.NopResetter } - -func (utf8Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - var pSrc int // point from which to start copy in src - var accept utf8internal.AcceptRange - - // The decoder can only make the input larger, not smaller. 
- n := len(src) - if len(dst) < n { - err = transform.ErrShortDst - n = len(dst) - atEOF = false - } - for nSrc < n { - c := src[nSrc] - if c < utf8.RuneSelf { - nSrc++ - continue - } - first := utf8internal.First[c] - size := int(first & utf8internal.SizeMask) - if first == utf8internal.FirstInvalid { - goto handleInvalid // invalid starter byte - } - accept = utf8internal.AcceptRanges[first>>utf8internal.AcceptShift] - if nSrc+size > n { - if !atEOF { - // We may stop earlier than necessary here if the short sequence - // has invalid bytes. Not checking for this simplifies the code - // and may avoid duplicate computations in certain conditions. - if err == nil { - err = transform.ErrShortSrc - } - break - } - // Determine the maximal subpart of an ill-formed subsequence. - switch { - case nSrc+1 >= n || src[nSrc+1] < accept.Lo || accept.Hi < src[nSrc+1]: - size = 1 - case nSrc+2 >= n || src[nSrc+2] < utf8internal.LoCB || utf8internal.HiCB < src[nSrc+2]: - size = 2 - default: - size = 3 // As we are short, the maximum is 3. - } - goto handleInvalid - } - if c = src[nSrc+1]; c < accept.Lo || accept.Hi < c { - size = 1 - goto handleInvalid // invalid continuation byte - } else if size == 2 { - } else if c = src[nSrc+2]; c < utf8internal.LoCB || utf8internal.HiCB < c { - size = 2 - goto handleInvalid // invalid continuation byte - } else if size == 3 { - } else if c = src[nSrc+3]; c < utf8internal.LoCB || utf8internal.HiCB < c { - size = 3 - goto handleInvalid // invalid continuation byte - } - nSrc += size - continue - - handleInvalid: - // Copy the scanned input so far. - nDst += copy(dst[nDst:], src[pSrc:nSrc]) - - // Append RuneError to the destination. - const runeError = "\ufffd" - if nDst+len(runeError) > len(dst) { - return nDst, nSrc, transform.ErrShortDst - } - nDst += copy(dst[nDst:], runeError) - - // Skip the maximal subpart of an ill-formed subsequence according to - // the W3C standard way instead of the Go way. This Transform is - // probably the only place in the text repo where it is warranted. - nSrc += size - pSrc = nSrc - - // Recompute the maximum source length. - if sz := len(dst) - nDst; sz < len(src)-nSrc { - err = transform.ErrShortDst - n = nSrc + sz - atEOF = false - } - } - return nDst + copy(dst[nDst:], src[pSrc:nSrc]), nSrc, err -} - -// UTF16 returns a UTF-16 Encoding for the given default endianness and byte -// order mark (BOM) policy. -// -// When decoding from UTF-16 to UTF-8, if the BOMPolicy is IgnoreBOM then -// neither BOMs U+FEFF nor noncharacters U+FFFE in the input stream will affect -// the endianness used for decoding, and will instead be output as their -// standard UTF-8 encodings: "\xef\xbb\xbf" and "\xef\xbf\xbe". If the BOMPolicy -// is UseBOM or ExpectBOM a staring BOM is not written to the UTF-8 output. -// Instead, it overrides the default endianness e for the remainder of the -// transformation. Any subsequent BOMs U+FEFF or noncharacters U+FFFE will not -// affect the endianness used, and will instead be output as their standard -// UTF-8 encodings. For UseBOM, if there is no starting BOM, it will proceed -// with the default Endianness. For ExpectBOM, in that case, the transformation -// will return early with an ErrMissingBOM error. -// -// When encoding from UTF-8 to UTF-16, a BOM will be inserted at the start of -// the output if the BOMPolicy is UseBOM or ExpectBOM. Otherwise, a BOM will not -// be inserted. The UTF-8 input does not need to contain a BOM. -// -// There is no concept of a 'native' endianness. 
If the UTF-16 data is produced -// and consumed in a greater context that implies a certain endianness, use -// IgnoreBOM. Otherwise, use ExpectBOM and always produce and consume a BOM. -// -// In the language of https://www.unicode.org/faq/utf_bom.html#bom10, IgnoreBOM -// corresponds to "Where the precise type of the data stream is known... the -// BOM should not be used" and ExpectBOM corresponds to "A particular -// protocol... may require use of the BOM". -func UTF16(e Endianness, b BOMPolicy) encoding.Encoding { - return utf16Encoding{config{e, b}, mibValue[e][b&bomMask]} -} - -// mibValue maps Endianness and BOMPolicy settings to MIB constants. Note that -// some configurations map to the same MIB identifier. RFC 2781 has requirements -// and recommendations. Some of the "configurations" are merely recommendations, -// so multiple configurations could match. -var mibValue = map[Endianness][numBOMValues]identifier.MIB{ - BigEndian: [numBOMValues]identifier.MIB{ - IgnoreBOM: identifier.UTF16BE, - UseBOM: identifier.UTF16, // BigEnding default is preferred by RFC 2781. - // TODO: acceptBOM | strictBOM would map to UTF16BE as well. - }, - LittleEndian: [numBOMValues]identifier.MIB{ - IgnoreBOM: identifier.UTF16LE, - UseBOM: identifier.UTF16, // LittleEndian default is allowed and preferred on Windows. - // TODO: acceptBOM | strictBOM would map to UTF16LE as well. - }, - // ExpectBOM is not widely used and has no valid MIB identifier. -} - -// All lists a configuration for each IANA-defined UTF-16 variant. -var All = []encoding.Encoding{ - UTF8, - UTF16(BigEndian, UseBOM), - UTF16(BigEndian, IgnoreBOM), - UTF16(LittleEndian, IgnoreBOM), -} - -// BOMPolicy is a UTF-16 encoding's byte order mark policy. -type BOMPolicy uint8 - -const ( - writeBOM BOMPolicy = 0x01 - acceptBOM BOMPolicy = 0x02 - requireBOM BOMPolicy = 0x04 - bomMask BOMPolicy = 0x07 - - // HACK: numBOMValues == 8 triggers a bug in the 1.4 compiler (cannot have a - // map of an array of length 8 of a type that is also used as a key or value - // in another map). See golang.org/issue/11354. - // TODO: consider changing this value back to 8 if the use of 1.4.* has - // been minimized. - numBOMValues = 8 + 1 - - // IgnoreBOM means to ignore any byte order marks. - IgnoreBOM BOMPolicy = 0 - // Common and RFC 2781-compliant interpretation for UTF-16BE/LE. - - // UseBOM means that the UTF-16 form may start with a byte order mark, which - // will be used to override the default encoding. - UseBOM BOMPolicy = writeBOM | acceptBOM - // Common and RFC 2781-compliant interpretation for UTF-16. - - // ExpectBOM means that the UTF-16 form must start with a byte order mark, - // which will be used to override the default encoding. - ExpectBOM BOMPolicy = writeBOM | acceptBOM | requireBOM - // Used in Java as Unicode (not to be confused with Java's UTF-16) and - // ICU's UTF-16,version=1. Not compliant with RFC 2781. - - // TODO (maybe): strictBOM: BOM must match Endianness. This would allow: - // - UTF-16(B|L)E,version=1: writeBOM | acceptBOM | requireBOM | strictBOM - // (UnicodeBig and UnicodeLittle in Java) - // - RFC 2781-compliant, but less common interpretation for UTF-16(B|L)E: - // acceptBOM | strictBOM (e.g. assigned to CheckBOM). - // This addition would be consistent with supporting ExpectBOM. -) - -// Endianness is a UTF-16 encoding's default endianness. -type Endianness bool - -const ( - // BigEndian is UTF-16BE. - BigEndian Endianness = false - // LittleEndian is UTF-16LE. 
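
The UTF16 constructor and the BOMPolicy values documented above behave as in the following round-trip sketch (illustrative only):

    package main

    import (
        "fmt"

        "golang.org/x/text/encoding/unicode"
    )

    func main() {
        // UseBOM writes a BOM when encoding and honors one when decoding.
        enc := unicode.UTF16(unicode.BigEndian, unicode.UseBOM)

        utf16Bytes, err := enc.NewEncoder().Bytes([]byte("hi"))
        if err != nil {
            panic(err)
        }
        fmt.Printf("% x\n", utf16Bytes) // fe ff 00 68 00 69 (BOM + "hi")

        back, err := enc.NewDecoder().Bytes(utf16Bytes)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(back)) // hi
    }
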
- LittleEndian Endianness = true -) - -// ErrMissingBOM means that decoding UTF-16 input with ExpectBOM did not find a -// starting byte order mark. -var ErrMissingBOM = errors.New("encoding: missing byte order mark") - -type utf16Encoding struct { - config - mib identifier.MIB -} - -type config struct { - endianness Endianness - bomPolicy BOMPolicy -} - -func (u utf16Encoding) NewDecoder() *encoding.Decoder { - return &encoding.Decoder{Transformer: &utf16Decoder{ - initial: u.config, - current: u.config, - }} -} - -func (u utf16Encoding) NewEncoder() *encoding.Encoder { - return &encoding.Encoder{Transformer: &utf16Encoder{ - endianness: u.endianness, - initialBOMPolicy: u.bomPolicy, - currentBOMPolicy: u.bomPolicy, - }} -} - -func (u utf16Encoding) ID() (mib identifier.MIB, other string) { - return u.mib, "" -} - -func (u utf16Encoding) String() string { - e, b := "B", "" - if u.endianness == LittleEndian { - e = "L" - } - switch u.bomPolicy { - case ExpectBOM: - b = "Expect" - case UseBOM: - b = "Use" - case IgnoreBOM: - b = "Ignore" - } - return "UTF-16" + e + "E (" + b + " BOM)" -} - -type utf16Decoder struct { - initial config - current config -} - -func (u *utf16Decoder) Reset() { - u.current = u.initial -} - -func (u *utf16Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - if len(src) < 2 && atEOF && u.current.bomPolicy&requireBOM != 0 { - return 0, 0, ErrMissingBOM - } - if len(src) == 0 { - return 0, 0, nil - } - if len(src) >= 2 && u.current.bomPolicy&acceptBOM != 0 { - switch { - case src[0] == 0xfe && src[1] == 0xff: - u.current.endianness = BigEndian - nSrc = 2 - case src[0] == 0xff && src[1] == 0xfe: - u.current.endianness = LittleEndian - nSrc = 2 - default: - if u.current.bomPolicy&requireBOM != 0 { - return 0, 0, ErrMissingBOM - } - } - u.current.bomPolicy = IgnoreBOM - } - - var r rune - var dSize, sSize int - for nSrc < len(src) { - if nSrc+1 < len(src) { - x := uint16(src[nSrc+0])<<8 | uint16(src[nSrc+1]) - if u.current.endianness == LittleEndian { - x = x>>8 | x<<8 - } - r, sSize = rune(x), 2 - if utf16.IsSurrogate(r) { - if nSrc+3 < len(src) { - x = uint16(src[nSrc+2])<<8 | uint16(src[nSrc+3]) - if u.current.endianness == LittleEndian { - x = x>>8 | x<<8 - } - // Save for next iteration if it is not a high surrogate. - if isHighSurrogate(rune(x)) { - r, sSize = utf16.DecodeRune(r, rune(x)), 4 - } - } else if !atEOF { - err = transform.ErrShortSrc - break - } - } - if dSize = utf8.RuneLen(r); dSize < 0 { - r, dSize = utf8.RuneError, 3 - } - } else if atEOF { - // Single trailing byte. - r, dSize, sSize = utf8.RuneError, 3, 1 - } else { - err = transform.ErrShortSrc - break - } - if nDst+dSize > len(dst) { - err = transform.ErrShortDst - break - } - nDst += utf8.EncodeRune(dst[nDst:], r) - nSrc += sSize - } - return nDst, nSrc, err -} - -func isHighSurrogate(r rune) bool { - return 0xDC00 <= r && r <= 0xDFFF -} - -type utf16Encoder struct { - endianness Endianness - initialBOMPolicy BOMPolicy - currentBOMPolicy BOMPolicy -} - -func (u *utf16Encoder) Reset() { - u.currentBOMPolicy = u.initialBOMPolicy -} - -func (u *utf16Encoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - if u.currentBOMPolicy&writeBOM != 0 { - if len(dst) < 2 { - return 0, 0, transform.ErrShortDst - } - dst[0], dst[1] = 0xfe, 0xff - u.currentBOMPolicy = IgnoreBOM - nDst = 2 - } - - r, size := rune(0), 0 - for nSrc < len(src) { - r = rune(src[nSrc]) - - // Decode a 1-byte rune. 
- if r < utf8.RuneSelf { - size = 1 - - } else { - // Decode a multi-byte rune. - r, size = utf8.DecodeRune(src[nSrc:]) - if size == 1 { - // All valid runes of size 1 (those below utf8.RuneSelf) were - // handled above. We have invalid UTF-8 or we haven't seen the - // full character yet. - if !atEOF && !utf8.FullRune(src[nSrc:]) { - err = transform.ErrShortSrc - break - } - } - } - - if r <= 0xffff { - if nDst+2 > len(dst) { - err = transform.ErrShortDst - break - } - dst[nDst+0] = uint8(r >> 8) - dst[nDst+1] = uint8(r) - nDst += 2 - } else { - if nDst+4 > len(dst) { - err = transform.ErrShortDst - break - } - r1, r2 := utf16.EncodeRune(r) - dst[nDst+0] = uint8(r1 >> 8) - dst[nDst+1] = uint8(r1) - dst[nDst+2] = uint8(r2 >> 8) - dst[nDst+3] = uint8(r2) - nDst += 4 - } - nSrc += size - } - - if u.endianness == LittleEndian { - for i := 0; i < nDst; i += 2 { - dst[i], dst[i+1] = dst[i+1], dst[i] - } - } - return nDst, nSrc, err -} diff --git a/src/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go b/src/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go deleted file mode 100644 index e5c53b1b3..000000000 --- a/src/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package utf8internal contains low-level utf8-related constants, tables, etc. -// that are used internally by the text package. -package utf8internal - -// The default lowest and highest continuation byte. -const ( - LoCB = 0x80 // 1000 0000 - HiCB = 0xBF // 1011 1111 -) - -// Constants related to getting information of first bytes of UTF-8 sequences. -const ( - // ASCII identifies a UTF-8 byte as ASCII. - ASCII = as - - // FirstInvalid indicates a byte is invalid as a first byte of a UTF-8 - // sequence. - FirstInvalid = xx - - // SizeMask is a mask for the size bits. Use use x&SizeMask to get the size. - SizeMask = 7 - - // AcceptShift is the right-shift count for the first byte info byte to get - // the index into the AcceptRanges table. See AcceptRanges. - AcceptShift = 4 - - // The names of these constants are chosen to give nice alignment in the - // table below. The first nibble is an index into acceptRanges or F for - // special one-byte cases. The second nibble is the Rune length or the - // Status for the special one-byte case. - xx = 0xF1 // invalid: size 1 - as = 0xF0 // ASCII: size 1 - s1 = 0x02 // accept 0, size 2 - s2 = 0x13 // accept 1, size 3 - s3 = 0x03 // accept 0, size 3 - s4 = 0x23 // accept 2, size 3 - s5 = 0x34 // accept 3, size 4 - s6 = 0x04 // accept 0, size 4 - s7 = 0x44 // accept 4, size 4 -) - -// First is information about the first byte in a UTF-8 sequence. 
-var First = [256]uint8{ - // 1 2 3 4 5 6 7 8 9 A B C D E F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F - as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F - // 1 2 3 4 5 6 7 8 9 A B C D E F - xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F - xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F - xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF - xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF - xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF - s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF - s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF - s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF -} - -// AcceptRange gives the range of valid values for the second byte in a UTF-8 -// sequence for any value for First that is not ASCII or FirstInvalid. -type AcceptRange struct { - Lo uint8 // lowest value for second byte. - Hi uint8 // highest value for second byte. -} - -// AcceptRanges is a slice of AcceptRange values. For a given byte sequence b -// -// AcceptRanges[First[b[0]]>>AcceptShift] -// -// will give the value of AcceptRange for the multi-byte UTF-8 sequence starting -// at b[0]. -var AcceptRanges = [...]AcceptRange{ - 0: {LoCB, HiCB}, - 1: {0xA0, HiCB}, - 2: {LoCB, 0x9F}, - 3: {0x90, HiCB}, - 4: {LoCB, 0x8F}, -} diff --git a/src/vendor/helm.sh/helm/v3/cmd/helm/search/search.go b/src/vendor/helm.sh/helm/v3/cmd/helm/search/search.go deleted file mode 100644 index fc7f30596..000000000 --- a/src/vendor/helm.sh/helm/v3/cmd/helm/search/search.go +++ /dev/null @@ -1,227 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/*Package search provides client-side repository searching. - -This supports building an in-memory search index based on the contents of -multiple repositories, and then using string matching or regular expressions -to find matches. -*/ -package search - -import ( - "path" - "regexp" - "sort" - "strings" - - "github.com/Masterminds/semver/v3" - - "helm.sh/helm/v3/pkg/repo" -) - -// Result is a search result. -// -// Score indicates how close it is to match. The higher the score, the longer -// the distance. -type Result struct { - Name string - Score int - Chart *repo.ChartVersion -} - -// Index is a searchable index of chart information. 
-type Index struct { - lines map[string]string - charts map[string]*repo.ChartVersion -} - -const sep = "\v" - -// NewIndex creates a new Index. -func NewIndex() *Index { - return &Index{lines: map[string]string{}, charts: map[string]*repo.ChartVersion{}} -} - -// verSep is a separator for version fields in map keys. -const verSep = "$$" - -// AddRepo adds a repository index to the search index. -func (i *Index) AddRepo(rname string, ind *repo.IndexFile, all bool) { - ind.SortEntries() - for name, ref := range ind.Entries { - if len(ref) == 0 { - // Skip chart names that have zero releases. - continue - } - // By convention, an index file is supposed to have the newest at the - // 0 slot, so our best bet is to grab the 0 entry and build the index - // entry off of that. - // Note: Do not use filePath.Join since on Windows it will return \ - // which results in a repo name that cannot be understood. - fname := path.Join(rname, name) - if !all { - i.lines[fname] = indstr(rname, ref[0]) - i.charts[fname] = ref[0] - continue - } - - // If 'all' is set, then we go through all of the refs, and add them all - // to the index. This will generate a lot of near-duplicate entries. - for _, rr := range ref { - versionedName := fname + verSep + rr.Version - i.lines[versionedName] = indstr(rname, rr) - i.charts[versionedName] = rr - } - } -} - -// All returns all charts in the index as if they were search results. -// -// Each will be given a score of 0. -func (i *Index) All() []*Result { - res := make([]*Result, len(i.charts)) - j := 0 - for name, ch := range i.charts { - parts := strings.Split(name, verSep) - res[j] = &Result{ - Name: parts[0], - Chart: ch, - } - j++ - } - return res -} - -// Search searches an index for the given term. -// -// Threshold indicates the maximum score a term may have before being marked -// irrelevant. (Low score means higher relevance. Golf, not bowling.) -// -// If regexp is true, the term is treated as a regular expression. Otherwise, -// term is treated as a literal string. -func (i *Index) Search(term string, threshold int, regexp bool) ([]*Result, error) { - if regexp { - return i.SearchRegexp(term, threshold) - } - return i.SearchLiteral(term, threshold), nil -} - -// calcScore calculates a score for a match. -func (i *Index) calcScore(index int, matchline string) int { - - // This is currently tied to the fact that sep is a single char. - splits := []int{} - s := rune(sep[0]) - for i, ch := range matchline { - if ch == s { - splits = append(splits, i) - } - } - - for i, pos := range splits { - if index > pos { - continue - } - return i - } - return len(splits) -} - -// SearchLiteral does a literal string search (no regexp). -func (i *Index) SearchLiteral(term string, threshold int) []*Result { - term = strings.ToLower(term) - buf := []*Result{} - for k, v := range i.lines { - lk := strings.ToLower(k) - lv := strings.ToLower(v) - res := strings.Index(lv, term) - if score := i.calcScore(res, lv); res != -1 && score < threshold { - parts := strings.Split(lk, verSep) // Remove version, if it is there. - buf = append(buf, &Result{Name: parts[0], Score: score, Chart: i.charts[k]}) - } - } - return buf -} - -// SearchRegexp searches using a regular expression. 
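
To make the scoring rules above concrete: indstr builds one '\v'-separated line per chart (name, repo/name, description, keywords), and calcScore returns the ordinal of the field in which the match starts, so lower scores are better. A small standalone sketch of the same idea (not part of the patch):

    package main

    import (
        "fmt"
        "strings"
    )

    // fieldScore mirrors calcScore above: it returns the ordinal of the
    // '\v'-separated field in which the match begins (0 = chart name).
    func fieldScore(matchIndex int, line string) int {
        score := 0
        for i := 0; i < len(line) && i < matchIndex; i++ {
            if line[i] == '\v' {
                score++
            }
        }
        return score
    }

    func main() {
        line := "nginx\vstable/nginx\vA web server\vhttp proxy"
        fmt.Println(fieldScore(strings.Index(line, "nginx"), line)) // 0: hit in the name field
        fmt.Println(fieldScore(strings.Index(line, "proxy"), line)) // 3: hit only in the keywords field
    }
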
-func (i *Index) SearchRegexp(re string, threshold int) ([]*Result, error) { - matcher, err := regexp.Compile(re) - if err != nil { - return []*Result{}, err - } - buf := []*Result{} - for k, v := range i.lines { - ind := matcher.FindStringIndex(v) - if len(ind) == 0 { - continue - } - if score := i.calcScore(ind[0], v); ind[0] >= 0 && score < threshold { - parts := strings.Split(k, verSep) // Remove version, if it is there. - buf = append(buf, &Result{Name: parts[0], Score: score, Chart: i.charts[k]}) - } - } - return buf, nil -} - -// SortScore does an in-place sort of the results. -// -// Lowest scores are highest on the list. Matching scores are subsorted alphabetically. -func SortScore(r []*Result) { - sort.Sort(scoreSorter(r)) -} - -// scoreSorter sorts results by score, and subsorts by alpha Name. -type scoreSorter []*Result - -// Len returns the length of this scoreSorter. -func (s scoreSorter) Len() int { return len(s) } - -// Swap performs an in-place swap. -func (s scoreSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// Less compares a to b, and returns true if a is less than b. -func (s scoreSorter) Less(a, b int) bool { - first := s[a] - second := s[b] - - if first.Score > second.Score { - return false - } - if first.Score < second.Score { - return true - } - if first.Name == second.Name { - v1, err := semver.NewVersion(first.Chart.Version) - if err != nil { - return true - } - v2, err := semver.NewVersion(second.Chart.Version) - if err != nil { - return true - } - // Sort so that the newest chart is higher than the oldest chart. This is - // the opposite of what you'd expect in a function called Less. - return v1.GreaterThan(v2) - } - return first.Name < second.Name -} - -func indstr(name string, ref *repo.ChartVersion) string { - i := ref.Name + sep + name + "/" + ref.Name + sep + - ref.Description + sep + strings.Join(ref.Keywords, " ") - return i -} diff --git a/src/vendor/helm.sh/helm/v3/internal/fileutil/fileutil.go b/src/vendor/helm.sh/helm/v3/internal/fileutil/fileutil.go deleted file mode 100644 index 739093f3b..000000000 --- a/src/vendor/helm.sh/helm/v3/internal/fileutil/fileutil.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fileutil - -import ( - "io" - "io/ioutil" - "os" - "path/filepath" - - "helm.sh/helm/v3/internal/third_party/dep/fs" -) - -// AtomicWriteFile atomically (as atomic as os.Rename allows) writes a file to a -// disk. 
-func AtomicWriteFile(filename string, reader io.Reader, mode os.FileMode) error { - tempFile, err := ioutil.TempFile(filepath.Split(filename)) - if err != nil { - return err - } - tempName := tempFile.Name() - - if _, err := io.Copy(tempFile, reader); err != nil { - tempFile.Close() // return value is ignored as we are already on error path - return err - } - - if err := tempFile.Close(); err != nil { - return err - } - - if err := os.Chmod(tempName, mode); err != nil { - return err - } - - return fs.RenameWithFallback(tempName, filename) -} diff --git a/src/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go b/src/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go deleted file mode 100644 index 4e4eacc60..000000000 --- a/src/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/fs.go +++ /dev/null @@ -1,372 +0,0 @@ -/* -Copyright (c) for portions of fs.go are held by The Go Authors, 2016 and are provided under -the BSD license. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -package fs - -import ( - "io" - "os" - "path/filepath" - "runtime" - "syscall" - - "github.com/pkg/errors" -) - -// fs contains a copy of a few functions from dep tool code to avoid a dependency on golang/dep. -// This code is copied from https://github.com/golang/dep/blob/37d6c560cdf407be7b6cd035b23dba89df9275cf/internal/fs/fs.go -// No changes to the code were made other than removing some unused functions - -// RenameWithFallback attempts to rename a file or directory, but falls back to -// copying in the event of a cross-device link error. If the fallback copy -// succeeds, src is still removed, emulating normal rename behavior. -func RenameWithFallback(src, dst string) error { - _, err := os.Stat(src) - if err != nil { - return errors.Wrapf(err, "cannot stat %s", src) - } - - err = os.Rename(src, dst) - if err == nil { - return nil - } - - return renameFallback(err, src, dst) -} - -// renameByCopy attempts to rename a file or directory by copying it to the -// destination and then removing the src thus emulating the rename behavior. 
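
AtomicWriteFile above is the usual temp-file-then-rename pattern; a standalone sketch of that pattern (without the cross-device fallback it delegates to fs.RenameWithFallback):

    package main

    import (
        "io"
        "os"
        "path/filepath"
        "strings"
    )

    // atomicWrite stages the whole payload in a temp file in the target
    // directory, then renames it into place so readers never observe a
    // partially written file.
    func atomicWrite(filename string, r io.Reader, mode os.FileMode) error {
        dir, base := filepath.Split(filename)
        tmp, err := os.CreateTemp(dir, base)
        if err != nil {
            return err
        }
        if _, err := io.Copy(tmp, r); err != nil {
            tmp.Close()
            return err
        }
        if err := tmp.Close(); err != nil {
            return err
        }
        if err := os.Chmod(tmp.Name(), mode); err != nil {
            return err
        }
        return os.Rename(tmp.Name(), filename)
    }

    func main() {
        if err := atomicWrite("index.yaml", strings.NewReader("entries: {}\n"), 0o644); err != nil {
            panic(err)
        }
    }
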
-func renameByCopy(src, dst string) error { - var cerr error - if dir, _ := IsDir(src); dir { - cerr = CopyDir(src, dst) - if cerr != nil { - cerr = errors.Wrap(cerr, "copying directory failed") - } - } else { - cerr = copyFile(src, dst) - if cerr != nil { - cerr = errors.Wrap(cerr, "copying file failed") - } - } - - if cerr != nil { - return errors.Wrapf(cerr, "rename fallback failed: cannot rename %s to %s", src, dst) - } - - return errors.Wrapf(os.RemoveAll(src), "cannot delete %s", src) -} - -var ( - errSrcNotDir = errors.New("source is not a directory") - errDstExist = errors.New("destination already exists") -) - -// CopyDir recursively copies a directory tree, attempting to preserve permissions. -// Source directory must exist, destination directory must *not* exist. -func CopyDir(src, dst string) error { - src = filepath.Clean(src) - dst = filepath.Clean(dst) - - // We use os.Lstat() here to ensure we don't fall in a loop where a symlink - // actually links to a one of its parent directories. - fi, err := os.Lstat(src) - if err != nil { - return err - } - if !fi.IsDir() { - return errSrcNotDir - } - - _, err = os.Stat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - return errDstExist - } - - if err = os.MkdirAll(dst, fi.Mode()); err != nil { - return errors.Wrapf(err, "cannot mkdir %s", dst) - } - - entries, err := os.ReadDir(src) - if err != nil { - return errors.Wrapf(err, "cannot read directory %s", dst) - } - - for _, entry := range entries { - srcPath := filepath.Join(src, entry.Name()) - dstPath := filepath.Join(dst, entry.Name()) - - if entry.IsDir() { - if err = CopyDir(srcPath, dstPath); err != nil { - return errors.Wrap(err, "copying directory failed") - } - } else { - // This will include symlinks, which is what we want when - // copying things. - if err = copyFile(srcPath, dstPath); err != nil { - return errors.Wrap(err, "copying file failed") - } - } - } - - return nil -} - -// copyFile copies the contents of the file named src to the file named -// by dst. The file will be created if it does not already exist. If the -// destination file exists, all its contents will be replaced by the contents -// of the source file. The file mode will be copied from the source. -func copyFile(src, dst string) (err error) { - if sym, err := IsSymlink(src); err != nil { - return errors.Wrap(err, "symlink check failed") - } else if sym { - if err := cloneSymlink(src, dst); err != nil { - if runtime.GOOS == "windows" { - // If cloning the symlink fails on Windows because the user - // does not have the required privileges, ignore the error and - // fall back to copying the file contents. 
- // - // ERROR_PRIVILEGE_NOT_HELD is 1314 (0x522): - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms681385(v=vs.85).aspx - if lerr, ok := err.(*os.LinkError); ok && lerr.Err != syscall.Errno(1314) { - return err - } - } else { - return err - } - } else { - return nil - } - } - - in, err := os.Open(src) - if err != nil { - return - } - defer in.Close() - - out, err := os.Create(dst) - if err != nil { - return - } - - if _, err = io.Copy(out, in); err != nil { - out.Close() - return - } - - // Check for write errors on Close - if err = out.Close(); err != nil { - return - } - - si, err := os.Stat(src) - if err != nil { - return - } - - // Temporary fix for Go < 1.9 - // - // See: https://github.com/golang/dep/issues/774 - // and https://github.com/golang/go/issues/20829 - if runtime.GOOS == "windows" { - dst = fixLongPath(dst) - } - err = os.Chmod(dst, si.Mode()) - - return -} - -// cloneSymlink will create a new symlink that points to the resolved path of sl. -// If sl is a relative symlink, dst will also be a relative symlink. -func cloneSymlink(sl, dst string) error { - resolved, err := os.Readlink(sl) - if err != nil { - return err - } - - return os.Symlink(resolved, dst) -} - -// IsDir determines is the path given is a directory or not. -func IsDir(name string) (bool, error) { - fi, err := os.Stat(name) - if err != nil { - return false, err - } - if !fi.IsDir() { - return false, errors.Errorf("%q is not a directory", name) - } - return true, nil -} - -// IsSymlink determines if the given path is a symbolic link. -func IsSymlink(path string) (bool, error) { - l, err := os.Lstat(path) - if err != nil { - return false, err - } - - return l.Mode()&os.ModeSymlink == os.ModeSymlink, nil -} - -// fixLongPath returns the extended-length (\\?\-prefixed) form of -// path when needed, in order to avoid the default 260 character file -// path limit imposed by Windows. If path is not easily converted to -// the extended-length form (for example, if path is a relative path -// or contains .. elements), or is short enough, fixLongPath returns -// path unmodified. -// -// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath -func fixLongPath(path string) string { - // Do nothing (and don't allocate) if the path is "short". - // Empirically (at least on the Windows Server 2013 builder), - // the kernel is arbitrarily okay with < 248 bytes. That - // matches what the docs above say: - // "When using an API to create a directory, the specified - // path cannot be so long that you cannot append an 8.3 file - // name (that is, the directory name cannot exceed MAX_PATH - // minus 12)." Since MAX_PATH is 260, 260 - 12 = 248. - // - // The MSDN docs appear to say that a normal path that is 248 bytes long - // will work; empirically the path must be less then 248 bytes long. - if len(path) < 248 { - // Don't fix. (This is how Go 1.7 and earlier worked, - // not automatically generating the \\?\ form) - return path - } - - // The extended form begins with \\?\, as in - // \\?\c:\windows\foo.txt or \\?\UNC\server\share\foo.txt. - // The extended form disables evaluation of . and .. path - // elements and disables the interpretation of / as equivalent - // to \. The conversion here rewrites / to \ and elides - // . elements as well as trailing or duplicate separators. For - // simplicity it avoids the conversion entirely for relative - // paths or paths containing .. elements. 
For now, - // \\server\share paths are not converted to - // \\?\UNC\server\share paths because the rules for doing so - // are less well-specified. - if len(path) >= 2 && path[:2] == `\\` { - // Don't canonicalize UNC paths. - return path - } - if !isAbs(path) { - // Relative path - return path - } - - const prefix = `\\?` - - pathbuf := make([]byte, len(prefix)+len(path)+len(`\`)) - copy(pathbuf, prefix) - n := len(path) - r, w := 0, len(prefix) - for r < n { - switch { - case os.IsPathSeparator(path[r]): - // empty block - r++ - case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])): - // /./ - r++ - case r+1 < n && path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])): - // /../ is currently unhandled - return path - default: - pathbuf[w] = '\\' - w++ - for ; r < n && !os.IsPathSeparator(path[r]); r++ { - pathbuf[w] = path[r] - w++ - } - } - } - // A drive's root directory needs a trailing \ - if w == len(`\\?\c:`) { - pathbuf[w] = '\\' - w++ - } - return string(pathbuf[:w]) -} - -func isAbs(path string) (b bool) { - v := volumeName(path) - if v == "" { - return false - } - path = path[len(v):] - if path == "" { - return false - } - return os.IsPathSeparator(path[0]) -} - -func volumeName(path string) (v string) { - if len(path) < 2 { - return "" - } - // with drive letter - c := path[0] - if path[1] == ':' && - ('0' <= c && c <= '9' || 'a' <= c && c <= 'z' || - 'A' <= c && c <= 'Z') { - return path[:2] - } - // is it UNC - if l := len(path); l >= 5 && os.IsPathSeparator(path[0]) && os.IsPathSeparator(path[1]) && - !os.IsPathSeparator(path[2]) && path[2] != '.' { - // first, leading `\\` and next shouldn't be `\`. its server name. - for n := 3; n < l-1; n++ { - // second, next '\' shouldn't be repeated. - if os.IsPathSeparator(path[n]) { - n++ - // third, following something characters. its share name. - if !os.IsPathSeparator(path[n]) { - if path[n] == '.' { - break - } - for ; n < l; n++ { - if os.IsPathSeparator(path[n]) { - break - } - } - return path[:n] - } - break - } - } - } - return "" -} diff --git a/src/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename.go b/src/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename.go deleted file mode 100644 index a3e5e56a6..000000000 --- a/src/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename.go +++ /dev/null @@ -1,58 +0,0 @@ -//go:build !windows - -/* -Copyright (c) for portions of rename.go are held by The Go Authors, 2016 and are provided under -the BSD license. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -package fs - -import ( - "os" - "syscall" - - "github.com/pkg/errors" -) - -// renameFallback attempts to determine the appropriate fallback to failed rename -// operation depending on the resulting error. -func renameFallback(err error, src, dst string) error { - // Rename may fail if src and dst are on different devices; fall back to - // copy if we detect that case. syscall.EXDEV is the common name for the - // cross device link error which has varying output text across different - // operating systems. - terr, ok := err.(*os.LinkError) - if !ok { - return err - } else if terr.Err != syscall.EXDEV { - return errors.Wrapf(terr, "link error: cannot rename %s to %s", src, dst) - } - - return renameByCopy(src, dst) -} diff --git a/src/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename_windows.go b/src/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename_windows.go deleted file mode 100644 index a377720a6..000000000 --- a/src/vendor/helm.sh/helm/v3/internal/third_party/dep/fs/rename_windows.go +++ /dev/null @@ -1,69 +0,0 @@ -//go:build windows - -/* -Copyright (c) for portions of rename_windows.go are held by The Go Authors, 2016 and are provided under -the BSD license. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -package fs - -import ( - "os" - "syscall" - - "github.com/pkg/errors" -) - -// renameFallback attempts to determine the appropriate fallback to failed rename -// operation depending on the resulting error. -func renameFallback(err error, src, dst string) error { - // Rename may fail if src and dst are on different devices; fall back to - // copy if we detect that case. 
syscall.EXDEV is the common name for the - // cross device link error which has varying output text across different - // operating systems. - terr, ok := err.(*os.LinkError) - if !ok { - return err - } - - if terr.Err != syscall.EXDEV { - // In windows it can drop down to an operating system call that - // returns an operating system error with a different number and - // message. Checking for that as a fall back. - noerr, ok := terr.Err.(syscall.Errno) - - // 0x11 (ERROR_NOT_SAME_DEVICE) is the windows error. - // See https://msdn.microsoft.com/en-us/library/cc231199.aspx - if ok && noerr != 0x11 { - return errors.Wrapf(terr, "link error: cannot rename %s to %s", src, dst) - } - } - - return renameByCopy(src, dst) -} diff --git a/src/vendor/helm.sh/helm/v3/internal/tlsutil/cfg.go b/src/vendor/helm.sh/helm/v3/internal/tlsutil/cfg.go deleted file mode 100644 index 8b9d4329f..000000000 --- a/src/vendor/helm.sh/helm/v3/internal/tlsutil/cfg.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package tlsutil - -import ( - "crypto/tls" - "crypto/x509" - "os" - - "github.com/pkg/errors" -) - -// Options represents configurable options used to create client and server TLS configurations. -type Options struct { - CaCertFile string - // If either the KeyFile or CertFile is empty, ClientConfig() will not load them. - KeyFile string - CertFile string - // Client-only options - InsecureSkipVerify bool -} - -// ClientConfig returns a TLS configuration for use by a Helm client. -func ClientConfig(opts Options) (cfg *tls.Config, err error) { - var cert *tls.Certificate - var pool *x509.CertPool - - if opts.CertFile != "" || opts.KeyFile != "" { - if cert, err = CertFromFilePair(opts.CertFile, opts.KeyFile); err != nil { - if os.IsNotExist(err) { - return nil, errors.Wrapf(err, "could not load x509 key pair (cert: %q, key: %q)", opts.CertFile, opts.KeyFile) - } - return nil, errors.Wrapf(err, "could not read x509 key pair (cert: %q, key: %q)", opts.CertFile, opts.KeyFile) - } - } - if !opts.InsecureSkipVerify && opts.CaCertFile != "" { - if pool, err = CertPoolFromFile(opts.CaCertFile); err != nil { - return nil, err - } - } - - cfg = &tls.Config{InsecureSkipVerify: opts.InsecureSkipVerify, Certificates: []tls.Certificate{*cert}, RootCAs: pool} - return cfg, nil -} diff --git a/src/vendor/helm.sh/helm/v3/internal/tlsutil/tls.go b/src/vendor/helm.sh/helm/v3/internal/tlsutil/tls.go deleted file mode 100644 index ed7795dbe..000000000 --- a/src/vendor/helm.sh/helm/v3/internal/tlsutil/tls.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package tlsutil - -import ( - "crypto/tls" - "crypto/x509" - "io/ioutil" - - "github.com/pkg/errors" -) - -// NewClientTLS returns tls.Config appropriate for client auth. -func NewClientTLS(certFile, keyFile, caFile string) (*tls.Config, error) { - config := tls.Config{} - - if certFile != "" && keyFile != "" { - cert, err := CertFromFilePair(certFile, keyFile) - if err != nil { - return nil, err - } - config.Certificates = []tls.Certificate{*cert} - } - - if caFile != "" { - cp, err := CertPoolFromFile(caFile) - if err != nil { - return nil, err - } - config.RootCAs = cp - } - - return &config, nil -} - -// CertPoolFromFile returns an x509.CertPool containing the certificates -// in the given PEM-encoded file. -// Returns an error if the file could not be read, a certificate could not -// be parsed, or if the file does not contain any certificates -func CertPoolFromFile(filename string) (*x509.CertPool, error) { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, errors.Errorf("can't read CA file: %v", filename) - } - cp := x509.NewCertPool() - if !cp.AppendCertsFromPEM(b) { - return nil, errors.Errorf("failed to append certificates from file: %s", filename) - } - return cp, nil -} - -// CertFromFilePair returns an tls.Certificate containing the -// certificates public/private key pair from a pair of given PEM-encoded files. -// Returns an error if the file could not be read, a certificate could not -// be parsed, or if the file does not contain any certificates -func CertFromFilePair(certFile, keyFile string) (*tls.Certificate, error) { - cert, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return nil, errors.Wrapf(err, "can't load key pair from cert %s and key %s", certFile, keyFile) - } - return &cert, err -} diff --git a/src/vendor/helm.sh/helm/v3/internal/urlutil/urlutil.go b/src/vendor/helm.sh/helm/v3/internal/urlutil/urlutil.go deleted file mode 100644 index a8cf7398c..000000000 --- a/src/vendor/helm.sh/helm/v3/internal/urlutil/urlutil.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package urlutil - -import ( - "net/url" - "path" - "path/filepath" -) - -// URLJoin joins a base URL to one or more path components. -// -// It's like filepath.Join for URLs. If the baseURL is pathish, this will still -// perform a join. -// -// If the URL is unparsable, this returns an error. -func URLJoin(baseURL string, paths ...string) (string, error) { - u, err := url.Parse(baseURL) - if err != nil { - return "", err - } - // We want path instead of filepath because path always uses /. 
- all := []string{u.Path} - all = append(all, paths...) - u.Path = path.Join(all...) - return u.String(), nil -} - -// Equal normalizes two URLs and then compares for equality. -func Equal(a, b string) bool { - au, err := url.Parse(a) - if err != nil { - a = filepath.Clean(a) - b = filepath.Clean(b) - // If urls are paths, return true only if they are an exact match - return a == b - } - bu, err := url.Parse(b) - if err != nil { - return false - } - - for _, u := range []*url.URL{au, bu} { - if u.Path == "" { - u.Path = "/" - } - u.Path = filepath.Clean(u.Path) - } - return au.String() == bu.String() -} - -// ExtractHostname returns hostname from URL -func ExtractHostname(addr string) (string, error) { - u, err := url.Parse(addr) - if err != nil { - return "", err - } - return u.Hostname(), nil -} diff --git a/src/vendor/helm.sh/helm/v3/internal/version/version.go b/src/vendor/helm.sh/helm/v3/internal/version/version.go deleted file mode 100644 index bf2af39bb..000000000 --- a/src/vendor/helm.sh/helm/v3/internal/version/version.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package version // import "helm.sh/helm/v3/internal/version" - -import ( - "flag" - "runtime" - "strings" -) - -var ( - // version is the current version of Helm. - // Update this whenever making a new release. - // The version is of the format Major.Minor.Patch[-Prerelease][+BuildMetadata] - // - // Increment major number for new feature additions and behavioral changes. - // Increment minor number for bug fixes and performance enhancements. - version = "v3.10" - - // metadata is extra build time data - metadata = "" - // gitCommit is the git sha1 - gitCommit = "" - // gitTreeState is the state of the git tree - gitTreeState = "" -) - -// BuildInfo describes the compile time information. -type BuildInfo struct { - // Version is the current semver. - Version string `json:"version,omitempty"` - // GitCommit is the git sha1. - GitCommit string `json:"git_commit,omitempty"` - // GitTreeState is the state of the git tree. - GitTreeState string `json:"git_tree_state,omitempty"` - // GoVersion is the version of the Go compiler used. 
- GoVersion string `json:"go_version,omitempty"` -} - -// GetVersion returns the semver string of the version -func GetVersion() string { - if metadata == "" { - return version - } - return version + "+" + metadata -} - -// GetUserAgent returns a user agent for user with an HTTP client -func GetUserAgent() string { - return "Helm/" + strings.TrimPrefix(GetVersion(), "v") -} - -// Get returns build info -func Get() BuildInfo { - v := BuildInfo{ - Version: GetVersion(), - GitCommit: gitCommit, - GitTreeState: gitTreeState, - GoVersion: runtime.Version(), - } - - // HACK(bacongobbler): strip out GoVersion during a test run for consistent test output - if flag.Lookup("test.v") != nil { - v.GoVersion = "" - } - return v -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go b/src/vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go deleted file mode 100644 index 5f57e11a5..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/chartutil/capabilities.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package chartutil - -import ( - "fmt" - "strconv" - - "github.com/Masterminds/semver/v3" - "k8s.io/client-go/kubernetes/scheme" - - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - - helmversion "helm.sh/helm/v3/internal/version" -) - -var ( - // The Kubernetes version can be set by LDFLAGS. In order to do that the value - // must be a string. - k8sVersionMajor = "1" - k8sVersionMinor = "20" - - // DefaultVersionSet is the default version set, which includes only Core V1 ("v1"). - DefaultVersionSet = allKnownVersions() - - // DefaultCapabilities is the default set of capabilities. - DefaultCapabilities = &Capabilities{ - KubeVersion: KubeVersion{ - Version: fmt.Sprintf("v%s.%s.0", k8sVersionMajor, k8sVersionMinor), - Major: k8sVersionMajor, - Minor: k8sVersionMinor, - }, - APIVersions: DefaultVersionSet, - HelmVersion: helmversion.Get(), - } -) - -// Capabilities describes the capabilities of the Kubernetes cluster. -type Capabilities struct { - // KubeVersion is the Kubernetes version. - KubeVersion KubeVersion - // APIversions are supported Kubernetes API versions. - APIVersions VersionSet - // HelmVersion is the build information for this helm version - HelmVersion helmversion.BuildInfo -} - -func (capabilities *Capabilities) Copy() *Capabilities { - return &Capabilities{ - KubeVersion: capabilities.KubeVersion, - APIVersions: capabilities.APIVersions, - HelmVersion: capabilities.HelmVersion, - } -} - -// KubeVersion is the Kubernetes version. -type KubeVersion struct { - Version string // Kubernetes version - Major string // Kubernetes major version - Minor string // Kubernetes minor version -} - -// String implements fmt.Stringer -func (kv *KubeVersion) String() string { return kv.Version } - -// GitVersion returns the Kubernetes version string. -// -// Deprecated: use KubeVersion.Version. 
-func (kv *KubeVersion) GitVersion() string { return kv.Version } - -// ParseKubeVersion parses kubernetes version from string -func ParseKubeVersion(version string) (*KubeVersion, error) { - sv, err := semver.NewVersion(version) - if err != nil { - return nil, err - } - return &KubeVersion{ - Version: "v" + sv.String(), - Major: strconv.FormatUint(sv.Major(), 10), - Minor: strconv.FormatUint(sv.Minor(), 10), - }, nil -} - -// VersionSet is a set of Kubernetes API versions. -type VersionSet []string - -// Has returns true if the version string is in the set. -// -// vs.Has("apps/v1") -func (v VersionSet) Has(apiVersion string) bool { - for _, x := range v { - if x == apiVersion { - return true - } - } - return false -} - -func allKnownVersions() VersionSet { - // We should register the built in extension APIs as well so CRDs are - // supported in the default version set. This has caused problems with `helm - // template` in the past, so let's be safe - apiextensionsv1beta1.AddToScheme(scheme.Scheme) - apiextensionsv1.AddToScheme(scheme.Scheme) - - groups := scheme.Scheme.PrioritizedVersionsAllGroups() - vs := make(VersionSet, 0, len(groups)) - for _, gv := range groups { - vs = append(vs, gv.String()) - } - return vs -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/chartutil/chartfile.go b/src/vendor/helm.sh/helm/v3/pkg/chartutil/chartfile.go deleted file mode 100644 index 808a902b1..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/chartutil/chartfile.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package chartutil - -import ( - "io/ioutil" - "os" - "path/filepath" - - "github.com/pkg/errors" - "sigs.k8s.io/yaml" - - "helm.sh/helm/v3/pkg/chart" -) - -// LoadChartfile loads a Chart.yaml file into a *chart.Metadata. -func LoadChartfile(filename string) (*chart.Metadata, error) { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - y := new(chart.Metadata) - err = yaml.Unmarshal(b, y) - return y, err -} - -// SaveChartfile saves the given metadata as a Chart.yaml file at the given path. -// -// 'filename' should be the complete path and filename ('foo/Chart.yaml') -func SaveChartfile(filename string, cf *chart.Metadata) error { - // Pull out the dependencies of a v1 Chart, since there's no way - // to tell the serializer to skip a field for just this use case - savedDependencies := cf.Dependencies - if cf.APIVersion == chart.APIVersionV1 { - cf.Dependencies = nil - } - out, err := yaml.Marshal(cf) - if cf.APIVersion == chart.APIVersionV1 { - cf.Dependencies = savedDependencies - } - if err != nil { - return err - } - return ioutil.WriteFile(filename, out, 0644) -} - -// IsChartDir validate a chart directory. -// -// Checks for a valid Chart.yaml. 
-func IsChartDir(dirName string) (bool, error) { - if fi, err := os.Stat(dirName); err != nil { - return false, err - } else if !fi.IsDir() { - return false, errors.Errorf("%q is not a directory", dirName) - } - - chartYaml := filepath.Join(dirName, ChartfileName) - if _, err := os.Stat(chartYaml); os.IsNotExist(err) { - return false, errors.Errorf("no %s exists in directory %q", ChartfileName, dirName) - } - - chartYamlContent, err := ioutil.ReadFile(chartYaml) - if err != nil { - return false, errors.Errorf("cannot read %s in directory %q", ChartfileName, dirName) - } - - chartContent := new(chart.Metadata) - if err := yaml.Unmarshal(chartYamlContent, &chartContent); err != nil { - return false, err - } - if chartContent == nil { - return false, errors.Errorf("chart metadata (%s) missing", ChartfileName) - } - if chartContent.Name == "" { - return false, errors.Errorf("invalid chart (%s): name must not be empty", ChartfileName) - } - - return true, nil -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go b/src/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go deleted file mode 100644 index f634d6425..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/chartutil/coalesce.go +++ /dev/null @@ -1,227 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package chartutil - -import ( - "fmt" - "log" - - "github.com/mitchellh/copystructure" - "github.com/pkg/errors" - - "helm.sh/helm/v3/pkg/chart" -) - -func concatPrefix(a, b string) string { - if a == "" { - return b - } - return fmt.Sprintf("%s.%s", a, b) -} - -// CoalesceValues coalesces all of the values in a chart (and its subcharts). -// -// Values are coalesced together using the following rules: -// -// - Values in a higher level chart always override values in a lower-level -// dependency chart -// - Scalar values and arrays are replaced, maps are merged -// - A chart has access to all of the variables for it, as well as all of -// the values destined for its dependencies. -func CoalesceValues(chrt *chart.Chart, vals map[string]interface{}) (Values, error) { - v, err := copystructure.Copy(vals) - if err != nil { - return vals, err - } - - valsCopy := v.(map[string]interface{}) - // if we have an empty map, make sure it is initialized - if valsCopy == nil { - valsCopy = make(map[string]interface{}) - } - return coalesce(log.Printf, chrt, valsCopy, "") -} - -type printFn func(format string, v ...interface{}) - -// coalesce coalesces the dest values and the chart values, giving priority to the dest values. -// -// This is a helper function for CoalesceValues. -func coalesce(printf printFn, ch *chart.Chart, dest map[string]interface{}, prefix string) (map[string]interface{}, error) { - coalesceValues(printf, ch, dest, prefix) - return coalesceDeps(printf, ch, dest, prefix) -} - -// coalesceDeps coalesces the dependencies of the given chart. 
-func coalesceDeps(printf printFn, chrt *chart.Chart, dest map[string]interface{}, prefix string) (map[string]interface{}, error) { - for _, subchart := range chrt.Dependencies() { - if c, ok := dest[subchart.Name()]; !ok { - // If dest doesn't already have the key, create it. - dest[subchart.Name()] = make(map[string]interface{}) - } else if !istable(c) { - return dest, errors.Errorf("type mismatch on %s: %t", subchart.Name(), c) - } - if dv, ok := dest[subchart.Name()]; ok { - dvmap := dv.(map[string]interface{}) - subPrefix := concatPrefix(prefix, chrt.Metadata.Name) - - // Get globals out of dest and merge them into dvmap. - coalesceGlobals(printf, dvmap, dest, subPrefix) - - // Now coalesce the rest of the values. - var err error - dest[subchart.Name()], err = coalesce(printf, subchart, dvmap, subPrefix) - if err != nil { - return dest, err - } - } - } - return dest, nil -} - -// coalesceGlobals copies the globals out of src and merges them into dest. -// -// For convenience, returns dest. -func coalesceGlobals(printf printFn, dest, src map[string]interface{}, prefix string) { - var dg, sg map[string]interface{} - - if destglob, ok := dest[GlobalKey]; !ok { - dg = make(map[string]interface{}) - } else if dg, ok = destglob.(map[string]interface{}); !ok { - printf("warning: skipping globals because destination %s is not a table.", GlobalKey) - return - } - - if srcglob, ok := src[GlobalKey]; !ok { - sg = make(map[string]interface{}) - } else if sg, ok = srcglob.(map[string]interface{}); !ok { - printf("warning: skipping globals because source %s is not a table.", GlobalKey) - return - } - - // EXPERIMENTAL: In the past, we have disallowed globals to test tables. This - // reverses that decision. It may somehow be possible to introduce a loop - // here, but I haven't found a way. So for the time being, let's allow - // tables in globals. - for key, val := range sg { - if istable(val) { - vv := copyMap(val.(map[string]interface{})) - if destv, ok := dg[key]; !ok { - // Here there is no merge. We're just adding. - dg[key] = vv - } else { - if destvmap, ok := destv.(map[string]interface{}); !ok { - printf("Conflict: cannot merge map onto non-map for %q. Skipping.", key) - } else { - // Basically, we reverse order of coalesce here to merge - // top-down. - subPrefix := concatPrefix(prefix, key) - coalesceTablesFullKey(printf, vv, destvmap, subPrefix) - dg[key] = vv - } - } - } else if dv, ok := dg[key]; ok && istable(dv) { - // It's not clear if this condition can actually ever trigger. - printf("key %s is table. Skipping", key) - } else { - // TODO: Do we need to do any additional checking on the value? - dg[key] = val - } - } - dest[GlobalKey] = dg -} - -func copyMap(src map[string]interface{}) map[string]interface{} { - m := make(map[string]interface{}, len(src)) - for k, v := range src { - m[k] = v - } - return m -} - -// coalesceValues builds up a values map for a particular chart. -// -// Values in v will override the values in the chart. -func coalesceValues(printf printFn, c *chart.Chart, v map[string]interface{}, prefix string) { - subPrefix := concatPrefix(prefix, c.Metadata.Name) - for key, val := range c.Values { - if value, ok := v[key]; ok { - if value == nil { - // When the YAML value is null, we remove the value's key. - // This allows Helm's various sources of values (value files or --set) to - // remove incompatible keys from any previous chart, file, or set values. 
- delete(v, key) - } else if dest, ok := value.(map[string]interface{}); ok { - // if v[key] is a table, merge nv's val table into v[key]. - src, ok := val.(map[string]interface{}) - if !ok { - // If the original value is nil, there is nothing to coalesce, so we don't print - // the warning - if val != nil { - printf("warning: skipped value for %s.%s: Not a table.", subPrefix, key) - } - } else { - // Because v has higher precedence than nv, dest values override src - // values. - coalesceTablesFullKey(printf, dest, src, concatPrefix(subPrefix, key)) - } - } - } else { - // If the key is not in v, copy it from nv. - v[key] = val - } - } -} - -// CoalesceTables merges a source map into a destination map. -// -// dest is considered authoritative. -func CoalesceTables(dst, src map[string]interface{}) map[string]interface{} { - return coalesceTablesFullKey(log.Printf, dst, src, "") -} - -// coalesceTablesFullKey merges a source map into a destination map. -// -// dest is considered authoritative. -func coalesceTablesFullKey(printf printFn, dst, src map[string]interface{}, prefix string) map[string]interface{} { - // When --reuse-values is set but there are no modifications yet, return new values - if src == nil { - return dst - } - if dst == nil { - return src - } - // Because dest has higher precedence than src, dest values override src - // values. - for key, val := range src { - fullkey := concatPrefix(prefix, key) - if dv, ok := dst[key]; ok && dv == nil { - delete(dst, key) - } else if !ok { - dst[key] = val - } else if istable(val) { - if istable(dv) { - coalesceTablesFullKey(printf, dv.(map[string]interface{}), val.(map[string]interface{}), fullkey) - } else { - printf("warning: cannot overwrite table with non table for %s (%v)", fullkey, val) - } - } else if istable(dv) && val != nil { - printf("warning: destination for %s is a table. Ignoring non-table value (%v)", fullkey, val) - } - } - return dst -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/chartutil/compatible.go b/src/vendor/helm.sh/helm/v3/pkg/chartutil/compatible.go deleted file mode 100644 index f4656c913..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/chartutil/compatible.go +++ /dev/null @@ -1,34 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package chartutil - -import "github.com/Masterminds/semver/v3" - -// IsCompatibleRange compares a version to a constraint. -// It returns true if the version matches the constraint, and false in all other cases. -func IsCompatibleRange(constraint, ver string) bool { - sv, err := semver.NewVersion(ver) - if err != nil { - return false - } - - c, err := semver.NewConstraint(constraint) - if err != nil { - return false - } - return c.Check(sv) -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/chartutil/create.go b/src/vendor/helm.sh/helm/v3/pkg/chartutil/create.go deleted file mode 100644 index 3a8f3cc5a..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/chartutil/create.go +++ /dev/null @@ -1,687 +0,0 @@ -/* -Copyright The Helm Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package chartutil - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "strings" - - "github.com/pkg/errors" - "sigs.k8s.io/yaml" - - "helm.sh/helm/v3/pkg/chart" - "helm.sh/helm/v3/pkg/chart/loader" -) - -// chartName is a regular expression for testing the supplied name of a chart. -// This regular expression is probably stricter than it needs to be. We can relax it -// somewhat. Newline characters, as well as $, quotes, +, parens, and % are known to be -// problematic. -var chartName = regexp.MustCompile("^[a-zA-Z0-9._-]+$") - -const ( - // ChartfileName is the default Chart file name. - ChartfileName = "Chart.yaml" - // ValuesfileName is the default values file name. - ValuesfileName = "values.yaml" - // SchemafileName is the default values schema file name. - SchemafileName = "values.schema.json" - // TemplatesDir is the relative directory name for templates. - TemplatesDir = "templates" - // ChartsDir is the relative directory name for charts dependencies. - ChartsDir = "charts" - // TemplatesTestsDir is the relative directory name for tests. - TemplatesTestsDir = TemplatesDir + sep + "tests" - // IgnorefileName is the name of the Helm ignore file. - IgnorefileName = ".helmignore" - // IngressFileName is the name of the example ingress file. - IngressFileName = TemplatesDir + sep + "ingress.yaml" - // DeploymentName is the name of the example deployment file. - DeploymentName = TemplatesDir + sep + "deployment.yaml" - // ServiceName is the name of the example service file. - ServiceName = TemplatesDir + sep + "service.yaml" - // ServiceAccountName is the name of the example serviceaccount file. - ServiceAccountName = TemplatesDir + sep + "serviceaccount.yaml" - // HorizontalPodAutoscalerName is the name of the example hpa file. - HorizontalPodAutoscalerName = TemplatesDir + sep + "hpa.yaml" - // NotesName is the name of the example NOTES.txt file. - NotesName = TemplatesDir + sep + "NOTES.txt" - // HelpersName is the name of the example helpers file. - HelpersName = TemplatesDir + sep + "_helpers.tpl" - // TestConnectionName is the name of the example test file. - TestConnectionName = TemplatesTestsDir + sep + "test-connection.yaml" -) - -// maxChartNameLength is lower than the limits we know of with certain file systems, -// and with certain Kubernetes fields. -const maxChartNameLength = 250 - -const sep = string(filepath.Separator) - -const defaultChartfile = `apiVersion: v2 -name: %s -description: A Helm chart for Kubernetes - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. 
-type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: "1.16.0" -` - -const defaultValues = `# Default values for %s. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -replicaCount: 1 - -image: - repository: nginx - pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. - tag: "" - -imagePullSecrets: [] -nameOverride: "" -fullnameOverride: "" - -serviceAccount: - # Specifies whether a service account should be created - create: true - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - name: "" - -podAnnotations: {} - -podSecurityContext: {} - # fsGroup: 2000 - -securityContext: {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 - -service: - type: ClusterIP - port: 80 - -ingress: - enabled: false - className: "" - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - hosts: - - host: chart-example.local - paths: - - path: / - pathType: ImplementationSpecific - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - -resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - -autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 100 - targetCPUUtilizationPercentage: 80 - # targetMemoryUtilizationPercentage: 80 - -nodeSelector: {} - -tolerations: [] - -affinity: {} -` - -const defaultIgnore = `# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ -` - -const defaultIngress = `{{- if .Values.ingress.enabled -}} -{{- $fullName := include ".fullname" . 
-}} -{{- $svcPort := .Values.service.port -}} -{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} - {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} - {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} - {{- end }} -{{- end }} -{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1 -{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1beta1 -{{- else -}} -apiVersion: extensions/v1beta1 -{{- end }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - {{- include ".labels" . | nindent 4 }} - {{- with .Values.ingress.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} - ingressClassName: {{ .Values.ingress.className }} - {{- end }} - {{- if .Values.ingress.tls }} - tls: - {{- range .Values.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . | quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} - {{- end }} - rules: - {{- range .Values.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ .path }} - {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} - pathType: {{ .pathType }} - {{- end }} - backend: - {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} - service: - name: {{ $fullName }} - port: - number: {{ $svcPort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $svcPort }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} -` - -const defaultDeployment = `apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include ".fullname" . }} - labels: - {{- include ".labels" . | nindent 4 }} -spec: - {{- if not .Values.autoscaling.enabled }} - replicas: {{ .Values.replicaCount }} - {{- end }} - selector: - matchLabels: - {{- include ".selectorLabels" . | nindent 6 }} - template: - metadata: - {{- with .Values.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include ".selectorLabels" . | nindent 8 }} - spec: - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - serviceAccountName: {{ include ".serviceAccountName" . }} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} - containers: - - name: {{ .Chart.Name }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - ports: - - name: http - containerPort: {{ .Values.service.port }} - protocol: TCP - livenessProbe: - httpGet: - path: / - port: http - readinessProbe: - httpGet: - path: / - port: http - resources: - {{- toYaml .Values.resources | nindent 12 }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} -` - -const defaultService = `apiVersion: v1 -kind: Service -metadata: - name: {{ include ".fullname" . }} - labels: - {{- include ".labels" . 
| nindent 4 }} -spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.service.port }} - targetPort: http - protocol: TCP - name: http - selector: - {{- include ".selectorLabels" . | nindent 4 }} -` - -const defaultServiceAccount = `{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include ".serviceAccountName" . }} - labels: - {{- include ".labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end }} -` - -const defaultHorizontalPodAutoscaler = `{{- if .Values.autoscaling.enabled }} -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ include ".fullname" . }} - labels: - {{- include ".labels" . | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include ".fullname" . }} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} - metrics: - {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} - {{- end }} - {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} - {{- end }} -{{- end }} -` - -const defaultNotes = `1. Get the application URL by running these commands: -{{- if .Values.ingress.enabled }} -{{- range $host := .Values.ingress.hosts }} - {{- range .paths }} - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} - {{- end }} -{{- end }} -{{- else if contains "NodePort" .Values.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include ".fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include ".fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include ".fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - echo http://$SERVICE_IP:{{ .Values.service.port }} -{{- else if contains "ClusterIP" .Values.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include ".name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") - echo "Visit http://127.0.0.1:8080 to use your application" - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT -{{- end }} -` - -const defaultHelpers = `{{/* -Expand the name of the chart. -*/}} -{{- define ".name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
-If release name contains chart name it will be used as a full name. -*/}} -{{- define ".fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define ".chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define ".labels" -}} -helm.sh/chart: {{ include ".chart" . }} -{{ include ".selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define ".selectorLabels" -}} -app.kubernetes.io/name: {{ include ".name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define ".serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include ".fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} -` - -const defaultTestConnection = `apiVersion: v1 -kind: Pod -metadata: - name: "{{ include ".fullname" . }}-test-connection" - labels: - {{- include ".labels" . | nindent 4 }} - annotations: - "helm.sh/hook": test -spec: - containers: - - name: wget - image: busybox - command: ['wget'] - args: ['{{ include ".fullname" . }}:{{ .Values.service.port }}'] - restartPolicy: Never -` - -// Stderr is an io.Writer to which error messages can be written -// -// In Helm 4, this will be replaced. It is needed in Helm 3 to preserve API backward -// compatibility. -var Stderr io.Writer = os.Stderr - -// CreateFrom creates a new chart, but scaffolds it from the src chart. -func CreateFrom(chartfile *chart.Metadata, dest, src string) error { - schart, err := loader.Load(src) - if err != nil { - return errors.Wrapf(err, "could not load %s", src) - } - - schart.Metadata = chartfile - - var updatedTemplates []*chart.File - - for _, template := range schart.Templates { - newData := transform(string(template.Data), schart.Name()) - updatedTemplates = append(updatedTemplates, &chart.File{Name: template.Name, Data: newData}) - } - - schart.Templates = updatedTemplates - b, err := yaml.Marshal(schart.Values) - if err != nil { - return errors.Wrap(err, "reading values file") - } - - var m map[string]interface{} - if err := yaml.Unmarshal(transform(string(b), schart.Name()), &m); err != nil { - return errors.Wrap(err, "transforming values file") - } - schart.Values = m - - // SaveDir looks for the file values.yaml when saving rather than the values - // key in order to preserve the comments in the YAML. The name placeholder - // needs to be replaced on that file. - for _, f := range schart.Raw { - if f.Name == ValuesfileName { - f.Data = transform(string(f.Data), schart.Name()) - } - } - - return SaveDir(schart, dest) -} - -// Create creates a new chart in a directory. -// -// Inside of dir, this will create a directory based on the name of -// chartfile.Name. It will then write the Chart.yaml into this directory and -// create the (empty) appropriate directories. 
-// -// The returned string will point to the newly created directory. It will be -// an absolute path, even if the provided base directory was relative. -// -// If dir does not exist, this will return an error. -// If Chart.yaml or any directories cannot be created, this will return an -// error. In such a case, this will attempt to clean up by removing the -// new chart directory. -func Create(name, dir string) (string, error) { - - // Sanity-check the name of a chart so user doesn't create one that causes problems. - if err := validateChartName(name); err != nil { - return "", err - } - - path, err := filepath.Abs(dir) - if err != nil { - return path, err - } - - if fi, err := os.Stat(path); err != nil { - return path, err - } else if !fi.IsDir() { - return path, errors.Errorf("no such directory %s", path) - } - - cdir := filepath.Join(path, name) - if fi, err := os.Stat(cdir); err == nil && !fi.IsDir() { - return cdir, errors.Errorf("file %s already exists and is not a directory", cdir) - } - - files := []struct { - path string - content []byte - }{ - { - // Chart.yaml - path: filepath.Join(cdir, ChartfileName), - content: []byte(fmt.Sprintf(defaultChartfile, name)), - }, - { - // values.yaml - path: filepath.Join(cdir, ValuesfileName), - content: []byte(fmt.Sprintf(defaultValues, name)), - }, - { - // .helmignore - path: filepath.Join(cdir, IgnorefileName), - content: []byte(defaultIgnore), - }, - { - // ingress.yaml - path: filepath.Join(cdir, IngressFileName), - content: transform(defaultIngress, name), - }, - { - // deployment.yaml - path: filepath.Join(cdir, DeploymentName), - content: transform(defaultDeployment, name), - }, - { - // service.yaml - path: filepath.Join(cdir, ServiceName), - content: transform(defaultService, name), - }, - { - // serviceaccount.yaml - path: filepath.Join(cdir, ServiceAccountName), - content: transform(defaultServiceAccount, name), - }, - { - // hpa.yaml - path: filepath.Join(cdir, HorizontalPodAutoscalerName), - content: transform(defaultHorizontalPodAutoscaler, name), - }, - { - // NOTES.txt - path: filepath.Join(cdir, NotesName), - content: transform(defaultNotes, name), - }, - { - // _helpers.tpl - path: filepath.Join(cdir, HelpersName), - content: transform(defaultHelpers, name), - }, - { - // test-connection.yaml - path: filepath.Join(cdir, TestConnectionName), - content: transform(defaultTestConnection, name), - }, - } - - for _, file := range files { - if _, err := os.Stat(file.path); err == nil { - // There is no handle to a preferred output stream here. - fmt.Fprintf(Stderr, "WARNING: File %q already exists. 
Overwriting.\n", file.path) - } - if err := writeFile(file.path, file.content); err != nil { - return cdir, err - } - } - // Need to add the ChartsDir explicitly as it does not contain any file OOTB - if err := os.MkdirAll(filepath.Join(cdir, ChartsDir), 0755); err != nil { - return cdir, err - } - return cdir, nil -} - -// transform performs a string replacement of the specified source for -// a given key with the replacement string -func transform(src, replacement string) []byte { - return []byte(strings.ReplaceAll(src, "", replacement)) -} - -func writeFile(name string, content []byte) error { - if err := os.MkdirAll(filepath.Dir(name), 0755); err != nil { - return err - } - return ioutil.WriteFile(name, content, 0644) -} - -func validateChartName(name string) error { - if name == "" || len(name) > maxChartNameLength { - return fmt.Errorf("chart name must be between 1 and %d characters", maxChartNameLength) - } - if !chartName.MatchString(name) { - return fmt.Errorf("chart name must match the regular expression %q", chartName.String()) - } - return nil -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go b/src/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go deleted file mode 100644 index e01b95bf7..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/chartutil/dependencies.go +++ /dev/null @@ -1,285 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package chartutil - -import ( - "log" - "strings" - - "helm.sh/helm/v3/pkg/chart" -) - -// ProcessDependencies checks through this chart's dependencies, processing accordingly. 
-func ProcessDependencies(c *chart.Chart, v Values) error { - if err := processDependencyEnabled(c, v, ""); err != nil { - return err - } - return processDependencyImportValues(c) -} - -// processDependencyConditions disables charts based on condition path value in values -func processDependencyConditions(reqs []*chart.Dependency, cvals Values, cpath string) { - if reqs == nil { - return - } - for _, r := range reqs { - for _, c := range strings.Split(strings.TrimSpace(r.Condition), ",") { - if len(c) > 0 { - // retrieve value - vv, err := cvals.PathValue(cpath + c) - if err == nil { - // if not bool, warn - if bv, ok := vv.(bool); ok { - r.Enabled = bv - break - } else { - log.Printf("Warning: Condition path '%s' for chart %s returned non-bool value", c, r.Name) - } - } else if _, ok := err.(ErrNoValue); !ok { - // this is a real error - log.Printf("Warning: PathValue returned error %v", err) - } - } - } - } -} - -// processDependencyTags disables charts based on tags in values -func processDependencyTags(reqs []*chart.Dependency, cvals Values) { - if reqs == nil { - return - } - vt, err := cvals.Table("tags") - if err != nil { - return - } - for _, r := range reqs { - var hasTrue, hasFalse bool - for _, k := range r.Tags { - if b, ok := vt[k]; ok { - // if not bool, warn - if bv, ok := b.(bool); ok { - if bv { - hasTrue = true - } else { - hasFalse = true - } - } else { - log.Printf("Warning: Tag '%s' for chart %s returned non-bool value", k, r.Name) - } - } - } - if !hasTrue && hasFalse { - r.Enabled = false - } else if hasTrue || !hasTrue && !hasFalse { - r.Enabled = true - } - } -} - -func getAliasDependency(charts []*chart.Chart, dep *chart.Dependency) *chart.Chart { - for _, c := range charts { - if c == nil { - continue - } - if c.Name() != dep.Name { - continue - } - if !IsCompatibleRange(dep.Version, c.Metadata.Version) { - continue - } - - out := *c - md := *c.Metadata - out.Metadata = &md - - if dep.Alias != "" { - md.Name = dep.Alias - } - return &out - } - return nil -} - -// processDependencyEnabled removes disabled charts from dependencies -func processDependencyEnabled(c *chart.Chart, v map[string]interface{}, path string) error { - if c.Metadata.Dependencies == nil { - return nil - } - - var chartDependencies []*chart.Chart - // If any dependency is not a part of Chart.yaml - // then this should be added to chartDependencies. - // However, if the dependency is already specified in Chart.yaml - // we should not add it, as it would be anyways processed from Chart.yaml - -Loop: - for _, existing := range c.Dependencies() { - for _, req := range c.Metadata.Dependencies { - if existing.Name() == req.Name && IsCompatibleRange(req.Version, existing.Metadata.Version) { - continue Loop - } - } - chartDependencies = append(chartDependencies, existing) - } - - for _, req := range c.Metadata.Dependencies { - if chartDependency := getAliasDependency(c.Dependencies(), req); chartDependency != nil { - chartDependencies = append(chartDependencies, chartDependency) - } - if req.Alias != "" { - req.Name = req.Alias - } - } - c.SetDependencies(chartDependencies...) 
- - // set all to true - for _, lr := range c.Metadata.Dependencies { - lr.Enabled = true - } - cvals, err := CoalesceValues(c, v) - if err != nil { - return err - } - // flag dependencies as enabled/disabled - processDependencyTags(c.Metadata.Dependencies, cvals) - processDependencyConditions(c.Metadata.Dependencies, cvals, path) - // make a map of charts to remove - rm := map[string]struct{}{} - for _, r := range c.Metadata.Dependencies { - if !r.Enabled { - // remove disabled chart - rm[r.Name] = struct{}{} - } - } - // don't keep disabled charts in new slice - cd := []*chart.Chart{} - copy(cd, c.Dependencies()[:0]) - for _, n := range c.Dependencies() { - if _, ok := rm[n.Metadata.Name]; !ok { - cd = append(cd, n) - } - } - // don't keep disabled charts in metadata - cdMetadata := []*chart.Dependency{} - copy(cdMetadata, c.Metadata.Dependencies[:0]) - for _, n := range c.Metadata.Dependencies { - if _, ok := rm[n.Name]; !ok { - cdMetadata = append(cdMetadata, n) - } - } - - // recursively call self to process sub dependencies - for _, t := range cd { - subpath := path + t.Metadata.Name + "." - if err := processDependencyEnabled(t, cvals, subpath); err != nil { - return err - } - } - // set the correct dependencies in metadata - c.Metadata.Dependencies = nil - c.Metadata.Dependencies = append(c.Metadata.Dependencies, cdMetadata...) - c.SetDependencies(cd...) - - return nil -} - -// pathToMap creates a nested map given a YAML path in dot notation. -func pathToMap(path string, data map[string]interface{}) map[string]interface{} { - if path == "." { - return data - } - return set(parsePath(path), data) -} - -func set(path []string, data map[string]interface{}) map[string]interface{} { - if len(path) == 0 { - return nil - } - cur := data - for i := len(path) - 1; i >= 0; i-- { - cur = map[string]interface{}{path[i]: cur} - } - return cur -} - -// processImportValues merges values from child to parent based on the chart's dependencies' ImportValues field. -func processImportValues(c *chart.Chart) error { - if c.Metadata.Dependencies == nil { - return nil - } - // combine chart values and empty config to get Values - cvals, err := CoalesceValues(c, nil) - if err != nil { - return err - } - b := make(map[string]interface{}) - // import values from each dependency if specified in import-values - for _, r := range c.Metadata.Dependencies { - var outiv []interface{} - for _, riv := range r.ImportValues { - switch iv := riv.(type) { - case map[string]interface{}: - child := iv["child"].(string) - parent := iv["parent"].(string) - - outiv = append(outiv, map[string]string{ - "child": child, - "parent": parent, - }) - - // get child table - vv, err := cvals.Table(r.Name + "." + child) - if err != nil { - log.Printf("Warning: ImportValues missing table from chart %s: %v", r.Name, err) - continue - } - // create value map from child to be merged into parent - b = CoalesceTables(cvals, pathToMap(parent, vv.AsMap())) - case string: - child := "exports." + iv - outiv = append(outiv, map[string]string{ - "child": child, - "parent": ".", - }) - vm, err := cvals.Table(r.Name + "." + child) - if err != nil { - log.Printf("Warning: ImportValues missing table: %v", err) - continue - } - b = CoalesceTables(b, vm.AsMap()) - } - } - // set our formatted import values - r.ImportValues = outiv - } - - // set the new values - c.Values = CoalesceTables(cvals, b) - - return nil -} - -// processDependencyImportValues imports specified chart values from child to parent. 
-func processDependencyImportValues(c *chart.Chart) error { - for _, d := range c.Dependencies() { - // recurse - if err := processDependencyImportValues(d); err != nil { - return err - } - } - return processImportValues(c) -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/chartutil/doc.go b/src/vendor/helm.sh/helm/v3/pkg/chartutil/doc.go deleted file mode 100644 index 8f06bcc9a..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/chartutil/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/*Package chartutil contains tools for working with charts. - -Charts are described in the chart package (pkg/chart). -This package provides utilities for serializing and deserializing charts. - -A chart can be represented on the file system in one of two ways: - - - As a directory that contains a Chart.yaml file and other chart things. - - As a tarred gzipped file containing a directory that then contains a - Chart.yaml file. - -This package provides utilities for working with those file formats. - -The preferred way of loading a chart is using 'loader.Load`: - - chart, err := loader.Load(filename) - -This will attempt to discover whether the file at 'filename' is a directory or -a chart archive. It will then load accordingly. - -For accepting raw compressed tar file data from an io.Reader, the -'loader.LoadArchive()' will read in the data, uncompress it, and unpack it -into a Chart. - -When creating charts in memory, use the 'helm.sh/helm/pkg/chart' -package directly. -*/ -package chartutil // import "helm.sh/helm/v3/pkg/chartutil" diff --git a/src/vendor/helm.sh/helm/v3/pkg/chartutil/errors.go b/src/vendor/helm.sh/helm/v3/pkg/chartutil/errors.go deleted file mode 100644 index fcdcc27ea..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/chartutil/errors.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package chartutil - -import ( - "fmt" -) - -// ErrNoTable indicates that a chart does not have a matching table. 
-type ErrNoTable struct { - Key string -} - -func (e ErrNoTable) Error() string { return fmt.Sprintf("%q is not a table", e.Key) } - -// ErrNoValue indicates that Values does not contain a key with a value -type ErrNoValue struct { - Key string -} - -func (e ErrNoValue) Error() string { return fmt.Sprintf("%q is not a value", e.Key) } diff --git a/src/vendor/helm.sh/helm/v3/pkg/chartutil/expand.go b/src/vendor/helm.sh/helm/v3/pkg/chartutil/expand.go deleted file mode 100644 index 6ad09e417..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/chartutil/expand.go +++ /dev/null @@ -1,91 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package chartutil - -import ( - "io" - "io/ioutil" - "os" - "path/filepath" - - securejoin "github.com/cyphar/filepath-securejoin" - "github.com/pkg/errors" - "sigs.k8s.io/yaml" - - "helm.sh/helm/v3/pkg/chart" - "helm.sh/helm/v3/pkg/chart/loader" -) - -// Expand uncompresses and extracts a chart into the specified directory. -func Expand(dir string, r io.Reader) error { - files, err := loader.LoadArchiveFiles(r) - if err != nil { - return err - } - - // Get the name of the chart - var chartName string - for _, file := range files { - if file.Name == "Chart.yaml" { - ch := &chart.Metadata{} - if err := yaml.Unmarshal(file.Data, ch); err != nil { - return errors.Wrap(err, "cannot load Chart.yaml") - } - chartName = ch.Name - } - } - if chartName == "" { - return errors.New("chart name not specified") - } - - // Find the base directory - chartdir, err := securejoin.SecureJoin(dir, chartName) - if err != nil { - return err - } - - // Copy all files verbatim. We don't parse these files because parsing can remove - // comments. - for _, file := range files { - outpath, err := securejoin.SecureJoin(chartdir, file.Name) - if err != nil { - return err - } - - // Make sure the necessary subdirs get created. - basedir := filepath.Dir(outpath) - if err := os.MkdirAll(basedir, 0755); err != nil { - return err - } - - if err := ioutil.WriteFile(outpath, file.Data, 0644); err != nil { - return err - } - } - - return nil -} - -// ExpandFile expands the src file into the dest directory. -func ExpandFile(dest, src string) error { - h, err := os.Open(src) - if err != nil { - return err - } - defer h.Close() - return Expand(dest, h) -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go b/src/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go deleted file mode 100644 index 7b9768fd3..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package chartutil - -import ( - "bytes" - "fmt" - "strings" - - "github.com/pkg/errors" - "github.com/xeipuuv/gojsonschema" - "sigs.k8s.io/yaml" - - "helm.sh/helm/v3/pkg/chart" -) - -// ValidateAgainstSchema checks that values does not violate the structure laid out in schema -func ValidateAgainstSchema(chrt *chart.Chart, values map[string]interface{}) error { - var sb strings.Builder - if chrt.Schema != nil { - err := ValidateAgainstSingleSchema(values, chrt.Schema) - if err != nil { - sb.WriteString(fmt.Sprintf("%s:\n", chrt.Name())) - sb.WriteString(err.Error()) - } - } - - // For each dependency, recursively call this function with the coalesced values - for _, subchart := range chrt.Dependencies() { - subchartValues := values[subchart.Name()].(map[string]interface{}) - if err := ValidateAgainstSchema(subchart, subchartValues); err != nil { - sb.WriteString(err.Error()) - } - } - - if sb.Len() > 0 { - return errors.New(sb.String()) - } - - return nil -} - -// ValidateAgainstSingleSchema checks that values does not violate the structure laid out in this schema -func ValidateAgainstSingleSchema(values Values, schemaJSON []byte) (reterr error) { - defer func() { - if r := recover(); r != nil { - reterr = fmt.Errorf("unable to validate schema: %s", r) - } - }() - - valuesData, err := yaml.Marshal(values) - if err != nil { - return err - } - valuesJSON, err := yaml.YAMLToJSON(valuesData) - if err != nil { - return err - } - if bytes.Equal(valuesJSON, []byte("null")) { - valuesJSON = []byte("{}") - } - schemaLoader := gojsonschema.NewBytesLoader(schemaJSON) - valuesLoader := gojsonschema.NewBytesLoader(valuesJSON) - - result, err := gojsonschema.Validate(schemaLoader, valuesLoader) - if err != nil { - return err - } - - if !result.Valid() { - var sb strings.Builder - for _, desc := range result.Errors() { - sb.WriteString(fmt.Sprintf("- %s\n", desc)) - } - return errors.New(sb.String()) - } - - return nil -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/chartutil/save.go b/src/vendor/helm.sh/helm/v3/pkg/chartutil/save.go deleted file mode 100644 index 2ce4eddaf..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/chartutil/save.go +++ /dev/null @@ -1,244 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package chartutil - -import ( - "archive/tar" - "compress/gzip" - "encoding/json" - "fmt" - "os" - "path/filepath" - "time" - - "github.com/pkg/errors" - "sigs.k8s.io/yaml" - - "helm.sh/helm/v3/pkg/chart" -) - -var headerBytes = []byte("+aHR0cHM6Ly95b3V0dS5iZS96OVV6MWljandyTQo=") - -// SaveDir saves a chart as files in a directory. -// -// This takes the chart name, and creates a new subdirectory inside of the given dest -// directory, writing the chart's contents to that subdirectory. 
-func SaveDir(c *chart.Chart, dest string) error { - // Create the chart directory - outdir := filepath.Join(dest, c.Name()) - if fi, err := os.Stat(outdir); err == nil && !fi.IsDir() { - return errors.Errorf("file %s already exists and is not a directory", outdir) - } - if err := os.MkdirAll(outdir, 0755); err != nil { - return err - } - - // Save the chart file. - if err := SaveChartfile(filepath.Join(outdir, ChartfileName), c.Metadata); err != nil { - return err - } - - // Save values.yaml - for _, f := range c.Raw { - if f.Name == ValuesfileName { - vf := filepath.Join(outdir, ValuesfileName) - if err := writeFile(vf, f.Data); err != nil { - return err - } - } - } - - // Save values.schema.json if it exists - if c.Schema != nil { - filename := filepath.Join(outdir, SchemafileName) - if err := writeFile(filename, c.Schema); err != nil { - return err - } - } - - // Save templates and files - for _, o := range [][]*chart.File{c.Templates, c.Files} { - for _, f := range o { - n := filepath.Join(outdir, f.Name) - if err := writeFile(n, f.Data); err != nil { - return err - } - } - } - - // Save dependencies - base := filepath.Join(outdir, ChartsDir) - for _, dep := range c.Dependencies() { - // Here, we write each dependency as a tar file. - if _, err := Save(dep, base); err != nil { - return errors.Wrapf(err, "saving %s", dep.ChartFullPath()) - } - } - return nil -} - -// Save creates an archived chart to the given directory. -// -// This takes an existing chart and a destination directory. -// -// If the directory is /foo, and the chart is named bar, with version 1.0.0, this -// will generate /foo/bar-1.0.0.tgz. -// -// This returns the absolute path to the chart archive file. -func Save(c *chart.Chart, outDir string) (string, error) { - if err := c.Validate(); err != nil { - return "", errors.Wrap(err, "chart validation") - } - - filename := fmt.Sprintf("%s-%s.tgz", c.Name(), c.Metadata.Version) - filename = filepath.Join(outDir, filename) - dir := filepath.Dir(filename) - if stat, err := os.Stat(dir); err != nil { - if os.IsNotExist(err) { - if err2 := os.MkdirAll(dir, 0755); err2 != nil { - return "", err2 - } - } else { - return "", errors.Wrapf(err, "stat %s", dir) - } - } else if !stat.IsDir() { - return "", errors.Errorf("is not a directory: %s", dir) - } - - f, err := os.Create(filename) - if err != nil { - return "", err - } - - // Wrap in gzip writer - zipper := gzip.NewWriter(f) - zipper.Header.Extra = headerBytes - zipper.Header.Comment = "Helm" - - // Wrap in tar writer - twriter := tar.NewWriter(zipper) - rollback := false - defer func() { - twriter.Close() - zipper.Close() - f.Close() - if rollback { - os.Remove(filename) - } - }() - - if err := writeTarContents(twriter, c, ""); err != nil { - rollback = true - return filename, err - } - return filename, nil -} - -func writeTarContents(out *tar.Writer, c *chart.Chart, prefix string) error { - base := filepath.Join(prefix, c.Name()) - - // Pull out the dependencies of a v1 Chart, since there's no way - // to tell the serializer to skip a field for just this use case - savedDependencies := c.Metadata.Dependencies - if c.Metadata.APIVersion == chart.APIVersionV1 { - c.Metadata.Dependencies = nil - } - // Save Chart.yaml - cdata, err := yaml.Marshal(c.Metadata) - if c.Metadata.APIVersion == chart.APIVersionV1 { - c.Metadata.Dependencies = savedDependencies - } - if err != nil { - return err - } - if err := writeToTar(out, filepath.Join(base, ChartfileName), cdata); err != nil { - return err - } - - // Save Chart.lock - // TODO: 
remove the APIVersion check when APIVersionV1 is not used anymore - if c.Metadata.APIVersion == chart.APIVersionV2 { - if c.Lock != nil { - ldata, err := yaml.Marshal(c.Lock) - if err != nil { - return err - } - if err := writeToTar(out, filepath.Join(base, "Chart.lock"), ldata); err != nil { - return err - } - } - } - - // Save values.yaml - for _, f := range c.Raw { - if f.Name == ValuesfileName { - if err := writeToTar(out, filepath.Join(base, ValuesfileName), f.Data); err != nil { - return err - } - } - } - - // Save values.schema.json if it exists - if c.Schema != nil { - if !json.Valid(c.Schema) { - return errors.New("Invalid JSON in " + SchemafileName) - } - if err := writeToTar(out, filepath.Join(base, SchemafileName), c.Schema); err != nil { - return err - } - } - - // Save templates - for _, f := range c.Templates { - n := filepath.Join(base, f.Name) - if err := writeToTar(out, n, f.Data); err != nil { - return err - } - } - - // Save files - for _, f := range c.Files { - n := filepath.Join(base, f.Name) - if err := writeToTar(out, n, f.Data); err != nil { - return err - } - } - - // Save dependencies - for _, dep := range c.Dependencies() { - if err := writeTarContents(out, dep, filepath.Join(base, ChartsDir)); err != nil { - return err - } - } - return nil -} - -// writeToTar writes a single file to a tar archive. -func writeToTar(out *tar.Writer, name string, body []byte) error { - // TODO: Do we need to create dummy parent directory names if none exist? - h := &tar.Header{ - Name: filepath.ToSlash(name), - Mode: 0644, - Size: int64(len(body)), - ModTime: time.Now(), - } - if err := out.WriteHeader(h); err != nil { - return err - } - _, err := out.Write(body) - return err -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/chartutil/validate_name.go b/src/vendor/helm.sh/helm/v3/pkg/chartutil/validate_name.go deleted file mode 100644 index 05c090cb6..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/chartutil/validate_name.go +++ /dev/null @@ -1,112 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package chartutil - -import ( - "fmt" - "regexp" - - "github.com/pkg/errors" -) - -// validName is a regular expression for resource names. -// -// According to the Kubernetes help text, the regular expression it uses is: -// -// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* -// -// This follows the above regular expression (but requires a full string match, not partial). -// -// The Kubernetes documentation is here, though it is not entirely correct: -// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names -var validName = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`) - -var ( - // errMissingName indicates that a release (name) was not provided. 
- errMissingName = errors.New("no name provided") - - // errInvalidName indicates that an invalid release name was provided - errInvalidName = fmt.Errorf( - "invalid release name, must match regex %s and the length must not be longer than 53", - validName.String()) - - // errInvalidKubernetesName indicates that the name does not meet the Kubernetes - // restrictions on metadata names. - errInvalidKubernetesName = fmt.Errorf( - "invalid metadata name, must match regex %s and the length must not be longer than 253", - validName.String()) -) - -const ( - // According to the Kubernetes docs (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#rfc-1035-label-names) - // some resource names have a max length of 63 characters while others have a max - // length of 253 characters. As we cannot be sure the resources used in a chart, we - // therefore need to limit it to 63 chars and reserve 10 chars for additional part to name - // of the resource. The reason is that chart maintainers can use release name as part of - // the resource name (and some additional chars). - maxReleaseNameLen = 53 - // maxMetadataNameLen is the maximum length Kubernetes allows for any name. - maxMetadataNameLen = 253 -) - -// ValidateReleaseName performs checks for an entry for a Helm release name -// -// For Helm to allow a name, it must be below a certain character count (53) and also match -// a regular expression. -// -// According to the Kubernetes help text, the regular expression it uses is: -// -// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* -// -// This follows the above regular expression (but requires a full string match, not partial). -// -// The Kubernetes documentation is here, though it is not entirely correct: -// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names -func ValidateReleaseName(name string) error { - // This case is preserved for backwards compatibility - if name == "" { - return errMissingName - - } - if len(name) > maxReleaseNameLen || !validName.MatchString(name) { - return errInvalidName - } - return nil -} - -// ValidateMetadataName validates the name field of a Kubernetes metadata object. -// -// Empty strings, strings longer than 253 chars, or strings that don't match the regexp -// will fail. -// -// According to the Kubernetes help text, the regular expression it uses is: -// -// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* -// -// This follows the above regular expression (but requires a full string match, not partial). -// -// The Kubernetes documentation is here, though it is not entirely correct: -// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names -// -// Deprecated: remove in Helm 4. Name validation now uses rules defined in -// pkg/lint/rules.validateMetadataNameFunc() -func ValidateMetadataName(name string) error { - if name == "" || len(name) > maxMetadataNameLen || !validName.MatchString(name) { - return errInvalidKubernetesName - } - return nil -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/chartutil/values.go b/src/vendor/helm.sh/helm/v3/pkg/chartutil/values.go deleted file mode 100644 index 97bf44217..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/chartutil/values.go +++ /dev/null @@ -1,212 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package chartutil - -import ( - "fmt" - "io" - "io/ioutil" - "strings" - - "github.com/pkg/errors" - "sigs.k8s.io/yaml" - - "helm.sh/helm/v3/pkg/chart" -) - -// GlobalKey is the name of the Values key that is used for storing global vars. -const GlobalKey = "global" - -// Values represents a collection of chart values. -type Values map[string]interface{} - -// YAML encodes the Values into a YAML string. -func (v Values) YAML() (string, error) { - b, err := yaml.Marshal(v) - return string(b), err -} - -// Table gets a table (YAML subsection) from a Values object. -// -// The table is returned as a Values. -// -// Compound table names may be specified with dots: -// -// foo.bar -// -// The above will be evaluated as "The table bar inside the table -// foo". -// -// An ErrNoTable is returned if the table does not exist. -func (v Values) Table(name string) (Values, error) { - table := v - var err error - - for _, n := range parsePath(name) { - if table, err = tableLookup(table, n); err != nil { - break - } - } - return table, err -} - -// AsMap is a utility function for converting Values to a map[string]interface{}. -// -// It protects against nil map panics. -func (v Values) AsMap() map[string]interface{} { - if len(v) == 0 { - return map[string]interface{}{} - } - return v -} - -// Encode writes serialized Values information to the given io.Writer. -func (v Values) Encode(w io.Writer) error { - out, err := yaml.Marshal(v) - if err != nil { - return err - } - _, err = w.Write(out) - return err -} - -func tableLookup(v Values, simple string) (Values, error) { - v2, ok := v[simple] - if !ok { - return v, ErrNoTable{simple} - } - if vv, ok := v2.(map[string]interface{}); ok { - return vv, nil - } - - // This catches a case where a value is of type Values, but doesn't (for some - // reason) match the map[string]interface{}. This has been observed in the - // wild, and might be a result of a nil map of type Values. - if vv, ok := v2.(Values); ok { - return vv, nil - } - - return Values{}, ErrNoTable{simple} -} - -// ReadValues will parse YAML byte data into a Values. -func ReadValues(data []byte) (vals Values, err error) { - err = yaml.Unmarshal(data, &vals) - if len(vals) == 0 { - vals = Values{} - } - return vals, err -} - -// ReadValuesFile will parse a YAML file into a map of values. -func ReadValuesFile(filename string) (Values, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - return map[string]interface{}{}, err - } - return ReadValues(data) -} - -// ReleaseOptions represents the additional release options needed -// for the composition of the final values struct -type ReleaseOptions struct { - Name string - Namespace string - Revision int - IsUpgrade bool - IsInstall bool -} - -// ToRenderValues composes the struct from the data coming from the Releases, Charts and Values files -// -// This takes both ReleaseOptions and Capabilities to merge into the render values. 
-func ToRenderValues(chrt *chart.Chart, chrtVals map[string]interface{}, options ReleaseOptions, caps *Capabilities) (Values, error) { - if caps == nil { - caps = DefaultCapabilities - } - top := map[string]interface{}{ - "Chart": chrt.Metadata, - "Capabilities": caps, - "Release": map[string]interface{}{ - "Name": options.Name, - "Namespace": options.Namespace, - "IsUpgrade": options.IsUpgrade, - "IsInstall": options.IsInstall, - "Revision": options.Revision, - "Service": "Helm", - }, - } - - vals, err := CoalesceValues(chrt, chrtVals) - if err != nil { - return top, err - } - - if err := ValidateAgainstSchema(chrt, vals); err != nil { - errFmt := "values don't meet the specifications of the schema(s) in the following chart(s):\n%s" - return top, fmt.Errorf(errFmt, err.Error()) - } - - top["Values"] = vals - return top, nil -} - -// istable is a special-purpose function to see if the present thing matches the definition of a YAML table. -func istable(v interface{}) bool { - _, ok := v.(map[string]interface{}) - return ok -} - -// PathValue takes a path that traverses a YAML structure and returns the value at the end of that path. -// The path starts at the root of the YAML structure and is comprised of YAML keys separated by periods. -// Given the following YAML data the value at path "chapter.one.title" is "Loomings". -// -// chapter: -// one: -// title: "Loomings" -func (v Values) PathValue(path string) (interface{}, error) { - if path == "" { - return nil, errors.New("YAML path cannot be empty") - } - return v.pathValue(parsePath(path)) -} - -func (v Values) pathValue(path []string) (interface{}, error) { - if len(path) == 1 { - // if exists must be root key not table - if _, ok := v[path[0]]; ok && !istable(v[path[0]]) { - return v[path[0]], nil - } - return nil, ErrNoValue{path[0]} - } - - key, path := path[len(path)-1], path[:len(path)-1] - // get our table for table path - t, err := v.Table(joinPath(path...)) - if err != nil { - return nil, ErrNoValue{key} - } - // check table for key and ensure value is not a table - if k, ok := t[key]; ok && !istable(k) { - return k, nil - } - return nil, ErrNoValue{key} -} - -func parsePath(key string) []string { return strings.Split(key, ".") } - -func joinPath(path ...string) string { return strings.Join(path, ".") } diff --git a/src/vendor/helm.sh/helm/v3/pkg/cli/environment.go b/src/vendor/helm.sh/helm/v3/pkg/cli/environment.go deleted file mode 100644 index ac3093629..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/cli/environment.go +++ /dev/null @@ -1,230 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/*Package cli describes the operating environment for the Helm CLI. - -Helm's environment encapsulates all of the service dependencies Helm has. -These dependencies are expressed as interfaces so that alternate implementations -(mocks, etc.) can be easily generated. 
-*/ -package cli - -import ( - "fmt" - "os" - "strconv" - "strings" - - "github.com/spf13/pflag" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/client-go/rest" - - "helm.sh/helm/v3/pkg/helmpath" -) - -// defaultMaxHistory sets the maximum number of releases to 0: unlimited -const defaultMaxHistory = 10 - -// defaultBurstLimit sets the default client-side throttling limit -const defaultBurstLimit = 100 - -// EnvSettings describes all of the environment settings. -type EnvSettings struct { - namespace string - config *genericclioptions.ConfigFlags - - // KubeConfig is the path to the kubeconfig file - KubeConfig string - // KubeContext is the name of the kubeconfig context. - KubeContext string - // Bearer KubeToken used for authentication - KubeToken string - // Username to impersonate for the operation - KubeAsUser string - // Groups to impersonate for the operation, multiple groups parsed from a comma delimited list - KubeAsGroups []string - // Kubernetes API Server Endpoint for authentication - KubeAPIServer string - // Custom certificate authority file. - KubeCaFile string - // KubeInsecureSkipTLSVerify indicates if server's certificate will not be checked for validity. - // This makes the HTTPS connections insecure - KubeInsecureSkipTLSVerify bool - // KubeTLSServerName overrides the name to use for server certificate validation. - // If it is not provided, the hostname used to contact the server is used - KubeTLSServerName string - // Debug indicates whether or not Helm is running in Debug mode. - Debug bool - // RegistryConfig is the path to the registry config file. - RegistryConfig string - // RepositoryConfig is the path to the repositories file. - RepositoryConfig string - // RepositoryCache is the path to the repository cache directory. - RepositoryCache string - // PluginsDirectory is the path to the plugins directory. - PluginsDirectory string - // MaxHistory is the max release history maintained. - MaxHistory int - // BurstLimit is the default client-side throttling limit. 
- BurstLimit int -} - -func New() *EnvSettings { - env := &EnvSettings{ - namespace: os.Getenv("HELM_NAMESPACE"), - MaxHistory: envIntOr("HELM_MAX_HISTORY", defaultMaxHistory), - KubeContext: os.Getenv("HELM_KUBECONTEXT"), - KubeToken: os.Getenv("HELM_KUBETOKEN"), - KubeAsUser: os.Getenv("HELM_KUBEASUSER"), - KubeAsGroups: envCSV("HELM_KUBEASGROUPS"), - KubeAPIServer: os.Getenv("HELM_KUBEAPISERVER"), - KubeCaFile: os.Getenv("HELM_KUBECAFILE"), - KubeTLSServerName: os.Getenv("HELM_KUBETLS_SERVER_NAME"), - KubeInsecureSkipTLSVerify: envBoolOr("HELM_KUBEINSECURE_SKIP_TLS_VERIFY", false), - PluginsDirectory: envOr("HELM_PLUGINS", helmpath.DataPath("plugins")), - RegistryConfig: envOr("HELM_REGISTRY_CONFIG", helmpath.ConfigPath("registry/config.json")), - RepositoryConfig: envOr("HELM_REPOSITORY_CONFIG", helmpath.ConfigPath("repositories.yaml")), - RepositoryCache: envOr("HELM_REPOSITORY_CACHE", helmpath.CachePath("repository")), - BurstLimit: envIntOr("HELM_BURST_LIMIT", defaultBurstLimit), - } - env.Debug, _ = strconv.ParseBool(os.Getenv("HELM_DEBUG")) - - // bind to kubernetes config flags - env.config = &genericclioptions.ConfigFlags{ - Namespace: &env.namespace, - Context: &env.KubeContext, - BearerToken: &env.KubeToken, - APIServer: &env.KubeAPIServer, - CAFile: &env.KubeCaFile, - KubeConfig: &env.KubeConfig, - Impersonate: &env.KubeAsUser, - Insecure: &env.KubeInsecureSkipTLSVerify, - TLSServerName: &env.KubeTLSServerName, - ImpersonateGroup: &env.KubeAsGroups, - WrapConfigFn: func(config *rest.Config) *rest.Config { - config.Burst = env.BurstLimit - return config - }, - } - return env -} - -// AddFlags binds flags to the given flagset. -func (s *EnvSettings) AddFlags(fs *pflag.FlagSet) { - fs.StringVarP(&s.namespace, "namespace", "n", s.namespace, "namespace scope for this request") - fs.StringVar(&s.KubeConfig, "kubeconfig", "", "path to the kubeconfig file") - fs.StringVar(&s.KubeContext, "kube-context", s.KubeContext, "name of the kubeconfig context to use") - fs.StringVar(&s.KubeToken, "kube-token", s.KubeToken, "bearer token used for authentication") - fs.StringVar(&s.KubeAsUser, "kube-as-user", s.KubeAsUser, "username to impersonate for the operation") - fs.StringArrayVar(&s.KubeAsGroups, "kube-as-group", s.KubeAsGroups, "group to impersonate for the operation, this flag can be repeated to specify multiple groups.") - fs.StringVar(&s.KubeAPIServer, "kube-apiserver", s.KubeAPIServer, "the address and the port for the Kubernetes API server") - fs.StringVar(&s.KubeCaFile, "kube-ca-file", s.KubeCaFile, "the certificate authority file for the Kubernetes API server connection") - fs.StringVar(&s.KubeTLSServerName, "kube-tls-server-name", s.KubeTLSServerName, "server name to use for Kubernetes API server certificate validation. If it is not provided, the hostname used to contact the server is used") - fs.BoolVar(&s.KubeInsecureSkipTLSVerify, "kube-insecure-skip-tls-verify", s.KubeInsecureSkipTLSVerify, "if true, the Kubernetes API server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure") - fs.BoolVar(&s.Debug, "debug", s.Debug, "enable verbose output") - fs.StringVar(&s.RegistryConfig, "registry-config", s.RegistryConfig, "path to the registry config file") - fs.StringVar(&s.RepositoryConfig, "repository-config", s.RepositoryConfig, "path to the file containing repository names and URLs") - fs.StringVar(&s.RepositoryCache, "repository-cache", s.RepositoryCache, "path to the file containing cached repository indexes") - fs.IntVar(&s.BurstLimit, "burst-limit", s.BurstLimit, "client-side default throttling limit") -} - -func envOr(name, def string) string { - if v, ok := os.LookupEnv(name); ok { - return v - } - return def -} - -func envBoolOr(name string, def bool) bool { - if name == "" { - return def - } - envVal := envOr(name, strconv.FormatBool(def)) - ret, err := strconv.ParseBool(envVal) - if err != nil { - return def - } - return ret -} - -func envIntOr(name string, def int) int { - if name == "" { - return def - } - envVal := envOr(name, strconv.Itoa(def)) - ret, err := strconv.Atoi(envVal) - if err != nil { - return def - } - return ret -} - -func envCSV(name string) (ls []string) { - trimmed := strings.Trim(os.Getenv(name), ", ") - if trimmed != "" { - ls = strings.Split(trimmed, ",") - } - return -} - -func (s *EnvSettings) EnvVars() map[string]string { - envvars := map[string]string{ - "HELM_BIN": os.Args[0], - "HELM_CACHE_HOME": helmpath.CachePath(""), - "HELM_CONFIG_HOME": helmpath.ConfigPath(""), - "HELM_DATA_HOME": helmpath.DataPath(""), - "HELM_DEBUG": fmt.Sprint(s.Debug), - "HELM_PLUGINS": s.PluginsDirectory, - "HELM_REGISTRY_CONFIG": s.RegistryConfig, - "HELM_REPOSITORY_CACHE": s.RepositoryCache, - "HELM_REPOSITORY_CONFIG": s.RepositoryConfig, - "HELM_NAMESPACE": s.Namespace(), - "HELM_MAX_HISTORY": strconv.Itoa(s.MaxHistory), - "HELM_BURST_LIMIT": strconv.Itoa(s.BurstLimit), - - // broken, these are populated from helm flags and not kubeconfig. - "HELM_KUBECONTEXT": s.KubeContext, - "HELM_KUBETOKEN": s.KubeToken, - "HELM_KUBEASUSER": s.KubeAsUser, - "HELM_KUBEASGROUPS": strings.Join(s.KubeAsGroups, ","), - "HELM_KUBEAPISERVER": s.KubeAPIServer, - "HELM_KUBECAFILE": s.KubeCaFile, - "HELM_KUBEINSECURE_SKIP_TLS_VERIFY": strconv.FormatBool(s.KubeInsecureSkipTLSVerify), - "HELM_KUBETLS_SERVER_NAME": s.KubeTLSServerName, - } - if s.KubeConfig != "" { - envvars["KUBECONFIG"] = s.KubeConfig - } - return envvars -} - -// Namespace gets the namespace from the configuration -func (s *EnvSettings) Namespace() string { - if ns, _, err := s.config.ToRawKubeConfigLoader().Namespace(); err == nil { - return ns - } - return "default" -} - -// SetNamespace sets the namespace in the configuration -func (s *EnvSettings) SetNamespace(namespace string) { - s.namespace = namespace -} - -// RESTClientGetter gets the kubeconfig from EnvSettings -func (s *EnvSettings) RESTClientGetter() genericclioptions.RESTClientGetter { - return s.config -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/getter/doc.go b/src/vendor/helm.sh/helm/v3/pkg/getter/doc.go deleted file mode 100644 index c53ef1ae0..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/getter/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/*Package getter provides a generalize tool for fetching data by scheme. - -This provides a method by which the plugin system can load arbitrary protocol -handlers based upon a URL scheme. -*/ -package getter diff --git a/src/vendor/helm.sh/helm/v3/pkg/getter/getter.go b/src/vendor/helm.sh/helm/v3/pkg/getter/getter.go deleted file mode 100644 index 653b032fe..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/getter/getter.go +++ /dev/null @@ -1,193 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package getter - -import ( - "bytes" - "net/http" - "time" - - "github.com/pkg/errors" - - "helm.sh/helm/v3/pkg/cli" - "helm.sh/helm/v3/pkg/registry" -) - -// options are generic parameters to be provided to the getter during instantiation. -// -// Getters may or may not ignore these parameters as they are passed in. -type options struct { - url string - certFile string - keyFile string - caFile string - unTar bool - insecureSkipVerifyTLS bool - username string - password string - passCredentialsAll bool - userAgent string - version string - registryClient *registry.Client - timeout time.Duration - transport *http.Transport -} - -// Option allows specifying various settings configurable by the user for overriding the defaults -// used when performing Get operations with the Getter. -type Option func(*options) - -// WithURL informs the getter the server name that will be used when fetching objects. Used in conjunction with -// WithTLSClientConfig to set the TLSClientConfig's server name. -func WithURL(url string) Option { - return func(opts *options) { - opts.url = url - } -} - -// WithBasicAuth sets the request's Authorization header to use the provided credentials -func WithBasicAuth(username, password string) Option { - return func(opts *options) { - opts.username = username - opts.password = password - } -} - -func WithPassCredentialsAll(pass bool) Option { - return func(opts *options) { - opts.passCredentialsAll = pass - } -} - -// WithUserAgent sets the request's User-Agent header to use the provided agent name. -func WithUserAgent(userAgent string) Option { - return func(opts *options) { - opts.userAgent = userAgent - } -} - -// WithInsecureSkipVerifyTLS determines if a TLS Certificate will be checked -func WithInsecureSkipVerifyTLS(insecureSkipVerifyTLS bool) Option { - return func(opts *options) { - opts.insecureSkipVerifyTLS = insecureSkipVerifyTLS - } -} - -// WithTLSClientConfig sets the client auth with the provided credentials. 
-func WithTLSClientConfig(certFile, keyFile, caFile string) Option { - return func(opts *options) { - opts.certFile = certFile - opts.keyFile = keyFile - opts.caFile = caFile - } -} - -// WithTimeout sets the timeout for requests -func WithTimeout(timeout time.Duration) Option { - return func(opts *options) { - opts.timeout = timeout - } -} - -func WithTagName(tagname string) Option { - return func(opts *options) { - opts.version = tagname - } -} - -func WithRegistryClient(client *registry.Client) Option { - return func(opts *options) { - opts.registryClient = client - } -} - -func WithUntar() Option { - return func(opts *options) { - opts.unTar = true - } -} - -// WithTransport sets the http.Transport to allow overwriting the HTTPGetter default. -func WithTransport(transport *http.Transport) Option { - return func(opts *options) { - opts.transport = transport - } -} - -// Getter is an interface to support GET to the specified URL. -type Getter interface { - // Get file content by url string - Get(url string, options ...Option) (*bytes.Buffer, error) -} - -// Constructor is the function for every getter which creates a specific instance -// according to the configuration -type Constructor func(options ...Option) (Getter, error) - -// Provider represents any getter and the schemes that it supports. -// -// For example, an HTTP provider may provide one getter that handles both -// 'http' and 'https' schemes. -type Provider struct { - Schemes []string - New Constructor -} - -// Provides returns true if the given scheme is supported by this Provider. -func (p Provider) Provides(scheme string) bool { - for _, i := range p.Schemes { - if i == scheme { - return true - } - } - return false -} - -// Providers is a collection of Provider objects. -type Providers []Provider - -// ByScheme returns a Provider that handles the given scheme. -// -// If no provider handles this scheme, this will return an error. -func (p Providers) ByScheme(scheme string) (Getter, error) { - for _, pp := range p { - if pp.Provides(scheme) { - return pp.New() - } - } - return nil, errors.Errorf("scheme %q not supported", scheme) -} - -var httpProvider = Provider{ - Schemes: []string{"http", "https"}, - New: NewHTTPGetter, -} - -var ociProvider = Provider{ - Schemes: []string{registry.OCIScheme}, - New: NewOCIGetter, -} - -// All finds all of the registered getters as a list of Provider instances. -// Currently, the built-in getters and the discovered plugins with downloader -// notations are collected. -func All(settings *cli.EnvSettings) Providers { - result := Providers{httpProvider, ociProvider} - pluginDownloaders, _ := collectPlugins(settings) - result = append(result, pluginDownloaders...) - return result -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go b/src/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go deleted file mode 100644 index 6fe1aa71f..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go +++ /dev/null @@ -1,158 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package getter - -import ( - "bytes" - "crypto/tls" - "io" - "net/http" - "net/url" - "sync" - - "github.com/pkg/errors" - - "helm.sh/helm/v3/internal/tlsutil" - "helm.sh/helm/v3/internal/urlutil" - "helm.sh/helm/v3/internal/version" -) - -// HTTPGetter is the default HTTP(/S) backend handler -type HTTPGetter struct { - opts options - transport *http.Transport - once sync.Once -} - -// Get performs a Get from repo.Getter and returns the body. -func (g *HTTPGetter) Get(href string, options ...Option) (*bytes.Buffer, error) { - for _, opt := range options { - opt(&g.opts) - } - return g.get(href) -} - -func (g *HTTPGetter) get(href string) (*bytes.Buffer, error) { - // Set a helm specific user agent so that a repo server and metrics can - // separate helm calls from other tools interacting with repos. - req, err := http.NewRequest(http.MethodGet, href, nil) - if err != nil { - return nil, err - } - - req.Header.Set("User-Agent", version.GetUserAgent()) - if g.opts.userAgent != "" { - req.Header.Set("User-Agent", g.opts.userAgent) - } - - // Before setting the basic auth credentials, make sure the URL associated - // with the basic auth is the one being fetched. - u1, err := url.Parse(g.opts.url) - if err != nil { - return nil, errors.Wrap(err, "Unable to parse getter URL") - } - u2, err := url.Parse(href) - if err != nil { - return nil, errors.Wrap(err, "Unable to parse URL getting from") - } - - // Host on URL (returned from url.Parse) contains the port if present. - // This check ensures credentials are not passed between different - // services on different ports. - if g.opts.passCredentialsAll || (u1.Scheme == u2.Scheme && u1.Host == u2.Host) { - if g.opts.username != "" && g.opts.password != "" { - req.SetBasicAuth(g.opts.username, g.opts.password) - } - } - - client, err := g.httpClient() - if err != nil { - return nil, err - } - - resp, err := client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return nil, errors.Errorf("failed to fetch %s : %s", href, resp.Status) - } - - buf := bytes.NewBuffer(nil) - _, err = io.Copy(buf, resp.Body) - return buf, err -} - -// NewHTTPGetter constructs a valid http/https client as a Getter -func NewHTTPGetter(options ...Option) (Getter, error) { - var client HTTPGetter - - for _, opt := range options { - opt(&client.opts) - } - - return &client, nil -} - -func (g *HTTPGetter) httpClient() (*http.Client, error) { - if g.opts.transport != nil { - return &http.Client{ - Transport: g.opts.transport, - Timeout: g.opts.timeout, - }, nil - } - - g.once.Do(func() { - g.transport = &http.Transport{ - DisableCompression: true, - Proxy: http.ProxyFromEnvironment, - } - }) - - if (g.opts.certFile != "" && g.opts.keyFile != "") || g.opts.caFile != "" { - tlsConf, err := tlsutil.NewClientTLS(g.opts.certFile, g.opts.keyFile, g.opts.caFile) - if err != nil { - return nil, errors.Wrap(err, "can't create TLS config for client") - } - tlsConf.BuildNameToCertificate() - - sni, err := urlutil.ExtractHostname(g.opts.url) - if err != nil { - return nil, err - } - tlsConf.ServerName = sni - - g.transport.TLSClientConfig = tlsConf - } - - if g.opts.insecureSkipVerifyTLS { - if g.transport.TLSClientConfig == nil { - g.transport.TLSClientConfig = &tls.Config{ - InsecureSkipVerify: true, - } - } else { - g.transport.TLSClientConfig.InsecureSkipVerify = true - } - } - - client := &http.Client{ - Transport: g.transport, - Timeout: g.opts.timeout, - } - - return client, nil -} diff --git 
a/src/vendor/helm.sh/helm/v3/pkg/getter/ocigetter.go b/src/vendor/helm.sh/helm/v3/pkg/getter/ocigetter.go deleted file mode 100644 index 14f5cb3ec..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/getter/ocigetter.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package getter - -import ( - "bytes" - "fmt" - "strings" - - "helm.sh/helm/v3/pkg/registry" -) - -// OCIGetter is the default HTTP(/S) backend handler -type OCIGetter struct { - opts options -} - -// Get performs a Get from repo.Getter and returns the body. -func (g *OCIGetter) Get(href string, options ...Option) (*bytes.Buffer, error) { - for _, opt := range options { - opt(&g.opts) - } - return g.get(href) -} - -func (g *OCIGetter) get(href string) (*bytes.Buffer, error) { - client := g.opts.registryClient - - ref := strings.TrimPrefix(href, fmt.Sprintf("%s://", registry.OCIScheme)) - - var pullOpts []registry.PullOption - requestingProv := strings.HasSuffix(ref, ".prov") - if requestingProv { - ref = strings.TrimSuffix(ref, ".prov") - pullOpts = append(pullOpts, - registry.PullOptWithChart(false), - registry.PullOptWithProv(true)) - } - - result, err := client.Pull(ref, pullOpts...) - if err != nil { - return nil, err - } - - if requestingProv { - return bytes.NewBuffer(result.Prov.Data), nil - } - return bytes.NewBuffer(result.Chart.Data), nil -} - -// NewOCIGetter constructs a valid http/https client as a Getter -func NewOCIGetter(ops ...Option) (Getter, error) { - registryClient, err := registry.NewClient( - registry.ClientOptEnableCache(true), - ) - if err != nil { - return nil, err - } - - client := OCIGetter{ - opts: options{ - registryClient: registryClient, - }, - } - - for _, opt := range ops { - opt(&client.opts) - } - - return &client, nil -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/getter/plugingetter.go b/src/vendor/helm.sh/helm/v3/pkg/getter/plugingetter.go deleted file mode 100644 index 0d13ade57..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/getter/plugingetter.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package getter - -import ( - "bytes" - "os" - "os/exec" - "path/filepath" - "strings" - - "github.com/pkg/errors" - - "helm.sh/helm/v3/pkg/cli" - "helm.sh/helm/v3/pkg/plugin" -) - -// collectPlugins scans for getter plugins. -// This will load plugins according to the cli. 
-func collectPlugins(settings *cli.EnvSettings) (Providers, error) { - plugins, err := plugin.FindPlugins(settings.PluginsDirectory) - if err != nil { - return nil, err - } - var result Providers - for _, plugin := range plugins { - for _, downloader := range plugin.Metadata.Downloaders { - result = append(result, Provider{ - Schemes: downloader.Protocols, - New: NewPluginGetter( - downloader.Command, - settings, - plugin.Metadata.Name, - plugin.Dir, - ), - }) - } - } - return result, nil -} - -// pluginGetter is a generic type to invoke custom downloaders, -// implemented in plugins. -type pluginGetter struct { - command string - settings *cli.EnvSettings - name string - base string - opts options -} - -// Get runs downloader plugin command -func (p *pluginGetter) Get(href string, options ...Option) (*bytes.Buffer, error) { - for _, opt := range options { - opt(&p.opts) - } - commands := strings.Split(p.command, " ") - argv := append(commands[1:], p.opts.certFile, p.opts.keyFile, p.opts.caFile, href) - prog := exec.Command(filepath.Join(p.base, commands[0]), argv...) - plugin.SetupPluginEnv(p.settings, p.name, p.base) - prog.Env = os.Environ() - buf := bytes.NewBuffer(nil) - prog.Stdout = buf - prog.Stderr = os.Stderr - if err := prog.Run(); err != nil { - if eerr, ok := err.(*exec.ExitError); ok { - os.Stderr.Write(eerr.Stderr) - return nil, errors.Errorf("plugin %q exited with error", p.command) - } - return nil, err - } - return buf, nil -} - -// NewPluginGetter constructs a valid plugin getter -func NewPluginGetter(command string, settings *cli.EnvSettings, name, base string) Constructor { - return func(options ...Option) (Getter, error) { - result := &pluginGetter{ - command: command, - settings: settings, - name: name, - base: base, - } - for _, opt := range options { - opt(&result.opts) - } - return result, nil - } -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/helmpath/home.go b/src/vendor/helm.sh/helm/v3/pkg/helmpath/home.go deleted file mode 100644 index bd43e8890..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/helmpath/home.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright The Helm Authors. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at - -// http://www.apache.org/licenses/LICENSE-2.0 - -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package helmpath calculates filesystem paths to Helm's configuration, cache and data. -package helmpath - -// This helper builds paths to Helm's configuration, cache and data paths. -const lp = lazypath("helm") - -// ConfigPath returns the path where Helm stores configuration. -func ConfigPath(elem ...string) string { return lp.configPath(elem...) } - -// CachePath returns the path where Helm stores cached objects. -func CachePath(elem ...string) string { return lp.cachePath(elem...) } - -// DataPath returns the path where Helm stores data. -func DataPath(elem ...string) string { return lp.dataPath(elem...) } - -// CacheIndexFile returns the path to an index for the given named repository. 
-func CacheIndexFile(name string) string { - if name != "" { - name += "-" - } - return name + "index.yaml" -} - -// CacheChartsFile returns the path to a text file listing all the charts -// within the given named repository. -func CacheChartsFile(name string) string { - if name != "" { - name += "-" - } - return name + "charts.txt" -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath.go b/src/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath.go deleted file mode 100644 index 22d7bf0a1..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright The Helm Authors. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at - -// http://www.apache.org/licenses/LICENSE-2.0 - -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package helmpath - -import ( - "os" - "path/filepath" - - "helm.sh/helm/v3/pkg/helmpath/xdg" -) - -const ( - // CacheHomeEnvVar is the environment variable used by Helm - // for the cache directory. When no value is set a default is used. - CacheHomeEnvVar = "HELM_CACHE_HOME" - - // ConfigHomeEnvVar is the environment variable used by Helm - // for the config directory. When no value is set a default is used. - ConfigHomeEnvVar = "HELM_CONFIG_HOME" - - // DataHomeEnvVar is the environment variable used by Helm - // for the data directory. When no value is set a default is used. - DataHomeEnvVar = "HELM_DATA_HOME" -) - -// lazypath is an lazy-loaded path buffer for the XDG base directory specification. -type lazypath string - -func (l lazypath) path(helmEnvVar, xdgEnvVar string, defaultFn func() string, elem ...string) string { - - // There is an order to checking for a path. - // 1. See if a Helm specific environment variable has been set. - // 2. Check if an XDG environment variable is set - // 3. Fall back to a default - base := os.Getenv(helmEnvVar) - if base != "" { - return filepath.Join(base, filepath.Join(elem...)) - } - base = os.Getenv(xdgEnvVar) - if base == "" { - base = defaultFn() - } - return filepath.Join(base, string(l), filepath.Join(elem...)) -} - -// cachePath defines the base directory relative to which user specific non-essential data files -// should be stored. -func (l lazypath) cachePath(elem ...string) string { - return l.path(CacheHomeEnvVar, xdg.CacheHomeEnvVar, cacheHome, filepath.Join(elem...)) -} - -// configPath defines the base directory relative to which user specific configuration files should -// be stored. -func (l lazypath) configPath(elem ...string) string { - return l.path(ConfigHomeEnvVar, xdg.ConfigHomeEnvVar, configHome, filepath.Join(elem...)) -} - -// dataPath defines the base directory relative to which user specific data files should be stored. 
-func (l lazypath) dataPath(elem ...string) string { - return l.path(DataHomeEnvVar, xdg.DataHomeEnvVar, dataHome, filepath.Join(elem...)) -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_darwin.go b/src/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_darwin.go deleted file mode 100644 index eba6dde15..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_darwin.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright The Helm Authors. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at - -// http://www.apache.org/licenses/LICENSE-2.0 - -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build darwin - -package helmpath - -import ( - "path/filepath" - - "k8s.io/client-go/util/homedir" -) - -func dataHome() string { - return filepath.Join(homedir.HomeDir(), "Library") -} - -func configHome() string { - return filepath.Join(homedir.HomeDir(), "Library", "Preferences") -} - -func cacheHome() string { - return filepath.Join(homedir.HomeDir(), "Library", "Caches") -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_unix.go b/src/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_unix.go deleted file mode 100644 index 82fb4b6f1..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_unix.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright The Helm Authors. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at - -// http://www.apache.org/licenses/LICENSE-2.0 - -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !windows && !darwin - -package helmpath - -import ( - "path/filepath" - - "k8s.io/client-go/util/homedir" -) - -// dataHome defines the base directory relative to which user specific data files should be stored. -// -// If $XDG_DATA_HOME is either not set or empty, a default equal to $HOME/.local/share is used. -func dataHome() string { - return filepath.Join(homedir.HomeDir(), ".local", "share") -} - -// configHome defines the base directory relative to which user specific configuration files should -// be stored. -// -// If $XDG_CONFIG_HOME is either not set or empty, a default equal to $HOME/.config is used. -func configHome() string { - return filepath.Join(homedir.HomeDir(), ".config") -} - -// cacheHome defines the base directory relative to which user specific non-essential data files -// should be stored. -// -// If $XDG_CACHE_HOME is either not set or empty, a default equal to $HOME/.cache is used. 
-func cacheHome() string { - return filepath.Join(homedir.HomeDir(), ".cache") -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_windows.go b/src/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_windows.go deleted file mode 100644 index 230aee2a9..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/helmpath/lazypath_windows.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright The Helm Authors. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at - -// http://www.apache.org/licenses/LICENSE-2.0 - -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build windows - -package helmpath - -import "os" - -func dataHome() string { return configHome() } - -func configHome() string { return os.Getenv("APPDATA") } - -func cacheHome() string { return os.Getenv("TEMP") } diff --git a/src/vendor/helm.sh/helm/v3/pkg/helmpath/xdg/xdg.go b/src/vendor/helm.sh/helm/v3/pkg/helmpath/xdg/xdg.go deleted file mode 100644 index eaa3e6864..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/helmpath/xdg/xdg.go +++ /dev/null @@ -1,34 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package xdg holds constants pertaining to XDG Base Directory Specification. -// -// The XDG Base Directory Specification https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html -// specifies the environment variables that define user-specific base directories for various categories of files. -package xdg - -const ( - // CacheHomeEnvVar is the environment variable used by the - // XDG base directory specification for the cache directory. - CacheHomeEnvVar = "XDG_CACHE_HOME" - - // ConfigHomeEnvVar is the environment variable used by the - // XDG base directory specification for the config directory. - ConfigHomeEnvVar = "XDG_CONFIG_HOME" - - // DataHomeEnvVar is the environment variable used by the - // XDG base directory specification for the data directory. - DataHomeEnvVar = "XDG_DATA_HOME" -) diff --git a/src/vendor/helm.sh/helm/v3/pkg/plugin/hooks.go b/src/vendor/helm.sh/helm/v3/pkg/plugin/hooks.go deleted file mode 100644 index e3481515f..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/plugin/hooks.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package plugin // import "helm.sh/helm/v3/pkg/plugin" - -// Types of hooks -const ( - // Install is executed after the plugin is added. - Install = "install" - // Delete is executed after the plugin is removed. - Delete = "delete" - // Update is executed after the plugin is updated. - Update = "update" -) - -// Hooks is a map of events to commands. -type Hooks map[string]string diff --git a/src/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go b/src/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go deleted file mode 100644 index 1399b7116..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/plugin/plugin.go +++ /dev/null @@ -1,282 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package plugin // import "helm.sh/helm/v3/pkg/plugin" - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "runtime" - "strings" - "unicode" - - "github.com/pkg/errors" - "sigs.k8s.io/yaml" - - "helm.sh/helm/v3/pkg/cli" -) - -const PluginFileName = "plugin.yaml" - -// Downloaders represents the plugins capability if it can retrieve -// charts from special sources -type Downloaders struct { - // Protocols are the list of schemes from the charts URL. - Protocols []string `json:"protocols"` - // Command is the executable path with which the plugin performs - // the actual download for the corresponding Protocols - Command string `json:"command"` -} - -// PlatformCommand represents a command for a particular operating system and architecture -type PlatformCommand struct { - OperatingSystem string `json:"os"` - Architecture string `json:"arch"` - Command string `json:"command"` -} - -// Metadata describes a plugin. -// -// This is the plugin equivalent of a chart.Metadata. -type Metadata struct { - // Name is the name of the plugin - Name string `json:"name"` - - // Version is a SemVer 2 version of the plugin. - Version string `json:"version"` - - // Usage is the single-line usage text shown in help - Usage string `json:"usage"` - - // Description is a long description shown in places like `helm help` - Description string `json:"description"` - - // Command is the command, as a single string. - // - // The command will be passed through environment expansion, so env vars can - // be present in this command. Unless IgnoreFlags is set, this will - // also merge the flags passed from Helm. - // - // Note that command is not executed in a shell. To do so, we suggest - // pointing the command to a shell script. 
- // - // The following rules will apply to processing commands: - // - If platformCommand is present, it will be searched first - // - If both OS and Arch match the current platform, search will stop and the command will be executed - // - If OS matches and there is no more specific match, the command will be executed - // - If no OS/Arch match is found, the default command will be executed - // - If no command is present and no matches are found in platformCommand, Helm will exit with an error - PlatformCommand []PlatformCommand `json:"platformCommand"` - Command string `json:"command"` - - // IgnoreFlags ignores any flags passed in from Helm - // - // For example, if the plugin is invoked as `helm --debug myplugin`, if this - // is false, `--debug` will be appended to `--command`. If this is true, - // the `--debug` flag will be discarded. - IgnoreFlags bool `json:"ignoreFlags"` - - // Hooks are commands that will run on events. - Hooks Hooks - - // Downloaders field is used if the plugin supply downloader mechanism - // for special protocols. - Downloaders []Downloaders `json:"downloaders"` - - // UseTunnelDeprecated indicates that this command needs a tunnel. - // Setting this will cause a number of side effects, such as the - // automatic setting of HELM_HOST. - // DEPRECATED and unused, but retained for backwards compatibility with Helm 2 plugins. Remove in Helm 4 - UseTunnelDeprecated bool `json:"useTunnel,omitempty"` -} - -// Plugin represents a plugin. -type Plugin struct { - // Metadata is a parsed representation of a plugin.yaml - Metadata *Metadata - // Dir is the string path to the directory that holds the plugin. - Dir string -} - -// The following rules will apply to processing the Plugin.PlatformCommand.Command: -// - If both OS and Arch match the current platform, search will stop and the command will be prepared for execution -// - If OS matches and there is no more specific match, the command will be prepared for execution -// - If no OS/Arch match is found, return nil -func getPlatformCommand(cmds []PlatformCommand) []string { - var command []string - eq := strings.EqualFold - for _, c := range cmds { - if eq(c.OperatingSystem, runtime.GOOS) { - command = strings.Split(os.ExpandEnv(c.Command), " ") - } - if eq(c.OperatingSystem, runtime.GOOS) && eq(c.Architecture, runtime.GOARCH) { - return strings.Split(os.ExpandEnv(c.Command), " ") - } - } - return command -} - -// PrepareCommand takes a Plugin.PlatformCommand.Command, a Plugin.Command and will applying the following processing: -// - If platformCommand is present, it will be searched first -// - If both OS and Arch match the current platform, search will stop and the command will be prepared for execution -// - If OS matches and there is no more specific match, the command will be prepared for execution -// - If no OS/Arch match is found, the default command will be prepared for execution -// - If no command is present and no matches are found in platformCommand, will exit with an error -// -// It merges extraArgs into any arguments supplied in the plugin. It -// returns the name of the command and an args array. -// -// The result is suitable to pass to exec.Command. 
-func (p *Plugin) PrepareCommand(extraArgs []string) (string, []string, error) { - var parts []string - platCmdLen := len(p.Metadata.PlatformCommand) - if platCmdLen > 0 { - parts = getPlatformCommand(p.Metadata.PlatformCommand) - } - if platCmdLen == 0 || parts == nil { - parts = strings.Split(os.ExpandEnv(p.Metadata.Command), " ") - } - if len(parts) == 0 || parts[0] == "" { - return "", nil, fmt.Errorf("no plugin command is applicable") - } - - main := parts[0] - baseArgs := []string{} - if len(parts) > 1 { - baseArgs = parts[1:] - } - if !p.Metadata.IgnoreFlags { - baseArgs = append(baseArgs, extraArgs...) - } - return main, baseArgs, nil -} - -// validPluginName is a regular expression that validates plugin names. -// -// Plugin names can only contain the ASCII characters a-z, A-Z, 0-9, ​_​ and ​-. -var validPluginName = regexp.MustCompile("^[A-Za-z0-9_-]+$") - -// validatePluginData validates a plugin's YAML data. -func validatePluginData(plug *Plugin, filepath string) error { - if !validPluginName.MatchString(plug.Metadata.Name) { - return fmt.Errorf("invalid plugin name at %q", filepath) - } - plug.Metadata.Usage = sanitizeString(plug.Metadata.Usage) - - // We could also validate SemVer, executable, and other fields should we so choose. - return nil -} - -// sanitizeString normalize spaces and removes non-printable characters. -func sanitizeString(str string) string { - return strings.Map(func(r rune) rune { - if unicode.IsSpace(r) { - return ' ' - } - if unicode.IsPrint(r) { - return r - } - return -1 - }, str) -} - -func detectDuplicates(plugs []*Plugin) error { - names := map[string]string{} - - for _, plug := range plugs { - if oldpath, ok := names[plug.Metadata.Name]; ok { - return fmt.Errorf( - "two plugins claim the name %q at %q and %q", - plug.Metadata.Name, - oldpath, - plug.Dir, - ) - } - names[plug.Metadata.Name] = plug.Dir - } - - return nil -} - -// LoadDir loads a plugin from the given directory. -func LoadDir(dirname string) (*Plugin, error) { - pluginfile := filepath.Join(dirname, PluginFileName) - data, err := ioutil.ReadFile(pluginfile) - if err != nil { - return nil, errors.Wrapf(err, "failed to read plugin at %q", pluginfile) - } - - plug := &Plugin{Dir: dirname} - if err := yaml.UnmarshalStrict(data, &plug.Metadata); err != nil { - return nil, errors.Wrapf(err, "failed to load plugin at %q", pluginfile) - } - return plug, validatePluginData(plug, pluginfile) -} - -// LoadAll loads all plugins found beneath the base directory. -// -// This scans only one directory level. -func LoadAll(basedir string) ([]*Plugin, error) { - plugins := []*Plugin{} - // We want basedir/*/plugin.yaml - scanpath := filepath.Join(basedir, "*", PluginFileName) - matches, err := filepath.Glob(scanpath) - if err != nil { - return plugins, errors.Wrapf(err, "failed to find plugins in %q", scanpath) - } - - if matches == nil { - return plugins, nil - } - - for _, yaml := range matches { - dir := filepath.Dir(yaml) - p, err := LoadDir(dir) - if err != nil { - return plugins, err - } - plugins = append(plugins, p) - } - return plugins, detectDuplicates(plugins) -} - -// FindPlugins returns a list of YAML files that describe plugins. -func FindPlugins(plugdirs string) ([]*Plugin, error) { - found := []*Plugin{} - // Let's get all UNIXy and allow path separators - for _, p := range filepath.SplitList(plugdirs) { - matches, err := LoadAll(p) - if err != nil { - return matches, err - } - found = append(found, matches...) 
- } - return found, nil -} - -// SetupPluginEnv prepares os.Env for plugins. It operates on os.Env because -// the plugin subsystem itself needs access to the environment variables -// created here. -func SetupPluginEnv(settings *cli.EnvSettings, name, base string) { - env := settings.EnvVars() - env["HELM_PLUGIN_NAME"] = name - env["HELM_PLUGIN_DIR"] = base - for key, val := range env { - os.Setenv(key, val) - } -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/provenance/doc.go b/src/vendor/helm.sh/helm/v3/pkg/provenance/doc.go deleted file mode 100644 index 3d2d0ea97..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/provenance/doc.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/*Package provenance provides tools for establishing the authenticity of a chart. - -In Helm, provenance is established via several factors. The primary factor is the -cryptographic signature of a chart. Chart authors may sign charts, which in turn -provide the necessary metadata to ensure the integrity of the chart file, the -Chart.yaml, and the referenced Docker images. - -A provenance file is clear-signed. This provides cryptographic verification that -a particular block of information (Chart.yaml, archive file, images) have not -been tampered with or altered. To learn more, read the GnuPG documentation on -clear signatures: -https://www.gnupg.org/gph/en/manual/x135.html - -The cryptography used by Helm should be compatible with OpenGPG. For example, -you should be able to verify a signature by importing the desired public key -and using `gpg --verify`, `keybase pgp verify`, or similar: - - $ gpg --verify some.sig - gpg: Signature made Mon Jul 25 17:23:44 2016 MDT using RSA key ID 1FC18762 - gpg: Good signature from "Helm Testing (This key should only be used for testing. DO NOT TRUST.) " [ultimate] -*/ -package provenance // import "helm.sh/helm/v3/pkg/provenance" diff --git a/src/vendor/helm.sh/helm/v3/pkg/provenance/sign.go b/src/vendor/helm.sh/helm/v3/pkg/provenance/sign.go deleted file mode 100644 index c41f90c61..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/provenance/sign.go +++ /dev/null @@ -1,424 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package provenance - -import ( - "bytes" - "crypto" - "encoding/hex" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/pkg/errors" - "golang.org/x/crypto/openpgp" //nolint - "golang.org/x/crypto/openpgp/clearsign" //nolint - "golang.org/x/crypto/openpgp/packet" //nolint - "sigs.k8s.io/yaml" - - hapi "helm.sh/helm/v3/pkg/chart" - "helm.sh/helm/v3/pkg/chart/loader" -) - -var defaultPGPConfig = packet.Config{ - DefaultHash: crypto.SHA512, -} - -// SumCollection represents a collection of file and image checksums. -// -// Files are of the form: -// FILENAME: "sha256:SUM" -// Images are of the form: -// "IMAGE:TAG": "sha256:SUM" -// Docker optionally supports sha512, and if this is the case, the hash marker -// will be 'sha512' instead of 'sha256'. -type SumCollection struct { - Files map[string]string `json:"files"` - Images map[string]string `json:"images,omitempty"` -} - -// Verification contains information about a verification operation. -type Verification struct { - // SignedBy contains the entity that signed a chart. - SignedBy *openpgp.Entity - // FileHash is the hash, prepended with the scheme, for the file that was verified. - FileHash string - // FileName is the name of the file that FileHash verifies. - FileName string -} - -// Signatory signs things. -// -// Signatories can be constructed from a PGP private key file using NewFromFiles -// or they can be constructed manually by setting the Entity to a valid -// PGP entity. -// -// The same Signatory can be used to sign or validate multiple charts. -type Signatory struct { - // The signatory for this instance of Helm. This is used for signing. - Entity *openpgp.Entity - // The keyring for this instance of Helm. This is used for verification. - KeyRing openpgp.EntityList -} - -// NewFromFiles constructs a new Signatory from the PGP key in the given filename. -// -// This will emit an error if it cannot find a valid GPG keyfile (entity) at the -// given location. -// -// Note that the keyfile may have just a public key, just a private key, or -// both. The Signatory methods may have different requirements of the keys. For -// example, ClearSign must have a valid `openpgp.Entity.PrivateKey` before it -// can sign something. -func NewFromFiles(keyfile, keyringfile string) (*Signatory, error) { - e, err := loadKey(keyfile) - if err != nil { - return nil, err - } - - ring, err := loadKeyRing(keyringfile) - if err != nil { - return nil, err - } - - return &Signatory{ - Entity: e, - KeyRing: ring, - }, nil -} - -// NewFromKeyring reads a keyring file and creates a Signatory. -// -// If id is not the empty string, this will also try to find an Entity in the -// keyring whose name matches, and set that as the signing entity. It will return -// an error if the id is not empty and also not found. -func NewFromKeyring(keyringfile, id string) (*Signatory, error) { - ring, err := loadKeyRing(keyringfile) - if err != nil { - return nil, err - } - - s := &Signatory{KeyRing: ring} - - // If the ID is empty, we can return now. - if id == "" { - return s, nil - } - - // We're gonna go all GnuPG on this and look for a string that _contains_. If - // two or more keys contain the string and none are a direct match, we error - // out. 
- var candidate *openpgp.Entity - vague := false - for _, e := range ring { - for n := range e.Identities { - if n == id { - s.Entity = e - return s, nil - } - if strings.Contains(n, id) { - if candidate != nil { - vague = true - } - candidate = e - } - } - } - if vague { - return s, errors.Errorf("more than one key contain the id %q", id) - } - - s.Entity = candidate - return s, nil -} - -// PassphraseFetcher returns a passphrase for decrypting keys. -// -// This is used as a callback to read a passphrase from some other location. The -// given name is the Name field on the key, typically of the form: -// -// USER_NAME (COMMENT) -type PassphraseFetcher func(name string) ([]byte, error) - -// DecryptKey decrypts a private key in the Signatory. -// -// If the key is not encrypted, this will return without error. -// -// If the key does not exist, this will return an error. -// -// If the key exists, but cannot be unlocked with the passphrase returned by -// the PassphraseFetcher, this will return an error. -// -// If the key is successfully unlocked, it will return nil. -func (s *Signatory) DecryptKey(fn PassphraseFetcher) error { - if s.Entity == nil { - return errors.New("private key not found") - } else if s.Entity.PrivateKey == nil { - return errors.New("provided key is not a private key. Try providing a keyring with secret keys") - } - - // Nothing else to do if key is not encrypted. - if !s.Entity.PrivateKey.Encrypted { - return nil - } - - fname := "Unknown" - for i := range s.Entity.Identities { - if i != "" { - fname = i - break - } - } - - p, err := fn(fname) - if err != nil { - return err - } - - return s.Entity.PrivateKey.Decrypt(p) -} - -// ClearSign signs a chart with the given key. -// -// This takes the path to a chart archive file and a key, and it returns a clear signature. -// -// The Signatory must have a valid Entity.PrivateKey for this to work. If it does -// not, an error will be returned. -func (s *Signatory) ClearSign(chartpath string) (string, error) { - if s.Entity == nil { - return "", errors.New("private key not found") - } else if s.Entity.PrivateKey == nil { - return "", errors.New("provided key is not a private key. Try providing a keyring with secret keys") - } - - if fi, err := os.Stat(chartpath); err != nil { - return "", err - } else if fi.IsDir() { - return "", errors.New("cannot sign a directory") - } - - out := bytes.NewBuffer(nil) - - b, err := messageBlock(chartpath) - if err != nil { - return "", err - } - - // Sign the buffer - w, err := clearsign.Encode(out, s.Entity.PrivateKey, &defaultPGPConfig) - if err != nil { - return "", err - } - - _, err = io.Copy(w, b) - - if err != nil { - // NB: We intentionally don't call `w.Close()` here! `w.Close()` is the method which - // actually does the PGP signing, and therefore is the part which uses the private key. - // In other words, if we call Close here, there's a risk that there's an attempt to use the - // private key to sign garbage data (since we know that io.Copy failed, `w` won't contain - // anything useful). - return "", errors.Wrap(err, "failed to write to clearsign encoder") - } - - err = w.Close() - if err != nil { - return "", errors.Wrap(err, "failed to either sign or armor message block") - } - - return out.String(), nil -} - -// Verify checks a signature and verifies that it is legit for a chart. 
-func (s *Signatory) Verify(chartpath, sigpath string) (*Verification, error) { - ver := &Verification{} - for _, fname := range []string{chartpath, sigpath} { - if fi, err := os.Stat(fname); err != nil { - return ver, err - } else if fi.IsDir() { - return ver, errors.Errorf("%s cannot be a directory", fname) - } - } - - // First verify the signature - sig, err := s.decodeSignature(sigpath) - if err != nil { - return ver, errors.Wrap(err, "failed to decode signature") - } - - by, err := s.verifySignature(sig) - if err != nil { - return ver, err - } - ver.SignedBy = by - - // Second, verify the hash of the tarball. - sum, err := DigestFile(chartpath) - if err != nil { - return ver, err - } - _, sums, err := parseMessageBlock(sig.Plaintext) - if err != nil { - return ver, err - } - - sum = "sha256:" + sum - basename := filepath.Base(chartpath) - if sha, ok := sums.Files[basename]; !ok { - return ver, errors.Errorf("provenance does not contain a SHA for a file named %q", basename) - } else if sha != sum { - return ver, errors.Errorf("sha256 sum does not match for %s: %q != %q", basename, sha, sum) - } - ver.FileHash = sum - ver.FileName = basename - - // TODO: when image signing is added, verify that here. - - return ver, nil -} - -func (s *Signatory) decodeSignature(filename string) (*clearsign.Block, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - - block, _ := clearsign.Decode(data) - if block == nil { - // There was no sig in the file. - return nil, errors.New("signature block not found") - } - - return block, nil -} - -// verifySignature verifies that the given block is validly signed, and returns the signer. -func (s *Signatory) verifySignature(block *clearsign.Block) (*openpgp.Entity, error) { - return openpgp.CheckDetachedSignature( - s.KeyRing, - bytes.NewBuffer(block.Bytes), - block.ArmoredSignature.Body, - ) -} - -func messageBlock(chartpath string) (*bytes.Buffer, error) { - var b *bytes.Buffer - // Checksum the archive - chash, err := DigestFile(chartpath) - if err != nil { - return b, err - } - - base := filepath.Base(chartpath) - sums := &SumCollection{ - Files: map[string]string{ - base: "sha256:" + chash, - }, - } - - // Load the archive into memory. - chart, err := loader.LoadFile(chartpath) - if err != nil { - return b, err - } - - // Buffer a hash + checksums YAML file - data, err := yaml.Marshal(chart.Metadata) - if err != nil { - return b, err - } - - // FIXME: YAML uses ---\n as a file start indicator, but this is not legal in a PGP - // clearsign block. So we use ...\n, which is the YAML document end marker. - // http://yaml.org/spec/1.2/spec.html#id2800168 - b = bytes.NewBuffer(data) - b.WriteString("\n...\n") - - data, err = yaml.Marshal(sums) - if err != nil { - return b, err - } - b.Write(data) - - return b, nil -} - -// parseMessageBlock -func parseMessageBlock(data []byte) (*hapi.Metadata, *SumCollection, error) { - // This sucks. - parts := bytes.Split(data, []byte("\n...\n")) - if len(parts) < 2 { - return nil, nil, errors.New("message block must have at least two parts") - } - - md := &hapi.Metadata{} - sc := &SumCollection{} - - if err := yaml.Unmarshal(parts[0], md); err != nil { - return md, sc, err - } - err := yaml.Unmarshal(parts[1], sc) - return md, sc, err -} - -// loadKey loads a GPG key found at a particular path. 
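A minimal sketch of how the Signatory API removed above is typically driven; the chart archive, key file and keyring paths below are placeholders:

    // Sketch only: clear-sign a packaged chart and verify it against a keyring.
    package main

    import (
        "fmt"
        "log"

        "helm.sh/helm/v3/pkg/provenance"
    )

    func main() {
        // Signing requires an entity with a private key.
        signer, err := provenance.NewFromFiles("signing-key.asc", "pubring.gpg")
        if err != nil {
            log.Fatal(err)
        }
        sig, err := signer.ClearSign("mychart-0.1.0.tgz")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(sig) // armored clearsign block, normally saved as mychart-0.1.0.tgz.prov

        // Verification only needs the public keyring.
        verifier, err := provenance.NewFromKeyring("pubring.gpg", "")
        if err != nil {
            log.Fatal(err)
        }
        ver, err := verifier.Verify("mychart-0.1.0.tgz", "mychart-0.1.0.tgz.prov")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("verified %s (%s)\n", ver.FileName, ver.FileHash)
    }
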
-func loadKey(keypath string) (*openpgp.Entity, error) { - f, err := os.Open(keypath) - if err != nil { - return nil, err - } - defer f.Close() - - pr := packet.NewReader(f) - return openpgp.ReadEntity(pr) -} - -func loadKeyRing(ringpath string) (openpgp.EntityList, error) { - f, err := os.Open(ringpath) - if err != nil { - return nil, err - } - defer f.Close() - return openpgp.ReadKeyRing(f) -} - -// DigestFile calculates a SHA256 hash (like Docker) for a given file. -// -// It takes the path to the archive file, and returns a string representation of -// the SHA256 sum. -// -// The intended use of this function is to generate a sum of a chart TGZ file. -func DigestFile(filename string) (string, error) { - f, err := os.Open(filename) - if err != nil { - return "", err - } - defer f.Close() - return Digest(f) -} - -// Digest hashes a reader and returns a SHA256 digest. -// -// Helm uses SHA256 as its default hash for all non-cryptographic applications. -func Digest(in io.Reader) (string, error) { - hash := crypto.SHA256.New() - if _, err := io.Copy(hash, in); err != nil { - return "", nil - } - return hex.EncodeToString(hash.Sum(nil)), nil -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/registry/client.go b/src/vendor/helm.sh/helm/v3/pkg/registry/client.go deleted file mode 100644 index c1004f956..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/registry/client.go +++ /dev/null @@ -1,643 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package registry // import "helm.sh/helm/v3/pkg/registry" - -import ( - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "sort" - "strings" - - "github.com/Masterminds/semver/v3" - "github.com/containerd/containerd/remotes" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "oras.land/oras-go/pkg/auth" - dockerauth "oras.land/oras-go/pkg/auth/docker" - "oras.land/oras-go/pkg/content" - "oras.land/oras-go/pkg/oras" - "oras.land/oras-go/pkg/registry" - registryremote "oras.land/oras-go/pkg/registry/remote" - registryauth "oras.land/oras-go/pkg/registry/remote/auth" - - "helm.sh/helm/v3/internal/version" - "helm.sh/helm/v3/pkg/chart" - "helm.sh/helm/v3/pkg/helmpath" -) - -// See https://github.com/helm/helm/issues/10166 -const registryUnderscoreMessage = ` -OCI artifact references (e.g. tags) do not support the plus sign (+). To support -storing semantic versions, Helm adopts the convention of changing plus (+) to -an underscore (_) in chart version tags when pushing to a registry and back to -a plus (+) when pulling from a registry.` - -type ( - // Client works with OCI-compliant registries - Client struct { - debug bool - enableCache bool - // path to repository config file e.g. 
~/.docker/config.json - credentialsFile string - out io.Writer - authorizer auth.Client - registryAuthorizer *registryauth.Client - resolver remotes.Resolver - } - - // ClientOption allows specifying various settings configurable by the user for overriding the defaults - // used when creating a new default client - ClientOption func(*Client) -) - -// NewClient returns a new registry client with config -func NewClient(options ...ClientOption) (*Client, error) { - client := &Client{ - out: ioutil.Discard, - } - for _, option := range options { - option(client) - } - if client.credentialsFile == "" { - client.credentialsFile = helmpath.ConfigPath(CredentialsFileBasename) - } - if client.authorizer == nil { - authClient, err := dockerauth.NewClientWithDockerFallback(client.credentialsFile) - if err != nil { - return nil, err - } - client.authorizer = authClient - } - if client.resolver == nil { - headers := http.Header{} - headers.Set("User-Agent", version.GetUserAgent()) - opts := []auth.ResolverOption{auth.WithResolverHeaders(headers)} - resolver, err := client.authorizer.ResolverWithOpts(opts...) - if err != nil { - return nil, err - } - client.resolver = resolver - } - - // allocate a cache if option is set - var cache registryauth.Cache - if client.enableCache { - cache = registryauth.DefaultCache - } - if client.registryAuthorizer == nil { - client.registryAuthorizer = ®istryauth.Client{ - Header: http.Header{ - "User-Agent": {version.GetUserAgent()}, - }, - Cache: cache, - Credential: func(ctx context.Context, reg string) (registryauth.Credential, error) { - dockerClient, ok := client.authorizer.(*dockerauth.Client) - if !ok { - return registryauth.EmptyCredential, errors.New("unable to obtain docker client") - } - - username, password, err := dockerClient.Credential(reg) - if err != nil { - return registryauth.EmptyCredential, errors.New("unable to retrieve credentials") - } - - // A blank returned username and password value is a bearer token - if username == "" && password != "" { - return registryauth.Credential{ - RefreshToken: password, - }, nil - } - - return registryauth.Credential{ - Username: username, - Password: password, - }, nil - - }, - } - - } - return client, nil -} - -// ClientOptDebug returns a function that sets the debug setting on client options set -func ClientOptDebug(debug bool) ClientOption { - return func(client *Client) { - client.debug = debug - } -} - -// ClientOptEnableCache returns a function that sets the enableCache setting on a client options set -func ClientOptEnableCache(enableCache bool) ClientOption { - return func(client *Client) { - client.enableCache = enableCache - } -} - -// ClientOptWriter returns a function that sets the writer setting on client options set -func ClientOptWriter(out io.Writer) ClientOption { - return func(client *Client) { - client.out = out - } -} - -// ClientOptCredentialsFile returns a function that sets the credentialsFile setting on a client options set -func ClientOptCredentialsFile(credentialsFile string) ClientOption { - return func(client *Client) { - client.credentialsFile = credentialsFile - } -} - -type ( - // LoginOption allows specifying various settings on login - LoginOption func(*loginOperation) - - loginOperation struct { - username string - password string - insecure bool - } -) - -// Login logs into a registry -func (c *Client) Login(host string, options ...LoginOption) error { - operation := &loginOperation{} - for _, option := range options { - option(operation) - } - authorizerLoginOpts := 
[]auth.LoginOption{ - auth.WithLoginContext(ctx(c.out, c.debug)), - auth.WithLoginHostname(host), - auth.WithLoginUsername(operation.username), - auth.WithLoginSecret(operation.password), - auth.WithLoginUserAgent(version.GetUserAgent()), - } - if operation.insecure { - authorizerLoginOpts = append(authorizerLoginOpts, auth.WithLoginInsecure()) - } - if err := c.authorizer.LoginWithOpts(authorizerLoginOpts...); err != nil { - return err - } - fmt.Fprintln(c.out, "Login Succeeded") - return nil -} - -// LoginOptBasicAuth returns a function that sets the username/password settings on login -func LoginOptBasicAuth(username string, password string) LoginOption { - return func(operation *loginOperation) { - operation.username = username - operation.password = password - } -} - -// LoginOptInsecure returns a function that sets the insecure setting on login -func LoginOptInsecure(insecure bool) LoginOption { - return func(operation *loginOperation) { - operation.insecure = insecure - } -} - -type ( - // LogoutOption allows specifying various settings on logout - LogoutOption func(*logoutOperation) - - logoutOperation struct{} -) - -// Logout logs out of a registry -func (c *Client) Logout(host string, opts ...LogoutOption) error { - operation := &logoutOperation{} - for _, opt := range opts { - opt(operation) - } - if err := c.authorizer.Logout(ctx(c.out, c.debug), host); err != nil { - return err - } - fmt.Fprintf(c.out, "Removing login credentials for %s\n", host) - return nil -} - -type ( - // PullOption allows specifying various settings on pull - PullOption func(*pullOperation) - - // PullResult is the result returned upon successful pull. - PullResult struct { - Manifest *descriptorPullSummary `json:"manifest"` - Config *descriptorPullSummary `json:"config"` - Chart *descriptorPullSummaryWithMeta `json:"chart"` - Prov *descriptorPullSummary `json:"prov"` - Ref string `json:"ref"` - } - - descriptorPullSummary struct { - Data []byte `json:"-"` - Digest string `json:"digest"` - Size int64 `json:"size"` - } - - descriptorPullSummaryWithMeta struct { - descriptorPullSummary - Meta *chart.Metadata `json:"meta"` - } - - pullOperation struct { - withChart bool - withProv bool - ignoreMissingProv bool - } -) - -// Pull downloads a chart from a registry -func (c *Client) Pull(ref string, options ...PullOption) (*PullResult, error) { - parsedRef, err := parseReference(ref) - if err != nil { - return nil, err - } - - operation := &pullOperation{ - withChart: true, // By default, always download the chart layer - } - for _, option := range options { - option(operation) - } - if !operation.withChart && !operation.withProv { - return nil, errors.New( - "must specify at least one layer to pull (chart/prov)") - } - memoryStore := content.NewMemory() - allowedMediaTypes := []string{ - ConfigMediaType, - } - minNumDescriptors := 1 // 1 for the config - if operation.withChart { - minNumDescriptors++ - allowedMediaTypes = append(allowedMediaTypes, ChartLayerMediaType, LegacyChartLayerMediaType) - } - if operation.withProv { - if !operation.ignoreMissingProv { - minNumDescriptors++ - } - allowedMediaTypes = append(allowedMediaTypes, ProvLayerMediaType) - } - - var descriptors, layers []ocispec.Descriptor - registryStore := content.Registry{Resolver: c.resolver} - - manifest, err := oras.Copy(ctx(c.out, c.debug), registryStore, parsedRef.String(), memoryStore, "", - oras.WithPullEmptyNameAllowed(), - oras.WithAllowedMediaTypes(allowedMediaTypes), - oras.WithLayerDescriptors(func(l []ocispec.Descriptor) { - 
layers = l - })) - if err != nil { - return nil, err - } - - descriptors = append(descriptors, manifest) - descriptors = append(descriptors, layers...) - - numDescriptors := len(descriptors) - if numDescriptors < minNumDescriptors { - return nil, fmt.Errorf("manifest does not contain minimum number of descriptors (%d), descriptors found: %d", - minNumDescriptors, numDescriptors) - } - var configDescriptor *ocispec.Descriptor - var chartDescriptor *ocispec.Descriptor - var provDescriptor *ocispec.Descriptor - for _, descriptor := range descriptors { - d := descriptor - switch d.MediaType { - case ConfigMediaType: - configDescriptor = &d - case ChartLayerMediaType: - chartDescriptor = &d - case ProvLayerMediaType: - provDescriptor = &d - case LegacyChartLayerMediaType: - chartDescriptor = &d - fmt.Fprintf(c.out, "Warning: chart media type %s is deprecated\n", LegacyChartLayerMediaType) - } - } - if configDescriptor == nil { - return nil, fmt.Errorf("could not load config with mediatype %s", ConfigMediaType) - } - if operation.withChart && chartDescriptor == nil { - return nil, fmt.Errorf("manifest does not contain a layer with mediatype %s", - ChartLayerMediaType) - } - var provMissing bool - if operation.withProv && provDescriptor == nil { - if operation.ignoreMissingProv { - provMissing = true - } else { - return nil, fmt.Errorf("manifest does not contain a layer with mediatype %s", - ProvLayerMediaType) - } - } - result := &PullResult{ - Manifest: &descriptorPullSummary{ - Digest: manifest.Digest.String(), - Size: manifest.Size, - }, - Config: &descriptorPullSummary{ - Digest: configDescriptor.Digest.String(), - Size: configDescriptor.Size, - }, - Chart: &descriptorPullSummaryWithMeta{}, - Prov: &descriptorPullSummary{}, - Ref: parsedRef.String(), - } - var getManifestErr error - if _, manifestData, ok := memoryStore.Get(manifest); !ok { - getManifestErr = errors.Errorf("Unable to retrieve blob with digest %s", manifest.Digest) - } else { - result.Manifest.Data = manifestData - } - if getManifestErr != nil { - return nil, getManifestErr - } - var getConfigDescriptorErr error - if _, configData, ok := memoryStore.Get(*configDescriptor); !ok { - getConfigDescriptorErr = errors.Errorf("Unable to retrieve blob with digest %s", configDescriptor.Digest) - } else { - result.Config.Data = configData - var meta *chart.Metadata - if err := json.Unmarshal(configData, &meta); err != nil { - return nil, err - } - result.Chart.Meta = meta - } - if getConfigDescriptorErr != nil { - return nil, getConfigDescriptorErr - } - if operation.withChart { - var getChartDescriptorErr error - if _, chartData, ok := memoryStore.Get(*chartDescriptor); !ok { - getChartDescriptorErr = errors.Errorf("Unable to retrieve blob with digest %s", chartDescriptor.Digest) - } else { - result.Chart.Data = chartData - result.Chart.Digest = chartDescriptor.Digest.String() - result.Chart.Size = chartDescriptor.Size - } - if getChartDescriptorErr != nil { - return nil, getChartDescriptorErr - } - } - if operation.withProv && !provMissing { - var getProvDescriptorErr error - if _, provData, ok := memoryStore.Get(*provDescriptor); !ok { - getProvDescriptorErr = errors.Errorf("Unable to retrieve blob with digest %s", provDescriptor.Digest) - } else { - result.Prov.Data = provData - result.Prov.Digest = provDescriptor.Digest.String() - result.Prov.Size = provDescriptor.Size - } - if getProvDescriptorErr != nil { - return nil, getProvDescriptorErr - } - } - - fmt.Fprintf(c.out, "Pulled: %s\n", result.Ref) - fmt.Fprintf(c.out, "Digest: 
%s\n", result.Manifest.Digest) - - if strings.Contains(result.Ref, "_") { - fmt.Fprintf(c.out, "%s contains an underscore.\n", result.Ref) - fmt.Fprint(c.out, registryUnderscoreMessage+"\n") - } - - return result, nil -} - -// PullOptWithChart returns a function that sets the withChart setting on pull -func PullOptWithChart(withChart bool) PullOption { - return func(operation *pullOperation) { - operation.withChart = withChart - } -} - -// PullOptWithProv returns a function that sets the withProv setting on pull -func PullOptWithProv(withProv bool) PullOption { - return func(operation *pullOperation) { - operation.withProv = withProv - } -} - -// PullOptIgnoreMissingProv returns a function that sets the ignoreMissingProv setting on pull -func PullOptIgnoreMissingProv(ignoreMissingProv bool) PullOption { - return func(operation *pullOperation) { - operation.ignoreMissingProv = ignoreMissingProv - } -} - -type ( - // PushOption allows specifying various settings on push - PushOption func(*pushOperation) - - // PushResult is the result returned upon successful push. - PushResult struct { - Manifest *descriptorPushSummary `json:"manifest"` - Config *descriptorPushSummary `json:"config"` - Chart *descriptorPushSummaryWithMeta `json:"chart"` - Prov *descriptorPushSummary `json:"prov"` - Ref string `json:"ref"` - } - - descriptorPushSummary struct { - Digest string `json:"digest"` - Size int64 `json:"size"` - } - - descriptorPushSummaryWithMeta struct { - descriptorPushSummary - Meta *chart.Metadata `json:"meta"` - } - - pushOperation struct { - provData []byte - strictMode bool - } -) - -// Push uploads a chart to a registry. -func (c *Client) Push(data []byte, ref string, options ...PushOption) (*PushResult, error) { - parsedRef, err := parseReference(ref) - if err != nil { - return nil, err - } - - operation := &pushOperation{ - strictMode: true, // By default, enable strict mode - } - for _, option := range options { - option(operation) - } - meta, err := extractChartMeta(data) - if err != nil { - return nil, err - } - if operation.strictMode { - if !strings.HasSuffix(ref, fmt.Sprintf("/%s:%s", meta.Name, meta.Version)) { - return nil, errors.New( - "strict mode enabled, ref basename and tag must match the chart name and version") - } - } - memoryStore := content.NewMemory() - chartDescriptor, err := memoryStore.Add("", ChartLayerMediaType, data) - if err != nil { - return nil, err - } - - configData, err := json.Marshal(meta) - if err != nil { - return nil, err - } - - configDescriptor, err := memoryStore.Add("", ConfigMediaType, configData) - if err != nil { - return nil, err - } - - descriptors := []ocispec.Descriptor{chartDescriptor} - var provDescriptor ocispec.Descriptor - if operation.provData != nil { - provDescriptor, err = memoryStore.Add("", ProvLayerMediaType, operation.provData) - if err != nil { - return nil, err - } - - descriptors = append(descriptors, provDescriptor) - } - - manifestData, manifest, err := content.GenerateManifest(&configDescriptor, nil, descriptors...) 
- if err != nil { - return nil, err - } - - if err := memoryStore.StoreManifest(parsedRef.String(), manifest, manifestData); err != nil { - return nil, err - } - - registryStore := content.Registry{Resolver: c.resolver} - _, err = oras.Copy(ctx(c.out, c.debug), memoryStore, parsedRef.String(), registryStore, "", - oras.WithNameValidation(nil)) - if err != nil { - return nil, err - } - chartSummary := &descriptorPushSummaryWithMeta{ - Meta: meta, - } - chartSummary.Digest = chartDescriptor.Digest.String() - chartSummary.Size = chartDescriptor.Size - result := &PushResult{ - Manifest: &descriptorPushSummary{ - Digest: manifest.Digest.String(), - Size: manifest.Size, - }, - Config: &descriptorPushSummary{ - Digest: configDescriptor.Digest.String(), - Size: configDescriptor.Size, - }, - Chart: chartSummary, - Prov: &descriptorPushSummary{}, // prevent nil references - Ref: parsedRef.String(), - } - if operation.provData != nil { - result.Prov = &descriptorPushSummary{ - Digest: provDescriptor.Digest.String(), - Size: provDescriptor.Size, - } - } - fmt.Fprintf(c.out, "Pushed: %s\n", result.Ref) - fmt.Fprintf(c.out, "Digest: %s\n", result.Manifest.Digest) - if strings.Contains(parsedRef.Reference, "_") { - fmt.Fprintf(c.out, "%s contains an underscore.\n", result.Ref) - fmt.Fprint(c.out, registryUnderscoreMessage+"\n") - } - - return result, err -} - -// PushOptProvData returns a function that sets the prov bytes setting on push -func PushOptProvData(provData []byte) PushOption { - return func(operation *pushOperation) { - operation.provData = provData - } -} - -// PushOptStrictMode returns a function that sets the strictMode setting on push -func PushOptStrictMode(strictMode bool) PushOption { - return func(operation *pushOperation) { - operation.strictMode = strictMode - } -} - -// Tags provides a sorted list all semver compliant tags for a given repository -func (c *Client) Tags(ref string) ([]string, error) { - parsedReference, err := registry.ParseReference(ref) - if err != nil { - return nil, err - } - - repository := registryremote.Repository{ - Reference: parsedReference, - Client: c.registryAuthorizer, - } - - var registryTags []string - - for { - registryTags, err = registry.Tags(ctx(c.out, c.debug), &repository) - if err != nil { - // Fallback to http based request - if !repository.PlainHTTP && strings.Contains(err.Error(), "server gave HTTP response") { - repository.PlainHTTP = true - continue - } - return nil, err - } - - break - - } - - var tagVersions []*semver.Version - for _, tag := range registryTags { - // Change underscore (_) back to plus (+) for Helm - // See https://github.com/helm/helm/issues/10166 - tagVersion, err := semver.StrictNewVersion(strings.ReplaceAll(tag, "_", "+")) - if err == nil { - tagVersions = append(tagVersions, tagVersion) - } - } - - // Sort the collection - sort.Sort(sort.Reverse(semver.Collection(tagVersions))) - - tags := make([]string, len(tagVersions)) - - for iTv, tv := range tagVersions { - tags[iTv] = tv.String() - } - - return tags, nil - -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/registry/constants.go b/src/vendor/helm.sh/helm/v3/pkg/registry/constants.go deleted file mode 100644 index 570b6f0d3..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/registry/constants.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
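The registry client removed above wraps ORAS for chart pulls, pushes and tag listing. A minimal sketch of its typical call sequence; the host, credentials and chart reference are placeholders:

    // Sketch only: log in, pull a chart plus its provenance layer, and list tags.
    package main

    import (
        "fmt"
        "log"
        "os"

        "helm.sh/helm/v3/pkg/registry"
    )

    func main() {
        client, err := registry.NewClient(
            registry.ClientOptWriter(os.Stdout),
            registry.ClientOptEnableCache(true),
        )
        if err != nil {
            log.Fatal(err)
        }

        if err := client.Login("registry.example.com",
            registry.LoginOptBasicAuth("user", "s3cret")); err != nil {
            log.Fatal(err)
        }

        result, err := client.Pull("registry.example.com/charts/mychart:1.2.3",
            registry.PullOptWithProv(true),
            registry.PullOptIgnoreMissingProv(true))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("chart digest:", result.Chart.Digest)

        tags, err := client.Tags("registry.example.com/charts/mychart")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("available versions:", tags)
    }
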
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package registry // import "helm.sh/helm/v3/pkg/registry" - -const ( - // OCIScheme is the URL scheme for OCI-based requests - OCIScheme = "oci" - - // CredentialsFileBasename is the filename for auth credentials file - CredentialsFileBasename = "registry/config.json" - - // ConfigMediaType is the reserved media type for the Helm chart manifest config - ConfigMediaType = "application/vnd.cncf.helm.config.v1+json" - - // ChartLayerMediaType is the reserved media type for Helm chart package content - ChartLayerMediaType = "application/vnd.cncf.helm.chart.content.v1.tar+gzip" - - // ProvLayerMediaType is the reserved media type for Helm chart provenance files - ProvLayerMediaType = "application/vnd.cncf.helm.chart.provenance.v1.prov" - - // LegacyChartLayerMediaType is the legacy reserved media type for Helm chart package content. - LegacyChartLayerMediaType = "application/tar+gzip" -) diff --git a/src/vendor/helm.sh/helm/v3/pkg/registry/util.go b/src/vendor/helm.sh/helm/v3/pkg/registry/util.go deleted file mode 100644 index 47eed267f..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/registry/util.go +++ /dev/null @@ -1,131 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package registry // import "helm.sh/helm/v3/pkg/registry" - -import ( - "bytes" - "context" - "fmt" - "io" - "strings" - - "github.com/Masterminds/semver/v3" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - orascontext "oras.land/oras-go/pkg/context" - "oras.land/oras-go/pkg/registry" - - "helm.sh/helm/v3/pkg/chart" - "helm.sh/helm/v3/pkg/chart/loader" -) - -// IsOCI determines whether or not a URL is to be treated as an OCI URL -func IsOCI(url string) bool { - return strings.HasPrefix(url, fmt.Sprintf("%s://", OCIScheme)) -} - -// ContainsTag determines whether a tag is found in a provided list of tags -func ContainsTag(tags []string, tag string) bool { - for _, t := range tags { - if tag == t { - return true - } - } - return false -} - -func GetTagMatchingVersionOrConstraint(tags []string, versionString string) (string, error) { - var constraint *semver.Constraints - if versionString == "" { - // If string is empty, set wildcard constraint - constraint, _ = semver.NewConstraint("*") - } else { - // when customer input exact version, check whether have exact match - // one first - for _, v := range tags { - if versionString == v { - return v, nil - } - } - - // Otherwise set constraint to the string given - var err error - constraint, err = semver.NewConstraint(versionString) - if err != nil { - return "", err - } - } - - // Otherwise try to find the first available version matching the string, - // in case it is a constraint - for _, v := range tags { - test, err := semver.NewVersion(v) - if err != nil { - continue - } - if constraint.Check(test) { - return v, nil - } - } - - return "", errors.Errorf("Could not locate a version matching provided version string %s", versionString) -} - -// extractChartMeta is used to extract a chart metadata from a byte array -func extractChartMeta(chartData []byte) (*chart.Metadata, error) { - ch, err := loader.LoadArchive(bytes.NewReader(chartData)) - if err != nil { - return nil, err - } - return ch.Metadata, nil -} - -// ctx retrieves a fresh context. -// disable verbose logging coming from ORAS (unless debug is enabled) -func ctx(out io.Writer, debug bool) context.Context { - if !debug { - return orascontext.Background() - } - ctx := orascontext.WithLoggerFromWriter(context.Background(), out) - orascontext.GetLogger(ctx).Logger.SetLevel(logrus.DebugLevel) - return ctx -} - -// parseReference will parse and validate the reference, and clean tags when -// applicable tags are only cleaned when plus (+) signs are present, and are -// converted to underscores (_) before pushing -// See https://github.com/helm/helm/issues/10166 -func parseReference(raw string) (registry.Reference, error) { - // The sole possible reference modification is replacing plus (+) signs - // present in tags with underscores (_). 
To do this properly, we first - // need to identify a tag, and then pass it on to the reference parser - // NOTE: Passing immediately to the reference parser will fail since (+) - // signs are an invalid tag character, and simply replacing all plus (+) - // occurrences could invalidate other portions of the URI - parts := strings.Split(raw, ":") - if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], "/") { - tag := parts[len(parts)-1] - - if tag != "" { - // Replace any plus (+) signs with known underscore (_) conversion - newTag := strings.ReplaceAll(tag, "+", "_") - raw = strings.ReplaceAll(raw, tag, newTag) - } - } - - return registry.ParseReference(raw) -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go b/src/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go deleted file mode 100644 index fce947e4c..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go +++ /dev/null @@ -1,313 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package repo // import "helm.sh/helm/v3/pkg/repo" - -import ( - "crypto/rand" - "encoding/base64" - "encoding/json" - "fmt" - "io/ioutil" - "log" - "net/url" - "os" - "path" - "path/filepath" - "strings" - - "github.com/pkg/errors" - "sigs.k8s.io/yaml" - - "helm.sh/helm/v3/pkg/chart/loader" - "helm.sh/helm/v3/pkg/getter" - "helm.sh/helm/v3/pkg/helmpath" - "helm.sh/helm/v3/pkg/provenance" -) - -// Entry represents a collection of parameters for chart repository -type Entry struct { - Name string `json:"name"` - URL string `json:"url"` - Username string `json:"username"` - Password string `json:"password"` - CertFile string `json:"certFile"` - KeyFile string `json:"keyFile"` - CAFile string `json:"caFile"` - InsecureSkipTLSverify bool `json:"insecure_skip_tls_verify"` - PassCredentialsAll bool `json:"pass_credentials_all"` -} - -// ChartRepository represents a chart repository -type ChartRepository struct { - Config *Entry - ChartPaths []string - IndexFile *IndexFile - Client getter.Getter - CachePath string -} - -// NewChartRepository constructs ChartRepository -func NewChartRepository(cfg *Entry, getters getter.Providers) (*ChartRepository, error) { - u, err := url.Parse(cfg.URL) - if err != nil { - return nil, errors.Errorf("invalid chart URL format: %s", cfg.URL) - } - - client, err := getters.ByScheme(u.Scheme) - if err != nil { - return nil, errors.Errorf("could not find protocol handler for: %s", u.Scheme) - } - - return &ChartRepository{ - Config: cfg, - IndexFile: NewIndexFile(), - Client: client, - CachePath: helmpath.CachePath("repository"), - }, nil -} - -// Load loads a directory of charts as if it were a repository. -// -// It requires the presence of an index.yaml file in the directory. -// -// Deprecated: remove in Helm 4. -func (r *ChartRepository) Load() error { - dirInfo, err := os.Stat(r.Config.Name) - if err != nil { - return err - } - if !dirInfo.IsDir() { - return errors.Errorf("%q is not a directory", r.Config.Name) - } - - // FIXME: Why are we recursively walking directories? 
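The util.go helpers removed just above handle OCI scheme detection and semver tag resolution. A small sketch, with a made-up tag list:

    // Sketch only: detect an OCI reference and resolve a version constraint
    // against a list of tags.
    package main

    import (
        "fmt"
        "log"

        "helm.sh/helm/v3/pkg/registry"
    )

    func main() {
        fmt.Println(registry.IsOCI("oci://registry.example.com/charts/mychart")) // true

        tags := []string{"1.2.3", "1.3.0", "2.0.0-rc.1"}
        tag, err := registry.GetTagMatchingVersionOrConstraint(tags, ">=1.2.0 <2.0.0")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("picked:", tag) // first tag in the list satisfying the constraint
    }
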
- // FIXME: Why are we not reading the repositories.yaml to figure out - // what repos to use? - filepath.Walk(r.Config.Name, func(path string, f os.FileInfo, err error) error { - if !f.IsDir() { - if strings.Contains(f.Name(), "-index.yaml") { - i, err := LoadIndexFile(path) - if err != nil { - return err - } - r.IndexFile = i - } else if strings.HasSuffix(f.Name(), ".tgz") { - r.ChartPaths = append(r.ChartPaths, path) - } - } - return nil - }) - return nil -} - -// DownloadIndexFile fetches the index from a repository. -func (r *ChartRepository) DownloadIndexFile() (string, error) { - parsedURL, err := url.Parse(r.Config.URL) - if err != nil { - return "", err - } - parsedURL.RawPath = path.Join(parsedURL.RawPath, "index.yaml") - parsedURL.Path = path.Join(parsedURL.Path, "index.yaml") - - indexURL := parsedURL.String() - // TODO add user-agent - resp, err := r.Client.Get(indexURL, - getter.WithURL(r.Config.URL), - getter.WithInsecureSkipVerifyTLS(r.Config.InsecureSkipTLSverify), - getter.WithTLSClientConfig(r.Config.CertFile, r.Config.KeyFile, r.Config.CAFile), - getter.WithBasicAuth(r.Config.Username, r.Config.Password), - getter.WithPassCredentialsAll(r.Config.PassCredentialsAll), - ) - if err != nil { - return "", err - } - - index, err := ioutil.ReadAll(resp) - if err != nil { - return "", err - } - - indexFile, err := loadIndex(index, r.Config.URL) - if err != nil { - return "", err - } - - // Create the chart list file in the cache directory - var charts strings.Builder - for name := range indexFile.Entries { - fmt.Fprintln(&charts, name) - } - chartsFile := filepath.Join(r.CachePath, helmpath.CacheChartsFile(r.Config.Name)) - os.MkdirAll(filepath.Dir(chartsFile), 0755) - ioutil.WriteFile(chartsFile, []byte(charts.String()), 0644) - - // Create the index file in the cache directory - fname := filepath.Join(r.CachePath, helmpath.CacheIndexFile(r.Config.Name)) - os.MkdirAll(filepath.Dir(fname), 0755) - return fname, ioutil.WriteFile(fname, index, 0644) -} - -// Index generates an index for the chart repository and writes an index.yaml file. -func (r *ChartRepository) Index() error { - err := r.generateIndex() - if err != nil { - return err - } - return r.saveIndexFile() -} - -func (r *ChartRepository) saveIndexFile() error { - index, err := yaml.Marshal(r.IndexFile) - if err != nil { - return err - } - return ioutil.WriteFile(filepath.Join(r.Config.Name, indexPath), index, 0644) -} - -func (r *ChartRepository) generateIndex() error { - for _, path := range r.ChartPaths { - ch, err := loader.Load(path) - if err != nil { - return err - } - - digest, err := provenance.DigestFile(path) - if err != nil { - return err - } - - if !r.IndexFile.Has(ch.Name(), ch.Metadata.Version) { - if err := r.IndexFile.MustAdd(ch.Metadata, path, r.Config.URL, digest); err != nil { - return errors.Wrapf(err, "failed adding to %s to index", path) - } - } - // TODO: If a chart exists, but has a different Digest, should we error? 
- } - r.IndexFile.SortEntries() - return nil -} - -// FindChartInRepoURL finds chart in chart repository pointed by repoURL -// without adding repo to repositories -func FindChartInRepoURL(repoURL, chartName, chartVersion, certFile, keyFile, caFile string, getters getter.Providers) (string, error) { - return FindChartInAuthRepoURL(repoURL, "", "", chartName, chartVersion, certFile, keyFile, caFile, getters) -} - -// FindChartInAuthRepoURL finds chart in chart repository pointed by repoURL -// without adding repo to repositories, like FindChartInRepoURL, -// but it also receives credentials for the chart repository. -func FindChartInAuthRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile string, getters getter.Providers) (string, error) { - return FindChartInAuthAndTLSRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile, false, getters) -} - -// FindChartInAuthAndTLSRepoURL finds chart in chart repository pointed by repoURL -// without adding repo to repositories, like FindChartInRepoURL, -// but it also receives credentials and TLS verify flag for the chart repository. -// TODO Helm 4, FindChartInAuthAndTLSRepoURL should be integrated into FindChartInAuthRepoURL. -func FindChartInAuthAndTLSRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile string, insecureSkipTLSverify bool, getters getter.Providers) (string, error) { - return FindChartInAuthAndTLSAndPassRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile, false, false, getters) -} - -// FindChartInAuthAndTLSAndPassRepoURL finds chart in chart repository pointed by repoURL -// without adding repo to repositories, like FindChartInRepoURL, -// but it also receives credentials, TLS verify flag, and if credentials should -// be passed on to other domains. -// TODO Helm 4, FindChartInAuthAndTLSAndPassRepoURL should be integrated into FindChartInAuthRepoURL. 
-func FindChartInAuthAndTLSAndPassRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile string, insecureSkipTLSverify, passCredentialsAll bool, getters getter.Providers) (string, error) { - - // Download and write the index file to a temporary location - buf := make([]byte, 20) - rand.Read(buf) - name := strings.ReplaceAll(base64.StdEncoding.EncodeToString(buf), "/", "-") - - c := Entry{ - URL: repoURL, - Username: username, - Password: password, - PassCredentialsAll: passCredentialsAll, - CertFile: certFile, - KeyFile: keyFile, - CAFile: caFile, - Name: name, - InsecureSkipTLSverify: insecureSkipTLSverify, - } - r, err := NewChartRepository(&c, getters) - if err != nil { - return "", err - } - idx, err := r.DownloadIndexFile() - if err != nil { - return "", errors.Wrapf(err, "looks like %q is not a valid chart repository or cannot be reached", repoURL) - } - defer func() { - os.RemoveAll(filepath.Join(r.CachePath, helmpath.CacheChartsFile(r.Config.Name))) - os.RemoveAll(filepath.Join(r.CachePath, helmpath.CacheIndexFile(r.Config.Name))) - }() - - // Read the index file for the repository to get chart information and return chart URL - repoIndex, err := LoadIndexFile(idx) - if err != nil { - return "", err - } - - errMsg := fmt.Sprintf("chart %q", chartName) - if chartVersion != "" { - errMsg = fmt.Sprintf("%s version %q", errMsg, chartVersion) - } - cv, err := repoIndex.Get(chartName, chartVersion) - if err != nil { - return "", errors.Errorf("%s not found in %s repository", errMsg, repoURL) - } - - if len(cv.URLs) == 0 { - return "", errors.Errorf("%s has no downloadable URLs", errMsg) - } - - chartURL := cv.URLs[0] - - absoluteChartURL, err := ResolveReferenceURL(repoURL, chartURL) - if err != nil { - return "", errors.Wrap(err, "failed to make chart URL absolute") - } - - return absoluteChartURL, nil -} - -// ResolveReferenceURL resolves refURL relative to baseURL. -// If refURL is absolute, it simply returns refURL. -func ResolveReferenceURL(baseURL, refURL string) (string, error) { - // We need a trailing slash for ResolveReference to work, but make sure there isn't already one - parsedBaseURL, err := url.Parse(strings.TrimSuffix(baseURL, "/") + "/") - if err != nil { - return "", errors.Wrapf(err, "failed to parse %s as URL", baseURL) - } - - parsedRefURL, err := url.Parse(refURL) - if err != nil { - return "", errors.Wrapf(err, "failed to parse %s as URL", refURL) - } - - return parsedBaseURL.ResolveReference(parsedRefURL).String(), nil -} - -func (e *Entry) String() string { - buf, err := json.Marshal(e) - if err != nil { - log.Panic(err) - } - return string(buf) -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/repo/doc.go b/src/vendor/helm.sh/helm/v3/pkg/repo/doc.go deleted file mode 100644 index 05650100b..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/repo/doc.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/*Package repo implements the Helm Chart Repository. 
- -A chart repository is an HTTP server that provides information on charts. A local -repository cache is an on-disk representation of a chart repository. - -There are two important file formats for chart repositories. - -The first is the 'index.yaml' format, which is expressed like this: - - apiVersion: v1 - entries: - frobnitz: - - created: 2016-09-29T12:14:34.830161306-06:00 - description: This is a frobnitz. - digest: 587bd19a9bd9d2bc4a6d25ab91c8c8e7042c47b4ac246e37bf8e1e74386190f4 - home: http://example.com - keywords: - - frobnitz - - sprocket - - dodad - maintainers: - - email: helm@example.com - name: The Helm Team - - email: nobody@example.com - name: Someone Else - name: frobnitz - urls: - - http://example-charts.com/testdata/repository/frobnitz-1.2.3.tgz - version: 1.2.3 - sprocket: - - created: 2016-09-29T12:14:34.830507606-06:00 - description: This is a sprocket" - digest: 8505ff813c39502cc849a38e1e4a8ac24b8e6e1dcea88f4c34ad9b7439685ae6 - home: http://example.com - keywords: - - frobnitz - - sprocket - - dodad - maintainers: - - email: helm@example.com - name: The Helm Team - - email: nobody@example.com - name: Someone Else - name: sprocket - urls: - - http://example-charts.com/testdata/repository/sprocket-1.2.0.tgz - version: 1.2.0 - generated: 2016-09-29T12:14:34.829721375-06:00 - -An index.yaml file contains the necessary descriptive information about what -charts are available in a repository, and how to get them. - -The second file format is the repositories.yaml file format. This file is for -facilitating local cached copies of one or more chart repositories. - -The format of a repository.yaml file is: - - apiVersion: v1 - generated: TIMESTAMP - repositories: - - name: stable - url: http://example.com/charts - cache: stable-index.yaml - - name: incubator - url: http://example.com/incubator - cache: incubator-index.yaml - -This file maps three bits of information about a repository: - - - The name the user uses to refer to it - - The fully qualified URL to the repository (index.yaml will be appended) - - The name of the local cachefile - -The format for both files was changed after Helm v2.0.0-Alpha.4. Helm is not -backwards compatible with those earlier versions. -*/ -package repo diff --git a/src/vendor/helm.sh/helm/v3/pkg/repo/index.go b/src/vendor/helm.sh/helm/v3/pkg/repo/index.go deleted file mode 100644 index 60cfe5801..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/repo/index.go +++ /dev/null @@ -1,364 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package repo - -import ( - "bytes" - "io/ioutil" - "log" - "os" - "path" - "path/filepath" - "sort" - "strings" - "time" - - "github.com/Masterminds/semver/v3" - "github.com/pkg/errors" - "sigs.k8s.io/yaml" - - "helm.sh/helm/v3/internal/fileutil" - "helm.sh/helm/v3/internal/urlutil" - "helm.sh/helm/v3/pkg/chart" - "helm.sh/helm/v3/pkg/chart/loader" - "helm.sh/helm/v3/pkg/provenance" -) - -var indexPath = "index.yaml" - -// APIVersionV1 is the v1 API version for index and repository files. 
-const APIVersionV1 = "v1" - -var ( - // ErrNoAPIVersion indicates that an API version was not specified. - ErrNoAPIVersion = errors.New("no API version specified") - // ErrNoChartVersion indicates that a chart with the given version is not found. - ErrNoChartVersion = errors.New("no chart version found") - // ErrNoChartName indicates that a chart with the given name is not found. - ErrNoChartName = errors.New("no chart name found") - // ErrEmptyIndexYaml indicates that the content of index.yaml is empty. - ErrEmptyIndexYaml = errors.New("empty index.yaml file") -) - -// ChartVersions is a list of versioned chart references. -// Implements a sorter on Version. -type ChartVersions []*ChartVersion - -// Len returns the length. -func (c ChartVersions) Len() int { return len(c) } - -// Swap swaps the position of two items in the versions slice. -func (c ChartVersions) Swap(i, j int) { c[i], c[j] = c[j], c[i] } - -// Less returns true if the version of entry a is less than the version of entry b. -func (c ChartVersions) Less(a, b int) bool { - // Failed parse pushes to the back. - i, err := semver.NewVersion(c[a].Version) - if err != nil { - return true - } - j, err := semver.NewVersion(c[b].Version) - if err != nil { - return false - } - return i.LessThan(j) -} - -// IndexFile represents the index file in a chart repository -type IndexFile struct { - // This is used ONLY for validation against chartmuseum's index files and is discarded after validation. - ServerInfo map[string]interface{} `json:"serverInfo,omitempty"` - APIVersion string `json:"apiVersion"` - Generated time.Time `json:"generated"` - Entries map[string]ChartVersions `json:"entries"` - PublicKeys []string `json:"publicKeys,omitempty"` - - // Annotations are additional mappings uninterpreted by Helm. They are made available for - // other applications to add information to the index file. - Annotations map[string]string `json:"annotations,omitempty"` -} - -// NewIndexFile initializes an index. -func NewIndexFile() *IndexFile { - return &IndexFile{ - APIVersion: APIVersionV1, - Generated: time.Now(), - Entries: map[string]ChartVersions{}, - PublicKeys: []string{}, - } -} - -// LoadIndexFile takes a file at the given path and returns an IndexFile object -func LoadIndexFile(path string) (*IndexFile, error) { - b, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - i, err := loadIndex(b, path) - if err != nil { - return nil, errors.Wrapf(err, "error loading %s", path) - } - return i, nil -} - -// MustAdd adds a file to the index -// This can leave the index in an unsorted state -func (i IndexFile) MustAdd(md *chart.Metadata, filename, baseURL, digest string) error { - if i.Entries == nil { - return errors.New("entries not initialized") - } - - if md.APIVersion == "" { - md.APIVersion = chart.APIVersionV1 - } - if err := md.Validate(); err != nil { - return errors.Wrapf(err, "validate failed for %s", filename) - } - - u := filename - if baseURL != "" { - _, file := filepath.Split(filename) - var err error - u, err = urlutil.URLJoin(baseURL, file) - if err != nil { - u = path.Join(baseURL, file) - } - } - cr := &ChartVersion{ - URLs: []string{u}, - Metadata: md, - Digest: digest, - Created: time.Now(), - } - ee := i.Entries[md.Name] - i.Entries[md.Name] = append(ee, cr) - return nil -} - -// Add adds a file to the index and logs an error. -// -// Deprecated: Use index.MustAdd instead. 
-func (i IndexFile) Add(md *chart.Metadata, filename, baseURL, digest string) { - if err := i.MustAdd(md, filename, baseURL, digest); err != nil { - log.Printf("skipping loading invalid entry for chart %q %q from %s: %s", md.Name, md.Version, filename, err) - } -} - -// Has returns true if the index has an entry for a chart with the given name and exact version. -func (i IndexFile) Has(name, version string) bool { - _, err := i.Get(name, version) - return err == nil -} - -// SortEntries sorts the entries by version in descending order. -// -// In canonical form, the individual version records should be sorted so that -// the most recent release for every version is in the 0th slot in the -// Entries.ChartVersions array. That way, tooling can predict the newest -// version without needing to parse SemVers. -func (i IndexFile) SortEntries() { - for _, versions := range i.Entries { - sort.Sort(sort.Reverse(versions)) - } -} - -// Get returns the ChartVersion for the given name. -// -// If version is empty, this will return the chart with the latest stable version, -// prerelease versions will be skipped. -func (i IndexFile) Get(name, version string) (*ChartVersion, error) { - vs, ok := i.Entries[name] - if !ok { - return nil, ErrNoChartName - } - if len(vs) == 0 { - return nil, ErrNoChartVersion - } - - var constraint *semver.Constraints - if version == "" { - constraint, _ = semver.NewConstraint("*") - } else { - var err error - constraint, err = semver.NewConstraint(version) - if err != nil { - return nil, err - } - } - - // when customer input exact version, check whether have exact match one first - if len(version) != 0 { - for _, ver := range vs { - if version == ver.Version { - return ver, nil - } - } - } - - for _, ver := range vs { - test, err := semver.NewVersion(ver.Version) - if err != nil { - continue - } - - if constraint.Check(test) { - return ver, nil - } - } - return nil, errors.Errorf("no chart version found for %s-%s", name, version) -} - -// WriteFile writes an index file to the given destination path. -// -// The mode on the file is set to 'mode'. -func (i IndexFile) WriteFile(dest string, mode os.FileMode) error { - b, err := yaml.Marshal(i) - if err != nil { - return err - } - return fileutil.AtomicWriteFile(dest, bytes.NewReader(b), mode) -} - -// Merge merges the given index file into this index. -// -// This merges by name and version. -// -// If one of the entries in the given index does _not_ already exist, it is added. -// In all other cases, the existing record is preserved. -// -// This can leave the index in an unsorted state -func (i *IndexFile) Merge(f *IndexFile) { - for _, cvs := range f.Entries { - for _, cv := range cvs { - if !i.Has(cv.Name, cv.Version) { - e := i.Entries[cv.Name] - i.Entries[cv.Name] = append(e, cv) - } - } - } -} - -// ChartVersion represents a chart entry in the IndexFile -type ChartVersion struct { - *chart.Metadata - URLs []string `json:"urls"` - Created time.Time `json:"created,omitempty"` - Removed bool `json:"removed,omitempty"` - Digest string `json:"digest,omitempty"` - - // ChecksumDeprecated is deprecated in Helm 3, and therefore ignored. Helm 3 replaced - // this with Digest. However, with a strict YAML parser enabled, a field must be - // present on the struct for backwards compatibility. - ChecksumDeprecated string `json:"checksum,omitempty"` - - // EngineDeprecated is deprecated in Helm 3, and therefore ignored. However, with a strict - // YAML parser enabled, this field must be present. 
- EngineDeprecated string `json:"engine,omitempty"` - - // TillerVersionDeprecated is deprecated in Helm 3, and therefore ignored. However, with a strict - // YAML parser enabled, this field must be present. - TillerVersionDeprecated string `json:"tillerVersion,omitempty"` - - // URLDeprecated is deprecated in Helm 3, superseded by URLs. It is ignored. However, - // with a strict YAML parser enabled, this must be present on the struct. - URLDeprecated string `json:"url,omitempty"` -} - -// IndexDirectory reads a (flat) directory and generates an index. -// -// It indexes only charts that have been packaged (*.tgz). -// -// The index returned will be in an unsorted state -func IndexDirectory(dir, baseURL string) (*IndexFile, error) { - archives, err := filepath.Glob(filepath.Join(dir, "*.tgz")) - if err != nil { - return nil, err - } - moreArchives, err := filepath.Glob(filepath.Join(dir, "**/*.tgz")) - if err != nil { - return nil, err - } - archives = append(archives, moreArchives...) - - index := NewIndexFile() - for _, arch := range archives { - fname, err := filepath.Rel(dir, arch) - if err != nil { - return index, err - } - - var parentDir string - parentDir, fname = filepath.Split(fname) - // filepath.Split appends an extra slash to the end of parentDir. We want to strip that out. - parentDir = strings.TrimSuffix(parentDir, string(os.PathSeparator)) - parentURL, err := urlutil.URLJoin(baseURL, parentDir) - if err != nil { - parentURL = path.Join(baseURL, parentDir) - } - - c, err := loader.Load(arch) - if err != nil { - // Assume this is not a chart. - continue - } - hash, err := provenance.DigestFile(arch) - if err != nil { - return index, err - } - if err := index.MustAdd(c.Metadata, fname, parentURL, hash); err != nil { - return index, errors.Wrapf(err, "failed adding to %s to index", fname) - } - } - return index, nil -} - -// loadIndex loads an index file and does minimal validity checking. -// -// The source parameter is only used for logging. -// This will fail if API Version is not set (ErrNoAPIVersion) or if the unmarshal fails. -func loadIndex(data []byte, source string) (*IndexFile, error) { - i := &IndexFile{} - - if len(data) == 0 { - return i, ErrEmptyIndexYaml - } - - if err := yaml.UnmarshalStrict(data, i); err != nil { - return i, err - } - - for name, cvs := range i.Entries { - for idx := len(cvs) - 1; idx >= 0; idx-- { - if cvs[idx] == nil { - log.Printf("skipping loading invalid entry for chart %q from %s: empty entry", name, source) - continue - } - if cvs[idx].APIVersion == "" { - cvs[idx].APIVersion = chart.APIVersionV1 - } - if err := cvs[idx].Validate(); err != nil { - log.Printf("skipping loading invalid entry for chart %q %q from %s: %s", name, cvs[idx].Version, source, err) - cvs = append(cvs[:idx], cvs[idx+1:]...) - } - } - } - i.SortEntries() - if i.APIVersion == "" { - return i, ErrNoAPIVersion - } - return i, nil -} diff --git a/src/vendor/helm.sh/helm/v3/pkg/repo/repo.go b/src/vendor/helm.sh/helm/v3/pkg/repo/repo.go deleted file mode 100644 index ee80d04f4..000000000 --- a/src/vendor/helm.sh/helm/v3/pkg/repo/repo.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package repo // import "helm.sh/helm/v3/pkg/repo" - -import ( - "io/ioutil" - "os" - "path/filepath" - "time" - - "github.com/pkg/errors" - "sigs.k8s.io/yaml" -) - -// File represents the repositories.yaml file -type File struct { - APIVersion string `json:"apiVersion"` - Generated time.Time `json:"generated"` - Repositories []*Entry `json:"repositories"` -} - -// NewFile generates an empty repositories file. -// -// Generated and APIVersion are automatically set. -func NewFile() *File { - return &File{ - APIVersion: APIVersionV1, - Generated: time.Now(), - Repositories: []*Entry{}, - } -} - -// LoadFile takes a file at the given path and returns a File object -func LoadFile(path string) (*File, error) { - r := new(File) - b, err := ioutil.ReadFile(path) - if err != nil { - return r, errors.Wrapf(err, "couldn't load repositories file (%s)", path) - } - - err = yaml.Unmarshal(b, r) - return r, err -} - -// Add adds one or more repo entries to a repo file. -func (r *File) Add(re ...*Entry) { - r.Repositories = append(r.Repositories, re...) -} - -// Update attempts to replace one or more repo entries in a repo file. If an -// entry with the same name doesn't exist in the repo file it will add it. -func (r *File) Update(re ...*Entry) { - for _, target := range re { - r.update(target) - } -} - -func (r *File) update(e *Entry) { - for j, repo := range r.Repositories { - if repo.Name == e.Name { - r.Repositories[j] = e - return - } - } - r.Add(e) -} - -// Has returns true if the given name is already a repository name. -func (r *File) Has(name string) bool { - entry := r.Get(name) - return entry != nil -} - -// Get returns an entry with the given name if it exists, otherwise returns nil -func (r *File) Get(name string) *Entry { - for _, entry := range r.Repositories { - if entry.Name == name { - return entry - } - } - return nil -} - -// Remove removes the entry from the list of repositories. -func (r *File) Remove(name string) bool { - cp := []*Entry{} - found := false - for _, rf := range r.Repositories { - if rf == nil { - continue - } - if rf.Name == name { - found = true - continue - } - cp = append(cp, rf) - } - r.Repositories = cp - return found -} - -// WriteFile writes a repositories file to the given path. -func (r *File) WriteFile(path string, perm os.FileMode) error { - data, err := yaml.Marshal(r) - if err != nil { - return err - } - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return err - } - return ioutil.WriteFile(path, data, perm) -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/LICENSE b/src/vendor/k8s.io/apiextensions-apiserver/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go deleted file mode 100644 index 2bd5d5293..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go +++ /dev/null @@ -1,300 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apiextensions - -import "k8s.io/apimachinery/pkg/runtime" - -// TODO: Update this after a tag is created for interface fields in DeepCopy -func (in *JSONSchemaProps) DeepCopy() *JSONSchemaProps { - if in == nil { - return nil - } - out := new(JSONSchemaProps) - - *out = *in - - if in.Default != nil { - defaultJSON := JSON(runtime.DeepCopyJSONValue(*(in.Default))) - out.Default = &(defaultJSON) - } else { - out.Default = nil - } - - if in.Example != nil { - exampleJSON := JSON(runtime.DeepCopyJSONValue(*(in.Example))) - out.Example = &(exampleJSON) - } else { - out.Example = nil - } - - if in.Ref != nil { - in, out := &in.Ref, &out.Ref - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in - } - } - - if in.Maximum != nil { - in, out := &in.Maximum, &out.Maximum - if *in == nil { - *out = nil - } else { - *out = new(float64) - **out = **in - } - } - - if in.Minimum != nil { - in, out := &in.Minimum, &out.Minimum - if *in == nil { - *out = nil - } else { - *out = new(float64) - **out = **in - } - } - - if in.MaxLength != nil { - in, out := &in.MaxLength, &out.MaxLength - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - - if in.MinLength != nil { - in, out := &in.MinLength, &out.MinLength - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - if in.MaxItems != nil { - in, out := &in.MaxItems, &out.MaxItems - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - - if in.MinItems != nil { - in, out := &in.MinItems, &out.MinItems - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - - if in.MultipleOf != nil { - in, out := &in.MultipleOf, &out.MultipleOf - if *in == nil { - *out = nil - } else { - *out = new(float64) - **out = **in - } - } - - if in.Enum != nil { - out.Enum = make([]JSON, len(in.Enum)) - for i := range in.Enum { - out.Enum[i] = runtime.DeepCopyJSONValue(in.Enum[i]) - } - } - - if in.MaxProperties != nil { - in, out := &in.MaxProperties, &out.MaxProperties - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - - if in.MinProperties != nil { - in, out := &in.MinProperties, &out.MinProperties - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - - if in.Required != nil { - in, out := &in.Required, &out.Required - *out = make([]string, len(*in)) - copy(*out, *in) - } - - if in.Items != nil { - in, out := &in.Items, &out.Items - if *in == nil { - *out = nil - } else { - *out = new(JSONSchemaPropsOrArray) - (*in).DeepCopyInto(*out) - } - } - - if in.AllOf != 
nil { - in, out := &in.AllOf, &out.AllOf - *out = make([]JSONSchemaProps, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - - if in.OneOf != nil { - in, out := &in.OneOf, &out.OneOf - *out = make([]JSONSchemaProps, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.AnyOf != nil { - in, out := &in.AnyOf, &out.AnyOf - *out = make([]JSONSchemaProps, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - - if in.Not != nil { - in, out := &in.Not, &out.Not - if *in == nil { - *out = nil - } else { - *out = new(JSONSchemaProps) - (*in).DeepCopyInto(*out) - } - } - - if in.Properties != nil { - in, out := &in.Properties, &out.Properties - *out = make(map[string]JSONSchemaProps, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - - if in.AdditionalProperties != nil { - in, out := &in.AdditionalProperties, &out.AdditionalProperties - if *in == nil { - *out = nil - } else { - *out = new(JSONSchemaPropsOrBool) - (*in).DeepCopyInto(*out) - } - } - - if in.PatternProperties != nil { - in, out := &in.PatternProperties, &out.PatternProperties - *out = make(map[string]JSONSchemaProps, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - - if in.Dependencies != nil { - in, out := &in.Dependencies, &out.Dependencies - *out = make(JSONSchemaDependencies, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - - if in.AdditionalItems != nil { - in, out := &in.AdditionalItems, &out.AdditionalItems - if *in == nil { - *out = nil - } else { - *out = new(JSONSchemaPropsOrBool) - (*in).DeepCopyInto(*out) - } - } - - if in.Definitions != nil { - in, out := &in.Definitions, &out.Definitions - *out = make(JSONSchemaDefinitions, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - - if in.ExternalDocs != nil { - in, out := &in.ExternalDocs, &out.ExternalDocs - if *in == nil { - *out = nil - } else { - *out = new(ExternalDocumentation) - (*in).DeepCopyInto(*out) - } - } - - if in.XPreserveUnknownFields != nil { - in, out := &in.XPreserveUnknownFields, &out.XPreserveUnknownFields - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - - if in.XListMapKeys != nil { - in, out := &in.XListMapKeys, &out.XListMapKeys - *out = make([]string, len(*in)) - copy(*out, *in) - } - - if in.XListType != nil { - in, out := &in.XListType, &out.XListType - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in - } - } - - if in.XMapType != nil { - in, out := &in.XMapType, &out.XMapType - *out = new(string) - **out = **in - } - - if in.XValidations != nil { - in, out := &in.XValidations, &out.XValidations - *out = make([]ValidationRule, len(*in)) - copy(*out, *in) - } - - return out -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/doc.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/doc.go deleted file mode 100644 index 2a6b02dcc..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +groupName=apiextensions.k8s.io - -// Package apiextensions is the internal version of the API. -package apiextensions // import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go deleted file mode 100644 index 52d6ea866..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go +++ /dev/null @@ -1,257 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apiextensions - -import ( - "fmt" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -var swaggerMetadataDescriptions = metav1.ObjectMeta{}.SwaggerDoc() - -// SetCRDCondition sets the status condition. It either overwrites the existing one or creates a new one. -func SetCRDCondition(crd *CustomResourceDefinition, newCondition CustomResourceDefinitionCondition) { - newCondition.LastTransitionTime = metav1.NewTime(time.Now()) - - existingCondition := FindCRDCondition(crd, newCondition.Type) - if existingCondition == nil { - crd.Status.Conditions = append(crd.Status.Conditions, newCondition) - return - } - - if existingCondition.Status != newCondition.Status || existingCondition.LastTransitionTime.IsZero() { - existingCondition.LastTransitionTime = newCondition.LastTransitionTime - } - - existingCondition.Status = newCondition.Status - existingCondition.Reason = newCondition.Reason - existingCondition.Message = newCondition.Message -} - -// RemoveCRDCondition removes the status condition. -func RemoveCRDCondition(crd *CustomResourceDefinition, conditionType CustomResourceDefinitionConditionType) { - newConditions := []CustomResourceDefinitionCondition{} - for _, condition := range crd.Status.Conditions { - if condition.Type != conditionType { - newConditions = append(newConditions, condition) - } - } - crd.Status.Conditions = newConditions -} - -// FindCRDCondition returns the condition you're looking for or nil. -func FindCRDCondition(crd *CustomResourceDefinition, conditionType CustomResourceDefinitionConditionType) *CustomResourceDefinitionCondition { - for i := range crd.Status.Conditions { - if crd.Status.Conditions[i].Type == conditionType { - return &crd.Status.Conditions[i] - } - } - - return nil -} - -// IsCRDConditionTrue indicates if the condition is present and strictly true. 
-func IsCRDConditionTrue(crd *CustomResourceDefinition, conditionType CustomResourceDefinitionConditionType) bool { - return IsCRDConditionPresentAndEqual(crd, conditionType, ConditionTrue) -} - -// IsCRDConditionFalse indicates if the condition is present and false. -func IsCRDConditionFalse(crd *CustomResourceDefinition, conditionType CustomResourceDefinitionConditionType) bool { - return IsCRDConditionPresentAndEqual(crd, conditionType, ConditionFalse) -} - -// IsCRDConditionPresentAndEqual indicates if the condition is present and equal to the given status. -func IsCRDConditionPresentAndEqual(crd *CustomResourceDefinition, conditionType CustomResourceDefinitionConditionType, status ConditionStatus) bool { - for _, condition := range crd.Status.Conditions { - if condition.Type == conditionType { - return condition.Status == status - } - } - return false -} - -// IsCRDConditionEquivalent returns true if the lhs and rhs are equivalent except for times. -func IsCRDConditionEquivalent(lhs, rhs *CustomResourceDefinitionCondition) bool { - if lhs == nil && rhs == nil { - return true - } - if lhs == nil || rhs == nil { - return false - } - - return lhs.Message == rhs.Message && lhs.Reason == rhs.Reason && lhs.Status == rhs.Status && lhs.Type == rhs.Type -} - -// CRDHasFinalizer returns true if the finalizer is in the list. -func CRDHasFinalizer(crd *CustomResourceDefinition, needle string) bool { - for _, finalizer := range crd.Finalizers { - if finalizer == needle { - return true - } - } - - return false -} - -// CRDRemoveFinalizer removes the finalizer if present. -func CRDRemoveFinalizer(crd *CustomResourceDefinition, needle string) { - newFinalizers := []string{} - for _, finalizer := range crd.Finalizers { - if finalizer != needle { - newFinalizers = append(newFinalizers, finalizer) - } - } - crd.Finalizers = newFinalizers -} - -// HasServedCRDVersion returns true if the given version is in the list of CRD's versions and the Served flag is set. -func HasServedCRDVersion(crd *CustomResourceDefinition, version string) bool { - for _, v := range crd.Spec.Versions { - if v.Name == version { - return v.Served - } - } - return false -} - -// GetCRDStorageVersion returns the storage version for given CRD. -func GetCRDStorageVersion(crd *CustomResourceDefinition) (string, error) { - for _, v := range crd.Spec.Versions { - if v.Storage { - return v.Name, nil - } - } - // This should not happened if crd is valid - return "", fmt.Errorf("invalid CustomResourceDefinition, no storage version") -} - -// IsStoredVersion returns whether the given version is the storage version of the CRD. -func IsStoredVersion(crd *CustomResourceDefinition, version string) bool { - for _, v := range crd.Status.StoredVersions { - if version == v { - return true - } - } - return false -} - -// GetSchemaForVersion returns the validation schema for the given version or nil. 
-func GetSchemaForVersion(crd *CustomResourceDefinition, version string) (*CustomResourceValidation, error) { - if !HasPerVersionSchema(crd.Spec.Versions) { - return crd.Spec.Validation, nil - } - if crd.Spec.Validation != nil { - return nil, fmt.Errorf("malformed CustomResourceDefinition %s version %s: top-level and per-version schemas must be mutual exclusive", crd.Name, version) - } - for _, v := range crd.Spec.Versions { - if version == v.Name { - return v.Schema, nil - } - } - return nil, fmt.Errorf("version %s not found in CustomResourceDefinition: %v", version, crd.Name) -} - -// GetSubresourcesForVersion returns the subresources for given version or nil. -func GetSubresourcesForVersion(crd *CustomResourceDefinition, version string) (*CustomResourceSubresources, error) { - if !HasPerVersionSubresources(crd.Spec.Versions) { - return crd.Spec.Subresources, nil - } - if crd.Spec.Subresources != nil { - return nil, fmt.Errorf("malformed CustomResourceDefinition %s version %s: top-level and per-version subresources must be mutual exclusive", crd.Name, version) - } - for _, v := range crd.Spec.Versions { - if version == v.Name { - return v.Subresources, nil - } - } - return nil, fmt.Errorf("version %s not found in CustomResourceDefinition: %v", version, crd.Name) -} - -// GetColumnsForVersion returns the columns for given version or nil. -// NOTE: the newly logically-defaulted columns is not pointing to the original CRD object. -// One cannot mutate the original CRD columns using the logically-defaulted columns. Please iterate through -// the original CRD object instead. -func GetColumnsForVersion(crd *CustomResourceDefinition, version string) ([]CustomResourceColumnDefinition, error) { - if !HasPerVersionColumns(crd.Spec.Versions) { - return serveDefaultColumnsIfEmpty(crd.Spec.AdditionalPrinterColumns), nil - } - if len(crd.Spec.AdditionalPrinterColumns) > 0 { - return nil, fmt.Errorf("malformed CustomResourceDefinition %s version %s: top-level and per-version additionalPrinterColumns must be mutual exclusive", crd.Name, version) - } - for _, v := range crd.Spec.Versions { - if version == v.Name { - return serveDefaultColumnsIfEmpty(v.AdditionalPrinterColumns), nil - } - } - return nil, fmt.Errorf("version %s not found in CustomResourceDefinition: %v", version, crd.Name) -} - -// HasPerVersionSchema returns true if a CRD uses per-version schema. -func HasPerVersionSchema(versions []CustomResourceDefinitionVersion) bool { - for _, v := range versions { - if v.Schema != nil { - return true - } - } - return false -} - -// HasPerVersionSubresources returns true if a CRD uses per-version subresources. -func HasPerVersionSubresources(versions []CustomResourceDefinitionVersion) bool { - for _, v := range versions { - if v.Subresources != nil { - return true - } - } - return false -} - -// HasPerVersionColumns returns true if a CRD uses per-version columns. -func HasPerVersionColumns(versions []CustomResourceDefinitionVersion) bool { - for _, v := range versions { - if len(v.AdditionalPrinterColumns) > 0 { - return true - } - } - return false -} - -// serveDefaultColumnsIfEmpty applies logically defaulting to columns, if the input columns is empty. -// NOTE: in this way, the newly logically-defaulted columns is not pointing to the original CRD object. -// One cannot mutate the original CRD columns using the logically-defaulted columns. Please iterate through -// the original CRD object instead. 
-func serveDefaultColumnsIfEmpty(columns []CustomResourceColumnDefinition) []CustomResourceColumnDefinition { - if len(columns) > 0 { - return columns - } - return []CustomResourceColumnDefinition{ - {Name: "Age", Type: "date", Description: swaggerMetadataDescriptions["creationTimestamp"], JSONPath: ".metadata.creationTimestamp"}, - } -} - -// HasVersionServed returns true if given CRD has given version served. -func HasVersionServed(crd *CustomResourceDefinition, version string) bool { - for _, v := range crd.Spec.Versions { - if !v.Served || v.Name != version { - continue - } - return true - } - return false -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/register.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/register.go deleted file mode 100644 index 273f7f123..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/register.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apiextensions - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -const GroupName = "apiextensions.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns back a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &CustomResourceDefinition{}, - &CustomResourceDefinitionList{}, - ) - return nil -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go deleted file mode 100644 index b1c5f6f4c..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go +++ /dev/null @@ -1,422 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package apiextensions - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// ConversionStrategyType describes different conversion types. -type ConversionStrategyType string - -const ( - // NoneConverter is a converter that only sets apiversion of the CR and leave everything else unchanged. - NoneConverter ConversionStrategyType = "None" - // WebhookConverter is a converter that calls to an external webhook to convert the CR. - WebhookConverter ConversionStrategyType = "Webhook" -) - -// CustomResourceDefinitionSpec describes how a user wants their resource to appear -type CustomResourceDefinitionSpec struct { - // Group is the group this resource belongs in - Group string - // Version is the version this resource belongs in - // Should be always first item in Versions field if provided. - // Optional, but at least one of Version or Versions must be set. - // Deprecated: Please use `Versions`. - Version string - // Names are the names used to describe this custom resource - Names CustomResourceDefinitionNames - // Scope indicates whether this resource is cluster or namespace scoped. Default is namespaced - Scope ResourceScope - // Validation describes the validation methods for CustomResources - // Optional, the global validation schema for all versions. - // Top-level and per-version schemas are mutually exclusive. - // +optional - Validation *CustomResourceValidation - // Subresources describes the subresources for CustomResource - // Optional, the global subresources for all versions. - // Top-level and per-version subresources are mutually exclusive. - // +optional - Subresources *CustomResourceSubresources - // Versions is the list of all supported versions for this resource. - // If Version field is provided, this field is optional. - // Validation: All versions must use the same validation schema for now. i.e., top - // level Validation field is applied to all of these versions. - // Order: The version name will be used to compute the order. - // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered - // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), - // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first - // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing - // major version, then minor version. An example sorted list of versions: - // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. - Versions []CustomResourceDefinitionVersion - // AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column. - // Optional, the global columns for all versions. - // Top-level and per-version columns are mutually exclusive. - // +optional - AdditionalPrinterColumns []CustomResourceColumnDefinition - - // `conversion` defines conversion settings for the CRD. - Conversion *CustomResourceConversion - - // preserveUnknownFields disables pruning of object fields which are not - // specified in the OpenAPI schema. apiVersion, kind, metadata and known - // fields inside metadata are always preserved. - // Defaults to true in v1beta and will default to false in v1. - PreserveUnknownFields *bool -} - -// CustomResourceConversion describes how to convert different versions of a CR. -type CustomResourceConversion struct { - // `strategy` specifies the conversion strategy. 
Allowed values are: - // - `None`: The converter only change the apiVersion and would not touch any other field in the CR. - // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information - // is needed for this option. This requires spec.preserveUnknownFields to be false. - Strategy ConversionStrategyType - - // `webhookClientConfig` is the instructions for how to call the webhook if strategy is `Webhook`. - WebhookClientConfig *WebhookClientConfig - - // ConversionReviewVersions is an ordered list of preferred `ConversionReview` - // versions the Webhook expects. API server will try to use first version in - // the list which it supports. If none of the versions specified in this list - // supported by API server, conversion will fail for this object. - // If a persisted Webhook configuration specifies allowed versions and does not - // include any versions known to the API Server, calls to the webhook will fail. - // +optional - ConversionReviewVersions []string -} - -// WebhookClientConfig contains the information to make a TLS -// connection with the webhook. It has the same field as admissionregistration.internal.WebhookClientConfig. -type WebhookClientConfig struct { - // `url` gives the location of the webhook, in standard URL form - // (`scheme://host:port/path`). Exactly one of `url` or `service` - // must be specified. - // - // The `host` should not refer to a service running in the cluster; use - // the `service` field instead. The host might be resolved via external - // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve - // in-cluster DNS as that would be a layering violation). `host` may - // also be an IP address. - // - // Please note that using `localhost` or `127.0.0.1` as a `host` is - // risky unless you take great care to run this webhook on all hosts - // which run an apiserver which might need to make calls to this - // webhook. Such installs are likely to be non-portable, i.e., not easy - // to turn up in a new cluster. - // - // The scheme must be "https"; the URL must begin with "https://". - // - // A path is optional, and if present may be any string permissible in - // a URL. You may use the path to pass an arbitrary string to the - // webhook, for example, a cluster identifier. - // - // Attempting to use a user or basic auth e.g. "user:password@" is not - // allowed. Fragments ("#...") and query parameters ("?...") are not - // allowed, either. - // - // +optional - URL *string - - // `service` is a reference to the service for this webhook. Either - // `service` or `url` must be specified. - // - // If the webhook is running within the cluster, then you should use `service`. - // - // +optional - Service *ServiceReference - - // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. - // If unspecified, system trust roots on the apiserver are used. - // +optional - CABundle []byte -} - -// ServiceReference holds a reference to Service.legacy.k8s.io -type ServiceReference struct { - // `namespace` is the namespace of the service. - // Required - Namespace string - // `name` is the name of the service. - // Required - Name string - - // `path` is an optional URL path which will be sent in any request to - // this service. - // +optional - Path *string - - // If specified, the port on the service that hosting webhook. - // `port` should be a valid port number (1-65535, inclusive). 
- // +optional - Port int32 -} - -// CustomResourceDefinitionVersion describes a version for CRD. -type CustomResourceDefinitionVersion struct { - // Name is the version name, e.g. “v1”, “v2beta1”, etc. - Name string - // Served is a flag enabling/disabling this version from being served via REST APIs - Served bool - // Storage flags the version as storage version. There must be exactly one flagged - // as storage version. - Storage bool - // deprecated indicates this version of the custom resource API is deprecated. - // When set to true, API requests to this version receive a warning header in the server response. - // Defaults to false. - Deprecated bool - // deprecationWarning overrides the default warning returned to API clients. - // May only be set when `deprecated` is true. - // The default warning indicates this version is deprecated and recommends use - // of the newest served version of equal or greater stability, if one exists. - DeprecationWarning *string - // Schema describes the schema for CustomResource used in validation, pruning, and defaulting. - // Top-level and per-version schemas are mutually exclusive. - // Per-version schemas must not all be set to identical values (top-level validation schema should be used instead) - // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. - // +optional - Schema *CustomResourceValidation - // Subresources describes the subresources for CustomResource - // Top-level and per-version subresources are mutually exclusive. - // Per-version subresources must not all be set to identical values (top-level subresources should be used instead) - // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. - // +optional - Subresources *CustomResourceSubresources - // AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column. - // Top-level and per-version columns are mutually exclusive. - // Per-version columns must not all be set to identical values (top-level columns should be used instead) - // This field is alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. - // NOTE: CRDs created prior to 1.13 populated the top-level additionalPrinterColumns field by default. To apply an - // update that changes to per-version additionalPrinterColumns, the top-level additionalPrinterColumns field must - // be explicitly set to null - // +optional - AdditionalPrinterColumns []CustomResourceColumnDefinition -} - -// CustomResourceColumnDefinition specifies a column for server side printing. -type CustomResourceColumnDefinition struct { - // name is a human readable name for the column. - Name string - // type is an OpenAPI type definition for this column. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. - Type string - // format is an optional OpenAPI type definition for this column. The 'name' format is applied - // to the primary identifier column to assist in clients identifying column is the resource name. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. - Format string - // description is a human readable description of this column. - Description string - // priority is an integer defining the relative importance of this column compared to others. Lower - // numbers are considered higher priority. 
Columns that may be omitted in limited space scenarios - // should be given a higher priority. - Priority int32 - - // JSONPath is a simple JSON path, i.e. without array notation. - JSONPath string -} - -// CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition -type CustomResourceDefinitionNames struct { - // Plural is the plural name of the resource to serve. It must match the name of the CustomResourceDefinition-registration - // too: plural.group and it must be all lowercase. - Plural string - // Singular is the singular name of the resource. It must be all lowercase Defaults to lowercased - Singular string - // ShortNames are short names for the resource. It must be all lowercase. - ShortNames []string - // Kind is the serialized kind of the resource. It is normally CamelCase and singular. - Kind string - // ListKind is the serialized kind of the list for this resource. Defaults to List. - ListKind string - // Categories is a list of grouped resources custom resources belong to (e.g. 'all') - // +optional - Categories []string -} - -// ResourceScope is an enum defining the different scopes available to a custom resource -type ResourceScope string - -const ( - ClusterScoped ResourceScope = "Cluster" - NamespaceScoped ResourceScope = "Namespaced" -) - -type ConditionStatus string - -// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. -// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes -// can't decide if a resource is in the condition or not. In the future, we could add other -// intermediate conditions, e.g. ConditionDegraded. -const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" - ConditionUnknown ConditionStatus = "Unknown" -) - -// CustomResourceDefinitionConditionType is a valid value for CustomResourceDefinitionCondition.Type -type CustomResourceDefinitionConditionType string - -const ( - // Established means that the resource has become active. A resource is established when all names are - // accepted without a conflict for the first time. A resource stays established until deleted, even during - // a later NamesAccepted due to changed names. Note that not all names can be changed. - Established CustomResourceDefinitionConditionType = "Established" - // NamesAccepted means the names chosen for this CustomResourceDefinition do not conflict with others in - // the group and are therefore accepted. - NamesAccepted CustomResourceDefinitionConditionType = "NamesAccepted" - // NonStructuralSchema means that one or more OpenAPI schema is not structural. - // - // A schema is structural if it specifies types for all values, with the only exceptions of those with - // - x-kubernetes-int-or-string: true — for fields which can be integer or string - // - x-kubernetes-preserve-unknown-fields: true — for raw, unspecified JSON values - // and there is no type, additionalProperties, default, nullable or x-kubernetes-* vendor extenions - // specified under allOf, anyOf, oneOf or not. - // - // Non-structural schemas will not be allowed anymore in v1 API groups. Moreover, new features will not be - // available for non-structural CRDs: - // - pruning - // - defaulting - // - read-only - // - OpenAPI publishing - // - webhook conversion - NonStructuralSchema CustomResourceDefinitionConditionType = "NonStructuralSchema" - // Terminating means that the CustomResourceDefinition has been deleted and is cleaning up. 
- Terminating CustomResourceDefinitionConditionType = "Terminating" - // KubernetesAPIApprovalPolicyConformant indicates that an API in *.k8s.io or *.kubernetes.io is or is not approved. For CRDs - // outside those groups, this condition will not be set. For CRDs inside those groups, the condition will - // be true if .metadata.annotations["api-approved.kubernetes.io"] is set to a URL, otherwise it will be false. - // See https://github.com/kubernetes/enhancements/pull/1111 for more details. - KubernetesAPIApprovalPolicyConformant CustomResourceDefinitionConditionType = "KubernetesAPIApprovalPolicyConformant" -) - -// CustomResourceDefinitionCondition contains details for the current condition of this pod. -type CustomResourceDefinitionCondition struct { - // Type is the type of the condition. Types include Established, NamesAccepted and Terminating. - Type CustomResourceDefinitionConditionType - // Status is the status of the condition. - // Can be True, False, Unknown. - Status ConditionStatus - // Last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time - // Unique, one-word, CamelCase reason for the condition's last transition. - // +optional - Reason string - // Human-readable message indicating details about last transition. - // +optional - Message string -} - -// CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition -type CustomResourceDefinitionStatus struct { - // Conditions indicate state for particular aspects of a CustomResourceDefinition - // +listType=map - // +listMapKey=type - Conditions []CustomResourceDefinitionCondition - - // AcceptedNames are the names that are actually being used to serve discovery - // They may be different than the names in spec. - AcceptedNames CustomResourceDefinitionNames - - // StoredVersions are all versions of CustomResources that were ever persisted. Tracking these - // versions allows a migration path for stored versions in etcd. The field is mutable - // so the migration controller can first finish a migration to another version (i.e. - // that no old objects are left in the storage), and then remove the rest of the - // versions from this list. - // None of the versions in this list can be removed from the spec.Versions field. - StoredVersions []string -} - -// CustomResourceCleanupFinalizer is the name of the finalizer which will delete instances of -// a CustomResourceDefinition -const CustomResourceCleanupFinalizer = "customresourcecleanup.apiextensions.k8s.io" - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format -// <.spec.name>.<.spec.group>. -type CustomResourceDefinition struct { - metav1.TypeMeta - metav1.ObjectMeta - - // Spec describes how the user wants the resources to appear - Spec CustomResourceDefinitionSpec - // Status indicates the actual state of the CustomResourceDefinition - Status CustomResourceDefinitionStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CustomResourceDefinitionList is a list of CustomResourceDefinition objects. -type CustomResourceDefinitionList struct { - metav1.TypeMeta - metav1.ListMeta - - // Items individual CustomResourceDefinitions - Items []CustomResourceDefinition -} - -// CustomResourceValidation is a list of validation methods for CustomResources. 
-type CustomResourceValidation struct { - // OpenAPIV3Schema is the OpenAPI v3 schema to be validated against. - OpenAPIV3Schema *JSONSchemaProps -} - -// CustomResourceSubresources defines the status and scale subresources for CustomResources. -type CustomResourceSubresources struct { - // Status denotes the status subresource for CustomResources - Status *CustomResourceSubresourceStatus - // Scale denotes the scale subresource for CustomResources - Scale *CustomResourceSubresourceScale -} - -// CustomResourceSubresourceStatus defines how to serve the status subresource for CustomResources. -// Status is represented by the `.status` JSON path inside of a CustomResource. When set, -// * exposes a /status subresource for the custom resource -// * PUT requests to the /status subresource take a custom resource object, and ignore changes to anything except the status stanza -// * PUT/POST/PATCH requests to the custom resource ignore changes to the status stanza -type CustomResourceSubresourceStatus struct{} - -// CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. -type CustomResourceSubresourceScale struct { - // SpecReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Spec.Replicas. - // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under .spec. - // If there is no value under the given path in the CustomResource, the /scale subresource will return an error on GET. - SpecReplicasPath string - // StatusReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Replicas. - // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under .status. - // If there is no value under the given path in the CustomResource, the status replica value in the /scale subresource - // will default to 0. - StatusReplicasPath string - // LabelSelectorPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Selector. - // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under .status or .spec. - // Must be set to work with HPA. - // The field pointed by this JSON path must be a string field (not a complex selector struct) - // which contains a serialized label selector in string form. - // More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource - // If there is no value under the given path in the CustomResource, the status label selector value in the /scale - // subresource will default to the empty string. - // +optional - LabelSelectorPath *string -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go deleted file mode 100644 index f402c416d..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go +++ /dev/null @@ -1,238 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apiextensions - -// JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/). -type JSONSchemaProps struct { - ID string - Schema JSONSchemaURL - Ref *string - Description string - Type string - Nullable bool - Format string - Title string - Default *JSON - Maximum *float64 - ExclusiveMaximum bool - Minimum *float64 - ExclusiveMinimum bool - MaxLength *int64 - MinLength *int64 - Pattern string - MaxItems *int64 - MinItems *int64 - UniqueItems bool - MultipleOf *float64 - Enum []JSON - MaxProperties *int64 - MinProperties *int64 - Required []string - Items *JSONSchemaPropsOrArray - AllOf []JSONSchemaProps - OneOf []JSONSchemaProps - AnyOf []JSONSchemaProps - Not *JSONSchemaProps - Properties map[string]JSONSchemaProps - AdditionalProperties *JSONSchemaPropsOrBool - PatternProperties map[string]JSONSchemaProps - Dependencies JSONSchemaDependencies - AdditionalItems *JSONSchemaPropsOrBool - Definitions JSONSchemaDefinitions - ExternalDocs *ExternalDocumentation - Example *JSON - - // x-kubernetes-preserve-unknown-fields stops the API server - // decoding step from pruning fields which are not specified - // in the validation schema. This affects fields recursively, - // but switches back to normal pruning behaviour if nested - // properties or additionalProperties are specified in the schema. - // This can either be true or undefined. False is forbidden. - XPreserveUnknownFields *bool - - // x-kubernetes-embedded-resource defines that the value is an - // embedded Kubernetes runtime.Object, with TypeMeta and - // ObjectMeta. The type must be object. It is allowed to further - // restrict the embedded object. Both ObjectMeta and TypeMeta - // are validated automatically. x-kubernetes-preserve-unknown-fields - // must be true. - XEmbeddedResource bool - - // x-kubernetes-int-or-string specifies that this value is - // either an integer or a string. If this is true, an empty - // type is allowed and type as child of anyOf is permitted - // if following one of the following patterns: - // - // 1) anyOf: - // - type: integer - // - type: string - // 2) allOf: - // - anyOf: - // - type: integer - // - type: string - // - ... zero or more - XIntOrString bool - - // x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used - // as the index of the map. - // - // This tag MUST only be used on lists that have the "x-kubernetes-list-type" - // extension set to "map". Also, the values specified for this attribute must - // be a scalar typed field of the child structure (no nesting is supported). - XListMapKeys []string - - // x-kubernetes-list-type annotates an array to further describe its topology. - // This extension must only be used on lists and may have 3 possible values: - // - // 1) `atomic`: the list is treated as a single entity, like a scalar. - // Atomic lists will be entirely replaced when updated. This extension - // may be used on any type of list (struct, scalar, ...). - // 2) `set`: - // Sets are lists that must not have multiple items with the same value. Each - // value must be a scalar, an object with x-kubernetes-map-type `atomic` or an - // array with x-kubernetes-list-type `atomic`. - // 3) `map`: - // These lists are like maps in that their elements have a non-index key - // used to identify them. Order is preserved upon merge. 
The map tag - must only be used on a list with elements of type object. - XListType *string - - // x-kubernetes-map-type annotates an object to further describe its topology. - // This extension must only be used when type is object and may have 2 possible values: - // - // 1) `granular`: - // These maps are actual maps (key-value pairs) and each fields are independent - // from each other (they can each be manipulated by separate actors). This is - // the default behaviour for all maps. - // 2) `atomic`: the list is treated as a single entity, like a scalar. - // Atomic maps will be entirely replaced when updated. - // +optional - XMapType *string - - // x-kubernetes-validations describes a list of validation rules written in the CEL expression language. - // This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled. - // +patchMergeKey=rule - // +patchStrategy=merge - // +listType=map - // +listMapKey=rule - XValidations ValidationRules -} - -// ValidationRules describes a list of validation rules written in the CEL expression language. -type ValidationRules []ValidationRule - -// ValidationRule describes a validation rule written in the CEL expression language. -type ValidationRule struct { - // Rule represents the expression which will be evaluated by CEL. - // ref: https://github.com/google/cel-spec - // The Rule is scoped to the location of the x-kubernetes-validations extension in the schema. - // The `self` variable in the CEL expression is bound to the scoped value. - // Example: - // - Rule scoped to the root of a resource with a status subresource: {"rule": "self.status.actual <= self.spec.maxDesired"} - // - // If the Rule is scoped to an object with properties, the accessible properties of the object are field selectable - // via `self.field` and field presence can be checked via `has(self.field)`. Null valued fields are treated as - // absent fields in CEL expressions. - // If the Rule is scoped to an object with additionalProperties (i.e. a map) the value of the map - // are accessible via `self[mapKey]`, map containment can be checked via `mapKey in self` and all entries of the map - // are accessible via CEL macros and functions such as `self.all(...)`. - // If the Rule is scoped to an array, the elements of the array are accessible via `self[i]` and also by macros and - // functions. - // If the Rule is scoped to a scalar, `self` is bound to the scalar value. - // Examples: - // - Rule scoped to a map of objects: {"rule": "self.components['Widget'].priority < 10"} - // - Rule scoped to a list of integers: {"rule": "self.values.all(value, value >= 0 && value < 100)"} - // - Rule scoped to a string value: {"rule": "self.startsWith('kube')"} - // - // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the - // object and from any x-kubernetes-embedded-resource annotated objects. No other metadata properties are accessible. - // - // Unknown data preserved in custom resources via x-kubernetes-preserve-unknown-fields is not accessible in CEL - // expressions. This includes: - // - Unknown field values that are preserved by object schemas with x-kubernetes-preserve-unknown-fields. - // - Object properties where the property schema is of an "unknown type". 
An "unknown type" is recursively defined as: - // - A schema with no type and x-kubernetes-preserve-unknown-fields set to true - // - An array where the items schema is of an "unknown type" - // - An object where the additionalProperties schema is of an "unknown type" - // - // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. - // Accessible property names are escaped according to the following rules when accessed in the expression: - // - '__' escapes to '__underscores__' - // - '.' escapes to '__dot__' - // - '-' escapes to '__dash__' - // - '/' escapes to '__slash__' - // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: - // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if", - // "import", "let", "loop", "package", "namespace", "return". - // Examples: - // - Rule accessing a property named "namespace": {"rule": "self.__namespace__ > 0"} - // - Rule accessing a property named "x-prop": {"rule": "self.x__dash__prop > 0"} - // - Rule accessing a property named "redact__d": {"rule": "self.redact__underscores__d > 0"} - // - // Equality on arrays with x-kubernetes-list-type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. - // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: - // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and - // non-intersecting elements in `Y` are appended, retaining their partial order. - // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values - // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with - // non-intersecting keys are appended, retaining their partial order. - Rule string - // Message represents the message displayed when validation fails. The message is required if the Rule contains - // line breaks. The message must not contain line breaks. - // If unset, the message is "failed rule: {Rule}". - // e.g. "must be a URL with the host matching spec.host" - Message string -} - -// JSON represents any valid JSON value. -// These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil. -type JSON interface{} - -// JSONSchemaURL represents a schema url. -type JSONSchemaURL string - -// JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps -// or an array of JSONSchemaProps. Mainly here for serialization purposes. -type JSONSchemaPropsOrArray struct { - Schema *JSONSchemaProps - JSONSchemas []JSONSchemaProps -} - -// JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. -// Defaults to true for the boolean property. -type JSONSchemaPropsOrBool struct { - Allows bool - Schema *JSONSchemaProps -} - -// JSONSchemaDependencies represent a dependencies property. -type JSONSchemaDependencies map[string]JSONSchemaPropsOrStringArray - -// JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array. -type JSONSchemaPropsOrStringArray struct { - Schema *JSONSchemaProps - Property []string -} - -// JSONSchemaDefinitions contains the models explicitly defined in this spec. -type JSONSchemaDefinitions map[string]JSONSchemaProps - -// ExternalDocumentation allows referencing an external resource for extended documentation. 
-type ExternalDocumentation struct { - Description string - URL string -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/.import-restrictions b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/.import-restrictions deleted file mode 100644 index 7408dd121..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/.import-restrictions +++ /dev/null @@ -1,5 +0,0 @@ -inverseRules: - # Allow use of this package in all k8s.io packages. - - selectorRegexp: k8s[.]io - allowedPrefixes: - - '' diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go deleted file mode 100644 index 4d29ff823..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go +++ /dev/null @@ -1,215 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "bytes" - unsafe "unsafe" - - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" - apiequality "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/util/json" -) - -func Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(in *apiextensions.JSONSchemaProps, out *JSONSchemaProps, s conversion.Scope) error { - if err := autoConvert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(in, out, s); err != nil { - return err - } - if in.Default != nil && *(in.Default) == nil { - out.Default = nil - } - if in.Example != nil && *(in.Example) == nil { - out.Example = nil - } - return nil -} - -var nullLiteral = []byte(`null`) - -func Convert_apiextensions_JSON_To_v1_JSON(in *apiextensions.JSON, out *JSON, s conversion.Scope) error { - raw, err := json.Marshal(*in) - if err != nil { - return err - } - if len(raw) == 0 || bytes.Equal(raw, nullLiteral) { - // match JSON#UnmarshalJSON treatment of literal nulls - out.Raw = nil - } else { - out.Raw = raw - } - return nil -} - -func Convert_v1_JSON_To_apiextensions_JSON(in *JSON, out *apiextensions.JSON, s conversion.Scope) error { - if in != nil { - var i interface{} - if len(in.Raw) > 0 && !bytes.Equal(in.Raw, nullLiteral) { - if err := json.Unmarshal(in.Raw, &i); err != nil { - return err - } - } - *out = i - } else { - out = nil - } - return nil -} - -func Convert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefinitionSpec(in *apiextensions.CustomResourceDefinitionSpec, out *CustomResourceDefinitionSpec, s conversion.Scope) error { - if err := autoConvert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefinitionSpec(in, out, s); err != nil { - return err - } - - if len(out.Versions) == 0 && len(in.Version) > 0 { - // no versions were specified, and a version name was specified - out.Versions = []CustomResourceDefinitionVersion{{Name: in.Version, Served: true, Storage: true}} - } - - // If spec.{subresources,validation,additionalPrinterColumns} exists, 
move to versions - if in.Subresources != nil { - subresources := &CustomResourceSubresources{} - if err := Convert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources(in.Subresources, subresources, s); err != nil { - return err - } - for i := range out.Versions { - out.Versions[i].Subresources = subresources - } - } - if in.Validation != nil { - schema := &CustomResourceValidation{} - if err := Convert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation(in.Validation, schema, s); err != nil { - return err - } - for i := range out.Versions { - out.Versions[i].Schema = schema - } - } - if in.AdditionalPrinterColumns != nil { - additionalPrinterColumns := make([]CustomResourceColumnDefinition, len(in.AdditionalPrinterColumns)) - for i := range in.AdditionalPrinterColumns { - if err := Convert_apiextensions_CustomResourceColumnDefinition_To_v1_CustomResourceColumnDefinition(&in.AdditionalPrinterColumns[i], &additionalPrinterColumns[i], s); err != nil { - return err - } - } - for i := range out.Versions { - out.Versions[i].AdditionalPrinterColumns = additionalPrinterColumns - } - } - return nil -} - -func Convert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in *CustomResourceDefinitionSpec, out *apiextensions.CustomResourceDefinitionSpec, s conversion.Scope) error { - if err := autoConvert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in, out, s); err != nil { - return err - } - - if len(out.Versions) == 0 { - return nil - } - - // Copy versions[0] to version - out.Version = out.Versions[0].Name - - // If versions[*].{subresources,schema,additionalPrinterColumns} are identical, move to spec - subresources := out.Versions[0].Subresources - subresourcesIdentical := true - validation := out.Versions[0].Schema - validationIdentical := true - additionalPrinterColumns := out.Versions[0].AdditionalPrinterColumns - additionalPrinterColumnsIdentical := true - - // Detect if per-version fields are identical - for _, v := range out.Versions { - if subresourcesIdentical && !apiequality.Semantic.DeepEqual(v.Subresources, subresources) { - subresourcesIdentical = false - } - if validationIdentical && !apiequality.Semantic.DeepEqual(v.Schema, validation) { - validationIdentical = false - } - if additionalPrinterColumnsIdentical && !apiequality.Semantic.DeepEqual(v.AdditionalPrinterColumns, additionalPrinterColumns) { - additionalPrinterColumnsIdentical = false - } - } - - // If they are, set the top-level fields and clear the per-version fields - if subresourcesIdentical { - out.Subresources = subresources - } - if validationIdentical { - out.Validation = validation - } - if additionalPrinterColumnsIdentical { - out.AdditionalPrinterColumns = additionalPrinterColumns - } - for i := range out.Versions { - if subresourcesIdentical { - out.Versions[i].Subresources = nil - } - if validationIdentical { - out.Versions[i].Schema = nil - } - if additionalPrinterColumnsIdentical { - out.Versions[i].AdditionalPrinterColumns = nil - } - } - - return nil -} - -func Convert_v1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in *CustomResourceConversion, out *apiextensions.CustomResourceConversion, s conversion.Scope) error { - if err := autoConvert_v1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in, out, s); err != nil { - return err - } - - out.WebhookClientConfig = nil - out.ConversionReviewVersions = nil - if in.Webhook != nil { - out.ConversionReviewVersions = 
in.Webhook.ConversionReviewVersions - if in.Webhook.ClientConfig != nil { - out.WebhookClientConfig = &apiextensions.WebhookClientConfig{} - if err := Convert_v1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in.Webhook.ClientConfig, out.WebhookClientConfig, s); err != nil { - return err - } - } - } - return nil -} - -func Convert_apiextensions_CustomResourceConversion_To_v1_CustomResourceConversion(in *apiextensions.CustomResourceConversion, out *CustomResourceConversion, s conversion.Scope) error { - if err := autoConvert_apiextensions_CustomResourceConversion_To_v1_CustomResourceConversion(in, out, s); err != nil { - return err - } - - out.Webhook = nil - if in.WebhookClientConfig != nil || in.ConversionReviewVersions != nil { - out.Webhook = &WebhookConversion{} - out.Webhook.ConversionReviewVersions = in.ConversionReviewVersions - if in.WebhookClientConfig != nil { - out.Webhook.ClientConfig = &WebhookClientConfig{} - if err := Convert_apiextensions_WebhookClientConfig_To_v1_WebhookClientConfig(in.WebhookClientConfig, out.Webhook.ClientConfig, s); err != nil { - return err - } - } - } - return nil -} - -func Convert_apiextensions_ValidationRules_To_v1_ValidationRules(in *apiextensions.ValidationRules, out *ValidationRules, s conversion.Scope) error { - *out = *(*ValidationRules)(unsafe.Pointer(in)) - return nil -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/deepcopy.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/deepcopy.go deleted file mode 100644 index 28dfb99f1..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/deepcopy.go +++ /dev/null @@ -1,260 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -// TODO: Update this after a tag is created for interface fields in DeepCopy -func (in *JSONSchemaProps) DeepCopy() *JSONSchemaProps { - if in == nil { - return nil - } - out := new(JSONSchemaProps) - *out = *in - - if in.Ref != nil { - in, out := &in.Ref, &out.Ref - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in - } - } - - if in.Maximum != nil { - in, out := &in.Maximum, &out.Maximum - if *in == nil { - *out = nil - } else { - *out = new(float64) - **out = **in - } - } - - if in.Minimum != nil { - in, out := &in.Minimum, &out.Minimum - if *in == nil { - *out = nil - } else { - *out = new(float64) - **out = **in - } - } - - if in.MaxLength != nil { - in, out := &in.MaxLength, &out.MaxLength - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - - if in.MinLength != nil { - in, out := &in.MinLength, &out.MinLength - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - if in.MaxItems != nil { - in, out := &in.MaxItems, &out.MaxItems - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - - if in.MinItems != nil { - in, out := &in.MinItems, &out.MinItems - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - - if in.MultipleOf != nil { - in, out := &in.MultipleOf, &out.MultipleOf - if *in == nil { - *out = nil - } else { - *out = new(float64) - **out = **in - } - } - - if in.MaxProperties != nil { - in, out := &in.MaxProperties, &out.MaxProperties - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - - if in.MinProperties != nil { - in, out := &in.MinProperties, &out.MinProperties - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - - if in.Required != nil { - in, out := &in.Required, &out.Required - *out = make([]string, len(*in)) - copy(*out, *in) - } - - if in.Items != nil { - in, out := &in.Items, &out.Items - if *in == nil { - *out = nil - } else { - *out = new(JSONSchemaPropsOrArray) - (*in).DeepCopyInto(*out) - } - } - - if in.AllOf != nil { - in, out := &in.AllOf, &out.AllOf - *out = make([]JSONSchemaProps, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - - if in.OneOf != nil { - in, out := &in.OneOf, &out.OneOf - *out = make([]JSONSchemaProps, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.AnyOf != nil { - in, out := &in.AnyOf, &out.AnyOf - *out = make([]JSONSchemaProps, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - - if in.Not != nil { - in, out := &in.Not, &out.Not - if *in == nil { - *out = nil - } else { - *out = new(JSONSchemaProps) - (*in).DeepCopyInto(*out) - } - } - - if in.Properties != nil { - in, out := &in.Properties, &out.Properties - *out = make(map[string]JSONSchemaProps, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - - if in.AdditionalProperties != nil { - in, out := &in.AdditionalProperties, &out.AdditionalProperties - if *in == nil { - *out = nil - } else { - *out = new(JSONSchemaPropsOrBool) - (*in).DeepCopyInto(*out) - } - } - - if in.PatternProperties != nil { - in, out := &in.PatternProperties, &out.PatternProperties - *out = make(map[string]JSONSchemaProps, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - - if in.Dependencies != nil { - in, out := &in.Dependencies, &out.Dependencies - *out = make(JSONSchemaDependencies, len(*in)) - for key, val := range 
*in { - (*out)[key] = *val.DeepCopy() - } - } - - if in.AdditionalItems != nil { - in, out := &in.AdditionalItems, &out.AdditionalItems - if *in == nil { - *out = nil - } else { - *out = new(JSONSchemaPropsOrBool) - (*in).DeepCopyInto(*out) - } - } - - if in.Definitions != nil { - in, out := &in.Definitions, &out.Definitions - *out = make(JSONSchemaDefinitions, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - - if in.ExternalDocs != nil { - in, out := &in.ExternalDocs, &out.ExternalDocs - if *in == nil { - *out = nil - } else { - *out = new(ExternalDocumentation) - (*in).DeepCopyInto(*out) - } - } - - if in.XPreserveUnknownFields != nil { - in, out := &in.XPreserveUnknownFields, &out.XPreserveUnknownFields - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - - if in.XMapType != nil { - in, out := &in.XMapType, &out.XMapType - *out = new(string) - **out = **in - } - - if in.XValidations != nil { - in, out := &in.XValidations, &out.XValidations - *out = make([]ValidationRule, len(*in)) - copy(*out, *in) - } - - return out -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/defaults.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/defaults.go deleted file mode 100644 index 5cebec927..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/defaults.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "strings" - - "k8s.io/apimachinery/pkg/runtime" - utilpointer "k8s.io/utils/pointer" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return RegisterDefaults(scheme) -} - -func SetDefaults_CustomResourceDefinition(obj *CustomResourceDefinition) { - SetDefaults_CustomResourceDefinitionSpec(&obj.Spec) - if len(obj.Status.StoredVersions) == 0 { - for _, v := range obj.Spec.Versions { - if v.Storage { - obj.Status.StoredVersions = append(obj.Status.StoredVersions, v.Name) - break - } - } - } -} - -func SetDefaults_CustomResourceDefinitionSpec(obj *CustomResourceDefinitionSpec) { - if len(obj.Names.Singular) == 0 { - obj.Names.Singular = strings.ToLower(obj.Names.Kind) - } - if len(obj.Names.ListKind) == 0 && len(obj.Names.Kind) > 0 { - obj.Names.ListKind = obj.Names.Kind + "List" - } - if obj.Conversion == nil { - obj.Conversion = &CustomResourceConversion{ - Strategy: NoneConverter, - } - } -} - -// SetDefaults_ServiceReference sets defaults for Webhook's ServiceReference -func SetDefaults_ServiceReference(obj *ServiceReference) { - if obj.Port == nil { - obj.Port = utilpointer.Int32Ptr(443) - } -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc.go deleted file mode 100644 index 09d4872f8..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package -// +k8s:conversion-gen=k8s.io/apiextensions-apiserver/pkg/apis/apiextensions -// +k8s:defaulter-gen=TypeMeta -// +k8s:openapi-gen=true -// +groupName=apiextensions.k8s.io - -// Package v1 is the v1 version of the API. -package v1 // import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go deleted file mode 100644 index 68eb08082..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go +++ /dev/null @@ -1,9257 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto - -package v1 - -import ( - encoding_binary "encoding/binary" - fmt "fmt" - - io "io" - - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - runtime "k8s.io/apimachinery/pkg/runtime" - - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" - - k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *ConversionRequest) Reset() { *m = ConversionRequest{} } -func (*ConversionRequest) ProtoMessage() {} -func (*ConversionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{0} -} -func (m *ConversionRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConversionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ConversionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConversionRequest.Merge(m, src) -} -func (m *ConversionRequest) XXX_Size() int { - return m.Size() -} -func (m *ConversionRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ConversionRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ConversionRequest proto.InternalMessageInfo - -func (m *ConversionResponse) Reset() { *m = ConversionResponse{} } -func (*ConversionResponse) ProtoMessage() {} -func (*ConversionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{1} -} -func (m *ConversionResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConversionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ConversionResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConversionResponse.Merge(m, src) -} -func (m *ConversionResponse) XXX_Size() int { - return m.Size() -} -func (m *ConversionResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ConversionResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ConversionResponse proto.InternalMessageInfo - -func (m *ConversionReview) Reset() { *m = ConversionReview{} } -func (*ConversionReview) ProtoMessage() {} -func (*ConversionReview) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{2} -} -func (m *ConversionReview) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConversionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ConversionReview) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConversionReview.Merge(m, src) -} -func (m *ConversionReview) XXX_Size() int { - return m.Size() -} -func (m *ConversionReview) XXX_DiscardUnknown() { - xxx_messageInfo_ConversionReview.DiscardUnknown(m) -} - -var 
xxx_messageInfo_ConversionReview proto.InternalMessageInfo - -func (m *CustomResourceColumnDefinition) Reset() { *m = CustomResourceColumnDefinition{} } -func (*CustomResourceColumnDefinition) ProtoMessage() {} -func (*CustomResourceColumnDefinition) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{3} -} -func (m *CustomResourceColumnDefinition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomResourceColumnDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceColumnDefinition) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceColumnDefinition.Merge(m, src) -} -func (m *CustomResourceColumnDefinition) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceColumnDefinition) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceColumnDefinition.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceColumnDefinition proto.InternalMessageInfo - -func (m *CustomResourceConversion) Reset() { *m = CustomResourceConversion{} } -func (*CustomResourceConversion) ProtoMessage() {} -func (*CustomResourceConversion) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{4} -} -func (m *CustomResourceConversion) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomResourceConversion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceConversion) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceConversion.Merge(m, src) -} -func (m *CustomResourceConversion) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceConversion) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceConversion.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceConversion proto.InternalMessageInfo - -func (m *CustomResourceDefinition) Reset() { *m = CustomResourceDefinition{} } -func (*CustomResourceDefinition) ProtoMessage() {} -func (*CustomResourceDefinition) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{5} -} -func (m *CustomResourceDefinition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomResourceDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceDefinition) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceDefinition.Merge(m, src) -} -func (m *CustomResourceDefinition) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceDefinition) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceDefinition.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceDefinition proto.InternalMessageInfo - -func (m *CustomResourceDefinitionCondition) Reset() { *m = CustomResourceDefinitionCondition{} } -func (*CustomResourceDefinitionCondition) ProtoMessage() {} -func (*CustomResourceDefinitionCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{6} -} -func (m *CustomResourceDefinitionCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomResourceDefinitionCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceDefinitionCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceDefinitionCondition.Merge(m, src) -} -func (m *CustomResourceDefinitionCondition) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceDefinitionCondition) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceDefinitionCondition.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceDefinitionCondition proto.InternalMessageInfo - -func (m *CustomResourceDefinitionList) Reset() { *m = CustomResourceDefinitionList{} } -func (*CustomResourceDefinitionList) ProtoMessage() {} -func (*CustomResourceDefinitionList) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{7} -} -func (m *CustomResourceDefinitionList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomResourceDefinitionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceDefinitionList) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceDefinitionList.Merge(m, src) -} -func (m *CustomResourceDefinitionList) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceDefinitionList) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceDefinitionList.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceDefinitionList proto.InternalMessageInfo - -func (m *CustomResourceDefinitionNames) Reset() { *m = CustomResourceDefinitionNames{} } -func (*CustomResourceDefinitionNames) ProtoMessage() {} -func (*CustomResourceDefinitionNames) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{8} -} -func (m *CustomResourceDefinitionNames) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomResourceDefinitionNames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceDefinitionNames) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceDefinitionNames.Merge(m, src) -} -func (m *CustomResourceDefinitionNames) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceDefinitionNames) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceDefinitionNames.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceDefinitionNames proto.InternalMessageInfo - -func (m *CustomResourceDefinitionSpec) Reset() { *m = CustomResourceDefinitionSpec{} } -func (*CustomResourceDefinitionSpec) ProtoMessage() {} -func (*CustomResourceDefinitionSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{9} -} -func (m *CustomResourceDefinitionSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomResourceDefinitionSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceDefinitionSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceDefinitionSpec.Merge(m, src) -} -func (m *CustomResourceDefinitionSpec) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceDefinitionSpec) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceDefinitionSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceDefinitionSpec 
proto.InternalMessageInfo - -func (m *CustomResourceDefinitionStatus) Reset() { *m = CustomResourceDefinitionStatus{} } -func (*CustomResourceDefinitionStatus) ProtoMessage() {} -func (*CustomResourceDefinitionStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{10} -} -func (m *CustomResourceDefinitionStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomResourceDefinitionStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceDefinitionStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceDefinitionStatus.Merge(m, src) -} -func (m *CustomResourceDefinitionStatus) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceDefinitionStatus) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceDefinitionStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceDefinitionStatus proto.InternalMessageInfo - -func (m *CustomResourceDefinitionVersion) Reset() { *m = CustomResourceDefinitionVersion{} } -func (*CustomResourceDefinitionVersion) ProtoMessage() {} -func (*CustomResourceDefinitionVersion) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{11} -} -func (m *CustomResourceDefinitionVersion) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomResourceDefinitionVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceDefinitionVersion) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceDefinitionVersion.Merge(m, src) -} -func (m *CustomResourceDefinitionVersion) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceDefinitionVersion) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceDefinitionVersion.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceDefinitionVersion proto.InternalMessageInfo - -func (m *CustomResourceSubresourceScale) Reset() { *m = CustomResourceSubresourceScale{} } -func (*CustomResourceSubresourceScale) ProtoMessage() {} -func (*CustomResourceSubresourceScale) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{12} -} -func (m *CustomResourceSubresourceScale) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomResourceSubresourceScale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceSubresourceScale) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceSubresourceScale.Merge(m, src) -} -func (m *CustomResourceSubresourceScale) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceSubresourceScale) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceSubresourceScale.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceSubresourceScale proto.InternalMessageInfo - -func (m *CustomResourceSubresourceStatus) Reset() { *m = CustomResourceSubresourceStatus{} } -func (*CustomResourceSubresourceStatus) ProtoMessage() {} -func (*CustomResourceSubresourceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{13} -} -func (m *CustomResourceSubresourceStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m 
*CustomResourceSubresourceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceSubresourceStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceSubresourceStatus.Merge(m, src) -} -func (m *CustomResourceSubresourceStatus) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceSubresourceStatus) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceSubresourceStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceSubresourceStatus proto.InternalMessageInfo - -func (m *CustomResourceSubresources) Reset() { *m = CustomResourceSubresources{} } -func (*CustomResourceSubresources) ProtoMessage() {} -func (*CustomResourceSubresources) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{14} -} -func (m *CustomResourceSubresources) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomResourceSubresources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceSubresources) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceSubresources.Merge(m, src) -} -func (m *CustomResourceSubresources) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceSubresources) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceSubresources.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceSubresources proto.InternalMessageInfo - -func (m *CustomResourceValidation) Reset() { *m = CustomResourceValidation{} } -func (*CustomResourceValidation) ProtoMessage() {} -func (*CustomResourceValidation) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{15} -} -func (m *CustomResourceValidation) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomResourceValidation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceValidation) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceValidation.Merge(m, src) -} -func (m *CustomResourceValidation) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceValidation) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceValidation.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceValidation proto.InternalMessageInfo - -func (m *ExternalDocumentation) Reset() { *m = ExternalDocumentation{} } -func (*ExternalDocumentation) ProtoMessage() {} -func (*ExternalDocumentation) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{16} -} -func (m *ExternalDocumentation) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExternalDocumentation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ExternalDocumentation) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExternalDocumentation.Merge(m, src) -} -func (m *ExternalDocumentation) XXX_Size() int { - return m.Size() -} -func (m *ExternalDocumentation) XXX_DiscardUnknown() { - xxx_messageInfo_ExternalDocumentation.DiscardUnknown(m) -} - -var xxx_messageInfo_ExternalDocumentation proto.InternalMessageInfo - -func (m *JSON) Reset() { *m 
= JSON{} } -func (*JSON) ProtoMessage() {} -func (*JSON) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{17} -} -func (m *JSON) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JSON) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *JSON) XXX_Merge(src proto.Message) { - xxx_messageInfo_JSON.Merge(m, src) -} -func (m *JSON) XXX_Size() int { - return m.Size() -} -func (m *JSON) XXX_DiscardUnknown() { - xxx_messageInfo_JSON.DiscardUnknown(m) -} - -var xxx_messageInfo_JSON proto.InternalMessageInfo - -func (m *JSONSchemaProps) Reset() { *m = JSONSchemaProps{} } -func (*JSONSchemaProps) ProtoMessage() {} -func (*JSONSchemaProps) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{18} -} -func (m *JSONSchemaProps) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JSONSchemaProps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *JSONSchemaProps) XXX_Merge(src proto.Message) { - xxx_messageInfo_JSONSchemaProps.Merge(m, src) -} -func (m *JSONSchemaProps) XXX_Size() int { - return m.Size() -} -func (m *JSONSchemaProps) XXX_DiscardUnknown() { - xxx_messageInfo_JSONSchemaProps.DiscardUnknown(m) -} - -var xxx_messageInfo_JSONSchemaProps proto.InternalMessageInfo - -func (m *JSONSchemaPropsOrArray) Reset() { *m = JSONSchemaPropsOrArray{} } -func (*JSONSchemaPropsOrArray) ProtoMessage() {} -func (*JSONSchemaPropsOrArray) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{19} -} -func (m *JSONSchemaPropsOrArray) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JSONSchemaPropsOrArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *JSONSchemaPropsOrArray) XXX_Merge(src proto.Message) { - xxx_messageInfo_JSONSchemaPropsOrArray.Merge(m, src) -} -func (m *JSONSchemaPropsOrArray) XXX_Size() int { - return m.Size() -} -func (m *JSONSchemaPropsOrArray) XXX_DiscardUnknown() { - xxx_messageInfo_JSONSchemaPropsOrArray.DiscardUnknown(m) -} - -var xxx_messageInfo_JSONSchemaPropsOrArray proto.InternalMessageInfo - -func (m *JSONSchemaPropsOrBool) Reset() { *m = JSONSchemaPropsOrBool{} } -func (*JSONSchemaPropsOrBool) ProtoMessage() {} -func (*JSONSchemaPropsOrBool) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{20} -} -func (m *JSONSchemaPropsOrBool) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JSONSchemaPropsOrBool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *JSONSchemaPropsOrBool) XXX_Merge(src proto.Message) { - xxx_messageInfo_JSONSchemaPropsOrBool.Merge(m, src) -} -func (m *JSONSchemaPropsOrBool) XXX_Size() int { - return m.Size() -} -func (m *JSONSchemaPropsOrBool) XXX_DiscardUnknown() { - xxx_messageInfo_JSONSchemaPropsOrBool.DiscardUnknown(m) -} - -var xxx_messageInfo_JSONSchemaPropsOrBool proto.InternalMessageInfo - -func (m *JSONSchemaPropsOrStringArray) Reset() { *m = JSONSchemaPropsOrStringArray{} } -func (*JSONSchemaPropsOrStringArray) ProtoMessage() {} 
-func (*JSONSchemaPropsOrStringArray) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{21} -} -func (m *JSONSchemaPropsOrStringArray) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JSONSchemaPropsOrStringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *JSONSchemaPropsOrStringArray) XXX_Merge(src proto.Message) { - xxx_messageInfo_JSONSchemaPropsOrStringArray.Merge(m, src) -} -func (m *JSONSchemaPropsOrStringArray) XXX_Size() int { - return m.Size() -} -func (m *JSONSchemaPropsOrStringArray) XXX_DiscardUnknown() { - xxx_messageInfo_JSONSchemaPropsOrStringArray.DiscardUnknown(m) -} - -var xxx_messageInfo_JSONSchemaPropsOrStringArray proto.InternalMessageInfo - -func (m *ServiceReference) Reset() { *m = ServiceReference{} } -func (*ServiceReference) ProtoMessage() {} -func (*ServiceReference) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{22} -} -func (m *ServiceReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ServiceReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceReference.Merge(m, src) -} -func (m *ServiceReference) XXX_Size() int { - return m.Size() -} -func (m *ServiceReference) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceReference.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceReference proto.InternalMessageInfo - -func (m *ValidationRule) Reset() { *m = ValidationRule{} } -func (*ValidationRule) ProtoMessage() {} -func (*ValidationRule) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{23} -} -func (m *ValidationRule) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ValidationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ValidationRule) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidationRule.Merge(m, src) -} -func (m *ValidationRule) XXX_Size() int { - return m.Size() -} -func (m *ValidationRule) XXX_DiscardUnknown() { - xxx_messageInfo_ValidationRule.DiscardUnknown(m) -} - -var xxx_messageInfo_ValidationRule proto.InternalMessageInfo - -func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } -func (*WebhookClientConfig) ProtoMessage() {} -func (*WebhookClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{24} -} -func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_WebhookClientConfig.Merge(m, src) -} -func (m *WebhookClientConfig) XXX_Size() int { - return m.Size() -} -func (m *WebhookClientConfig) XXX_DiscardUnknown() { - xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo - -func (m *WebhookConversion) Reset() { *m = WebhookConversion{} } -func 
(*WebhookConversion) ProtoMessage() {} -func (*WebhookConversion) Descriptor() ([]byte, []int) { - return fileDescriptor_f5a35c9667703937, []int{25} -} -func (m *WebhookConversion) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WebhookConversion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WebhookConversion) XXX_Merge(src proto.Message) { - xxx_messageInfo_WebhookConversion.Merge(m, src) -} -func (m *WebhookConversion) XXX_Size() int { - return m.Size() -} -func (m *WebhookConversion) XXX_DiscardUnknown() { - xxx_messageInfo_WebhookConversion.DiscardUnknown(m) -} - -var xxx_messageInfo_WebhookConversion proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ConversionRequest)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.ConversionRequest") - proto.RegisterType((*ConversionResponse)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.ConversionResponse") - proto.RegisterType((*ConversionReview)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.ConversionReview") - proto.RegisterType((*CustomResourceColumnDefinition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceColumnDefinition") - proto.RegisterType((*CustomResourceConversion)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceConversion") - proto.RegisterType((*CustomResourceDefinition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinition") - proto.RegisterType((*CustomResourceDefinitionCondition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionCondition") - proto.RegisterType((*CustomResourceDefinitionList)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionList") - proto.RegisterType((*CustomResourceDefinitionNames)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionNames") - proto.RegisterType((*CustomResourceDefinitionSpec)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionSpec") - proto.RegisterType((*CustomResourceDefinitionStatus)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionStatus") - proto.RegisterType((*CustomResourceDefinitionVersion)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionVersion") - proto.RegisterType((*CustomResourceSubresourceScale)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresourceScale") - proto.RegisterType((*CustomResourceSubresourceStatus)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresourceStatus") - proto.RegisterType((*CustomResourceSubresources)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresources") - proto.RegisterType((*CustomResourceValidation)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.CustomResourceValidation") - proto.RegisterType((*ExternalDocumentation)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.ExternalDocumentation") - proto.RegisterType((*JSON)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSON") - proto.RegisterType((*JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps") - proto.RegisterMapType((JSONSchemaDefinitions)(nil), 
"k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps.DefinitionsEntry") - proto.RegisterMapType((JSONSchemaDependencies)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps.DependenciesEntry") - proto.RegisterMapType((map[string]JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps.PatternPropertiesEntry") - proto.RegisterMapType((map[string]JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps.PropertiesEntry") - proto.RegisterType((*JSONSchemaPropsOrArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrArray") - proto.RegisterType((*JSONSchemaPropsOrBool)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrBool") - proto.RegisterType((*JSONSchemaPropsOrStringArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrStringArray") - proto.RegisterType((*ServiceReference)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.ServiceReference") - proto.RegisterType((*ValidationRule)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.ValidationRule") - proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.WebhookClientConfig") - proto.RegisterType((*WebhookConversion)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1.WebhookConversion") -} - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto", fileDescriptor_f5a35c9667703937) -} - -var fileDescriptor_f5a35c9667703937 = []byte{ - // 3047 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5a, 0xdf, 0x6f, 0x24, 0x47, - 0xf1, 0xbf, 0x59, 0xff, 0x5a, 0xb7, 0xed, 0xb3, 0xdd, 0x77, 0xf6, 0x77, 0xce, 0xb9, 0xf3, 0xfa, - 0x36, 0xdf, 0x1c, 0x4e, 0x72, 0x59, 0x27, 0x26, 0x21, 0x47, 0x84, 0x40, 0x5e, 0xdb, 0x97, 0x38, - 0x67, 0x9f, 0xad, 0xde, 0xbb, 0x8b, 0x93, 0x00, 0xc9, 0x78, 0xa7, 0xbd, 0x9e, 0x78, 0x7e, 0x5d, - 0xf7, 0xcc, 0xda, 0x96, 0x40, 0x8a, 0x40, 0x11, 0x10, 0x09, 0xc2, 0x03, 0x0a, 0x4f, 0x08, 0x21, - 0x94, 0x07, 0x78, 0x80, 0x37, 0xf8, 0x17, 0xf2, 0x82, 0x94, 0x27, 0x14, 0x09, 0x69, 0x45, 0x96, - 0x7f, 0x00, 0x09, 0x10, 0xc2, 0x0f, 0x08, 0xf5, 0x8f, 0xe9, 0xe9, 0x9d, 0xdd, 0xbd, 0x3b, 0xd9, - 0xeb, 0xe4, 0xcd, 0xae, 0xaa, 0xae, 0x4f, 0x75, 0x75, 0x75, 0x55, 0x75, 0xcd, 0x02, 0x6b, 0xff, - 0x06, 0x2d, 0x39, 0xc1, 0xc2, 0x7e, 0xbc, 0x83, 0x89, 0x8f, 0x23, 0x4c, 0x17, 0xea, 0xd8, 0xb7, - 0x03, 0xb2, 0x20, 0x19, 0x56, 0xe8, 0xe0, 0xc3, 0x08, 0xfb, 0xd4, 0x09, 0x7c, 0xfa, 0x8c, 0x15, - 0x3a, 0x14, 0x93, 0x3a, 0x26, 0x0b, 0xe1, 0x7e, 0x8d, 0xf1, 0x68, 0xab, 0xc0, 0x42, 0xfd, 0xb9, - 0x85, 0x1a, 0xf6, 0x31, 0xb1, 0x22, 0x6c, 0x97, 0x42, 0x12, 0x44, 0x01, 0xbc, 0x21, 0x34, 0x95, - 0x5a, 0x04, 0xdf, 0x52, 0x9a, 0x4a, 0xe1, 0x7e, 0x8d, 0xf1, 0x68, 0xab, 0x40, 0xa9, 0xfe, 0xdc, - 0xcc, 0x33, 0x35, 0x27, 0xda, 0x8b, 0x77, 0x4a, 0xd5, 0xc0, 0x5b, 0xa8, 0x05, 0xb5, 0x60, 0x81, - 0x2b, 0xdc, 0x89, 0x77, 0xf9, 0x7f, 0xfc, 0x1f, 0xfe, 0x97, 0x00, 0x9a, 0x79, 0x3e, 0x35, 0xd9, - 0xb3, 0xaa, 0x7b, 0x8e, 0x8f, 0xc9, 0x51, 0x6a, 0xa7, 0x87, 0x23, 0xab, 0x83, 0x79, 0x33, 0x0b, - 0xdd, 0x56, 0x91, 0xd8, 0x8f, 0x1c, 0x0f, 0xb7, 0x2d, 0xf8, 0xca, 0xc3, 0x16, 0xd0, 0xea, 0x1e, - 0xf6, 0xac, 0xec, 0xba, 0xe2, 0xb1, 0x01, 0x26, 0x97, 0x03, 0xbf, 0x8e, 0x09, 0xdb, 0x20, 0xc2, - 0xf7, 0x63, 0x4c, 0x23, 0x58, 0x06, 0x7d, 
0xb1, 0x63, 0x9b, 0xc6, 0x9c, 0x31, 0x3f, 0x5c, 0x7e, - 0xf6, 0xe3, 0x46, 0xe1, 0x5c, 0xb3, 0x51, 0xe8, 0xbb, 0xbb, 0xb6, 0x72, 0xdc, 0x28, 0x5c, 0xed, - 0x86, 0x14, 0x1d, 0x85, 0x98, 0x96, 0xee, 0xae, 0xad, 0x20, 0xb6, 0x18, 0xbe, 0x0c, 0x26, 0x6d, - 0x4c, 0x1d, 0x82, 0xed, 0xa5, 0xad, 0xb5, 0x7b, 0x42, 0xbf, 0x99, 0xe3, 0x1a, 0x2f, 0x49, 0x8d, - 0x93, 0x2b, 0x59, 0x01, 0xd4, 0xbe, 0x06, 0x6e, 0x83, 0xa1, 0x60, 0xe7, 0x1d, 0x5c, 0x8d, 0xa8, - 0xd9, 0x37, 0xd7, 0x37, 0x3f, 0xb2, 0xf8, 0x4c, 0x29, 0x3d, 0x3c, 0x65, 0x02, 0x3f, 0x31, 0xb9, - 0xd9, 0x12, 0xb2, 0x0e, 0x56, 0x93, 0x43, 0x2b, 0x8f, 0x4b, 0xb4, 0xa1, 0x4d, 0xa1, 0x05, 0x25, - 0xea, 0x8a, 0xbf, 0xce, 0x01, 0xa8, 0x6f, 0x9e, 0x86, 0x81, 0x4f, 0x71, 0x4f, 0x76, 0x4f, 0xc1, - 0x44, 0x95, 0x6b, 0x8e, 0xb0, 0x2d, 0x71, 0xcd, 0xdc, 0x49, 0xac, 0x37, 0x25, 0xfe, 0xc4, 0x72, - 0x46, 0x1d, 0x6a, 0x03, 0x80, 0x77, 0xc0, 0x20, 0xc1, 0x34, 0x76, 0x23, 0xb3, 0x6f, 0xce, 0x98, - 0x1f, 0x59, 0xbc, 0xde, 0x15, 0x8a, 0x87, 0x36, 0x0b, 0xbe, 0x52, 0xfd, 0xb9, 0x52, 0x25, 0xb2, - 0xa2, 0x98, 0x96, 0xcf, 0x4b, 0xa4, 0x41, 0xc4, 0x75, 0x20, 0xa9, 0xab, 0xf8, 0x5f, 0x03, 0x4c, - 0xe8, 0x5e, 0xaa, 0x3b, 0xf8, 0x00, 0x12, 0x30, 0x44, 0x44, 0xb0, 0x70, 0x3f, 0x8d, 0x2c, 0xde, - 0x2a, 0x9d, 0xf4, 0x46, 0x95, 0xda, 0xe2, 0xaf, 0x3c, 0xc2, 0x8e, 0x4b, 0xfe, 0x83, 0x12, 0x20, - 0x58, 0x07, 0x79, 0x22, 0xcf, 0x88, 0x07, 0xd2, 0xc8, 0xe2, 0x7a, 0x6f, 0x40, 0x85, 0xce, 0xf2, - 0x68, 0xb3, 0x51, 0xc8, 0x27, 0xff, 0x21, 0x85, 0x55, 0xfc, 0x65, 0x0e, 0xcc, 0x2e, 0xc7, 0x34, - 0x0a, 0x3c, 0x84, 0x69, 0x10, 0x93, 0x2a, 0x5e, 0x0e, 0xdc, 0xd8, 0xf3, 0x57, 0xf0, 0xae, 0xe3, - 0x3b, 0x11, 0x8b, 0xd1, 0x39, 0xd0, 0xef, 0x5b, 0x1e, 0x96, 0x31, 0x33, 0x2a, 0x3d, 0xd9, 0x7f, - 0xdb, 0xf2, 0x30, 0xe2, 0x1c, 0x26, 0xc1, 0x42, 0x44, 0xde, 0x00, 0x25, 0x71, 0xe7, 0x28, 0xc4, - 0x88, 0x73, 0xe0, 0x35, 0x30, 0xb8, 0x1b, 0x10, 0xcf, 0x12, 0xa7, 0x37, 0x9c, 0x9e, 0xc7, 0x4d, - 0x4e, 0x45, 0x92, 0x0b, 0x5f, 0x00, 0x23, 0x36, 0xa6, 0x55, 0xe2, 0x84, 0x0c, 0xda, 0xec, 0xe7, - 0xc2, 0x17, 0xa4, 0xf0, 0xc8, 0x4a, 0xca, 0x42, 0xba, 0x1c, 0xbc, 0x0e, 0xf2, 0x21, 0x71, 0x02, - 0xe2, 0x44, 0x47, 0xe6, 0xc0, 0x9c, 0x31, 0x3f, 0x50, 0x9e, 0x90, 0x6b, 0xf2, 0x5b, 0x92, 0x8e, - 0x94, 0x04, 0x93, 0x7e, 0x87, 0x06, 0xfe, 0x96, 0x15, 0xed, 0x99, 0x83, 0x1c, 0x41, 0x49, 0xbf, - 0x5a, 0xd9, 0xbc, 0xcd, 0xe8, 0x48, 0x49, 0x14, 0xff, 0x6c, 0x00, 0x33, 0xeb, 0xa1, 0xc4, 0xbd, - 0xf0, 0x26, 0xc8, 0xd3, 0x88, 0xe5, 0x9c, 0xda, 0x91, 0xf4, 0xcf, 0x53, 0x89, 0xaa, 0x8a, 0xa4, - 0x1f, 0x37, 0x0a, 0xd3, 0xe9, 0x8a, 0x84, 0xca, 0x7d, 0xa3, 0xd6, 0xb2, 0x90, 0x3b, 0xc0, 0x3b, - 0x7b, 0x41, 0xb0, 0x2f, 0x4f, 0xff, 0x14, 0x21, 0xf7, 0x9a, 0x50, 0x94, 0x62, 0x8a, 0x90, 0x93, - 0x64, 0x94, 0x00, 0x15, 0xff, 0x93, 0xcb, 0x6e, 0x4c, 0x3b, 0xf4, 0xb7, 0x41, 0x9e, 0x5d, 0x21, - 0xdb, 0x8a, 0x2c, 0x79, 0x09, 0x9e, 0x7d, 0xb4, 0x0b, 0x27, 0xee, 0xeb, 0x06, 0x8e, 0xac, 0x32, - 0x94, 0xae, 0x00, 0x29, 0x0d, 0x29, 0xad, 0xf0, 0x10, 0xf4, 0xd3, 0x10, 0x57, 0xe5, 0x7e, 0xef, - 0x9d, 0x22, 0xda, 0xbb, 0xec, 0xa1, 0x12, 0xe2, 0x6a, 0x1a, 0x8c, 0xec, 0x3f, 0xc4, 0x11, 0xe1, - 0xbb, 0x06, 0x18, 0xa4, 0x3c, 0x2f, 0xc8, 0x5c, 0xb2, 0x7d, 0x06, 0xe0, 0x99, 0xbc, 0x23, 0xfe, - 0x47, 0x12, 0xb7, 0xf8, 0xcf, 0x1c, 0xb8, 0xda, 0x6d, 0xe9, 0x72, 0xe0, 0xdb, 0xe2, 0x10, 0xd6, - 0xe4, 0xbd, 0x12, 0x91, 0xf5, 0x82, 0x7e, 0xaf, 0x8e, 0x1b, 0x85, 0x27, 0x1e, 0xaa, 0x40, 0xbb, - 0x80, 0x5f, 0x55, 0x5b, 0x16, 0x97, 0xf4, 0x6a, 0xab, 0x61, 0xc7, 0x8d, 0xc2, 0xb8, 0x5a, 0xd6, - 0x6a, 0x2b, 0xac, 0x03, 0xe8, 0x5a, 0x34, 0xba, 0x43, 0x2c, 0x9f, 
0x0a, 0xb5, 0x8e, 0x87, 0xa5, - 0xe7, 0x9e, 0x7a, 0xb4, 0xa0, 0x60, 0x2b, 0xca, 0x33, 0x12, 0x12, 0xae, 0xb7, 0x69, 0x43, 0x1d, - 0x10, 0x58, 0xce, 0x20, 0xd8, 0xa2, 0x2a, 0x0d, 0x68, 0x39, 0x9c, 0x51, 0x91, 0xe4, 0xc2, 0x27, - 0xc1, 0x90, 0x87, 0x29, 0xb5, 0x6a, 0x98, 0xdf, 0xfd, 0xe1, 0xb4, 0x28, 0x6e, 0x08, 0x32, 0x4a, - 0xf8, 0xc5, 0x7f, 0x19, 0xe0, 0x72, 0x37, 0xaf, 0xad, 0x3b, 0x34, 0x82, 0xdf, 0x6c, 0x0b, 0xfb, - 0xd2, 0xa3, 0xed, 0x90, 0xad, 0xe6, 0x41, 0xaf, 0x52, 0x49, 0x42, 0xd1, 0x42, 0xfe, 0x00, 0x0c, - 0x38, 0x11, 0xf6, 0x92, 0x6a, 0x89, 0x7a, 0x1f, 0x76, 0xe5, 0x31, 0x09, 0x3f, 0xb0, 0xc6, 0x80, - 0x90, 0xc0, 0x2b, 0x7e, 0x94, 0x03, 0x57, 0xba, 0x2d, 0x61, 0x79, 0x9c, 0x32, 0x67, 0x87, 0x6e, - 0x4c, 0x2c, 0x57, 0x06, 0x9b, 0x72, 0xf6, 0x16, 0xa7, 0x22, 0xc9, 0x65, 0xb9, 0x93, 0x3a, 0x7e, - 0x2d, 0x76, 0x2d, 0x22, 0x23, 0x49, 0x6d, 0xb8, 0x22, 0xe9, 0x48, 0x49, 0xc0, 0x12, 0x00, 0x74, - 0x2f, 0x20, 0x11, 0xc7, 0xe0, 0x1d, 0xce, 0x70, 0xf9, 0x3c, 0xcb, 0x08, 0x15, 0x45, 0x45, 0x9a, - 0x04, 0x2b, 0x24, 0xfb, 0x8e, 0x6f, 0xcb, 0x03, 0x57, 0x77, 0xf7, 0x96, 0xe3, 0xdb, 0x88, 0x73, - 0x18, 0xbe, 0xeb, 0xd0, 0x88, 0x51, 0xe4, 0x69, 0xb7, 0x38, 0x9c, 0x4b, 0x2a, 0x09, 0x86, 0x5f, - 0x65, 0x09, 0x36, 0x20, 0x0e, 0xa6, 0xe6, 0x60, 0x8a, 0xbf, 0xac, 0xa8, 0x48, 0x93, 0x28, 0xfe, - 0xa5, 0xbf, 0x7b, 0x7c, 0xb0, 0x04, 0x02, 0x1f, 0x07, 0x03, 0x35, 0x12, 0xc4, 0xa1, 0xf4, 0x92, - 0xf2, 0xf6, 0xcb, 0x8c, 0x88, 0x04, 0x0f, 0x7e, 0x07, 0x0c, 0xf8, 0x72, 0xc3, 0x2c, 0x82, 0x5e, - 0xeb, 0xfd, 0x31, 0x73, 0x6f, 0xa5, 0xe8, 0xc2, 0x91, 0x02, 0x14, 0x3e, 0x0f, 0x06, 0x68, 0x35, - 0x08, 0xb1, 0x74, 0xe2, 0x6c, 0x22, 0x54, 0x61, 0xc4, 0xe3, 0x46, 0x61, 0x2c, 0x51, 0xc7, 0x09, - 0x48, 0x08, 0xc3, 0x1f, 0x18, 0x20, 0x2f, 0xcb, 0x05, 0x35, 0x87, 0x78, 0x78, 0xbe, 0xde, 0x7b, - 0xbb, 0x65, 0xdb, 0x9b, 0x9e, 0x99, 0x24, 0x50, 0xa4, 0xc0, 0xe1, 0xf7, 0x0c, 0x00, 0xaa, 0xaa, - 0x76, 0x99, 0xc3, 0xdc, 0x87, 0x3d, 0xbb, 0x2a, 0x5a, 0x55, 0x14, 0x81, 0x90, 0xb6, 0x4a, 0x1a, - 0x2a, 0xac, 0x80, 0xa9, 0x90, 0x60, 0xae, 0xfb, 0xae, 0xbf, 0xef, 0x07, 0x07, 0xfe, 0x4d, 0x07, - 0xbb, 0x36, 0x35, 0xc1, 0x9c, 0x31, 0x9f, 0x2f, 0x5f, 0x91, 0xf6, 0x4f, 0x6d, 0x75, 0x12, 0x42, - 0x9d, 0xd7, 0x16, 0xdf, 0xeb, 0xcb, 0xf6, 0x5a, 0xd9, 0x7a, 0x01, 0x3f, 0x10, 0x9b, 0x17, 0x79, - 0x98, 0x9a, 0x06, 0x3f, 0x88, 0x37, 0x7b, 0x7f, 0x10, 0x2a, 0xd7, 0xa7, 0x45, 0x5a, 0x91, 0x28, - 0xd2, 0x4c, 0x80, 0x3f, 0x33, 0xc0, 0x98, 0x55, 0xad, 0xe2, 0x30, 0xc2, 0xb6, 0xb8, 0xc6, 0xb9, - 0xb3, 0x8d, 0xea, 0x29, 0x69, 0xd0, 0xd8, 0x92, 0x8e, 0x8a, 0x5a, 0x8d, 0x80, 0x2f, 0x81, 0xf3, - 0x34, 0x0a, 0x08, 0xb6, 0x93, 0x08, 0x92, 0xd9, 0x05, 0x36, 0x1b, 0x85, 0xf3, 0x95, 0x16, 0x0e, - 0xca, 0x48, 0x16, 0x3f, 0x19, 0x00, 0x85, 0x87, 0x44, 0xe8, 0x23, 0x34, 0xbd, 0xd7, 0xc0, 0x20, - 0xdf, 0xa9, 0xcd, 0x1d, 0x92, 0xd7, 0x4a, 0x3d, 0xa7, 0x22, 0xc9, 0x65, 0xe5, 0x89, 0xe1, 0xb3, - 0xf2, 0xd4, 0xc7, 0x05, 0x55, 0x79, 0xaa, 0x08, 0x32, 0x4a, 0xf8, 0x70, 0x11, 0x00, 0x1b, 0x87, - 0x04, 0xb3, 0x8c, 0x64, 0x9b, 0x43, 0x5c, 0x5a, 0x9d, 0xcf, 0x8a, 0xe2, 0x20, 0x4d, 0x0a, 0xde, - 0x04, 0x30, 0xf9, 0xcf, 0x09, 0xfc, 0xd7, 0x2c, 0xe2, 0x3b, 0x7e, 0xcd, 0xcc, 0x73, 0xb3, 0xa7, - 0x59, 0xb5, 0x5d, 0x69, 0xe3, 0xa2, 0x0e, 0x2b, 0x60, 0x1d, 0x0c, 0x8a, 0x67, 0x34, 0xcf, 0x1b, - 0x3d, 0xbc, 0x71, 0xf7, 0x2c, 0xd7, 0xb1, 0x39, 0x54, 0x19, 0x70, 0xf7, 0x70, 0x14, 0x24, 0xd1, - 0xe0, 0xfb, 0x06, 0x18, 0xa5, 0xf1, 0x0e, 0x91, 0xd2, 0x94, 0x67, 0xf5, 0x91, 0xc5, 0x3b, 0xbd, - 0x82, 0xaf, 0x68, 0xba, 0xcb, 0x13, 0xcd, 0x46, 0x61, 0x54, 0xa7, 0xa0, 0x16, 0x6c, 0xf8, 
0x07, - 0x03, 0x98, 0x96, 0x2d, 0x42, 0xdf, 0x72, 0xb7, 0x88, 0xe3, 0x47, 0x98, 0x88, 0x07, 0x91, 0x28, - 0x1f, 0x3d, 0xec, 0x15, 0xb3, 0xef, 0xac, 0xf2, 0x9c, 0x3c, 0x69, 0x73, 0xa9, 0x8b, 0x05, 0xa8, - 0xab, 0x6d, 0xc5, 0x7f, 0x1b, 0xd9, 0xd4, 0xa2, 0xed, 0xb2, 0x52, 0xb5, 0x5c, 0x0c, 0x57, 0xc0, - 0x04, 0xeb, 0x7e, 0x11, 0x0e, 0x5d, 0xa7, 0x6a, 0x51, 0xfe, 0xfa, 0x11, 0xd1, 0xad, 0x9e, 0xe1, - 0x95, 0x0c, 0x1f, 0xb5, 0xad, 0x80, 0xaf, 0x02, 0x28, 0xda, 0xc2, 0x16, 0x3d, 0xa2, 0x13, 0x50, - 0x0d, 0x5e, 0xa5, 0x4d, 0x02, 0x75, 0x58, 0x05, 0x97, 0xc1, 0xa4, 0x6b, 0xed, 0x60, 0xb7, 0x82, - 0x5d, 0x5c, 0x8d, 0x02, 0xc2, 0x55, 0x89, 0xf7, 0xe1, 0x54, 0xb3, 0x51, 0x98, 0x5c, 0xcf, 0x32, - 0x51, 0xbb, 0x7c, 0xf1, 0x6a, 0xf6, 0x2e, 0xeb, 0x1b, 0x17, 0xcd, 0xf6, 0x87, 0x39, 0x30, 0xd3, - 0x3d, 0x28, 0xe0, 0x77, 0x55, 0x6b, 0x2c, 0x3a, 0xbe, 0xd7, 0xcf, 0x20, 0xf4, 0xe4, 0x73, 0x00, - 0xb4, 0x3f, 0x05, 0xe0, 0x11, 0xab, 0xd7, 0x96, 0x9b, 0x3c, 0xfb, 0xb7, 0xcf, 0x02, 0x9d, 0xe9, - 0x2f, 0x0f, 0x8b, 0x2e, 0xc0, 0x72, 0x79, 0xd1, 0xb7, 0x5c, 0x5c, 0xfc, 0xa8, 0xed, 0x69, 0x9b, - 0x5e, 0x56, 0xf8, 0x43, 0x03, 0x8c, 0x07, 0x21, 0xf6, 0x97, 0xb6, 0xd6, 0xee, 0x7d, 0x59, 0x5c, - 0x5a, 0xe9, 0xa0, 0xb5, 0x93, 0x9b, 0xc8, 0xde, 0xd7, 0x42, 0xd7, 0x16, 0x09, 0x42, 0x5a, 0xbe, - 0xd0, 0x6c, 0x14, 0xc6, 0x37, 0x5b, 0x51, 0x50, 0x16, 0xb6, 0xe8, 0x81, 0xa9, 0xd5, 0xc3, 0x08, - 0x13, 0xdf, 0x72, 0x57, 0x82, 0x6a, 0xec, 0x61, 0x3f, 0x12, 0x36, 0x66, 0xc6, 0x05, 0xc6, 0x23, - 0x8e, 0x0b, 0xae, 0x80, 0xbe, 0x98, 0xb8, 0x32, 0x6a, 0x47, 0xd4, 0x10, 0x0c, 0xad, 0x23, 0x46, - 0x2f, 0x5e, 0x05, 0xfd, 0xcc, 0x4e, 0x78, 0x09, 0xf4, 0x11, 0xeb, 0x80, 0x6b, 0x1d, 0x2d, 0x0f, - 0x31, 0x11, 0x64, 0x1d, 0x20, 0x46, 0x2b, 0xfe, 0x7d, 0x0e, 0x8c, 0x67, 0xf6, 0x02, 0x67, 0x40, - 0x4e, 0x4d, 0xd6, 0x80, 0x54, 0x9a, 0x5b, 0x5b, 0x41, 0x39, 0xc7, 0x86, 0x2f, 0xaa, 0xec, 0x2a, - 0x40, 0x0b, 0xaa, 0x58, 0x70, 0x2a, 0x6b, 0xcb, 0x52, 0x75, 0xcc, 0x90, 0x24, 0x3d, 0x32, 0x1b, - 0xf0, 0xae, 0xbc, 0x15, 0xc2, 0x06, 0xbc, 0x8b, 0x18, 0xed, 0xa4, 0xb3, 0x92, 0x64, 0x58, 0x33, - 0xf0, 0x08, 0xc3, 0x9a, 0xc1, 0x07, 0x0e, 0x6b, 0x1e, 0x07, 0x03, 0x91, 0x13, 0xb9, 0x98, 0x57, - 0x2a, 0xad, 0x19, 0xbe, 0xc3, 0x88, 0x48, 0xf0, 0x20, 0x06, 0x43, 0x36, 0xde, 0xb5, 0x62, 0x37, - 0xe2, 0x45, 0x69, 0x64, 0xf1, 0xeb, 0xa7, 0x8b, 0x1e, 0x31, 0xcc, 0x58, 0x11, 0x2a, 0x51, 0xa2, - 0x1b, 0x3e, 0x01, 0x86, 0x3c, 0xeb, 0xd0, 0xf1, 0x62, 0x8f, 0x77, 0x8c, 0x86, 0x10, 0xdb, 0x10, - 0x24, 0x94, 0xf0, 0x58, 0x12, 0xc4, 0x87, 0x55, 0x37, 0xa6, 0x4e, 0x1d, 0x4b, 0xa6, 0x6c, 0xe9, - 0x54, 0x12, 0x5c, 0xcd, 0xf0, 0x51, 0xdb, 0x0a, 0x0e, 0xe6, 0xf8, 0x7c, 0xf1, 0x88, 0x06, 0x26, - 0x48, 0x28, 0xe1, 0xb5, 0x82, 0x49, 0xf9, 0xd1, 0x6e, 0x60, 0x72, 0x71, 0xdb, 0x0a, 0xf8, 0x34, - 0x18, 0xf6, 0xac, 0xc3, 0x75, 0xec, 0xd7, 0xa2, 0x3d, 0x73, 0x6c, 0xce, 0x98, 0xef, 0x2b, 0x8f, - 0x35, 0x1b, 0x85, 0xe1, 0x8d, 0x84, 0x88, 0x52, 0x3e, 0x17, 0x76, 0x7c, 0x29, 0x7c, 0x5e, 0x13, - 0x4e, 0x88, 0x28, 0xe5, 0xb3, 0xce, 0x24, 0xb4, 0x22, 0x76, 0xaf, 0xcc, 0xf1, 0xd6, 0x87, 0xf3, - 0x96, 0x20, 0xa3, 0x84, 0x0f, 0xe7, 0x41, 0xde, 0xb3, 0x0e, 0xf9, 0x9b, 0xd2, 0x9c, 0xe0, 0x6a, - 0xf9, 0x40, 0x71, 0x43, 0xd2, 0x90, 0xe2, 0x72, 0x49, 0xc7, 0x17, 0x92, 0x93, 0x9a, 0xa4, 0xa4, - 0x21, 0xc5, 0x65, 0xf1, 0x1b, 0xfb, 0xce, 0xfd, 0x18, 0x0b, 0x61, 0xc8, 0x3d, 0xa3, 0xe2, 0xf7, - 0x6e, 0xca, 0x42, 0xba, 0x1c, 0x7b, 0xd3, 0x79, 0xb1, 0x1b, 0x39, 0xa1, 0x8b, 0x37, 0x77, 0xcd, - 0x0b, 0xdc, 0xff, 0xbc, 0x95, 0xdf, 0x50, 0x54, 0xa4, 0x49, 0xc0, 0xb7, 0x41, 0x3f, 0xf6, 0x63, - 0xcf, 0xbc, 0xc8, 
0xcb, 0xf7, 0x69, 0xa3, 0x4f, 0xdd, 0x97, 0x55, 0x3f, 0xf6, 0x10, 0xd7, 0x0c, - 0x5f, 0x04, 0x63, 0x9e, 0x75, 0xc8, 0x92, 0x00, 0x26, 0x11, 0x7b, 0x68, 0x4e, 0xf1, 0x7d, 0x4f, - 0xb2, 0x26, 0x76, 0x43, 0x67, 0xa0, 0x56, 0x39, 0xbe, 0xd0, 0xf1, 0xb5, 0x85, 0xd3, 0xda, 0x42, - 0x9d, 0x81, 0x5a, 0xe5, 0x98, 0x93, 0x09, 0xbe, 0x1f, 0x3b, 0x04, 0xdb, 0xe6, 0xff, 0xf1, 0xbe, - 0x57, 0xce, 0x77, 0x05, 0x0d, 0x29, 0x2e, 0xbc, 0x9f, 0x8c, 0x1c, 0x4c, 0x7e, 0xf9, 0xb6, 0x7a, - 0x96, 0xba, 0x37, 0xc9, 0x12, 0x21, 0xd6, 0x91, 0xa8, 0x2a, 0xfa, 0xb0, 0x01, 0xfa, 0x60, 0xc0, - 0x72, 0xdd, 0xcd, 0x5d, 0xf3, 0x12, 0xf7, 0x78, 0x0f, 0xab, 0x85, 0xca, 0x30, 0x4b, 0x4c, 0x3f, - 0x12, 0x30, 0x0c, 0x2f, 0xf0, 0x59, 0x2c, 0xcc, 0x9c, 0x19, 0xde, 0x26, 0xd3, 0x8f, 0x04, 0x0c, - 0xdf, 0x9f, 0x7f, 0xb4, 0xb9, 0x6b, 0x3e, 0x76, 0x76, 0xfb, 0x63, 0xfa, 0x91, 0x80, 0x81, 0x36, - 0xe8, 0xf3, 0x83, 0xc8, 0xbc, 0xdc, 0xeb, 0xda, 0xcb, 0xab, 0xc9, 0xed, 0x20, 0x42, 0x4c, 0x3d, - 0xfc, 0xb1, 0x01, 0x40, 0x98, 0x46, 0xe2, 0x95, 0xd3, 0x8e, 0x00, 0x32, 0x68, 0xa5, 0x34, 0x7a, - 0x57, 0xfd, 0x88, 0x1c, 0xa5, 0xef, 0x1a, 0x2d, 0xca, 0x35, 0x03, 0xe0, 0x2f, 0x0c, 0x70, 0x51, - 0x6f, 0x77, 0x95, 0x65, 0xb3, 0xdc, 0x0f, 0x9b, 0x3d, 0x0c, 0xe4, 0x72, 0x10, 0xb8, 0x65, 0xb3, - 0xd9, 0x28, 0x5c, 0x5c, 0xea, 0x00, 0x88, 0x3a, 0x9a, 0x01, 0x7f, 0x63, 0x80, 0x49, 0x99, 0x1d, - 0x35, 0xe3, 0x0a, 0xdc, 0x6d, 0x6f, 0xf7, 0xd0, 0x6d, 0x59, 0x08, 0xe1, 0x3d, 0xf5, 0x95, 0xb1, - 0x8d, 0x8f, 0xda, 0xad, 0x82, 0xbf, 0x37, 0xc0, 0xa8, 0x8d, 0x43, 0xec, 0xdb, 0xd8, 0xaf, 0x32, - 0x33, 0xe7, 0x4e, 0x3b, 0x57, 0xc8, 0x9a, 0xb9, 0xa2, 0x69, 0x17, 0x16, 0x96, 0xa4, 0x85, 0xa3, - 0x3a, 0xeb, 0xb8, 0x51, 0x98, 0x4e, 0x97, 0xea, 0x1c, 0xd4, 0x62, 0x20, 0xfc, 0x89, 0x01, 0xc6, - 0x53, 0xb7, 0x8b, 0x02, 0x71, 0xf5, 0x6c, 0x0e, 0x9e, 0xb7, 0xa0, 0x4b, 0xad, 0x58, 0x28, 0x0b, - 0x0e, 0x7f, 0x6b, 0xb0, 0x6e, 0x2b, 0x79, 0xab, 0x51, 0xb3, 0xc8, 0x3d, 0xf8, 0x46, 0x2f, 0x3d, - 0xa8, 0x94, 0x0b, 0x07, 0x5e, 0x4f, 0x3b, 0x39, 0xc5, 0x39, 0x6e, 0x14, 0xa6, 0x74, 0xff, 0x29, - 0x06, 0xd2, 0x8d, 0x83, 0xef, 0x19, 0x60, 0x14, 0xa7, 0x0d, 0x33, 0x35, 0x1f, 0x3f, 0xad, 0xeb, - 0x3a, 0xb6, 0xdf, 0xe2, 0x39, 0xad, 0xb1, 0x28, 0x6a, 0x81, 0x65, 0xbd, 0x1f, 0x3e, 0xb4, 0xbc, - 0xd0, 0xc5, 0xe6, 0xff, 0xf7, 0xae, 0xf7, 0x5b, 0x15, 0x2a, 0x51, 0xa2, 0x1b, 0x5e, 0x07, 0x79, - 0x3f, 0x76, 0x5d, 0x6b, 0xc7, 0xc5, 0xe6, 0x13, 0xbc, 0x8b, 0x50, 0xf3, 0xc5, 0xdb, 0x92, 0x8e, - 0x94, 0x04, 0xdc, 0x05, 0x73, 0x87, 0xb7, 0xd4, 0x8f, 0x2f, 0x3a, 0x0e, 0xf0, 0xcc, 0x6b, 0x5c, - 0xcb, 0x4c, 0xb3, 0x51, 0x98, 0xde, 0xee, 0x3c, 0xe2, 0x7b, 0xa8, 0x0e, 0xf8, 0x26, 0x78, 0x4c, - 0x93, 0x59, 0xf5, 0x76, 0xb0, 0x6d, 0x63, 0x3b, 0x79, 0x68, 0x99, 0x5f, 0xe2, 0x10, 0xea, 0x1e, - 0x6f, 0x67, 0x05, 0xd0, 0x83, 0x56, 0xc3, 0x75, 0x30, 0xad, 0xb1, 0xd7, 0xfc, 0x68, 0x93, 0x54, - 0x22, 0xe2, 0xf8, 0x35, 0x73, 0x9e, 0xeb, 0xbd, 0x98, 0xdc, 0xbe, 0x6d, 0x8d, 0x87, 0xba, 0xac, - 0x81, 0xaf, 0xb4, 0x68, 0xe3, 0x1f, 0x2e, 0xac, 0xf0, 0x16, 0x3e, 0xa2, 0xe6, 0x93, 0xbc, 0xb9, - 0xe0, 0xe7, 0xbc, 0xad, 0xd1, 0x51, 0x17, 0x79, 0xf8, 0x0d, 0x70, 0x21, 0xc3, 0x61, 0xef, 0x0a, - 0xf3, 0x29, 0xf1, 0x40, 0x60, 0x9d, 0xe8, 0x76, 0x42, 0x44, 0x9d, 0x24, 0xe1, 0xd7, 0x00, 0xd4, - 0xc8, 0x1b, 0x56, 0xc8, 0xd7, 0x3f, 0x2d, 0xde, 0x2a, 0xec, 0x44, 0xb7, 0x25, 0x0d, 0x75, 0x90, - 0x83, 0x1f, 0x1a, 0x2d, 0x3b, 0x49, 0x5f, 0xb3, 0xd4, 0xbc, 0xce, 0x2f, 0xec, 0x2b, 0x27, 0x0f, - 0xc0, 0x54, 0x19, 0x8a, 0x5d, 0xac, 0x79, 0x58, 0x43, 0x41, 0x5d, 0xd0, 0x67, 0xd8, 0x63, 0x3a, - 0x93, 0xc3, 0xe1, 0x04, 0xe8, 0xdb, 0xc7, 
0xf2, 0xb3, 0x31, 0x62, 0x7f, 0xc2, 0xb7, 0xc0, 0x40, - 0xdd, 0x72, 0xe3, 0x64, 0x14, 0xd0, 0xbb, 0x5a, 0x8f, 0x84, 0xde, 0x97, 0x72, 0x37, 0x8c, 0x99, - 0x0f, 0x0c, 0x30, 0xdd, 0xb9, 0xaa, 0x7c, 0x51, 0x16, 0xfd, 0xdc, 0x00, 0x93, 0x6d, 0x05, 0xa4, - 0x83, 0x31, 0x6e, 0xab, 0x31, 0xf7, 0x7a, 0x58, 0x09, 0xc4, 0x45, 0xe0, 0x1d, 0xad, 0x6e, 0xd9, - 0x8f, 0x0c, 0x30, 0x91, 0x4d, 0xcc, 0x5f, 0x90, 0x97, 0x8a, 0xef, 0xe7, 0xc0, 0x74, 0xe7, 0x1e, - 0x1c, 0x7a, 0x6a, 0xba, 0xd0, 0xf3, 0x01, 0x4d, 0xa7, 0x91, 0xed, 0xbb, 0x06, 0x18, 0x79, 0x47, - 0xc9, 0x25, 0x5f, 0x33, 0x7b, 0x39, 0x15, 0x4a, 0x4a, 0x5f, 0xca, 0xa0, 0x48, 0x87, 0x2c, 0xfe, - 0xce, 0x00, 0x53, 0x1d, 0xcb, 0x39, 0xbc, 0x06, 0x06, 0x2d, 0xd7, 0x0d, 0x0e, 0xc4, 0x34, 0x4f, - 0x1b, 0xcb, 0x2f, 0x71, 0x2a, 0x92, 0x5c, 0xcd, 0x67, 0xb9, 0xcf, 0xc1, 0x67, 0xc5, 0x3f, 0x1a, - 0xe0, 0xf2, 0x83, 0xa2, 0xee, 0xf3, 0x3e, 0xc3, 0x79, 0x90, 0x97, 0xcd, 0xf6, 0x11, 0x3f, 0x3f, - 0x99, 0x5d, 0x65, 0x46, 0xe0, 0xbf, 0x96, 0x11, 0x7f, 0x15, 0x7f, 0x65, 0x80, 0x89, 0x0a, 0x26, - 0x75, 0xa7, 0x8a, 0x11, 0xde, 0xc5, 0x04, 0xfb, 0x55, 0x0c, 0x17, 0xc0, 0x30, 0xff, 0xda, 0x18, - 0x5a, 0xd5, 0xe4, 0x1b, 0xc9, 0xa4, 0x74, 0xf4, 0xf0, 0xed, 0x84, 0x81, 0x52, 0x19, 0xf5, 0x3d, - 0x25, 0xd7, 0xf5, 0x7b, 0xca, 0x65, 0xd0, 0x1f, 0xa6, 0x03, 0xe0, 0x3c, 0xe3, 0xf2, 0x99, 0x2f, - 0xa7, 0x72, 0x6e, 0x40, 0x22, 0x3e, 0xe5, 0x1a, 0x90, 0xdc, 0x80, 0x44, 0x88, 0x53, 0x8b, 0xdf, - 0x02, 0xe7, 0x5b, 0xd3, 0x33, 0xc3, 0x23, 0xb1, 0xdb, 0xf6, 0xfd, 0x86, 0xf1, 0x10, 0xe7, 0xe8, - 0x3f, 0x1b, 0xc8, 0x3d, 0xe4, 0x67, 0x03, 0x7f, 0x32, 0xc0, 0x85, 0xe4, 0x57, 0x35, 0xae, 0x83, - 0xfd, 0x68, 0x39, 0xf0, 0x77, 0x9d, 0x1a, 0xbc, 0x24, 0xe6, 0x88, 0xda, 0x70, 0x2e, 0x99, 0x21, - 0xc2, 0xfb, 0x60, 0x88, 0x0a, 0xa7, 0xc9, 0xf3, 0x7c, 0xf5, 0xe4, 0xe7, 0x99, 0xf5, 0xbe, 0x68, - 0x83, 0x12, 0x6a, 0x82, 0xc3, 0x8e, 0xb4, 0x6a, 0x95, 0x63, 0xdf, 0x96, 0xb3, 0xe4, 0x51, 0x71, - 0xa4, 0xcb, 0x4b, 0x82, 0x86, 0x14, 0xb7, 0xf8, 0x0f, 0x03, 0x4c, 0xb6, 0xfd, 0x4a, 0x08, 0x7e, - 0xdf, 0x00, 0xa3, 0x55, 0x6d, 0x7b, 0xf2, 0x62, 0x6c, 0x9c, 0xfe, 0x97, 0x48, 0x9a, 0x52, 0xd1, - 0x4b, 0xe8, 0x14, 0xd4, 0x02, 0x0a, 0xb7, 0x81, 0x59, 0xcd, 0xfc, 0x20, 0x2f, 0xf3, 0x89, 0xef, - 0x72, 0xb3, 0x51, 0x30, 0x97, 0xbb, 0xc8, 0xa0, 0xae, 0xab, 0xcb, 0xdf, 0xfe, 0xf8, 0xb3, 0xd9, - 0x73, 0x9f, 0x7c, 0x36, 0x7b, 0xee, 0xd3, 0xcf, 0x66, 0xcf, 0xbd, 0xdb, 0x9c, 0x35, 0x3e, 0x6e, - 0xce, 0x1a, 0x9f, 0x34, 0x67, 0x8d, 0x4f, 0x9b, 0xb3, 0xc6, 0x5f, 0x9b, 0xb3, 0xc6, 0x4f, 0xff, - 0x36, 0x7b, 0xee, 0x8d, 0x1b, 0x27, 0xfd, 0x19, 0xee, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xc0, - 0x20, 0xb3, 0x2b, 0xda, 0x2b, 0x00, 0x00, -} - -func (m *ConversionRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ConversionRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConversionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Objects) > 0 { - for iNdEx := len(m.Objects) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Objects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - i -= len(m.DesiredAPIVersion) - copy(dAtA[i:], m.DesiredAPIVersion) - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.DesiredAPIVersion))) - i-- - dAtA[i] = 0x12 - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ConversionResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ConversionResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConversionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if len(m.ConvertedObjects) > 0 { - for iNdEx := len(m.ConvertedObjects) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ConvertedObjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ConversionReview) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ConversionReview) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConversionReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Response != nil { - { - size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Request != nil { - { - size, err := m.Request.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CustomResourceColumnDefinition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceColumnDefinition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceColumnDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.JSONPath) - copy(dAtA[i:], m.JSONPath) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.JSONPath))) - i-- - dAtA[i] = 0x32 - i = encodeVarintGenerated(dAtA, i, uint64(m.Priority)) - i-- - dAtA[i] = 0x28 - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x22 - i -= len(m.Format) - copy(dAtA[i:], m.Format) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) - i-- - dAtA[i] = 0x1a - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) 
- i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CustomResourceConversion) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceConversion) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceConversion) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Webhook != nil { - { - size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.Strategy) - copy(dAtA[i:], m.Strategy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CustomResourceDefinition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceDefinition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CustomResourceDefinitionCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceDefinitionCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceDefinitionCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CustomResourceDefinitionList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - 
return dAtA[:n], nil -} - -func (m *CustomResourceDefinitionList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceDefinitionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CustomResourceDefinitionNames) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceDefinitionNames) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceDefinitionNames) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Categories) > 0 { - for iNdEx := len(m.Categories) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Categories[iNdEx]) - copy(dAtA[i:], m.Categories[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Categories[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - i -= len(m.ListKind) - copy(dAtA[i:], m.ListKind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ListKind))) - i-- - dAtA[i] = 0x2a - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- - dAtA[i] = 0x22 - if len(m.ShortNames) > 0 { - for iNdEx := len(m.ShortNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ShortNames[iNdEx]) - copy(dAtA[i:], m.ShortNames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShortNames[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - i -= len(m.Singular) - copy(dAtA[i:], m.Singular) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Singular))) - i-- - dAtA[i] = 0x12 - i -= len(m.Plural) - copy(dAtA[i:], m.Plural) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Plural))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CustomResourceDefinitionSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceDefinitionSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceDefinitionSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.PreserveUnknownFields { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x50 - if m.Conversion != nil { - { - size, err := m.Conversion.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - if len(m.Versions) > 0 { - for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } - i 
-= len(m.Scope) - copy(dAtA[i:], m.Scope) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scope))) - i-- - dAtA[i] = 0x22 - { - size, err := m.Names.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Group) - copy(dAtA[i:], m.Group) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CustomResourceDefinitionStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceDefinitionStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceDefinitionStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.StoredVersions) > 0 { - for iNdEx := len(m.StoredVersions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.StoredVersions[iNdEx]) - copy(dAtA[i:], m.StoredVersions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoredVersions[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - { - size, err := m.AcceptedNames.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *CustomResourceDefinitionVersion) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceDefinitionVersion) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceDefinitionVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DeprecationWarning != nil { - i -= len(*m.DeprecationWarning) - copy(dAtA[i:], *m.DeprecationWarning) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DeprecationWarning))) - i-- - dAtA[i] = 0x42 - } - i-- - if m.Deprecated { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 - if len(m.AdditionalPrinterColumns) > 0 { - for iNdEx := len(m.AdditionalPrinterColumns) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AdditionalPrinterColumns[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - } - if m.Subresources != nil { - { - size, err := m.Subresources.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.Schema != nil { - { - size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - i-- - if m.Storage { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - i-- - if m.Served { - dAtA[i] = 1 - } else { - dAtA[i] = 
0 - } - i-- - dAtA[i] = 0x10 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CustomResourceSubresourceScale) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceSubresourceScale) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceSubresourceScale) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.LabelSelectorPath != nil { - i -= len(*m.LabelSelectorPath) - copy(dAtA[i:], *m.LabelSelectorPath) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.LabelSelectorPath))) - i-- - dAtA[i] = 0x1a - } - i -= len(m.StatusReplicasPath) - copy(dAtA[i:], m.StatusReplicasPath) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StatusReplicasPath))) - i-- - dAtA[i] = 0x12 - i -= len(m.SpecReplicasPath) - copy(dAtA[i:], m.SpecReplicasPath) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SpecReplicasPath))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CustomResourceSubresourceStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceSubresourceStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceSubresourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *CustomResourceSubresources) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceSubresources) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceSubresources) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Scale != nil { - { - size, err := m.Scale.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Status != nil { - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CustomResourceValidation) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceValidation) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceValidation) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.OpenAPIV3Schema != nil { - { - size, err := m.OpenAPIV3Schema.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } 
- return len(dAtA) - i, nil -} - -func (m *ExternalDocumentation) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExternalDocumentation) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExternalDocumentation) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.URL) - copy(dAtA[i:], m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) - i-- - dAtA[i] = 0x12 - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *JSON) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *JSON) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *JSON) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Raw != nil { - i -= len(m.Raw) - copy(dAtA[i:], m.Raw) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *JSONSchemaProps) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *JSONSchemaProps) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.XValidations) > 0 { - for iNdEx := len(m.XValidations) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.XValidations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xe2 - } - } - if m.XMapType != nil { - i -= len(*m.XMapType) - copy(dAtA[i:], *m.XMapType) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.XMapType))) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xda - } - if m.XListType != nil { - i -= len(*m.XListType) - copy(dAtA[i:], *m.XListType) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.XListType))) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xd2 - } - if len(m.XListMapKeys) > 0 { - for iNdEx := len(m.XListMapKeys) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.XListMapKeys[iNdEx]) - copy(dAtA[i:], m.XListMapKeys[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.XListMapKeys[iNdEx]))) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xca - } - } - i-- - if m.XIntOrString { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xc0 - i-- - if m.XEmbeddedResource { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xb8 - if m.XPreserveUnknownFields != nil { - i-- - if *m.XPreserveUnknownFields { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xb0 - } - i-- - if m.Nullable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xa8 - if m.Example != nil { - { - size, err := m.Example.MarshalToSizedBuffer(dAtA[:i]) - if 
err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xa2 - } - if m.ExternalDocs != nil { - { - size, err := m.ExternalDocs.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x9a - } - if len(m.Definitions) > 0 { - keysForDefinitions := make([]string, 0, len(m.Definitions)) - for k := range m.Definitions { - keysForDefinitions = append(keysForDefinitions, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefinitions) - for iNdEx := len(keysForDefinitions) - 1; iNdEx >= 0; iNdEx-- { - v := m.Definitions[string(keysForDefinitions[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForDefinitions[iNdEx]) - copy(dAtA[i:], keysForDefinitions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDefinitions[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x92 - } - } - if m.AdditionalItems != nil { - { - size, err := m.AdditionalItems.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x8a - } - if len(m.Dependencies) > 0 { - keysForDependencies := make([]string, 0, len(m.Dependencies)) - for k := range m.Dependencies { - keysForDependencies = append(keysForDependencies, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDependencies) - for iNdEx := len(keysForDependencies) - 1; iNdEx >= 0; iNdEx-- { - v := m.Dependencies[string(keysForDependencies[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForDependencies[iNdEx]) - copy(dAtA[i:], keysForDependencies[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDependencies[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x82 - } - } - if len(m.PatternProperties) > 0 { - keysForPatternProperties := make([]string, 0, len(m.PatternProperties)) - for k := range m.PatternProperties { - keysForPatternProperties = append(keysForPatternProperties, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForPatternProperties) - for iNdEx := len(keysForPatternProperties) - 1; iNdEx >= 0; iNdEx-- { - v := m.PatternProperties[string(keysForPatternProperties[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForPatternProperties[iNdEx]) - copy(dAtA[i:], keysForPatternProperties[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPatternProperties[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xfa - } - } - if m.AdditionalProperties != nil { - { - size, err := m.AdditionalProperties.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 
0x1 - i-- - dAtA[i] = 0xf2 - } - if len(m.Properties) > 0 { - keysForProperties := make([]string, 0, len(m.Properties)) - for k := range m.Properties { - keysForProperties = append(keysForProperties, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForProperties) - for iNdEx := len(keysForProperties) - 1; iNdEx >= 0; iNdEx-- { - v := m.Properties[string(keysForProperties[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForProperties[iNdEx]) - copy(dAtA[i:], keysForProperties[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForProperties[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xea - } - } - if m.Not != nil { - { - size, err := m.Not.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xe2 - } - if len(m.AnyOf) > 0 { - for iNdEx := len(m.AnyOf) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AnyOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xda - } - } - if len(m.OneOf) > 0 { - for iNdEx := len(m.OneOf) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.OneOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xd2 - } - } - if len(m.AllOf) > 0 { - for iNdEx := len(m.AllOf) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xca - } - } - if m.Items != nil { - { - size, err := m.Items.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc2 - } - if len(m.Required) > 0 { - for iNdEx := len(m.Required) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Required[iNdEx]) - copy(dAtA[i:], m.Required[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Required[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xba - } - } - if m.MinProperties != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.MinProperties)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb0 - } - if m.MaxProperties != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxProperties)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa8 - } - if len(m.Enum) > 0 { - for iNdEx := len(m.Enum) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Enum[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 - } - } - if m.MultipleOf != nil { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.MultipleOf)))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x99 - } - i-- - if m.UniqueItems { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x90 - if m.MinItems != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.MinItems)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x88 - } - if m.MaxItems 
!= nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxItems)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 - } - i -= len(m.Pattern) - copy(dAtA[i:], m.Pattern) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pattern))) - i-- - dAtA[i] = 0x7a - if m.MinLength != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.MinLength)) - i-- - dAtA[i] = 0x70 - } - if m.MaxLength != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxLength)) - i-- - dAtA[i] = 0x68 - } - i-- - if m.ExclusiveMinimum { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x60 - if m.Minimum != nil { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Minimum)))) - i-- - dAtA[i] = 0x59 - } - i-- - if m.ExclusiveMaximum { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x50 - if m.Maximum != nil { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Maximum)))) - i-- - dAtA[i] = 0x49 - } - if m.Default != nil { - { - size, err := m.Default.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - i -= len(m.Title) - copy(dAtA[i:], m.Title) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Title))) - i-- - dAtA[i] = 0x3a - i -= len(m.Format) - copy(dAtA[i:], m.Format) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) - i-- - dAtA[i] = 0x32 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0x2a - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x22 - if m.Ref != nil { - i -= len(*m.Ref) - copy(dAtA[i:], *m.Ref) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Ref))) - i-- - dAtA[i] = 0x1a - } - i -= len(m.Schema) - copy(dAtA[i:], m.Schema) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schema))) - i-- - dAtA[i] = 0x12 - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *JSONSchemaPropsOrArray) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *JSONSchemaPropsOrArray) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *JSONSchemaPropsOrArray) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.JSONSchemas) > 0 { - for iNdEx := len(m.JSONSchemas) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.JSONSchemas[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.Schema != nil { - { - size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *JSONSchemaPropsOrBool) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *JSONSchemaPropsOrBool) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
[vendored dependency diff collapsed during extraction; this hunk deletes auto-generated protobuf code (Marshal, MarshalTo, MarshalToSizedBuffer, Size, String, and Unmarshal methods plus the encodeVarintGenerated, sovGenerated, sozGenerated, and valueToStringGenerated helpers) for the apiextensions types ConversionRequest, ConversionResponse, ConversionReview, the CustomResourceDefinition family (Spec, Status, Names, Versions, Conditions, List, Conversion), CustomResourceSubresources, CustomResourceValidation, ExternalDocumentation, JSON, JSONSchemaProps and its OrArray, OrBool, and OrStringArray variants, ServiceReference, ValidationRule, WebhookClientConfig, and WebhookConversion]
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomResourceDefinitionStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceDefinitionStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, CustomResourceDefinitionCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AcceptedNames", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.AcceptedNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StoredVersions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StoredVersions = append(m.StoredVersions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomResourceDefinitionVersion: wiretype 
end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceDefinitionVersion: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Served", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Served = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Storage = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Schema == nil { - m.Schema = &CustomResourceValidation{} - } - if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subresources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Subresources == nil { - m.Subresources = &CustomResourceSubresources{} - } - if err := m.Subresources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdditionalPrinterColumns", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 
0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AdditionalPrinterColumns = append(m.AdditionalPrinterColumns, CustomResourceColumnDefinition{}) - if err := m.AdditionalPrinterColumns[len(m.AdditionalPrinterColumns)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Deprecated", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Deprecated = bool(v != 0) - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecationWarning", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.DeprecationWarning = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomResourceSubresourceScale: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceSubresourceScale: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SpecReplicasPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SpecReplicasPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StatusReplicasPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 
{ - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StatusReplicasPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelSelectorPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.LabelSelectorPath = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomResourceSubresourceStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomResourceSubresourceStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceSubresourceStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomResourceSubresources: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceSubresources: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - 
if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Status == nil { - m.Status = &CustomResourceSubresourceStatus{} - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scale", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Scale == nil { - m.Scale = &CustomResourceSubresourceScale{} - } - if err := m.Scale.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomResourceValidation) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomResourceValidation: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceValidation: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OpenAPIV3Schema", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.OpenAPIV3Schema == nil { - m.OpenAPIV3Schema = &JSONSchemaProps{} - } - if err := m.OpenAPIV3Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExternalDocumentation) Unmarshal(dAtA 
[]byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExternalDocumentation: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExternalDocumentation: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.URL = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JSON) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JSON: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JSON: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return 
ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) - if m.Raw == nil { - m.Raw = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JSONSchemaProps: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JSONSchemaProps: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Schema = JSONSchemaURL(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Ref = &s - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx 
>= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Format = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Title", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Title = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Default == nil { - m.Default = &JSON{} - } - if err := m.Default.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Maximum", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - v2 := float64(math.Float64frombits(v)) - 
m.Maximum = &v2 - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExclusiveMaximum", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ExclusiveMaximum = bool(v != 0) - case 11: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Minimum", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - v2 := float64(math.Float64frombits(v)) - m.Minimum = &v2 - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExclusiveMinimum", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ExclusiveMinimum = bool(v != 0) - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxLength", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.MaxLength = &v - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinLength", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.MinLength = &v - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pattern", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Pattern = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxItems", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.MaxItems = &v - case 17: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinItems", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.MinItems = &v - case 18: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UniqueItems", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - 
if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.UniqueItems = bool(v != 0) - case 19: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field MultipleOf", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - v2 := float64(math.Float64frombits(v)) - m.MultipleOf = &v2 - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Enum", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Enum = append(m.Enum, JSON{}) - if err := m.Enum[len(m.Enum)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 21: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxProperties", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.MaxProperties = &v - case 22: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinProperties", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.MinProperties = &v - case 23: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Required", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Required = append(m.Required, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 24: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Items == nil { - m.Items = &JSONSchemaPropsOrArray{} - } - if err := m.Items.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 25: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field AllOf", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllOf = append(m.AllOf, JSONSchemaProps{}) - if err := m.AllOf[len(m.AllOf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 26: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OneOf", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OneOf = append(m.OneOf, JSONSchemaProps{}) - if err := m.OneOf[len(m.OneOf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 27: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AnyOf", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AnyOf = append(m.AnyOf, JSONSchemaProps{}) - if err := m.AnyOf[len(m.AnyOf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 28: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Not", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Not == nil { - m.Not = &JSONSchemaProps{} - } - if err := m.Not.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 29: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Properties", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Properties == nil { - m.Properties = make(map[string]JSONSchemaProps) - } - var 
mapkey string - mapvalue := &JSONSchemaProps{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &JSONSchemaProps{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Properties[mapkey] = *mapvalue - iNdEx = postIndex - case 30: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdditionalProperties", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AdditionalProperties == nil { - m.AdditionalProperties = &JSONSchemaPropsOrBool{} - } - if err := m.AdditionalProperties.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 31: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PatternProperties", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PatternProperties == nil { - m.PatternProperties = make(map[string]JSONSchemaProps) - } - 
var mapkey string - mapvalue := &JSONSchemaProps{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &JSONSchemaProps{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.PatternProperties[mapkey] = *mapvalue - iNdEx = postIndex - case 32: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Dependencies", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Dependencies == nil { - m.Dependencies = make(JSONSchemaDependencies) - } - var mapkey string - mapvalue := &JSONSchemaPropsOrStringArray{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return 
ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &JSONSchemaPropsOrStringArray{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Dependencies[mapkey] = *mapvalue - iNdEx = postIndex - case 33: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdditionalItems", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AdditionalItems == nil { - m.AdditionalItems = &JSONSchemaPropsOrBool{} - } - if err := m.AdditionalItems.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 34: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Definitions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Definitions == nil { - m.Definitions = make(JSONSchemaDefinitions) - } - var mapkey string - mapvalue := &JSONSchemaProps{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated 
- } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &JSONSchemaProps{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Definitions[mapkey] = *mapvalue - iNdEx = postIndex - case 35: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalDocs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ExternalDocs == nil { - m.ExternalDocs = &ExternalDocumentation{} - } - if err := m.ExternalDocs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 36: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Example", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Example == nil { - m.Example = &JSON{} - } - if err := m.Example.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 37: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Nullable", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Nullable = bool(v != 0) - case 38: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field XPreserveUnknownFields", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 
0) - m.XPreserveUnknownFields = &b - case 39: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field XEmbeddedResource", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.XEmbeddedResource = bool(v != 0) - case 40: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field XIntOrString", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.XIntOrString = bool(v != 0) - case 41: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field XListMapKeys", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.XListMapKeys = append(m.XListMapKeys, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 42: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field XListType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.XListType = &s - iNdEx = postIndex - case 43: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field XMapType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.XMapType = &s - iNdEx = postIndex - case 44: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field XValidations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - 
} - m.XValidations = append(m.XValidations, ValidationRule{}) - if err := m.XValidations[len(m.XValidations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JSONSchemaPropsOrArray: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JSONSchemaPropsOrArray: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Schema == nil { - m.Schema = &JSONSchemaProps{} - } - if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JSONSchemas", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.JSONSchemas = append(m.JSONSchemas, JSONSchemaProps{}) - if err := m.JSONSchemas[len(m.JSONSchemas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if 
wireType == 4 { - return fmt.Errorf("proto: JSONSchemaPropsOrBool: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JSONSchemaPropsOrBool: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Allows", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Allows = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Schema == nil { - m.Schema = &JSONSchemaProps{} - } - if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JSONSchemaPropsOrStringArray: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JSONSchemaPropsOrStringArray: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Schema == nil { - m.Schema = &JSONSchemaProps{} - } - if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Property", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - 
} - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Property = append(m.Property, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Path = &s - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", 
wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Port = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ValidationRule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ValidationRule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ValidationRule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WebhookClientConfig: 
wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Service == nil { - m.Service = &ServiceReference{} - } - if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) - if m.CABundle == nil { - m.CABundle = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.URL = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WebhookConversion) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WebhookConversion: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WebhookConversion: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift 
>= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ClientConfig == nil { - m.ClientConfig = &WebhookClientConfig{} - } - if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConversionReviewVersions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ConversionReviewVersions = append(m.ConversionReviewVersions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto deleted file mode 100644 index 9c6b728fa..000000000 --- 
a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto +++ /dev/null @@ -1,729 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = "proto2"; - -package k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"; - -// ConversionRequest describes the conversion request parameters. -message ConversionRequest { - // uid is an identifier for the individual request/response. It allows distinguishing instances of requests which are - // otherwise identical (parallel requests, etc). - // The UID is meant to track the round trip (request/response) between the Kubernetes API server and the webhook, not the user request. - // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. - optional string uid = 1; - - // desiredAPIVersion is the version to convert given objects to. e.g. "myapi.example.com/v1" - optional string desiredAPIVersion = 2; - - // objects is the list of custom resource objects to be converted. - repeated k8s.io.apimachinery.pkg.runtime.RawExtension objects = 3; -} - -// ConversionResponse describes a conversion response. -message ConversionResponse { - // uid is an identifier for the individual request/response. - // This should be copied over from the corresponding `request.uid`. - optional string uid = 1; - - // convertedObjects is the list of converted version of `request.objects` if the `result` is successful, otherwise empty. - // The webhook is expected to set `apiVersion` of these objects to the `request.desiredAPIVersion`. The list - // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). - // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. - repeated k8s.io.apimachinery.pkg.runtime.RawExtension convertedObjects = 2; - - // result contains the result of conversion with extra details if the conversion failed. `result.status` determines if - // the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the - // conversion. A successful conversion must set `result.status` to `Success`. A failed conversion must set - // `result.status` to `Failure` and provide more details in `result.message` and return http status 200. The `result.message` - // will be used to construct an error message for the end user. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Status result = 3; -} - -// ConversionReview describes a conversion request/response. 
-message ConversionReview { - // request describes the attributes for the conversion request. - // +optional - optional ConversionRequest request = 1; - - // response describes the attributes for the conversion response. - // +optional - optional ConversionResponse response = 2; -} - -// CustomResourceColumnDefinition specifies a column for server side printing. -message CustomResourceColumnDefinition { - // name is a human readable name for the column. - optional string name = 1; - - // type is an OpenAPI type definition for this column. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. - optional string type = 2; - - // format is an optional OpenAPI type definition for this column. The 'name' format is applied - // to the primary identifier column to assist in clients identifying column is the resource name. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. - // +optional - optional string format = 3; - - // description is a human readable description of this column. - // +optional - optional string description = 4; - - // priority is an integer defining the relative importance of this column compared to others. Lower - // numbers are considered higher priority. Columns that may be omitted in limited space scenarios - // should be given a priority greater than 0. - // +optional - optional int32 priority = 5; - - // jsonPath is a simple JSON path (i.e. with array notation) which is evaluated against - // each custom resource to produce the value for this column. - optional string jsonPath = 6; -} - -// CustomResourceConversion describes how to convert different versions of a CR. -message CustomResourceConversion { - // strategy specifies how custom resources are converted between versions. Allowed values are: - // - `None`: The converter only change the apiVersion and would not touch any other field in the custom resource. - // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information - // is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhook to be set. - optional string strategy = 1; - - // webhook describes how to call the conversion webhook. Required when `strategy` is set to `Webhook`. - // +optional - optional WebhookConversion webhook = 2; -} - -// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format -// <.spec.name>.<.spec.group>. -message CustomResourceDefinition { - // Standard object's metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec describes how the user wants the resources to appear - optional CustomResourceDefinitionSpec spec = 2; - - // status indicates the actual state of the CustomResourceDefinition - // +optional - optional CustomResourceDefinitionStatus status = 3; -} - -// CustomResourceDefinitionCondition contains details for the current condition of this pod. -message CustomResourceDefinitionCondition { - // type is the type of the condition. Types include Established, NamesAccepted and Terminating. - optional string type = 1; - - // status is the status of the condition. - // Can be True, False, Unknown. - optional string status = 2; - - // lastTransitionTime last time the condition transitioned from one status to another. 
- // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; - - // reason is a unique, one-word, CamelCase reason for the condition's last transition. - // +optional - optional string reason = 4; - - // message is a human-readable message indicating details about last transition. - // +optional - optional string message = 5; -} - -// CustomResourceDefinitionList is a list of CustomResourceDefinition objects. -message CustomResourceDefinitionList { - // Standard object's metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items list individual CustomResourceDefinition objects - repeated CustomResourceDefinition items = 2; -} - -// CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition -message CustomResourceDefinitionNames { - // plural is the plural name of the resource to serve. - // The custom resources are served under `/apis///.../`. - // Must match the name of the CustomResourceDefinition (in the form `.`). - // Must be all lowercase. - optional string plural = 1; - - // singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`. - // +optional - optional string singular = 2; - - // shortNames are short names for the resource, exposed in API discovery documents, - // and used by clients to support invocations like `kubectl get `. - // It must be all lowercase. - // +optional - repeated string shortNames = 3; - - // kind is the serialized kind of the resource. It is normally CamelCase and singular. - // Custom resource instances will use this value as the `kind` attribute in API calls. - optional string kind = 4; - - // listKind is the serialized kind of the list for this resource. Defaults to "`kind`List". - // +optional - optional string listKind = 5; - - // categories is a list of grouped resources this custom resource belongs to (e.g. 'all'). - // This is published in API discovery documents, and used by clients to support invocations like - // `kubectl get all`. - // +optional - repeated string categories = 6; -} - -// CustomResourceDefinitionSpec describes how a user wants their resource to appear -message CustomResourceDefinitionSpec { - // group is the API group of the defined custom resource. - // The custom resources are served under `/apis//...`. - // Must match the name of the CustomResourceDefinition (in the form `.`). - optional string group = 1; - - // names specify the resource and kind names for the custom resource. - optional CustomResourceDefinitionNames names = 3; - - // scope indicates whether the defined custom resource is cluster- or namespace-scoped. - // Allowed values are `Cluster` and `Namespaced`. - optional string scope = 4; - - // versions is the list of all API versions of the defined custom resource. - // Version names are used to compute the order in which served versions are listed in API discovery. - // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered - // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), - // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first - // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing - // major version, then minor version. 
An example sorted list of versions: - // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. - repeated CustomResourceDefinitionVersion versions = 7; - - // conversion defines conversion settings for the CRD. - // +optional - optional CustomResourceConversion conversion = 9; - - // preserveUnknownFields indicates that object fields which are not specified - // in the OpenAPI schema should be preserved when persisting to storage. - // apiVersion, kind, metadata and known fields inside metadata are always preserved. - // This field is deprecated in favor of setting `x-preserve-unknown-fields` to true in `spec.versions[*].schema.openAPIV3Schema`. - // See https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#pruning-versus-preserving-unknown-fields for details. - // +optional - optional bool preserveUnknownFields = 10; -} - -// CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition -message CustomResourceDefinitionStatus { - // conditions indicate state for particular aspects of a CustomResourceDefinition - // +optional - // +listType=map - // +listMapKey=type - repeated CustomResourceDefinitionCondition conditions = 1; - - // acceptedNames are the names that are actually being used to serve discovery. - // They may be different than the names in spec. - // +optional - optional CustomResourceDefinitionNames acceptedNames = 2; - - // storedVersions lists all versions of CustomResources that were ever persisted. Tracking these - // versions allows a migration path for stored versions in etcd. The field is mutable - // so a migration controller can finish a migration to another version (ensuring - // no old objects are left in storage), and then remove the rest of the - // versions from this list. - // Versions may not be removed from `spec.versions` while they exist in this list. - // +optional - repeated string storedVersions = 3; -} - -// CustomResourceDefinitionVersion describes a version for CRD. -message CustomResourceDefinitionVersion { - // name is the version name, e.g. "v1", "v2beta1", etc. - // The custom resources are served under this version at `/apis///...` if `served` is true. - optional string name = 1; - - // served is a flag enabling/disabling this version from being served via REST APIs - optional bool served = 2; - - // storage indicates this version should be used when persisting custom resources to storage. - // There must be exactly one version with storage=true. - optional bool storage = 3; - - // deprecated indicates this version of the custom resource API is deprecated. - // When set to true, API requests to this version receive a warning header in the server response. - // Defaults to false. - // +optional - optional bool deprecated = 7; - - // deprecationWarning overrides the default warning returned to API clients. - // May only be set when `deprecated` is true. - // The default warning indicates this version is deprecated and recommends use - // of the newest served version of equal or greater stability, if one exists. - // +optional - optional string deprecationWarning = 8; - - // schema describes the schema used for validation, pruning, and defaulting of this version of the custom resource. - // +optional - optional CustomResourceValidation schema = 4; - - // subresources specify what subresources this version of the defined custom resource have. 
- // +optional - optional CustomResourceSubresources subresources = 5; - - // additionalPrinterColumns specifies additional columns returned in Table output. - // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. - // If no columns are specified, a single column displaying the age of the custom resource is used. - // +optional - repeated CustomResourceColumnDefinition additionalPrinterColumns = 6; -} - -// CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. -message CustomResourceSubresourceScale { - // specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. - // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under `.spec`. - // If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET. - optional string specReplicasPath = 1; - - // statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. - // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under `.status`. - // If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource - // will default to 0. - optional string statusReplicasPath = 2; - - // labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. - // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under `.status` or `.spec`. - // Must be set to work with HorizontalPodAutoscaler. - // The field pointed by this JSON path must be a string field (not a complex selector struct) - // which contains a serialized label selector in string form. - // More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource - // If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` - // subresource will default to the empty string. - // +optional - optional string labelSelectorPath = 3; -} - -// CustomResourceSubresourceStatus defines how to serve the status subresource for CustomResources. -// Status is represented by the `.status` JSON path inside of a CustomResource. When set, -// * exposes a /status subresource for the custom resource -// * PUT requests to the /status subresource take a custom resource object, and ignore changes to anything except the status stanza -// * PUT/POST/PATCH requests to the custom resource ignore changes to the status stanza -message CustomResourceSubresourceStatus { -} - -// CustomResourceSubresources defines the status and scale subresources for CustomResources. -message CustomResourceSubresources { - // status indicates the custom resource should serve a `/status` subresource. - // When enabled: - // 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. - // 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. - // +optional - optional CustomResourceSubresourceStatus status = 1; - - // scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object. 
- // +optional - optional CustomResourceSubresourceScale scale = 2; -} - -// CustomResourceValidation is a list of validation methods for CustomResources. -message CustomResourceValidation { - // openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning. - // +optional - optional JSONSchemaProps openAPIV3Schema = 1; -} - -// ExternalDocumentation allows referencing an external resource for extended documentation. -message ExternalDocumentation { - optional string description = 1; - - optional string url = 2; -} - -// JSON represents any valid JSON value. -// These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil. -message JSON { - optional bytes raw = 1; -} - -// JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/). -message JSONSchemaProps { - optional string id = 1; - - optional string schema = 2; - - optional string ref = 3; - - optional string description = 4; - - optional string type = 5; - - // format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated: - // - // - bsonobjectid: a bson object ID, i.e. a 24 characters hex string - // - uri: an URI as parsed by Golang net/url.ParseRequestURI - // - email: an email address as parsed by Golang net/mail.ParseAddress - // - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. - // - ipv4: an IPv4 IP as parsed by Golang net.ParseIP - // - ipv6: an IPv6 IP as parsed by Golang net.ParseIP - // - cidr: a CIDR as parsed by Golang net.ParseCIDR - // - mac: a MAC address as parsed by Golang net.ParseMAC - // - uuid: an UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - // - uuid3: an UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - // - uuid4: an UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - // - uuid5: an UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - // - isbn: an ISBN10 or ISBN13 number string like "0321751043" or "978-0321751041" - // - isbn10: an ISBN10 number string like "0321751043" - // - isbn13: an ISBN13 number string like "978-0321751041" - // - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$ with any non digit characters mixed in - // - ssn: a U.S. social security number following the regex ^\\d{3}[- ]?\\d{2}[- ]?\\d{4}$ - // - hexcolor: an hexadecimal color code like "#FFFFFF: following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$ - // - rgbcolor: an RGB color code like rgb like "rgb(255,255,2559" - // - byte: base64 encoded binary data - // - password: any kind of string - // - date: a date string like "2006-01-02" as defined by full-date in RFC3339 - // - duration: a duration string like "22 ns" as parsed by Golang time.ParseDuration or compatible with Scala duration format - // - datetime: a date time string like "2014-12-15T19:30:20.000Z" as defined by date-time in RFC3339. - optional string format = 6; - - optional string title = 7; - - // default is a default value for undefined object fields. 
- // Defaulting is a beta feature under the CustomResourceDefaulting feature gate. - // Defaulting requires spec.preserveUnknownFields to be false. - optional JSON default = 8; - - optional double maximum = 9; - - optional bool exclusiveMaximum = 10; - - optional double minimum = 11; - - optional bool exclusiveMinimum = 12; - - optional int64 maxLength = 13; - - optional int64 minLength = 14; - - optional string pattern = 15; - - optional int64 maxItems = 16; - - optional int64 minItems = 17; - - optional bool uniqueItems = 18; - - optional double multipleOf = 19; - - repeated JSON enum = 20; - - optional int64 maxProperties = 21; - - optional int64 minProperties = 22; - - repeated string required = 23; - - optional JSONSchemaPropsOrArray items = 24; - - repeated JSONSchemaProps allOf = 25; - - repeated JSONSchemaProps oneOf = 26; - - repeated JSONSchemaProps anyOf = 27; - - optional JSONSchemaProps not = 28; - - map properties = 29; - - optional JSONSchemaPropsOrBool additionalProperties = 30; - - map patternProperties = 31; - - map dependencies = 32; - - optional JSONSchemaPropsOrBool additionalItems = 33; - - map definitions = 34; - - optional ExternalDocumentation externalDocs = 35; - - optional JSON example = 36; - - optional bool nullable = 37; - - // x-kubernetes-preserve-unknown-fields stops the API server - // decoding step from pruning fields which are not specified - // in the validation schema. This affects fields recursively, - // but switches back to normal pruning behaviour if nested - // properties or additionalProperties are specified in the schema. - // This can either be true or undefined. False is forbidden. - optional bool xKubernetesPreserveUnknownFields = 38; - - // x-kubernetes-embedded-resource defines that the value is an - // embedded Kubernetes runtime.Object, with TypeMeta and - // ObjectMeta. The type must be object. It is allowed to further - // restrict the embedded object. kind, apiVersion and metadata - // are validated automatically. x-kubernetes-preserve-unknown-fields - // is allowed to be true, but does not have to be if the object - // is fully specified (up to kind, apiVersion, metadata). - optional bool xKubernetesEmbeddedResource = 39; - - // x-kubernetes-int-or-string specifies that this value is - // either an integer or a string. If this is true, an empty - // type is allowed and type as child of anyOf is permitted - // if following one of the following patterns: - // - // 1) anyOf: - // - type: integer - // - type: string - // 2) allOf: - // - anyOf: - // - type: integer - // - type: string - // - ... zero or more - optional bool xKubernetesIntOrString = 40; - - // x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used - // as the index of the map. - // - // This tag MUST only be used on lists that have the "x-kubernetes-list-type" - // extension set to "map". Also, the values specified for this attribute must - // be a scalar typed field of the child structure (no nesting is supported). - // - // The properties specified must either be required or have a default value, - // to ensure those properties are present for all list items. - // - // +optional - repeated string xKubernetesListMapKeys = 41; - - // x-kubernetes-list-type annotates an array to further describe its topology. - // This extension must only be used on lists and may have 3 possible values: - // - // 1) `atomic`: the list is treated as a single entity, like a scalar. 
- // Atomic lists will be entirely replaced when updated. This extension - // may be used on any type of list (struct, scalar, ...). - // 2) `set`: - // Sets are lists that must not have multiple items with the same value. Each - // value must be a scalar, an object with x-kubernetes-map-type `atomic` or an - // array with x-kubernetes-list-type `atomic`. - // 3) `map`: - // These lists are like maps in that their elements have a non-index key - // used to identify them. Order is preserved upon merge. The map tag - // must only be used on a list with elements of type object. - // Defaults to atomic for arrays. - // +optional - optional string xKubernetesListType = 42; - - // x-kubernetes-map-type annotates an object to further describe its topology. - // This extension must only be used when type is object and may have 2 possible values: - // - // 1) `granular`: - // These maps are actual maps (key-value pairs) and each fields are independent - // from each other (they can each be manipulated by separate actors). This is - // the default behaviour for all maps. - // 2) `atomic`: the list is treated as a single entity, like a scalar. - // Atomic maps will be entirely replaced when updated. - // +optional - optional string xKubernetesMapType = 43; - - // x-kubernetes-validations describes a list of validation rules written in the CEL expression language. - // This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled. - // +patchMergeKey=rule - // +patchStrategy=merge - // +listType=map - // +listMapKey=rule - repeated ValidationRule xKubernetesValidations = 44; -} - -// JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps -// or an array of JSONSchemaProps. Mainly here for serialization purposes. -message JSONSchemaPropsOrArray { - optional JSONSchemaProps schema = 1; - - repeated JSONSchemaProps jSONSchemas = 2; -} - -// JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. -// Defaults to true for the boolean property. -message JSONSchemaPropsOrBool { - optional bool allows = 1; - - optional JSONSchemaProps schema = 2; -} - -// JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array. -message JSONSchemaPropsOrStringArray { - optional JSONSchemaProps schema = 1; - - repeated string property = 2; -} - -// ServiceReference holds a reference to Service.legacy.k8s.io -message ServiceReference { - // namespace is the namespace of the service. - // Required - optional string namespace = 1; - - // name is the name of the service. - // Required - optional string name = 2; - - // path is an optional URL path at which the webhook will be contacted. - // +optional - optional string path = 3; - - // port is an optional service port at which the webhook will be contacted. - // `port` should be a valid port number (1-65535, inclusive). - // Defaults to 443 for backward compatibility. - // +optional - optional int32 port = 4; -} - -// ValidationRule describes a validation rule written in the CEL expression language. -message ValidationRule { - // Rule represents the expression which will be evaluated by CEL. - // ref: https://github.com/google/cel-spec - // The Rule is scoped to the location of the x-kubernetes-validations extension in the schema. - // The `self` variable in the CEL expression is bound to the scoped value. 
- // Example: - // - Rule scoped to the root of a resource with a status subresource: {"rule": "self.status.actual <= self.spec.maxDesired"} - // - // If the Rule is scoped to an object with properties, the accessible properties of the object are field selectable - // via `self.field` and field presence can be checked via `has(self.field)`. Null valued fields are treated as - // absent fields in CEL expressions. - // If the Rule is scoped to an object with additionalProperties (i.e. a map) the value of the map - // are accessible via `self[mapKey]`, map containment can be checked via `mapKey in self` and all entries of the map - // are accessible via CEL macros and functions such as `self.all(...)`. - // If the Rule is scoped to an array, the elements of the array are accessible via `self[i]` and also by macros and - // functions. - // If the Rule is scoped to a scalar, `self` is bound to the scalar value. - // Examples: - // - Rule scoped to a map of objects: {"rule": "self.components['Widget'].priority < 10"} - // - Rule scoped to a list of integers: {"rule": "self.values.all(value, value >= 0 && value < 100)"} - // - Rule scoped to a string value: {"rule": "self.startsWith('kube')"} - // - // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the - // object and from any x-kubernetes-embedded-resource annotated objects. No other metadata properties are accessible. - // - // Unknown data preserved in custom resources via x-kubernetes-preserve-unknown-fields is not accessible in CEL - // expressions. This includes: - // - Unknown field values that are preserved by object schemas with x-kubernetes-preserve-unknown-fields. - // - Object properties where the property schema is of an "unknown type". An "unknown type" is recursively defined as: - // - A schema with no type and x-kubernetes-preserve-unknown-fields set to true - // - An array where the items schema is of an "unknown type" - // - An object where the additionalProperties schema is of an "unknown type" - // - // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. - // Accessible property names are escaped according to the following rules when accessed in the expression: - // - '__' escapes to '__underscores__' - // - '.' escapes to '__dot__' - // - '-' escapes to '__dash__' - // - '/' escapes to '__slash__' - // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: - // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if", - // "import", "let", "loop", "package", "namespace", "return". - // Examples: - // - Rule accessing a property named "namespace": {"rule": "self.__namespace__ > 0"} - // - Rule accessing a property named "x-prop": {"rule": "self.x__dash__prop > 0"} - // - Rule accessing a property named "redact__d": {"rule": "self.redact__underscores__d > 0"} - // - // Equality on arrays with x-kubernetes-list-type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. - // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: - // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and - // non-intersecting elements in `Y` are appended, retaining their partial order. - // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values - // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. 
Elements in `Y` with - // non-intersecting keys are appended, retaining their partial order. - optional string rule = 1; - - // Message represents the message displayed when validation fails. The message is required if the Rule contains - // line breaks. The message must not contain line breaks. - // If unset, the message is "failed rule: {Rule}". - // e.g. "must be a URL with the host matching spec.host" - optional string message = 2; -} - -// WebhookClientConfig contains the information to make a TLS connection with the webhook. -message WebhookClientConfig { - // url gives the location of the webhook, in standard URL form - // (`scheme://host:port/path`). Exactly one of `url` or `service` - // must be specified. - // - // The `host` should not refer to a service running in the cluster; use - // the `service` field instead. The host might be resolved via external - // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve - // in-cluster DNS as that would be a layering violation). `host` may - // also be an IP address. - // - // Please note that using `localhost` or `127.0.0.1` as a `host` is - // risky unless you take great care to run this webhook on all hosts - // which run an apiserver which might need to make calls to this - // webhook. Such installs are likely to be non-portable, i.e., not easy - // to turn up in a new cluster. - // - // The scheme must be "https"; the URL must begin with "https://". - // - // A path is optional, and if present may be any string permissible in - // a URL. You may use the path to pass an arbitrary string to the - // webhook, for example, a cluster identifier. - // - // Attempting to use a user or basic auth e.g. "user:password@" is not - // allowed. Fragments ("#...") and query parameters ("?...") are not - // allowed, either. - // - // +optional - optional string url = 3; - - // service is a reference to the service for this webhook. Either - // service or url must be specified. - // - // If the webhook is running within the cluster, then you should use `service`. - // - // +optional - optional ServiceReference service = 1; - - // caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. - // If unspecified, system trust roots on the apiserver are used. - // +optional - optional bytes caBundle = 2; -} - -// WebhookConversion describes how to call a conversion webhook -message WebhookConversion { - // clientConfig is the instructions for how to call the webhook if strategy is `Webhook`. - // +optional - optional WebhookClientConfig clientConfig = 2; - - // conversionReviewVersions is an ordered list of preferred `ConversionReview` - // versions the Webhook expects. The API server will use the first version in - // the list which it supports. If none of the versions specified in this list - // are supported by API server, conversion will fail for the custom resource. - // If a persisted Webhook configuration specifies allowed versions and does not - // include any versions known to the API Server, calls to the webhook will fail. - repeated string conversionReviewVersions = 3; -} - diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go deleted file mode 100644 index 12cc2f6f2..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go +++ /dev/null @@ -1,136 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "bytes" - "errors" - - "k8s.io/apimachinery/pkg/util/json" -) - -var jsTrue = []byte("true") -var jsFalse = []byte("false") - -func (s JSONSchemaPropsOrBool) MarshalJSON() ([]byte, error) { - if s.Schema != nil { - return json.Marshal(s.Schema) - } - - if s.Schema == nil && !s.Allows { - return jsFalse, nil - } - return jsTrue, nil -} - -func (s *JSONSchemaPropsOrBool) UnmarshalJSON(data []byte) error { - var nw JSONSchemaPropsOrBool - switch { - case len(data) == 0: - case data[0] == '{': - var sch JSONSchemaProps - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Allows = true - nw.Schema = &sch - case len(data) == 4 && string(data) == "true": - nw.Allows = true - case len(data) == 5 && string(data) == "false": - nw.Allows = false - default: - return errors.New("boolean or JSON schema expected") - } - *s = nw - return nil -} - -func (s JSONSchemaPropsOrStringArray) MarshalJSON() ([]byte, error) { - if len(s.Property) > 0 { - return json.Marshal(s.Property) - } - if s.Schema != nil { - return json.Marshal(s.Schema) - } - return []byte("null"), nil -} - -func (s *JSONSchemaPropsOrStringArray) UnmarshalJSON(data []byte) error { - var first byte - if len(data) > 1 { - first = data[0] - } - var nw JSONSchemaPropsOrStringArray - if first == '{' { - var sch JSONSchemaProps - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - if first == '[' { - if err := json.Unmarshal(data, &nw.Property); err != nil { - return err - } - } - *s = nw - return nil -} - -func (s JSONSchemaPropsOrArray) MarshalJSON() ([]byte, error) { - if len(s.JSONSchemas) > 0 { - return json.Marshal(s.JSONSchemas) - } - return json.Marshal(s.Schema) -} - -func (s *JSONSchemaPropsOrArray) UnmarshalJSON(data []byte) error { - var nw JSONSchemaPropsOrArray - var first byte - if len(data) > 1 { - first = data[0] - } - if first == '{' { - var sch JSONSchemaProps - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - if first == '[' { - if err := json.Unmarshal(data, &nw.JSONSchemas); err != nil { - return err - } - } - *s = nw - return nil -} - -func (s JSON) MarshalJSON() ([]byte, error) { - if len(s.Raw) > 0 { - return s.Raw, nil - } - return []byte("null"), nil - -} - -func (s *JSON) UnmarshalJSON(data []byte) error { - if len(data) > 0 && !bytes.Equal(data, nullLiteral) { - s.Raw = data - } - return nil -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/register.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/register.go deleted file mode 100644 index bd6a6ed00..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/register.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -const GroupName = "apiextensions.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns back a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &CustomResourceDefinition{}, - &CustomResourceDefinitionList{}, - &ConversionReview{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addDefaultingFuncs) -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go deleted file mode 100644 index 223601bc4..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go +++ /dev/null @@ -1,485 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" -) - -// ConversionStrategyType describes different conversion types. -type ConversionStrategyType string - -const ( - // KubeAPIApprovedAnnotation is an annotation that must be set to create a CRD for the k8s.io, *.k8s.io, kubernetes.io, or *.kubernetes.io namespaces. - // The value should be a link to a URL where the current spec was approved, so updates to the spec should also update the URL. - // If the API is unapproved, you may set the annotation to a string starting with `"unapproved"`. 
For instance, `"unapproved, temporarily squatting"` or `"unapproved, experimental-only"`. This is discouraged. - KubeAPIApprovedAnnotation = "api-approved.kubernetes.io" - - // NoneConverter is a converter that only sets apiversion of the CR and leave everything else unchanged. - NoneConverter ConversionStrategyType = "None" - // WebhookConverter is a converter that calls to an external webhook to convert the CR. - WebhookConverter ConversionStrategyType = "Webhook" -) - -// CustomResourceDefinitionSpec describes how a user wants their resource to appear -type CustomResourceDefinitionSpec struct { - // group is the API group of the defined custom resource. - // The custom resources are served under `/apis//...`. - // Must match the name of the CustomResourceDefinition (in the form `.`). - Group string `json:"group" protobuf:"bytes,1,opt,name=group"` - // names specify the resource and kind names for the custom resource. - Names CustomResourceDefinitionNames `json:"names" protobuf:"bytes,3,opt,name=names"` - // scope indicates whether the defined custom resource is cluster- or namespace-scoped. - // Allowed values are `Cluster` and `Namespaced`. - Scope ResourceScope `json:"scope" protobuf:"bytes,4,opt,name=scope,casttype=ResourceScope"` - // versions is the list of all API versions of the defined custom resource. - // Version names are used to compute the order in which served versions are listed in API discovery. - // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered - // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), - // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first - // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing - // major version, then minor version. An example sorted list of versions: - // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. - Versions []CustomResourceDefinitionVersion `json:"versions" protobuf:"bytes,7,rep,name=versions"` - - // conversion defines conversion settings for the CRD. - // +optional - Conversion *CustomResourceConversion `json:"conversion,omitempty" protobuf:"bytes,9,opt,name=conversion"` - - // preserveUnknownFields indicates that object fields which are not specified - // in the OpenAPI schema should be preserved when persisting to storage. - // apiVersion, kind, metadata and known fields inside metadata are always preserved. - // This field is deprecated in favor of setting `x-preserve-unknown-fields` to true in `spec.versions[*].schema.openAPIV3Schema`. - // See https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#pruning-versus-preserving-unknown-fields for details. - // +optional - PreserveUnknownFields bool `json:"preserveUnknownFields,omitempty" protobuf:"varint,10,opt,name=preserveUnknownFields"` -} - -// CustomResourceConversion describes how to convert different versions of a CR. -type CustomResourceConversion struct { - // strategy specifies how custom resources are converted between versions. Allowed values are: - // - `None`: The converter only change the apiVersion and would not touch any other field in the custom resource. - // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information - // is needed for this option. 
This requires spec.preserveUnknownFields to be false, and spec.conversion.webhook to be set. - Strategy ConversionStrategyType `json:"strategy" protobuf:"bytes,1,name=strategy"` - - // webhook describes how to call the conversion webhook. Required when `strategy` is set to `Webhook`. - // +optional - Webhook *WebhookConversion `json:"webhook,omitempty" protobuf:"bytes,2,opt,name=webhook"` -} - -// WebhookConversion describes how to call a conversion webhook -type WebhookConversion struct { - // clientConfig is the instructions for how to call the webhook if strategy is `Webhook`. - // +optional - ClientConfig *WebhookClientConfig `json:"clientConfig,omitempty" protobuf:"bytes,2,name=clientConfig"` - - // conversionReviewVersions is an ordered list of preferred `ConversionReview` - // versions the Webhook expects. The API server will use the first version in - // the list which it supports. If none of the versions specified in this list - // are supported by API server, conversion will fail for the custom resource. - // If a persisted Webhook configuration specifies allowed versions and does not - // include any versions known to the API Server, calls to the webhook will fail. - ConversionReviewVersions []string `json:"conversionReviewVersions" protobuf:"bytes,3,rep,name=conversionReviewVersions"` -} - -// WebhookClientConfig contains the information to make a TLS connection with the webhook. -type WebhookClientConfig struct { - // url gives the location of the webhook, in standard URL form - // (`scheme://host:port/path`). Exactly one of `url` or `service` - // must be specified. - // - // The `host` should not refer to a service running in the cluster; use - // the `service` field instead. The host might be resolved via external - // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve - // in-cluster DNS as that would be a layering violation). `host` may - // also be an IP address. - // - // Please note that using `localhost` or `127.0.0.1` as a `host` is - // risky unless you take great care to run this webhook on all hosts - // which run an apiserver which might need to make calls to this - // webhook. Such installs are likely to be non-portable, i.e., not easy - // to turn up in a new cluster. - // - // The scheme must be "https"; the URL must begin with "https://". - // - // A path is optional, and if present may be any string permissible in - // a URL. You may use the path to pass an arbitrary string to the - // webhook, for example, a cluster identifier. - // - // Attempting to use a user or basic auth e.g. "user:password@" is not - // allowed. Fragments ("#...") and query parameters ("?...") are not - // allowed, either. - // - // +optional - URL *string `json:"url,omitempty" protobuf:"bytes,3,opt,name=url"` - - // service is a reference to the service for this webhook. Either - // service or url must be specified. - // - // If the webhook is running within the cluster, then you should use `service`. - // - // +optional - Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"` - - // caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. - // If unspecified, system trust roots on the apiserver are used. - // +optional - CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"` -} - -// ServiceReference holds a reference to Service.legacy.k8s.io -type ServiceReference struct { - // namespace is the namespace of the service. 
- // Required - Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` - // name is the name of the service. - // Required - Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - - // path is an optional URL path at which the webhook will be contacted. - // +optional - Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` - - // port is an optional service port at which the webhook will be contacted. - // `port` should be a valid port number (1-65535, inclusive). - // Defaults to 443 for backward compatibility. - // +optional - Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` -} - -// CustomResourceDefinitionVersion describes a version for CRD. -type CustomResourceDefinitionVersion struct { - // name is the version name, e.g. “v1”, “v2beta1”, etc. - // The custom resources are served under this version at `/apis/<group>/<version>/...` if `served` is true. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // served is a flag enabling/disabling this version from being served via REST APIs - Served bool `json:"served" protobuf:"varint,2,opt,name=served"` - // storage indicates this version should be used when persisting custom resources to storage. - // There must be exactly one version with storage=true. - Storage bool `json:"storage" protobuf:"varint,3,opt,name=storage"` - // deprecated indicates this version of the custom resource API is deprecated. - // When set to true, API requests to this version receive a warning header in the server response. - // Defaults to false. - // +optional - Deprecated bool `json:"deprecated,omitempty" protobuf:"varint,7,opt,name=deprecated"` - // deprecationWarning overrides the default warning returned to API clients. - // May only be set when `deprecated` is true. - // The default warning indicates this version is deprecated and recommends use - // of the newest served version of equal or greater stability, if one exists. - // +optional - DeprecationWarning *string `json:"deprecationWarning,omitempty" protobuf:"bytes,8,opt,name=deprecationWarning"` - // schema describes the schema used for validation, pruning, and defaulting of this version of the custom resource. - // +optional - Schema *CustomResourceValidation `json:"schema,omitempty" protobuf:"bytes,4,opt,name=schema"` - // subresources specify what subresources this version of the defined custom resource have. - // +optional - Subresources *CustomResourceSubresources `json:"subresources,omitempty" protobuf:"bytes,5,opt,name=subresources"` - // additionalPrinterColumns specifies additional columns returned in Table output. - // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. - // If no columns are specified, a single column displaying the age of the custom resource is used. - // +optional - AdditionalPrinterColumns []CustomResourceColumnDefinition `json:"additionalPrinterColumns,omitempty" protobuf:"bytes,6,rep,name=additionalPrinterColumns"` -} - -// CustomResourceColumnDefinition specifies a column for server side printing. -type CustomResourceColumnDefinition struct { - // name is a human readable name for the column. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // type is an OpenAPI type definition for this column. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. - Type string `json:"type" protobuf:"bytes,2,opt,name=type"` - // format is an optional OpenAPI type definition for this column. 
The 'name' format is applied - // to the primary identifier column to assist in clients identifying column is the resource name. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. - // +optional - Format string `json:"format,omitempty" protobuf:"bytes,3,opt,name=format"` - // description is a human readable description of this column. - // +optional - Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - // priority is an integer defining the relative importance of this column compared to others. Lower - // numbers are considered higher priority. Columns that may be omitted in limited space scenarios - // should be given a priority greater than 0. - // +optional - Priority int32 `json:"priority,omitempty" protobuf:"bytes,5,opt,name=priority"` - // jsonPath is a simple JSON path (i.e. with array notation) which is evaluated against - // each custom resource to produce the value for this column. - JSONPath string `json:"jsonPath" protobuf:"bytes,6,opt,name=jsonPath"` -} - -// CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition -type CustomResourceDefinitionNames struct { - // plural is the plural name of the resource to serve. - // The custom resources are served under `/apis/<group>/<version>/.../<plural>`. - // Must match the name of the CustomResourceDefinition (in the form `<names.plural>.<group>`). - // Must be all lowercase. - Plural string `json:"plural" protobuf:"bytes,1,opt,name=plural"` - // singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`. - // +optional - Singular string `json:"singular,omitempty" protobuf:"bytes,2,opt,name=singular"` - // shortNames are short names for the resource, exposed in API discovery documents, - // and used by clients to support invocations like `kubectl get <shortname>`. - // It must be all lowercase. - // +optional - ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,3,opt,name=shortNames"` - // kind is the serialized kind of the resource. It is normally CamelCase and singular. - // Custom resource instances will use this value as the `kind` attribute in API calls. - Kind string `json:"kind" protobuf:"bytes,4,opt,name=kind"` - // listKind is the serialized kind of the list for this resource. Defaults to "`kind`List". - // +optional - ListKind string `json:"listKind,omitempty" protobuf:"bytes,5,opt,name=listKind"` - // categories is a list of grouped resources this custom resource belongs to (e.g. 'all'). - // This is published in API discovery documents, and used by clients to support invocations like - // `kubectl get all`. - // +optional - Categories []string `json:"categories,omitempty" protobuf:"bytes,6,rep,name=categories"` -} - -// ResourceScope is an enum defining the different scopes available to a custom resource -type ResourceScope string - -const ( - ClusterScoped ResourceScope = "Cluster" - NamespaceScoped ResourceScope = "Namespaced" -) - -type ConditionStatus string - -// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. -// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes -// can't decide if a resource is in the condition or not. In the future, we could add other -// intermediate conditions, e.g. ConditionDegraded. 
-const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" - ConditionUnknown ConditionStatus = "Unknown" -) - -// CustomResourceDefinitionConditionType is a valid value for CustomResourceDefinitionCondition.Type -type CustomResourceDefinitionConditionType string - -const ( - // Established means that the resource has become active. A resource is established when all names are - // accepted without a conflict for the first time. A resource stays established until deleted, even during - // a later NamesAccepted due to changed names. Note that not all names can be changed. - Established CustomResourceDefinitionConditionType = "Established" - // NamesAccepted means the names chosen for this CustomResourceDefinition do not conflict with others in - // the group and are therefore accepted. - NamesAccepted CustomResourceDefinitionConditionType = "NamesAccepted" - // NonStructuralSchema means that one or more OpenAPI schema is not structural. - // - // A schema is structural if it specifies types for all values, with the only exceptions of those with - // - x-kubernetes-int-or-string: true — for fields which can be integer or string - // - x-kubernetes-preserve-unknown-fields: true — for raw, unspecified JSON values - // and there is no type, additionalProperties, default, nullable or x-kubernetes-* vendor extenions - // specified under allOf, anyOf, oneOf or not. - // - // Non-structural schemas will not be allowed anymore in v1 API groups. Moreover, new features will not be - // available for non-structural CRDs: - // - pruning - // - defaulting - // - read-only - // - OpenAPI publishing - // - webhook conversion - NonStructuralSchema CustomResourceDefinitionConditionType = "NonStructuralSchema" - // Terminating means that the CustomResourceDefinition has been deleted and is cleaning up. - Terminating CustomResourceDefinitionConditionType = "Terminating" - // KubernetesAPIApprovalPolicyConformant indicates that an API in *.k8s.io or *.kubernetes.io is or is not approved. For CRDs - // outside those groups, this condition will not be set. For CRDs inside those groups, the condition will - // be true if .metadata.annotations["api-approved.kubernetes.io"] is set to a URL, otherwise it will be false. - // See https://github.com/kubernetes/enhancements/pull/1111 for more details. - KubernetesAPIApprovalPolicyConformant CustomResourceDefinitionConditionType = "KubernetesAPIApprovalPolicyConformant" -) - -// CustomResourceDefinitionCondition contains details for the current condition of this pod. -type CustomResourceDefinitionCondition struct { - // type is the type of the condition. Types include Established, NamesAccepted and Terminating. - Type CustomResourceDefinitionConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=CustomResourceDefinitionConditionType"` - // status is the status of the condition. - // Can be True, False, Unknown. - Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` - // lastTransitionTime last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // reason is a unique, one-word, CamelCase reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // message is a human-readable message indicating details about last transition. 
- // +optional - Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` -} - -// CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition -type CustomResourceDefinitionStatus struct { - // conditions indicate state for particular aspects of a CustomResourceDefinition - // +optional - // +listType=map - // +listMapKey=type - Conditions []CustomResourceDefinitionCondition `json:"conditions" protobuf:"bytes,1,opt,name=conditions"` - - // acceptedNames are the names that are actually being used to serve discovery. - // They may be different than the names in spec. - // +optional - AcceptedNames CustomResourceDefinitionNames `json:"acceptedNames" protobuf:"bytes,2,opt,name=acceptedNames"` - - // storedVersions lists all versions of CustomResources that were ever persisted. Tracking these - // versions allows a migration path for stored versions in etcd. The field is mutable - // so a migration controller can finish a migration to another version (ensuring - // no old objects are left in storage), and then remove the rest of the - // versions from this list. - // Versions may not be removed from `spec.versions` while they exist in this list. - // +optional - StoredVersions []string `json:"storedVersions" protobuf:"bytes,3,rep,name=storedVersions"` -} - -// CustomResourceCleanupFinalizer is the name of the finalizer which will delete instances of -// a CustomResourceDefinition -const CustomResourceCleanupFinalizer = "customresourcecleanup.apiextensions.k8s.io" - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format -// <.spec.name>.<.spec.group>. -type CustomResourceDefinition struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec describes how the user wants the resources to appear - Spec CustomResourceDefinitionSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // status indicates the actual state of the CustomResourceDefinition - // +optional - Status CustomResourceDefinitionStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CustomResourceDefinitionList is a list of CustomResourceDefinition objects. -type CustomResourceDefinitionList struct { - metav1.TypeMeta `json:",inline"` - - // Standard object's metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items list individual CustomResourceDefinition objects - Items []CustomResourceDefinition `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// CustomResourceValidation is a list of validation methods for CustomResources. -type CustomResourceValidation struct { - // openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning. - // +optional - OpenAPIV3Schema *JSONSchemaProps `json:"openAPIV3Schema,omitempty" protobuf:"bytes,1,opt,name=openAPIV3Schema"` -} - -// CustomResourceSubresources defines the status and scale subresources for CustomResources. 
-type CustomResourceSubresources struct { - // status indicates the custom resource should serve a `/status` subresource. - // When enabled: - // 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. - // 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. - // +optional - Status *CustomResourceSubresourceStatus `json:"status,omitempty" protobuf:"bytes,1,opt,name=status"` - // scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object. - // +optional - Scale *CustomResourceSubresourceScale `json:"scale,omitempty" protobuf:"bytes,2,opt,name=scale"` -} - -// CustomResourceSubresourceStatus defines how to serve the status subresource for CustomResources. -// Status is represented by the `.status` JSON path inside of a CustomResource. When set, -// * exposes a /status subresource for the custom resource -// * PUT requests to the /status subresource take a custom resource object, and ignore changes to anything except the status stanza -// * PUT/POST/PATCH requests to the custom resource ignore changes to the status stanza -type CustomResourceSubresourceStatus struct{} - -// CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. -type CustomResourceSubresourceScale struct { - // specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. - // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under `.spec`. - // If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET. - SpecReplicasPath string `json:"specReplicasPath" protobuf:"bytes,1,name=specReplicasPath"` - // statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. - // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under `.status`. - // If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource - // will default to 0. - StatusReplicasPath string `json:"statusReplicasPath" protobuf:"bytes,2,opt,name=statusReplicasPath"` - // labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. - // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under `.status` or `.spec`. - // Must be set to work with HorizontalPodAutoscaler. - // The field pointed by this JSON path must be a string field (not a complex selector struct) - // which contains a serialized label selector in string form. - // More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource - // If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` - // subresource will default to the empty string. - // +optional - LabelSelectorPath *string `json:"labelSelectorPath,omitempty" protobuf:"bytes,3,opt,name=labelSelectorPath"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ConversionReview describes a conversion request/response. -type ConversionReview struct { - metav1.TypeMeta `json:",inline"` - // request describes the attributes for the conversion request. 
- // +optional - Request *ConversionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"` - // response describes the attributes for the conversion response. - // +optional - Response *ConversionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"` -} - -// ConversionRequest describes the conversion request parameters. -type ConversionRequest struct { - // uid is an identifier for the individual request/response. It allows distinguishing instances of requests which are - // otherwise identical (parallel requests, etc). - // The UID is meant to track the round trip (request/response) between the Kubernetes API server and the webhook, not the user request. - // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. - UID types.UID `json:"uid" protobuf:"bytes,1,name=uid"` - // desiredAPIVersion is the version to convert given objects to. e.g. "myapi.example.com/v1" - DesiredAPIVersion string `json:"desiredAPIVersion" protobuf:"bytes,2,name=desiredAPIVersion"` - // objects is the list of custom resource objects to be converted. - Objects []runtime.RawExtension `json:"objects" protobuf:"bytes,3,rep,name=objects"` -} - -// ConversionResponse describes a conversion response. -type ConversionResponse struct { - // uid is an identifier for the individual request/response. - // This should be copied over from the corresponding `request.uid`. - UID types.UID `json:"uid" protobuf:"bytes,1,name=uid"` - // convertedObjects is the list of converted version of `request.objects` if the `result` is successful, otherwise empty. - // The webhook is expected to set `apiVersion` of these objects to the `request.desiredAPIVersion`. The list - // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). - // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. - ConvertedObjects []runtime.RawExtension `json:"convertedObjects" protobuf:"bytes,2,rep,name=convertedObjects"` - // result contains the result of conversion with extra details if the conversion failed. `result.status` determines if - // the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the - // conversion. A successful conversion must set `result.status` to `Success`. A failed conversion must set - // `result.status` to `Failure` and provide more details in `result.message` and return http status 200. The `result.message` - // will be used to construct an error message for the end user. - Result metav1.Status `json:"result" protobuf:"bytes,3,name=result"` -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go deleted file mode 100644 index 277fd7a12..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go +++ /dev/null @@ -1,331 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/). -type JSONSchemaProps struct { - ID string `json:"id,omitempty" protobuf:"bytes,1,opt,name=id"` - Schema JSONSchemaURL `json:"$schema,omitempty" protobuf:"bytes,2,opt,name=schema"` - Ref *string `json:"$ref,omitempty" protobuf:"bytes,3,opt,name=ref"` - Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - Type string `json:"type,omitempty" protobuf:"bytes,5,opt,name=type"` - - // format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated: - // - // - bsonobjectid: a bson object ID, i.e. a 24 characters hex string - // - uri: an URI as parsed by Golang net/url.ParseRequestURI - // - email: an email address as parsed by Golang net/mail.ParseAddress - // - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. - // - ipv4: an IPv4 IP as parsed by Golang net.ParseIP - // - ipv6: an IPv6 IP as parsed by Golang net.ParseIP - // - cidr: a CIDR as parsed by Golang net.ParseCIDR - // - mac: a MAC address as parsed by Golang net.ParseMAC - // - uuid: an UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - // - uuid3: an UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - // - uuid4: an UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - // - uuid5: an UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - // - isbn: an ISBN10 or ISBN13 number string like "0321751043" or "978-0321751041" - // - isbn10: an ISBN10 number string like "0321751043" - // - isbn13: an ISBN13 number string like "978-0321751041" - // - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$ with any non digit characters mixed in - // - ssn: a U.S. social security number following the regex ^\\d{3}[- ]?\\d{2}[- ]?\\d{4}$ - // - hexcolor: an hexadecimal color code like "#FFFFFF: following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$ - // - rgbcolor: an RGB color code like rgb like "rgb(255,255,2559" - // - byte: base64 encoded binary data - // - password: any kind of string - // - date: a date string like "2006-01-02" as defined by full-date in RFC3339 - // - duration: a duration string like "22 ns" as parsed by Golang time.ParseDuration or compatible with Scala duration format - // - datetime: a date time string like "2014-12-15T19:30:20.000Z" as defined by date-time in RFC3339. - Format string `json:"format,omitempty" protobuf:"bytes,6,opt,name=format"` - - Title string `json:"title,omitempty" protobuf:"bytes,7,opt,name=title"` - // default is a default value for undefined object fields. 
- // Defaulting is a beta feature under the CustomResourceDefaulting feature gate. - // Defaulting requires spec.preserveUnknownFields to be false. - Default *JSON `json:"default,omitempty" protobuf:"bytes,8,opt,name=default"` - Maximum *float64 `json:"maximum,omitempty" protobuf:"bytes,9,opt,name=maximum"` - ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty" protobuf:"bytes,10,opt,name=exclusiveMaximum"` - Minimum *float64 `json:"minimum,omitempty" protobuf:"bytes,11,opt,name=minimum"` - ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty" protobuf:"bytes,12,opt,name=exclusiveMinimum"` - MaxLength *int64 `json:"maxLength,omitempty" protobuf:"bytes,13,opt,name=maxLength"` - MinLength *int64 `json:"minLength,omitempty" protobuf:"bytes,14,opt,name=minLength"` - Pattern string `json:"pattern,omitempty" protobuf:"bytes,15,opt,name=pattern"` - MaxItems *int64 `json:"maxItems,omitempty" protobuf:"bytes,16,opt,name=maxItems"` - MinItems *int64 `json:"minItems,omitempty" protobuf:"bytes,17,opt,name=minItems"` - UniqueItems bool `json:"uniqueItems,omitempty" protobuf:"bytes,18,opt,name=uniqueItems"` - MultipleOf *float64 `json:"multipleOf,omitempty" protobuf:"bytes,19,opt,name=multipleOf"` - Enum []JSON `json:"enum,omitempty" protobuf:"bytes,20,rep,name=enum"` - MaxProperties *int64 `json:"maxProperties,omitempty" protobuf:"bytes,21,opt,name=maxProperties"` - MinProperties *int64 `json:"minProperties,omitempty" protobuf:"bytes,22,opt,name=minProperties"` - Required []string `json:"required,omitempty" protobuf:"bytes,23,rep,name=required"` - Items *JSONSchemaPropsOrArray `json:"items,omitempty" protobuf:"bytes,24,opt,name=items"` - AllOf []JSONSchemaProps `json:"allOf,omitempty" protobuf:"bytes,25,rep,name=allOf"` - OneOf []JSONSchemaProps `json:"oneOf,omitempty" protobuf:"bytes,26,rep,name=oneOf"` - AnyOf []JSONSchemaProps `json:"anyOf,omitempty" protobuf:"bytes,27,rep,name=anyOf"` - Not *JSONSchemaProps `json:"not,omitempty" protobuf:"bytes,28,opt,name=not"` - Properties map[string]JSONSchemaProps `json:"properties,omitempty" protobuf:"bytes,29,rep,name=properties"` - AdditionalProperties *JSONSchemaPropsOrBool `json:"additionalProperties,omitempty" protobuf:"bytes,30,opt,name=additionalProperties"` - PatternProperties map[string]JSONSchemaProps `json:"patternProperties,omitempty" protobuf:"bytes,31,rep,name=patternProperties"` - Dependencies JSONSchemaDependencies `json:"dependencies,omitempty" protobuf:"bytes,32,opt,name=dependencies"` - AdditionalItems *JSONSchemaPropsOrBool `json:"additionalItems,omitempty" protobuf:"bytes,33,opt,name=additionalItems"` - Definitions JSONSchemaDefinitions `json:"definitions,omitempty" protobuf:"bytes,34,opt,name=definitions"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty" protobuf:"bytes,35,opt,name=externalDocs"` - Example *JSON `json:"example,omitempty" protobuf:"bytes,36,opt,name=example"` - Nullable bool `json:"nullable,omitempty" protobuf:"bytes,37,opt,name=nullable"` - - // x-kubernetes-preserve-unknown-fields stops the API server - // decoding step from pruning fields which are not specified - // in the validation schema. This affects fields recursively, - // but switches back to normal pruning behaviour if nested - // properties or additionalProperties are specified in the schema. - // This can either be true or undefined. False is forbidden. 
- XPreserveUnknownFields *bool `json:"x-kubernetes-preserve-unknown-fields,omitempty" protobuf:"bytes,38,opt,name=xKubernetesPreserveUnknownFields"` - - // x-kubernetes-embedded-resource defines that the value is an - // embedded Kubernetes runtime.Object, with TypeMeta and - // ObjectMeta. The type must be object. It is allowed to further - // restrict the embedded object. kind, apiVersion and metadata - // are validated automatically. x-kubernetes-preserve-unknown-fields - // is allowed to be true, but does not have to be if the object - // is fully specified (up to kind, apiVersion, metadata). - XEmbeddedResource bool `json:"x-kubernetes-embedded-resource,omitempty" protobuf:"bytes,39,opt,name=xKubernetesEmbeddedResource"` - - // x-kubernetes-int-or-string specifies that this value is - // either an integer or a string. If this is true, an empty - // type is allowed and type as child of anyOf is permitted - // if following one of the following patterns: - // - // 1) anyOf: - // - type: integer - // - type: string - // 2) allOf: - // - anyOf: - // - type: integer - // - type: string - // - ... zero or more - XIntOrString bool `json:"x-kubernetes-int-or-string,omitempty" protobuf:"bytes,40,opt,name=xKubernetesIntOrString"` - - // x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used - // as the index of the map. - // - // This tag MUST only be used on lists that have the "x-kubernetes-list-type" - // extension set to "map". Also, the values specified for this attribute must - // be a scalar typed field of the child structure (no nesting is supported). - // - // The properties specified must either be required or have a default value, - // to ensure those properties are present for all list items. - // - // +optional - XListMapKeys []string `json:"x-kubernetes-list-map-keys,omitempty" protobuf:"bytes,41,rep,name=xKubernetesListMapKeys"` - - // x-kubernetes-list-type annotates an array to further describe its topology. - // This extension must only be used on lists and may have 3 possible values: - // - // 1) `atomic`: the list is treated as a single entity, like a scalar. - // Atomic lists will be entirely replaced when updated. This extension - // may be used on any type of list (struct, scalar, ...). - // 2) `set`: - // Sets are lists that must not have multiple items with the same value. Each - // value must be a scalar, an object with x-kubernetes-map-type `atomic` or an - // array with x-kubernetes-list-type `atomic`. - // 3) `map`: - // These lists are like maps in that their elements have a non-index key - // used to identify them. Order is preserved upon merge. The map tag - // must only be used on a list with elements of type object. - // Defaults to atomic for arrays. - // +optional - XListType *string `json:"x-kubernetes-list-type,omitempty" protobuf:"bytes,42,opt,name=xKubernetesListType"` - - // x-kubernetes-map-type annotates an object to further describe its topology. - // This extension must only be used when type is object and may have 2 possible values: - // - // 1) `granular`: - // These maps are actual maps (key-value pairs) and each fields are independent - // from each other (they can each be manipulated by separate actors). This is - // the default behaviour for all maps. - // 2) `atomic`: the list is treated as a single entity, like a scalar. - // Atomic maps will be entirely replaced when updated. 
- // +optional - XMapType *string `json:"x-kubernetes-map-type,omitempty" protobuf:"bytes,43,opt,name=xKubernetesMapType"` - - // x-kubernetes-validations describes a list of validation rules written in the CEL expression language. - // This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled. - // +patchMergeKey=rule - // +patchStrategy=merge - // +listType=map - // +listMapKey=rule - XValidations ValidationRules `json:"x-kubernetes-validations,omitempty" patchStrategy:"merge" patchMergeKey:"rule" protobuf:"bytes,44,rep,name=xKubernetesValidations"` -} - -// ValidationRules describes a list of validation rules written in the CEL expression language. -type ValidationRules []ValidationRule - -// ValidationRule describes a validation rule written in the CEL expression language. -type ValidationRule struct { - // Rule represents the expression which will be evaluated by CEL. - // ref: https://github.com/google/cel-spec - // The Rule is scoped to the location of the x-kubernetes-validations extension in the schema. - // The `self` variable in the CEL expression is bound to the scoped value. - // Example: - // - Rule scoped to the root of a resource with a status subresource: {"rule": "self.status.actual <= self.spec.maxDesired"} - // - // If the Rule is scoped to an object with properties, the accessible properties of the object are field selectable - // via `self.field` and field presence can be checked via `has(self.field)`. Null valued fields are treated as - // absent fields in CEL expressions. - // If the Rule is scoped to an object with additionalProperties (i.e. a map) the value of the map - // are accessible via `self[mapKey]`, map containment can be checked via `mapKey in self` and all entries of the map - // are accessible via CEL macros and functions such as `self.all(...)`. - // If the Rule is scoped to an array, the elements of the array are accessible via `self[i]` and also by macros and - // functions. - // If the Rule is scoped to a scalar, `self` is bound to the scalar value. - // Examples: - // - Rule scoped to a map of objects: {"rule": "self.components['Widget'].priority < 10"} - // - Rule scoped to a list of integers: {"rule": "self.values.all(value, value >= 0 && value < 100)"} - // - Rule scoped to a string value: {"rule": "self.startsWith('kube')"} - // - // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the - // object and from any x-kubernetes-embedded-resource annotated objects. No other metadata properties are accessible. - // - // Unknown data preserved in custom resources via x-kubernetes-preserve-unknown-fields is not accessible in CEL - // expressions. This includes: - // - Unknown field values that are preserved by object schemas with x-kubernetes-preserve-unknown-fields. - // - Object properties where the property schema is of an "unknown type". An "unknown type" is recursively defined as: - // - A schema with no type and x-kubernetes-preserve-unknown-fields set to true - // - An array where the items schema is of an "unknown type" - // - An object where the additionalProperties schema is of an "unknown type" - // - // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. - // Accessible property names are escaped according to the following rules when accessed in the expression: - // - '__' escapes to '__underscores__' - // - '.' 
escapes to '__dot__' - // - '-' escapes to '__dash__' - // - '/' escapes to '__slash__' - // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: - // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if", - // "import", "let", "loop", "package", "namespace", "return". - // Examples: - // - Rule accessing a property named "namespace": {"rule": "self.__namespace__ > 0"} - // - Rule accessing a property named "x-prop": {"rule": "self.x__dash__prop > 0"} - // - Rule accessing a property named "redact__d": {"rule": "self.redact__underscores__d > 0"} - // - // Equality on arrays with x-kubernetes-list-type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. - // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: - // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and - // non-intersecting elements in `Y` are appended, retaining their partial order. - // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values - // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with - // non-intersecting keys are appended, retaining their partial order. - Rule string `json:"rule" protobuf:"bytes,1,opt,name=rule"` - // Message represents the message displayed when validation fails. The message is required if the Rule contains - // line breaks. The message must not contain line breaks. - // If unset, the message is "failed rule: {Rule}". - // e.g. "must be a URL with the host matching spec.host" - Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` -} - -// JSON represents any valid JSON value. -// These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil. -type JSON struct { - Raw []byte `protobuf:"bytes,1,opt,name=raw"` -} - -// OpenAPISchemaType is used by the kube-openapi generator when constructing -// the OpenAPI spec of this type. -// -// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators -func (_ JSON) OpenAPISchemaType() []string { - // TODO: return actual types when anyOf is supported - return nil -} - -// OpenAPISchemaFormat is used by the kube-openapi generator when constructing -// the OpenAPI spec of this type. -func (_ JSON) OpenAPISchemaFormat() string { return "" } - -// JSONSchemaURL represents a schema url. -type JSONSchemaURL string - -// JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps -// or an array of JSONSchemaProps. Mainly here for serialization purposes. -type JSONSchemaPropsOrArray struct { - Schema *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema"` - JSONSchemas []JSONSchemaProps `protobuf:"bytes,2,rep,name=jSONSchemas"` -} - -// OpenAPISchemaType is used by the kube-openapi generator when constructing -// the OpenAPI spec of this type. -// -// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators -func (_ JSONSchemaPropsOrArray) OpenAPISchemaType() []string { - // TODO: return actual types when anyOf is supported - return nil -} - -// OpenAPISchemaFormat is used by the kube-openapi generator when constructing -// the OpenAPI spec of this type. -func (_ JSONSchemaPropsOrArray) OpenAPISchemaFormat() string { return "" } - -// JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. -// Defaults to true for the boolean property. 
-type JSONSchemaPropsOrBool struct { - Allows bool `protobuf:"varint,1,opt,name=allows"` - Schema *JSONSchemaProps `protobuf:"bytes,2,opt,name=schema"` -} - -// OpenAPISchemaType is used by the kube-openapi generator when constructing -// the OpenAPI spec of this type. -// -// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators -func (_ JSONSchemaPropsOrBool) OpenAPISchemaType() []string { - // TODO: return actual types when anyOf is supported - return nil -} - -// OpenAPISchemaFormat is used by the kube-openapi generator when constructing -// the OpenAPI spec of this type. -func (_ JSONSchemaPropsOrBool) OpenAPISchemaFormat() string { return "" } - -// JSONSchemaDependencies represent a dependencies property. -type JSONSchemaDependencies map[string]JSONSchemaPropsOrStringArray - -// JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array. -type JSONSchemaPropsOrStringArray struct { - Schema *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema"` - Property []string `protobuf:"bytes,2,rep,name=property"` -} - -// OpenAPISchemaType is used by the kube-openapi generator when constructing -// the OpenAPI spec of this type. -// -// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators -func (_ JSONSchemaPropsOrStringArray) OpenAPISchemaType() []string { - // TODO: return actual types when anyOf is supported - return nil -} - -// OpenAPISchemaFormat is used by the kube-openapi generator when constructing -// the OpenAPI spec of this type. -func (_ JSONSchemaPropsOrStringArray) OpenAPISchemaFormat() string { return "" } - -// JSONSchemaDefinitions contains the models explicitly defined in this spec. -type JSONSchemaDefinitions map[string]JSONSchemaProps - -// ExternalDocumentation allows referencing an external resource for extended documentation. -type ExternalDocumentation struct { - Description string `json:"description,omitempty" protobuf:"bytes,1,opt,name=description"` - URL string `json:"url,omitempty" protobuf:"bytes,2,opt,name=url"` -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go deleted file mode 100644 index 95a58529b..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go +++ /dev/null @@ -1,1318 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. - -package v1 - -import ( - unsafe "unsafe" - - apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. 
-// Public to allow building arbitrary schemes. -func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*CustomResourceColumnDefinition)(nil), (*apiextensions.CustomResourceColumnDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(a.(*CustomResourceColumnDefinition), b.(*apiextensions.CustomResourceColumnDefinition), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceColumnDefinition)(nil), (*CustomResourceColumnDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceColumnDefinition_To_v1_CustomResourceColumnDefinition(a.(*apiextensions.CustomResourceColumnDefinition), b.(*CustomResourceColumnDefinition), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*CustomResourceDefinition)(nil), (*apiextensions.CustomResourceDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(a.(*CustomResourceDefinition), b.(*apiextensions.CustomResourceDefinition), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinition)(nil), (*CustomResourceDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition(a.(*apiextensions.CustomResourceDefinition), b.(*CustomResourceDefinition), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionCondition)(nil), (*apiextensions.CustomResourceDefinitionCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(a.(*CustomResourceDefinitionCondition), b.(*apiextensions.CustomResourceDefinitionCondition), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionCondition)(nil), (*CustomResourceDefinitionCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceDefinitionCondition_To_v1_CustomResourceDefinitionCondition(a.(*apiextensions.CustomResourceDefinitionCondition), b.(*CustomResourceDefinitionCondition), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionList)(nil), (*apiextensions.CustomResourceDefinitionList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(a.(*CustomResourceDefinitionList), b.(*apiextensions.CustomResourceDefinitionList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionList)(nil), (*CustomResourceDefinitionList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceDefinitionList_To_v1_CustomResourceDefinitionList(a.(*apiextensions.CustomResourceDefinitionList), b.(*CustomResourceDefinitionList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionNames)(nil), (*apiextensions.CustomResourceDefinitionNames)(nil), func(a, b 
interface{}, scope conversion.Scope) error { - return Convert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(a.(*CustomResourceDefinitionNames), b.(*apiextensions.CustomResourceDefinitionNames), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionNames)(nil), (*CustomResourceDefinitionNames)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames(a.(*apiextensions.CustomResourceDefinitionNames), b.(*CustomResourceDefinitionNames), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionStatus)(nil), (*apiextensions.CustomResourceDefinitionStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(a.(*CustomResourceDefinitionStatus), b.(*apiextensions.CustomResourceDefinitionStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionStatus)(nil), (*CustomResourceDefinitionStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceDefinitionStatus_To_v1_CustomResourceDefinitionStatus(a.(*apiextensions.CustomResourceDefinitionStatus), b.(*CustomResourceDefinitionStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionVersion)(nil), (*apiextensions.CustomResourceDefinitionVersion)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(a.(*CustomResourceDefinitionVersion), b.(*apiextensions.CustomResourceDefinitionVersion), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionVersion)(nil), (*CustomResourceDefinitionVersion)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResourceDefinitionVersion(a.(*apiextensions.CustomResourceDefinitionVersion), b.(*CustomResourceDefinitionVersion), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*CustomResourceSubresourceScale)(nil), (*apiextensions.CustomResourceSubresourceScale)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(a.(*CustomResourceSubresourceScale), b.(*apiextensions.CustomResourceSubresourceScale), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceSubresourceScale)(nil), (*CustomResourceSubresourceScale)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceSubresourceScale_To_v1_CustomResourceSubresourceScale(a.(*apiextensions.CustomResourceSubresourceScale), b.(*CustomResourceSubresourceScale), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*CustomResourceSubresourceStatus)(nil), (*apiextensions.CustomResourceSubresourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(a.(*CustomResourceSubresourceStatus), 
b.(*apiextensions.CustomResourceSubresourceStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceSubresourceStatus)(nil), (*CustomResourceSubresourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceSubresourceStatus_To_v1_CustomResourceSubresourceStatus(a.(*apiextensions.CustomResourceSubresourceStatus), b.(*CustomResourceSubresourceStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*CustomResourceSubresources)(nil), (*apiextensions.CustomResourceSubresources)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(a.(*CustomResourceSubresources), b.(*apiextensions.CustomResourceSubresources), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceSubresources)(nil), (*CustomResourceSubresources)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources(a.(*apiextensions.CustomResourceSubresources), b.(*CustomResourceSubresources), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*CustomResourceValidation)(nil), (*apiextensions.CustomResourceValidation)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(a.(*CustomResourceValidation), b.(*apiextensions.CustomResourceValidation), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceValidation)(nil), (*CustomResourceValidation)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation(a.(*apiextensions.CustomResourceValidation), b.(*CustomResourceValidation), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*ExternalDocumentation)(nil), (*apiextensions.ExternalDocumentation)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(a.(*ExternalDocumentation), b.(*apiextensions.ExternalDocumentation), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.ExternalDocumentation)(nil), (*ExternalDocumentation)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_ExternalDocumentation_To_v1_ExternalDocumentation(a.(*apiextensions.ExternalDocumentation), b.(*ExternalDocumentation), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*JSONSchemaProps)(nil), (*apiextensions.JSONSchemaProps)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(a.(*JSONSchemaProps), b.(*apiextensions.JSONSchemaProps), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*JSONSchemaPropsOrArray)(nil), (*apiextensions.JSONSchemaPropsOrArray)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(a.(*JSONSchemaPropsOrArray), b.(*apiextensions.JSONSchemaPropsOrArray), scope) - }); err != nil { - return err - } - if err := 
s.AddGeneratedConversionFunc((*apiextensions.JSONSchemaPropsOrArray)(nil), (*JSONSchemaPropsOrArray)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_JSONSchemaPropsOrArray_To_v1_JSONSchemaPropsOrArray(a.(*apiextensions.JSONSchemaPropsOrArray), b.(*JSONSchemaPropsOrArray), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*JSONSchemaPropsOrBool)(nil), (*apiextensions.JSONSchemaPropsOrBool)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(a.(*JSONSchemaPropsOrBool), b.(*apiextensions.JSONSchemaPropsOrBool), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.JSONSchemaPropsOrBool)(nil), (*JSONSchemaPropsOrBool)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool(a.(*apiextensions.JSONSchemaPropsOrBool), b.(*JSONSchemaPropsOrBool), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*JSONSchemaPropsOrStringArray)(nil), (*apiextensions.JSONSchemaPropsOrStringArray)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(a.(*JSONSchemaPropsOrStringArray), b.(*apiextensions.JSONSchemaPropsOrStringArray), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.JSONSchemaPropsOrStringArray)(nil), (*JSONSchemaPropsOrStringArray)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray(a.(*apiextensions.JSONSchemaPropsOrStringArray), b.(*JSONSchemaPropsOrStringArray), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*ServiceReference)(nil), (*apiextensions.ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ServiceReference_To_apiextensions_ServiceReference(a.(*ServiceReference), b.(*apiextensions.ServiceReference), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.ServiceReference)(nil), (*ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_ServiceReference_To_v1_ServiceReference(a.(*apiextensions.ServiceReference), b.(*ServiceReference), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*ValidationRule)(nil), (*apiextensions.ValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ValidationRule_To_apiextensions_ValidationRule(a.(*ValidationRule), b.(*apiextensions.ValidationRule), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.ValidationRule)(nil), (*ValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_ValidationRule_To_v1_ValidationRule(a.(*apiextensions.ValidationRule), b.(*ValidationRule), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*WebhookClientConfig)(nil), (*apiextensions.WebhookClientConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(a.(*WebhookClientConfig), b.(*apiextensions.WebhookClientConfig), scope) - }); 
err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.WebhookClientConfig)(nil), (*WebhookClientConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_WebhookClientConfig_To_v1_WebhookClientConfig(a.(*apiextensions.WebhookClientConfig), b.(*WebhookClientConfig), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*apiextensions.CustomResourceConversion)(nil), (*CustomResourceConversion)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceConversion_To_v1_CustomResourceConversion(a.(*apiextensions.CustomResourceConversion), b.(*CustomResourceConversion), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*apiextensions.CustomResourceDefinitionSpec)(nil), (*CustomResourceDefinitionSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefinitionSpec(a.(*apiextensions.CustomResourceDefinitionSpec), b.(*CustomResourceDefinitionSpec), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*apiextensions.JSONSchemaProps)(nil), (*JSONSchemaProps)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(a.(*apiextensions.JSONSchemaProps), b.(*JSONSchemaProps), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*apiextensions.JSON)(nil), (*JSON)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_JSON_To_v1_JSON(a.(*apiextensions.JSON), b.(*JSON), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*apiextensions.ValidationRules)(nil), (*ValidationRules)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_ValidationRules_To_v1_ValidationRules(a.(*apiextensions.ValidationRules), b.(*ValidationRules), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*CustomResourceConversion)(nil), (*apiextensions.CustomResourceConversion)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(a.(*CustomResourceConversion), b.(*apiextensions.CustomResourceConversion), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*CustomResourceDefinitionSpec)(nil), (*apiextensions.CustomResourceDefinitionSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(a.(*CustomResourceDefinitionSpec), b.(*apiextensions.CustomResourceDefinitionSpec), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*JSON)(nil), (*apiextensions.JSON)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_JSON_To_apiextensions_JSON(a.(*JSON), b.(*apiextensions.JSON), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in *CustomResourceColumnDefinition, out *apiextensions.CustomResourceColumnDefinition, s conversion.Scope) error { - out.Name = in.Name - out.Type = in.Type - out.Format = in.Format - out.Description = in.Description - out.Priority = in.Priority - out.JSONPath = in.JSONPath - return nil -} - -// 
Convert_v1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition is an autogenerated conversion function. -func Convert_v1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in *CustomResourceColumnDefinition, out *apiextensions.CustomResourceColumnDefinition, s conversion.Scope) error { - return autoConvert_v1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in, out, s) -} - -func autoConvert_apiextensions_CustomResourceColumnDefinition_To_v1_CustomResourceColumnDefinition(in *apiextensions.CustomResourceColumnDefinition, out *CustomResourceColumnDefinition, s conversion.Scope) error { - out.Name = in.Name - out.Type = in.Type - out.Format = in.Format - out.Description = in.Description - out.Priority = in.Priority - out.JSONPath = in.JSONPath - return nil -} - -// Convert_apiextensions_CustomResourceColumnDefinition_To_v1_CustomResourceColumnDefinition is an autogenerated conversion function. -func Convert_apiextensions_CustomResourceColumnDefinition_To_v1_CustomResourceColumnDefinition(in *apiextensions.CustomResourceColumnDefinition, out *CustomResourceColumnDefinition, s conversion.Scope) error { - return autoConvert_apiextensions_CustomResourceColumnDefinition_To_v1_CustomResourceColumnDefinition(in, out, s) -} - -func autoConvert_v1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in *CustomResourceConversion, out *apiextensions.CustomResourceConversion, s conversion.Scope) error { - out.Strategy = apiextensions.ConversionStrategyType(in.Strategy) - // WARNING: in.Webhook requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_apiextensions_CustomResourceConversion_To_v1_CustomResourceConversion(in *apiextensions.CustomResourceConversion, out *CustomResourceConversion, s conversion.Scope) error { - out.Strategy = ConversionStrategyType(in.Strategy) - // WARNING: in.WebhookClientConfig requires manual conversion: does not exist in peer-type - // WARNING: in.ConversionReviewVersions requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(in *CustomResourceDefinition, out *apiextensions.CustomResourceDefinition, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition is an autogenerated conversion function. 
-func Convert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(in *CustomResourceDefinition, out *apiextensions.CustomResourceDefinition, s conversion.Scope) error { - return autoConvert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(in, out, s) -} - -func autoConvert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition(in *apiextensions.CustomResourceDefinition, out *CustomResourceDefinition, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefinitionSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_apiextensions_CustomResourceDefinitionStatus_To_v1_CustomResourceDefinitionStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition is an autogenerated conversion function. -func Convert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition(in *apiextensions.CustomResourceDefinition, out *CustomResourceDefinition, s conversion.Scope) error { - return autoConvert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition(in, out, s) -} - -func autoConvert_v1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(in *CustomResourceDefinitionCondition, out *apiextensions.CustomResourceDefinitionCondition, s conversion.Scope) error { - out.Type = apiextensions.CustomResourceDefinitionConditionType(in.Type) - out.Status = apiextensions.ConditionStatus(in.Status) - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_v1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition is an autogenerated conversion function. -func Convert_v1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(in *CustomResourceDefinitionCondition, out *apiextensions.CustomResourceDefinitionCondition, s conversion.Scope) error { - return autoConvert_v1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(in, out, s) -} - -func autoConvert_apiextensions_CustomResourceDefinitionCondition_To_v1_CustomResourceDefinitionCondition(in *apiextensions.CustomResourceDefinitionCondition, out *CustomResourceDefinitionCondition, s conversion.Scope) error { - out.Type = CustomResourceDefinitionConditionType(in.Type) - out.Status = ConditionStatus(in.Status) - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_apiextensions_CustomResourceDefinitionCondition_To_v1_CustomResourceDefinitionCondition is an autogenerated conversion function. 
-func Convert_apiextensions_CustomResourceDefinitionCondition_To_v1_CustomResourceDefinitionCondition(in *apiextensions.CustomResourceDefinitionCondition, out *CustomResourceDefinitionCondition, s conversion.Scope) error { - return autoConvert_apiextensions_CustomResourceDefinitionCondition_To_v1_CustomResourceDefinitionCondition(in, out, s) -} - -func autoConvert_v1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(in *CustomResourceDefinitionList, out *apiextensions.CustomResourceDefinitionList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]apiextensions.CustomResourceDefinition, len(*in)) - for i := range *in { - if err := Convert_v1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList is an autogenerated conversion function. -func Convert_v1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(in *CustomResourceDefinitionList, out *apiextensions.CustomResourceDefinitionList, s conversion.Scope) error { - return autoConvert_v1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(in, out, s) -} - -func autoConvert_apiextensions_CustomResourceDefinitionList_To_v1_CustomResourceDefinitionList(in *apiextensions.CustomResourceDefinitionList, out *CustomResourceDefinitionList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CustomResourceDefinition, len(*in)) - for i := range *in { - if err := Convert_apiextensions_CustomResourceDefinition_To_v1_CustomResourceDefinition(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_apiextensions_CustomResourceDefinitionList_To_v1_CustomResourceDefinitionList is an autogenerated conversion function. -func Convert_apiextensions_CustomResourceDefinitionList_To_v1_CustomResourceDefinitionList(in *apiextensions.CustomResourceDefinitionList, out *CustomResourceDefinitionList, s conversion.Scope) error { - return autoConvert_apiextensions_CustomResourceDefinitionList_To_v1_CustomResourceDefinitionList(in, out, s) -} - -func autoConvert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(in *CustomResourceDefinitionNames, out *apiextensions.CustomResourceDefinitionNames, s conversion.Scope) error { - out.Plural = in.Plural - out.Singular = in.Singular - out.ShortNames = *(*[]string)(unsafe.Pointer(&in.ShortNames)) - out.Kind = in.Kind - out.ListKind = in.ListKind - out.Categories = *(*[]string)(unsafe.Pointer(&in.Categories)) - return nil -} - -// Convert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames is an autogenerated conversion function. 
-func Convert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(in *CustomResourceDefinitionNames, out *apiextensions.CustomResourceDefinitionNames, s conversion.Scope) error { - return autoConvert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(in, out, s) -} - -func autoConvert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames(in *apiextensions.CustomResourceDefinitionNames, out *CustomResourceDefinitionNames, s conversion.Scope) error { - out.Plural = in.Plural - out.Singular = in.Singular - out.ShortNames = *(*[]string)(unsafe.Pointer(&in.ShortNames)) - out.Kind = in.Kind - out.ListKind = in.ListKind - out.Categories = *(*[]string)(unsafe.Pointer(&in.Categories)) - return nil -} - -// Convert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames is an autogenerated conversion function. -func Convert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames(in *apiextensions.CustomResourceDefinitionNames, out *CustomResourceDefinitionNames, s conversion.Scope) error { - return autoConvert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames(in, out, s) -} - -func autoConvert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in *CustomResourceDefinitionSpec, out *apiextensions.CustomResourceDefinitionSpec, s conversion.Scope) error { - out.Group = in.Group - if err := Convert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(&in.Names, &out.Names, s); err != nil { - return err - } - out.Scope = apiextensions.ResourceScope(in.Scope) - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = make([]apiextensions.CustomResourceDefinitionVersion, len(*in)) - for i := range *in { - if err := Convert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Versions = nil - } - if in.Conversion != nil { - in, out := &in.Conversion, &out.Conversion - *out = new(apiextensions.CustomResourceConversion) - if err := Convert_v1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(*in, *out, s); err != nil { - return err - } - } else { - out.Conversion = nil - } - if err := metav1.Convert_bool_To_Pointer_bool(&in.PreserveUnknownFields, &out.PreserveUnknownFields, s); err != nil { - return err - } - return nil -} - -func autoConvert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefinitionSpec(in *apiextensions.CustomResourceDefinitionSpec, out *CustomResourceDefinitionSpec, s conversion.Scope) error { - out.Group = in.Group - // WARNING: in.Version requires manual conversion: does not exist in peer-type - if err := Convert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames(&in.Names, &out.Names, s); err != nil { - return err - } - out.Scope = ResourceScope(in.Scope) - // WARNING: in.Validation requires manual conversion: does not exist in peer-type - // WARNING: in.Subresources requires manual conversion: does not exist in peer-type - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = make([]CustomResourceDefinitionVersion, len(*in)) - for i := range *in { - if err := Convert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResourceDefinitionVersion(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Versions = nil - } - // WARNING: 
in.AdditionalPrinterColumns requires manual conversion: does not exist in peer-type - if in.Conversion != nil { - in, out := &in.Conversion, &out.Conversion - *out = new(CustomResourceConversion) - if err := Convert_apiextensions_CustomResourceConversion_To_v1_CustomResourceConversion(*in, *out, s); err != nil { - return err - } - } else { - out.Conversion = nil - } - if err := metav1.Convert_Pointer_bool_To_bool(&in.PreserveUnknownFields, &out.PreserveUnknownFields, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(in *CustomResourceDefinitionStatus, out *apiextensions.CustomResourceDefinitionStatus, s conversion.Scope) error { - out.Conditions = *(*[]apiextensions.CustomResourceDefinitionCondition)(unsafe.Pointer(&in.Conditions)) - if err := Convert_v1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(&in.AcceptedNames, &out.AcceptedNames, s); err != nil { - return err - } - out.StoredVersions = *(*[]string)(unsafe.Pointer(&in.StoredVersions)) - return nil -} - -// Convert_v1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus is an autogenerated conversion function. -func Convert_v1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(in *CustomResourceDefinitionStatus, out *apiextensions.CustomResourceDefinitionStatus, s conversion.Scope) error { - return autoConvert_v1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(in, out, s) -} - -func autoConvert_apiextensions_CustomResourceDefinitionStatus_To_v1_CustomResourceDefinitionStatus(in *apiextensions.CustomResourceDefinitionStatus, out *CustomResourceDefinitionStatus, s conversion.Scope) error { - out.Conditions = *(*[]CustomResourceDefinitionCondition)(unsafe.Pointer(&in.Conditions)) - if err := Convert_apiextensions_CustomResourceDefinitionNames_To_v1_CustomResourceDefinitionNames(&in.AcceptedNames, &out.AcceptedNames, s); err != nil { - return err - } - out.StoredVersions = *(*[]string)(unsafe.Pointer(&in.StoredVersions)) - return nil -} - -// Convert_apiextensions_CustomResourceDefinitionStatus_To_v1_CustomResourceDefinitionStatus is an autogenerated conversion function. 
-func Convert_apiextensions_CustomResourceDefinitionStatus_To_v1_CustomResourceDefinitionStatus(in *apiextensions.CustomResourceDefinitionStatus, out *CustomResourceDefinitionStatus, s conversion.Scope) error { - return autoConvert_apiextensions_CustomResourceDefinitionStatus_To_v1_CustomResourceDefinitionStatus(in, out, s) -} - -func autoConvert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(in *CustomResourceDefinitionVersion, out *apiextensions.CustomResourceDefinitionVersion, s conversion.Scope) error { - out.Name = in.Name - out.Served = in.Served - out.Storage = in.Storage - out.Deprecated = in.Deprecated - out.DeprecationWarning = (*string)(unsafe.Pointer(in.DeprecationWarning)) - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = new(apiextensions.CustomResourceValidation) - if err := Convert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(*in, *out, s); err != nil { - return err - } - } else { - out.Schema = nil - } - out.Subresources = (*apiextensions.CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) - out.AdditionalPrinterColumns = *(*[]apiextensions.CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) - return nil -} - -// Convert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion is an autogenerated conversion function. -func Convert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(in *CustomResourceDefinitionVersion, out *apiextensions.CustomResourceDefinitionVersion, s conversion.Scope) error { - return autoConvert_v1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(in, out, s) -} - -func autoConvert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResourceDefinitionVersion(in *apiextensions.CustomResourceDefinitionVersion, out *CustomResourceDefinitionVersion, s conversion.Scope) error { - out.Name = in.Name - out.Served = in.Served - out.Storage = in.Storage - out.Deprecated = in.Deprecated - out.DeprecationWarning = (*string)(unsafe.Pointer(in.DeprecationWarning)) - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = new(CustomResourceValidation) - if err := Convert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation(*in, *out, s); err != nil { - return err - } - } else { - out.Schema = nil - } - out.Subresources = (*CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) - out.AdditionalPrinterColumns = *(*[]CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) - return nil -} - -// Convert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResourceDefinitionVersion is an autogenerated conversion function. 
-func Convert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResourceDefinitionVersion(in *apiextensions.CustomResourceDefinitionVersion, out *CustomResourceDefinitionVersion, s conversion.Scope) error { - return autoConvert_apiextensions_CustomResourceDefinitionVersion_To_v1_CustomResourceDefinitionVersion(in, out, s) -} - -func autoConvert_v1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(in *CustomResourceSubresourceScale, out *apiextensions.CustomResourceSubresourceScale, s conversion.Scope) error { - out.SpecReplicasPath = in.SpecReplicasPath - out.StatusReplicasPath = in.StatusReplicasPath - out.LabelSelectorPath = (*string)(unsafe.Pointer(in.LabelSelectorPath)) - return nil -} - -// Convert_v1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale is an autogenerated conversion function. -func Convert_v1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(in *CustomResourceSubresourceScale, out *apiextensions.CustomResourceSubresourceScale, s conversion.Scope) error { - return autoConvert_v1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(in, out, s) -} - -func autoConvert_apiextensions_CustomResourceSubresourceScale_To_v1_CustomResourceSubresourceScale(in *apiextensions.CustomResourceSubresourceScale, out *CustomResourceSubresourceScale, s conversion.Scope) error { - out.SpecReplicasPath = in.SpecReplicasPath - out.StatusReplicasPath = in.StatusReplicasPath - out.LabelSelectorPath = (*string)(unsafe.Pointer(in.LabelSelectorPath)) - return nil -} - -// Convert_apiextensions_CustomResourceSubresourceScale_To_v1_CustomResourceSubresourceScale is an autogenerated conversion function. -func Convert_apiextensions_CustomResourceSubresourceScale_To_v1_CustomResourceSubresourceScale(in *apiextensions.CustomResourceSubresourceScale, out *CustomResourceSubresourceScale, s conversion.Scope) error { - return autoConvert_apiextensions_CustomResourceSubresourceScale_To_v1_CustomResourceSubresourceScale(in, out, s) -} - -func autoConvert_v1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(in *CustomResourceSubresourceStatus, out *apiextensions.CustomResourceSubresourceStatus, s conversion.Scope) error { - return nil -} - -// Convert_v1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus is an autogenerated conversion function. -func Convert_v1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(in *CustomResourceSubresourceStatus, out *apiextensions.CustomResourceSubresourceStatus, s conversion.Scope) error { - return autoConvert_v1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(in, out, s) -} - -func autoConvert_apiextensions_CustomResourceSubresourceStatus_To_v1_CustomResourceSubresourceStatus(in *apiextensions.CustomResourceSubresourceStatus, out *CustomResourceSubresourceStatus, s conversion.Scope) error { - return nil -} - -// Convert_apiextensions_CustomResourceSubresourceStatus_To_v1_CustomResourceSubresourceStatus is an autogenerated conversion function. 
-func Convert_apiextensions_CustomResourceSubresourceStatus_To_v1_CustomResourceSubresourceStatus(in *apiextensions.CustomResourceSubresourceStatus, out *CustomResourceSubresourceStatus, s conversion.Scope) error { - return autoConvert_apiextensions_CustomResourceSubresourceStatus_To_v1_CustomResourceSubresourceStatus(in, out, s) -} - -func autoConvert_v1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(in *CustomResourceSubresources, out *apiextensions.CustomResourceSubresources, s conversion.Scope) error { - out.Status = (*apiextensions.CustomResourceSubresourceStatus)(unsafe.Pointer(in.Status)) - out.Scale = (*apiextensions.CustomResourceSubresourceScale)(unsafe.Pointer(in.Scale)) - return nil -} - -// Convert_v1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources is an autogenerated conversion function. -func Convert_v1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(in *CustomResourceSubresources, out *apiextensions.CustomResourceSubresources, s conversion.Scope) error { - return autoConvert_v1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(in, out, s) -} - -func autoConvert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources(in *apiextensions.CustomResourceSubresources, out *CustomResourceSubresources, s conversion.Scope) error { - out.Status = (*CustomResourceSubresourceStatus)(unsafe.Pointer(in.Status)) - out.Scale = (*CustomResourceSubresourceScale)(unsafe.Pointer(in.Scale)) - return nil -} - -// Convert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources is an autogenerated conversion function. -func Convert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources(in *apiextensions.CustomResourceSubresources, out *CustomResourceSubresources, s conversion.Scope) error { - return autoConvert_apiextensions_CustomResourceSubresources_To_v1_CustomResourceSubresources(in, out, s) -} - -func autoConvert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(in *CustomResourceValidation, out *apiextensions.CustomResourceValidation, s conversion.Scope) error { - if in.OpenAPIV3Schema != nil { - in, out := &in.OpenAPIV3Schema, &out.OpenAPIV3Schema - *out = new(apiextensions.JSONSchemaProps) - if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { - return err - } - } else { - out.OpenAPIV3Schema = nil - } - return nil -} - -// Convert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation is an autogenerated conversion function. -func Convert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(in *CustomResourceValidation, out *apiextensions.CustomResourceValidation, s conversion.Scope) error { - return autoConvert_v1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(in, out, s) -} - -func autoConvert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation(in *apiextensions.CustomResourceValidation, out *CustomResourceValidation, s conversion.Scope) error { - if in.OpenAPIV3Schema != nil { - in, out := &in.OpenAPIV3Schema, &out.OpenAPIV3Schema - *out = new(JSONSchemaProps) - if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(*in, *out, s); err != nil { - return err - } - } else { - out.OpenAPIV3Schema = nil - } - return nil -} - -// Convert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation is an autogenerated conversion function. 
-func Convert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation(in *apiextensions.CustomResourceValidation, out *CustomResourceValidation, s conversion.Scope) error { - return autoConvert_apiextensions_CustomResourceValidation_To_v1_CustomResourceValidation(in, out, s) -} - -func autoConvert_v1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(in *ExternalDocumentation, out *apiextensions.ExternalDocumentation, s conversion.Scope) error { - out.Description = in.Description - out.URL = in.URL - return nil -} - -// Convert_v1_ExternalDocumentation_To_apiextensions_ExternalDocumentation is an autogenerated conversion function. -func Convert_v1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(in *ExternalDocumentation, out *apiextensions.ExternalDocumentation, s conversion.Scope) error { - return autoConvert_v1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(in, out, s) -} - -func autoConvert_apiextensions_ExternalDocumentation_To_v1_ExternalDocumentation(in *apiextensions.ExternalDocumentation, out *ExternalDocumentation, s conversion.Scope) error { - out.Description = in.Description - out.URL = in.URL - return nil -} - -// Convert_apiextensions_ExternalDocumentation_To_v1_ExternalDocumentation is an autogenerated conversion function. -func Convert_apiextensions_ExternalDocumentation_To_v1_ExternalDocumentation(in *apiextensions.ExternalDocumentation, out *ExternalDocumentation, s conversion.Scope) error { - return autoConvert_apiextensions_ExternalDocumentation_To_v1_ExternalDocumentation(in, out, s) -} - -func autoConvert_v1_JSON_To_apiextensions_JSON(in *JSON, out *apiextensions.JSON, s conversion.Scope) error { - // WARNING: in.Raw requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_apiextensions_JSON_To_v1_JSON(in *apiextensions.JSON, out *JSON, s conversion.Scope) error { - // FIXME: Type apiextensions.JSON is unsupported. 
- return nil -} - -func autoConvert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in *JSONSchemaProps, out *apiextensions.JSONSchemaProps, s conversion.Scope) error { - out.ID = in.ID - out.Schema = apiextensions.JSONSchemaURL(in.Schema) - out.Ref = (*string)(unsafe.Pointer(in.Ref)) - out.Description = in.Description - out.Type = in.Type - out.Format = in.Format - out.Title = in.Title - if in.Default != nil { - in, out := &in.Default, &out.Default - *out = new(apiextensions.JSON) - if err := Convert_v1_JSON_To_apiextensions_JSON(*in, *out, s); err != nil { - return err - } - } else { - out.Default = nil - } - out.Maximum = (*float64)(unsafe.Pointer(in.Maximum)) - out.ExclusiveMaximum = in.ExclusiveMaximum - out.Minimum = (*float64)(unsafe.Pointer(in.Minimum)) - out.ExclusiveMinimum = in.ExclusiveMinimum - out.MaxLength = (*int64)(unsafe.Pointer(in.MaxLength)) - out.MinLength = (*int64)(unsafe.Pointer(in.MinLength)) - out.Pattern = in.Pattern - out.MaxItems = (*int64)(unsafe.Pointer(in.MaxItems)) - out.MinItems = (*int64)(unsafe.Pointer(in.MinItems)) - out.UniqueItems = in.UniqueItems - out.MultipleOf = (*float64)(unsafe.Pointer(in.MultipleOf)) - if in.Enum != nil { - in, out := &in.Enum, &out.Enum - *out = make([]apiextensions.JSON, len(*in)) - for i := range *in { - if err := Convert_v1_JSON_To_apiextensions_JSON(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Enum = nil - } - out.MaxProperties = (*int64)(unsafe.Pointer(in.MaxProperties)) - out.MinProperties = (*int64)(unsafe.Pointer(in.MinProperties)) - out.Required = *(*[]string)(unsafe.Pointer(&in.Required)) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = new(apiextensions.JSONSchemaPropsOrArray) - if err := Convert_v1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(*in, *out, s); err != nil { - return err - } - } else { - out.Items = nil - } - if in.AllOf != nil { - in, out := &in.AllOf, &out.AllOf - *out = make([]apiextensions.JSONSchemaProps, len(*in)) - for i := range *in { - if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.AllOf = nil - } - if in.OneOf != nil { - in, out := &in.OneOf, &out.OneOf - *out = make([]apiextensions.JSONSchemaProps, len(*in)) - for i := range *in { - if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.OneOf = nil - } - if in.AnyOf != nil { - in, out := &in.AnyOf, &out.AnyOf - *out = make([]apiextensions.JSONSchemaProps, len(*in)) - for i := range *in { - if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.AnyOf = nil - } - if in.Not != nil { - in, out := &in.Not, &out.Not - *out = new(apiextensions.JSONSchemaProps) - if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { - return err - } - } else { - out.Not = nil - } - if in.Properties != nil { - in, out := &in.Properties, &out.Properties - *out = make(map[string]apiextensions.JSONSchemaProps, len(*in)) - for key, val := range *in { - newVal := new(apiextensions.JSONSchemaProps) - if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&val, newVal, s); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Properties = nil - } - if in.AdditionalProperties != nil { - in, out := &in.AdditionalProperties, 
&out.AdditionalProperties - *out = new(apiextensions.JSONSchemaPropsOrBool) - if err := Convert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(*in, *out, s); err != nil { - return err - } - } else { - out.AdditionalProperties = nil - } - if in.PatternProperties != nil { - in, out := &in.PatternProperties, &out.PatternProperties - *out = make(map[string]apiextensions.JSONSchemaProps, len(*in)) - for key, val := range *in { - newVal := new(apiextensions.JSONSchemaProps) - if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&val, newVal, s); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.PatternProperties = nil - } - if in.Dependencies != nil { - in, out := &in.Dependencies, &out.Dependencies - *out = make(apiextensions.JSONSchemaDependencies, len(*in)) - for key, val := range *in { - newVal := new(apiextensions.JSONSchemaPropsOrStringArray) - if err := Convert_v1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(&val, newVal, s); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Dependencies = nil - } - if in.AdditionalItems != nil { - in, out := &in.AdditionalItems, &out.AdditionalItems - *out = new(apiextensions.JSONSchemaPropsOrBool) - if err := Convert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(*in, *out, s); err != nil { - return err - } - } else { - out.AdditionalItems = nil - } - if in.Definitions != nil { - in, out := &in.Definitions, &out.Definitions - *out = make(apiextensions.JSONSchemaDefinitions, len(*in)) - for key, val := range *in { - newVal := new(apiextensions.JSONSchemaProps) - if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&val, newVal, s); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Definitions = nil - } - out.ExternalDocs = (*apiextensions.ExternalDocumentation)(unsafe.Pointer(in.ExternalDocs)) - if in.Example != nil { - in, out := &in.Example, &out.Example - *out = new(apiextensions.JSON) - if err := Convert_v1_JSON_To_apiextensions_JSON(*in, *out, s); err != nil { - return err - } - } else { - out.Example = nil - } - out.Nullable = in.Nullable - out.XPreserveUnknownFields = (*bool)(unsafe.Pointer(in.XPreserveUnknownFields)) - out.XEmbeddedResource = in.XEmbeddedResource - out.XIntOrString = in.XIntOrString - out.XListMapKeys = *(*[]string)(unsafe.Pointer(&in.XListMapKeys)) - out.XListType = (*string)(unsafe.Pointer(in.XListType)) - out.XMapType = (*string)(unsafe.Pointer(in.XMapType)) - out.XValidations = *(*apiextensions.ValidationRules)(unsafe.Pointer(&in.XValidations)) - return nil -} - -// Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps is an autogenerated conversion function. 
-func Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in *JSONSchemaProps, out *apiextensions.JSONSchemaProps, s conversion.Scope) error { - return autoConvert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in, out, s) -} - -func autoConvert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(in *apiextensions.JSONSchemaProps, out *JSONSchemaProps, s conversion.Scope) error { - out.ID = in.ID - out.Schema = JSONSchemaURL(in.Schema) - out.Ref = (*string)(unsafe.Pointer(in.Ref)) - out.Description = in.Description - out.Type = in.Type - out.Nullable = in.Nullable - out.Format = in.Format - out.Title = in.Title - if in.Default != nil { - in, out := &in.Default, &out.Default - *out = new(JSON) - if err := Convert_apiextensions_JSON_To_v1_JSON(*in, *out, s); err != nil { - return err - } - } else { - out.Default = nil - } - out.Maximum = (*float64)(unsafe.Pointer(in.Maximum)) - out.ExclusiveMaximum = in.ExclusiveMaximum - out.Minimum = (*float64)(unsafe.Pointer(in.Minimum)) - out.ExclusiveMinimum = in.ExclusiveMinimum - out.MaxLength = (*int64)(unsafe.Pointer(in.MaxLength)) - out.MinLength = (*int64)(unsafe.Pointer(in.MinLength)) - out.Pattern = in.Pattern - out.MaxItems = (*int64)(unsafe.Pointer(in.MaxItems)) - out.MinItems = (*int64)(unsafe.Pointer(in.MinItems)) - out.UniqueItems = in.UniqueItems - out.MultipleOf = (*float64)(unsafe.Pointer(in.MultipleOf)) - if in.Enum != nil { - in, out := &in.Enum, &out.Enum - *out = make([]JSON, len(*in)) - for i := range *in { - if err := Convert_apiextensions_JSON_To_v1_JSON(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Enum = nil - } - out.MaxProperties = (*int64)(unsafe.Pointer(in.MaxProperties)) - out.MinProperties = (*int64)(unsafe.Pointer(in.MinProperties)) - out.Required = *(*[]string)(unsafe.Pointer(&in.Required)) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = new(JSONSchemaPropsOrArray) - if err := Convert_apiextensions_JSONSchemaPropsOrArray_To_v1_JSONSchemaPropsOrArray(*in, *out, s); err != nil { - return err - } - } else { - out.Items = nil - } - if in.AllOf != nil { - in, out := &in.AllOf, &out.AllOf - *out = make([]JSONSchemaProps, len(*in)) - for i := range *in { - if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.AllOf = nil - } - if in.OneOf != nil { - in, out := &in.OneOf, &out.OneOf - *out = make([]JSONSchemaProps, len(*in)) - for i := range *in { - if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.OneOf = nil - } - if in.AnyOf != nil { - in, out := &in.AnyOf, &out.AnyOf - *out = make([]JSONSchemaProps, len(*in)) - for i := range *in { - if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.AnyOf = nil - } - if in.Not != nil { - in, out := &in.Not, &out.Not - *out = new(JSONSchemaProps) - if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(*in, *out, s); err != nil { - return err - } - } else { - out.Not = nil - } - if in.Properties != nil { - in, out := &in.Properties, &out.Properties - *out = make(map[string]JSONSchemaProps, len(*in)) - for key, val := range *in { - newVal := new(JSONSchemaProps) - if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&val, newVal, s); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - 
out.Properties = nil - } - if in.AdditionalProperties != nil { - in, out := &in.AdditionalProperties, &out.AdditionalProperties - *out = new(JSONSchemaPropsOrBool) - if err := Convert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool(*in, *out, s); err != nil { - return err - } - } else { - out.AdditionalProperties = nil - } - if in.PatternProperties != nil { - in, out := &in.PatternProperties, &out.PatternProperties - *out = make(map[string]JSONSchemaProps, len(*in)) - for key, val := range *in { - newVal := new(JSONSchemaProps) - if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&val, newVal, s); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.PatternProperties = nil - } - if in.Dependencies != nil { - in, out := &in.Dependencies, &out.Dependencies - *out = make(JSONSchemaDependencies, len(*in)) - for key, val := range *in { - newVal := new(JSONSchemaPropsOrStringArray) - if err := Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray(&val, newVal, s); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Dependencies = nil - } - if in.AdditionalItems != nil { - in, out := &in.AdditionalItems, &out.AdditionalItems - *out = new(JSONSchemaPropsOrBool) - if err := Convert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool(*in, *out, s); err != nil { - return err - } - } else { - out.AdditionalItems = nil - } - if in.Definitions != nil { - in, out := &in.Definitions, &out.Definitions - *out = make(JSONSchemaDefinitions, len(*in)) - for key, val := range *in { - newVal := new(JSONSchemaProps) - if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&val, newVal, s); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Definitions = nil - } - out.ExternalDocs = (*ExternalDocumentation)(unsafe.Pointer(in.ExternalDocs)) - if in.Example != nil { - in, out := &in.Example, &out.Example - *out = new(JSON) - if err := Convert_apiextensions_JSON_To_v1_JSON(*in, *out, s); err != nil { - return err - } - } else { - out.Example = nil - } - out.XPreserveUnknownFields = (*bool)(unsafe.Pointer(in.XPreserveUnknownFields)) - out.XEmbeddedResource = in.XEmbeddedResource - out.XIntOrString = in.XIntOrString - out.XListMapKeys = *(*[]string)(unsafe.Pointer(&in.XListMapKeys)) - out.XListType = (*string)(unsafe.Pointer(in.XListType)) - out.XMapType = (*string)(unsafe.Pointer(in.XMapType)) - out.XValidations = *(*ValidationRules)(unsafe.Pointer(&in.XValidations)) - return nil -} - -func autoConvert_v1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(in *JSONSchemaPropsOrArray, out *apiextensions.JSONSchemaPropsOrArray, s conversion.Scope) error { - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = new(apiextensions.JSONSchemaProps) - if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { - return err - } - } else { - out.Schema = nil - } - if in.JSONSchemas != nil { - in, out := &in.JSONSchemas, &out.JSONSchemas - *out = make([]apiextensions.JSONSchemaProps, len(*in)) - for i := range *in { - if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.JSONSchemas = nil - } - return nil -} - -// Convert_v1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray is an autogenerated conversion function. 
-func Convert_v1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(in *JSONSchemaPropsOrArray, out *apiextensions.JSONSchemaPropsOrArray, s conversion.Scope) error { - return autoConvert_v1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(in, out, s) -} - -func autoConvert_apiextensions_JSONSchemaPropsOrArray_To_v1_JSONSchemaPropsOrArray(in *apiextensions.JSONSchemaPropsOrArray, out *JSONSchemaPropsOrArray, s conversion.Scope) error { - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = new(JSONSchemaProps) - if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(*in, *out, s); err != nil { - return err - } - } else { - out.Schema = nil - } - if in.JSONSchemas != nil { - in, out := &in.JSONSchemas, &out.JSONSchemas - *out = make([]JSONSchemaProps, len(*in)) - for i := range *in { - if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.JSONSchemas = nil - } - return nil -} - -// Convert_apiextensions_JSONSchemaPropsOrArray_To_v1_JSONSchemaPropsOrArray is an autogenerated conversion function. -func Convert_apiextensions_JSONSchemaPropsOrArray_To_v1_JSONSchemaPropsOrArray(in *apiextensions.JSONSchemaPropsOrArray, out *JSONSchemaPropsOrArray, s conversion.Scope) error { - return autoConvert_apiextensions_JSONSchemaPropsOrArray_To_v1_JSONSchemaPropsOrArray(in, out, s) -} - -func autoConvert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(in *JSONSchemaPropsOrBool, out *apiextensions.JSONSchemaPropsOrBool, s conversion.Scope) error { - out.Allows = in.Allows - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = new(apiextensions.JSONSchemaProps) - if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { - return err - } - } else { - out.Schema = nil - } - return nil -} - -// Convert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool is an autogenerated conversion function. -func Convert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(in *JSONSchemaPropsOrBool, out *apiextensions.JSONSchemaPropsOrBool, s conversion.Scope) error { - return autoConvert_v1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(in, out, s) -} - -func autoConvert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool(in *apiextensions.JSONSchemaPropsOrBool, out *JSONSchemaPropsOrBool, s conversion.Scope) error { - out.Allows = in.Allows - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = new(JSONSchemaProps) - if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(*in, *out, s); err != nil { - return err - } - } else { - out.Schema = nil - } - return nil -} - -// Convert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool is an autogenerated conversion function. 
-func Convert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool(in *apiextensions.JSONSchemaPropsOrBool, out *JSONSchemaPropsOrBool, s conversion.Scope) error { - return autoConvert_apiextensions_JSONSchemaPropsOrBool_To_v1_JSONSchemaPropsOrBool(in, out, s) -} - -func autoConvert_v1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(in *JSONSchemaPropsOrStringArray, out *apiextensions.JSONSchemaPropsOrStringArray, s conversion.Scope) error { - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = new(apiextensions.JSONSchemaProps) - if err := Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { - return err - } - } else { - out.Schema = nil - } - out.Property = *(*[]string)(unsafe.Pointer(&in.Property)) - return nil -} - -// Convert_v1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray is an autogenerated conversion function. -func Convert_v1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(in *JSONSchemaPropsOrStringArray, out *apiextensions.JSONSchemaPropsOrStringArray, s conversion.Scope) error { - return autoConvert_v1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(in, out, s) -} - -func autoConvert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray(in *apiextensions.JSONSchemaPropsOrStringArray, out *JSONSchemaPropsOrStringArray, s conversion.Scope) error { - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = new(JSONSchemaProps) - if err := Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(*in, *out, s); err != nil { - return err - } - } else { - out.Schema = nil - } - out.Property = *(*[]string)(unsafe.Pointer(&in.Property)) - return nil -} - -// Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray is an autogenerated conversion function. -func Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray(in *apiextensions.JSONSchemaPropsOrStringArray, out *JSONSchemaPropsOrStringArray, s conversion.Scope) error { - return autoConvert_apiextensions_JSONSchemaPropsOrStringArray_To_v1_JSONSchemaPropsOrStringArray(in, out, s) -} - -func autoConvert_v1_ServiceReference_To_apiextensions_ServiceReference(in *ServiceReference, out *apiextensions.ServiceReference, s conversion.Scope) error { - out.Namespace = in.Namespace - out.Name = in.Name - out.Path = (*string)(unsafe.Pointer(in.Path)) - if err := metav1.Convert_Pointer_int32_To_int32(&in.Port, &out.Port, s); err != nil { - return err - } - return nil -} - -// Convert_v1_ServiceReference_To_apiextensions_ServiceReference is an autogenerated conversion function. -func Convert_v1_ServiceReference_To_apiextensions_ServiceReference(in *ServiceReference, out *apiextensions.ServiceReference, s conversion.Scope) error { - return autoConvert_v1_ServiceReference_To_apiextensions_ServiceReference(in, out, s) -} - -func autoConvert_apiextensions_ServiceReference_To_v1_ServiceReference(in *apiextensions.ServiceReference, out *ServiceReference, s conversion.Scope) error { - out.Namespace = in.Namespace - out.Name = in.Name - out.Path = (*string)(unsafe.Pointer(in.Path)) - if err := metav1.Convert_int32_To_Pointer_int32(&in.Port, &out.Port, s); err != nil { - return err - } - return nil -} - -// Convert_apiextensions_ServiceReference_To_v1_ServiceReference is an autogenerated conversion function. 
-func Convert_apiextensions_ServiceReference_To_v1_ServiceReference(in *apiextensions.ServiceReference, out *ServiceReference, s conversion.Scope) error { - return autoConvert_apiextensions_ServiceReference_To_v1_ServiceReference(in, out, s) -} - -func autoConvert_v1_ValidationRule_To_apiextensions_ValidationRule(in *ValidationRule, out *apiextensions.ValidationRule, s conversion.Scope) error { - out.Rule = in.Rule - out.Message = in.Message - return nil -} - -// Convert_v1_ValidationRule_To_apiextensions_ValidationRule is an autogenerated conversion function. -func Convert_v1_ValidationRule_To_apiextensions_ValidationRule(in *ValidationRule, out *apiextensions.ValidationRule, s conversion.Scope) error { - return autoConvert_v1_ValidationRule_To_apiextensions_ValidationRule(in, out, s) -} - -func autoConvert_apiextensions_ValidationRule_To_v1_ValidationRule(in *apiextensions.ValidationRule, out *ValidationRule, s conversion.Scope) error { - out.Rule = in.Rule - out.Message = in.Message - return nil -} - -// Convert_apiextensions_ValidationRule_To_v1_ValidationRule is an autogenerated conversion function. -func Convert_apiextensions_ValidationRule_To_v1_ValidationRule(in *apiextensions.ValidationRule, out *ValidationRule, s conversion.Scope) error { - return autoConvert_apiextensions_ValidationRule_To_v1_ValidationRule(in, out, s) -} - -func autoConvert_v1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in *WebhookClientConfig, out *apiextensions.WebhookClientConfig, s conversion.Scope) error { - out.URL = (*string)(unsafe.Pointer(in.URL)) - if in.Service != nil { - in, out := &in.Service, &out.Service - *out = new(apiextensions.ServiceReference) - if err := Convert_v1_ServiceReference_To_apiextensions_ServiceReference(*in, *out, s); err != nil { - return err - } - } else { - out.Service = nil - } - out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) - return nil -} - -// Convert_v1_WebhookClientConfig_To_apiextensions_WebhookClientConfig is an autogenerated conversion function. -func Convert_v1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in *WebhookClientConfig, out *apiextensions.WebhookClientConfig, s conversion.Scope) error { - return autoConvert_v1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in, out, s) -} - -func autoConvert_apiextensions_WebhookClientConfig_To_v1_WebhookClientConfig(in *apiextensions.WebhookClientConfig, out *WebhookClientConfig, s conversion.Scope) error { - out.URL = (*string)(unsafe.Pointer(in.URL)) - if in.Service != nil { - in, out := &in.Service, &out.Service - *out = new(ServiceReference) - if err := Convert_apiextensions_ServiceReference_To_v1_ServiceReference(*in, *out, s); err != nil { - return err - } - } else { - out.Service = nil - } - out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) - return nil -} - -// Convert_apiextensions_WebhookClientConfig_To_v1_WebhookClientConfig is an autogenerated conversion function. 
-func Convert_apiextensions_WebhookClientConfig_To_v1_WebhookClientConfig(in *apiextensions.WebhookClientConfig, out *WebhookClientConfig, s conversion.Scope) error { - return autoConvert_apiextensions_WebhookClientConfig_To_v1_WebhookClientConfig(in, out, s) -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go deleted file mode 100644 index e27daa9a3..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,705 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConversionRequest) DeepCopyInto(out *ConversionRequest) { - *out = *in - if in.Objects != nil { - in, out := &in.Objects, &out.Objects - *out = make([]runtime.RawExtension, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionRequest. -func (in *ConversionRequest) DeepCopy() *ConversionRequest { - if in == nil { - return nil - } - out := new(ConversionRequest) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConversionResponse) DeepCopyInto(out *ConversionResponse) { - *out = *in - if in.ConvertedObjects != nil { - in, out := &in.ConvertedObjects, &out.ConvertedObjects - *out = make([]runtime.RawExtension, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.Result.DeepCopyInto(&out.Result) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionResponse. -func (in *ConversionResponse) DeepCopy() *ConversionResponse { - if in == nil { - return nil - } - out := new(ConversionResponse) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConversionReview) DeepCopyInto(out *ConversionReview) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Request != nil { - in, out := &in.Request, &out.Request - *out = new(ConversionRequest) - (*in).DeepCopyInto(*out) - } - if in.Response != nil { - in, out := &in.Response, &out.Response - *out = new(ConversionResponse) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionReview. 
-func (in *ConversionReview) DeepCopy() *ConversionReview { - if in == nil { - return nil - } - out := new(ConversionReview) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ConversionReview) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceColumnDefinition) DeepCopyInto(out *CustomResourceColumnDefinition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceColumnDefinition. -func (in *CustomResourceColumnDefinition) DeepCopy() *CustomResourceColumnDefinition { - if in == nil { - return nil - } - out := new(CustomResourceColumnDefinition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceConversion) DeepCopyInto(out *CustomResourceConversion) { - *out = *in - if in.Webhook != nil { - in, out := &in.Webhook, &out.Webhook - *out = new(WebhookConversion) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceConversion. -func (in *CustomResourceConversion) DeepCopy() *CustomResourceConversion { - if in == nil { - return nil - } - out := new(CustomResourceConversion) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceDefinition) DeepCopyInto(out *CustomResourceDefinition) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinition. -func (in *CustomResourceDefinition) DeepCopy() *CustomResourceDefinition { - if in == nil { - return nil - } - out := new(CustomResourceDefinition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CustomResourceDefinition) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceDefinitionCondition) DeepCopyInto(out *CustomResourceDefinitionCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionCondition. -func (in *CustomResourceDefinitionCondition) DeepCopy() *CustomResourceDefinitionCondition { - if in == nil { - return nil - } - out := new(CustomResourceDefinitionCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CustomResourceDefinitionList) DeepCopyInto(out *CustomResourceDefinitionList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CustomResourceDefinition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionList. -func (in *CustomResourceDefinitionList) DeepCopy() *CustomResourceDefinitionList { - if in == nil { - return nil - } - out := new(CustomResourceDefinitionList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CustomResourceDefinitionList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceDefinitionNames) DeepCopyInto(out *CustomResourceDefinitionNames) { - *out = *in - if in.ShortNames != nil { - in, out := &in.ShortNames, &out.ShortNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Categories != nil { - in, out := &in.Categories, &out.Categories - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionNames. -func (in *CustomResourceDefinitionNames) DeepCopy() *CustomResourceDefinitionNames { - if in == nil { - return nil - } - out := new(CustomResourceDefinitionNames) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceDefinitionSpec) DeepCopyInto(out *CustomResourceDefinitionSpec) { - *out = *in - in.Names.DeepCopyInto(&out.Names) - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = make([]CustomResourceDefinitionVersion, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Conversion != nil { - in, out := &in.Conversion, &out.Conversion - *out = new(CustomResourceConversion) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionSpec. -func (in *CustomResourceDefinitionSpec) DeepCopy() *CustomResourceDefinitionSpec { - if in == nil { - return nil - } - out := new(CustomResourceDefinitionSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceDefinitionStatus) DeepCopyInto(out *CustomResourceDefinitionStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]CustomResourceDefinitionCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.AcceptedNames.DeepCopyInto(&out.AcceptedNames) - if in.StoredVersions != nil { - in, out := &in.StoredVersions, &out.StoredVersions - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionStatus. 
-func (in *CustomResourceDefinitionStatus) DeepCopy() *CustomResourceDefinitionStatus { - if in == nil { - return nil - } - out := new(CustomResourceDefinitionStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceDefinitionVersion) DeepCopyInto(out *CustomResourceDefinitionVersion) { - *out = *in - if in.DeprecationWarning != nil { - in, out := &in.DeprecationWarning, &out.DeprecationWarning - *out = new(string) - **out = **in - } - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = new(CustomResourceValidation) - (*in).DeepCopyInto(*out) - } - if in.Subresources != nil { - in, out := &in.Subresources, &out.Subresources - *out = new(CustomResourceSubresources) - (*in).DeepCopyInto(*out) - } - if in.AdditionalPrinterColumns != nil { - in, out := &in.AdditionalPrinterColumns, &out.AdditionalPrinterColumns - *out = make([]CustomResourceColumnDefinition, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionVersion. -func (in *CustomResourceDefinitionVersion) DeepCopy() *CustomResourceDefinitionVersion { - if in == nil { - return nil - } - out := new(CustomResourceDefinitionVersion) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceSubresourceScale) DeepCopyInto(out *CustomResourceSubresourceScale) { - *out = *in - if in.LabelSelectorPath != nil { - in, out := &in.LabelSelectorPath, &out.LabelSelectorPath - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresourceScale. -func (in *CustomResourceSubresourceScale) DeepCopy() *CustomResourceSubresourceScale { - if in == nil { - return nil - } - out := new(CustomResourceSubresourceScale) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceSubresourceStatus) DeepCopyInto(out *CustomResourceSubresourceStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresourceStatus. -func (in *CustomResourceSubresourceStatus) DeepCopy() *CustomResourceSubresourceStatus { - if in == nil { - return nil - } - out := new(CustomResourceSubresourceStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceSubresources) DeepCopyInto(out *CustomResourceSubresources) { - *out = *in - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(CustomResourceSubresourceStatus) - **out = **in - } - if in.Scale != nil { - in, out := &in.Scale, &out.Scale - *out = new(CustomResourceSubresourceScale) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresources. 
-func (in *CustomResourceSubresources) DeepCopy() *CustomResourceSubresources { - if in == nil { - return nil - } - out := new(CustomResourceSubresources) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceValidation) DeepCopyInto(out *CustomResourceValidation) { - *out = *in - if in.OpenAPIV3Schema != nil { - in, out := &in.OpenAPIV3Schema, &out.OpenAPIV3Schema - *out = (*in).DeepCopy() - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceValidation. -func (in *CustomResourceValidation) DeepCopy() *CustomResourceValidation { - if in == nil { - return nil - } - out := new(CustomResourceValidation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExternalDocumentation) DeepCopyInto(out *ExternalDocumentation) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalDocumentation. -func (in *ExternalDocumentation) DeepCopy() *ExternalDocumentation { - if in == nil { - return nil - } - out := new(ExternalDocumentation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JSON) DeepCopyInto(out *JSON) { - *out = *in - if in.Raw != nil { - in, out := &in.Raw, &out.Raw - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSON. -func (in *JSON) DeepCopy() *JSON { - if in == nil { - return nil - } - out := new(JSON) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in JSONSchemaDefinitions) DeepCopyInto(out *JSONSchemaDefinitions) { - { - in := &in - *out = make(JSONSchemaDefinitions, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaDefinitions. -func (in JSONSchemaDefinitions) DeepCopy() JSONSchemaDefinitions { - if in == nil { - return nil - } - out := new(JSONSchemaDefinitions) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in JSONSchemaDependencies) DeepCopyInto(out *JSONSchemaDependencies) { - { - in := &in - *out = make(JSONSchemaDependencies, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaDependencies. -func (in JSONSchemaDependencies) DeepCopy() JSONSchemaDependencies { - if in == nil { - return nil - } - out := new(JSONSchemaDependencies) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JSONSchemaProps) DeepCopyInto(out *JSONSchemaProps) { - clone := in.DeepCopy() - *out = *clone - return -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *JSONSchemaPropsOrArray) DeepCopyInto(out *JSONSchemaPropsOrArray) { - *out = *in - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = (*in).DeepCopy() - } - if in.JSONSchemas != nil { - in, out := &in.JSONSchemas, &out.JSONSchemas - *out = make([]JSONSchemaProps, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrArray. -func (in *JSONSchemaPropsOrArray) DeepCopy() *JSONSchemaPropsOrArray { - if in == nil { - return nil - } - out := new(JSONSchemaPropsOrArray) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JSONSchemaPropsOrBool) DeepCopyInto(out *JSONSchemaPropsOrBool) { - *out = *in - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = (*in).DeepCopy() - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrBool. -func (in *JSONSchemaPropsOrBool) DeepCopy() *JSONSchemaPropsOrBool { - if in == nil { - return nil - } - out := new(JSONSchemaPropsOrBool) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JSONSchemaPropsOrStringArray) DeepCopyInto(out *JSONSchemaPropsOrStringArray) { - *out = *in - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = (*in).DeepCopy() - } - if in.Property != nil { - in, out := &in.Property, &out.Property - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrStringArray. -func (in *JSONSchemaPropsOrStringArray) DeepCopy() *JSONSchemaPropsOrStringArray { - if in == nil { - return nil - } - out := new(JSONSchemaPropsOrStringArray) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { - *out = *in - if in.Path != nil { - in, out := &in.Path, &out.Path - *out = new(string) - **out = **in - } - if in.Port != nil { - in, out := &in.Port, &out.Port - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. -func (in *ServiceReference) DeepCopy() *ServiceReference { - if in == nil { - return nil - } - out := new(ServiceReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ValidationRule) DeepCopyInto(out *ValidationRule) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationRule. -func (in *ValidationRule) DeepCopy() *ValidationRule { - if in == nil { - return nil - } - out := new(ValidationRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in ValidationRules) DeepCopyInto(out *ValidationRules) { - { - in := &in - *out = make(ValidationRules, len(*in)) - copy(*out, *in) - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationRules. -func (in ValidationRules) DeepCopy() ValidationRules { - if in == nil { - return nil - } - out := new(ValidationRules) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { - *out = *in - if in.URL != nil { - in, out := &in.URL, &out.URL - *out = new(string) - **out = **in - } - if in.Service != nil { - in, out := &in.Service, &out.Service - *out = new(ServiceReference) - (*in).DeepCopyInto(*out) - } - if in.CABundle != nil { - in, out := &in.CABundle, &out.CABundle - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. -func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { - if in == nil { - return nil - } - out := new(WebhookClientConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WebhookConversion) DeepCopyInto(out *WebhookConversion) { - *out = *in - if in.ClientConfig != nil { - in, out := &in.ClientConfig, &out.ClientConfig - *out = new(WebhookClientConfig) - (*in).DeepCopyInto(*out) - } - if in.ConversionReviewVersions != nil { - in, out := &in.ConversionReviewVersions, &out.ConversionReviewVersions - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConversion. -func (in *WebhookConversion) DeepCopy() *WebhookConversion { - if in == nil { - return nil - } - out := new(WebhookConversion) - in.DeepCopyInto(out) - return out -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.defaults.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.defaults.go deleted file mode 100644 index 2bc705780..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.defaults.go +++ /dev/null @@ -1,58 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by defaulter-gen. DO NOT EDIT. - -package v1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. 
-func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&CustomResourceDefinition{}, func(obj interface{}) { SetObjectDefaults_CustomResourceDefinition(obj.(*CustomResourceDefinition)) }) - scheme.AddTypeDefaultingFunc(&CustomResourceDefinitionList{}, func(obj interface{}) { - SetObjectDefaults_CustomResourceDefinitionList(obj.(*CustomResourceDefinitionList)) - }) - return nil -} - -func SetObjectDefaults_CustomResourceDefinition(in *CustomResourceDefinition) { - SetDefaults_CustomResourceDefinition(in) - SetDefaults_CustomResourceDefinitionSpec(&in.Spec) - if in.Spec.Conversion != nil { - if in.Spec.Conversion.Webhook != nil { - if in.Spec.Conversion.Webhook.ClientConfig != nil { - if in.Spec.Conversion.Webhook.ClientConfig.Service != nil { - SetDefaults_ServiceReference(in.Spec.Conversion.Webhook.ClientConfig.Service) - } - } - } - } -} - -func SetObjectDefaults_CustomResourceDefinitionList(in *CustomResourceDefinitionList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_CustomResourceDefinition(a) - } -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/.import-restrictions b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/.import-restrictions deleted file mode 100644 index 7408dd121..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/.import-restrictions +++ /dev/null @@ -1,5 +0,0 @@ -inverseRules: - # Allow use of this package in all k8s.io packages. - - selectorRegexp: k8s[.]io - allowedPrefixes: - - '' diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/conversion.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/conversion.go deleted file mode 100644 index eed3fde63..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/conversion.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -import ( - "bytes" - - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/util/json" - - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" -) - -func Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(in *apiextensions.JSONSchemaProps, out *JSONSchemaProps, s conversion.Scope) error { - if err := autoConvert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(in, out, s); err != nil { - return err - } - if in.Default != nil && *(in.Default) == nil { - out.Default = nil - } - if in.Example != nil && *(in.Example) == nil { - out.Example = nil - } - return nil -} - -var nullLiteral = []byte(`null`) - -func Convert_apiextensions_JSON_To_v1beta1_JSON(in *apiextensions.JSON, out *JSON, s conversion.Scope) error { - raw, err := json.Marshal(*in) - if err != nil { - return err - } - if len(raw) == 0 || bytes.Equal(raw, nullLiteral) { - // match JSON#UnmarshalJSON treatment of literal nulls - out.Raw = nil - } else { - out.Raw = raw - } - return nil -} - -func Convert_v1beta1_JSON_To_apiextensions_JSON(in *JSON, out *apiextensions.JSON, s conversion.Scope) error { - if in != nil { - var i interface{} - if len(in.Raw) > 0 && !bytes.Equal(in.Raw, nullLiteral) { - if err := json.Unmarshal(in.Raw, &i); err != nil { - return err - } - } - *out = i - } else { - out = nil - } - return nil -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go deleted file mode 100644 index 9f64585da..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go +++ /dev/null @@ -1,276 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -// TODO: Update this after a tag is created for interface fields in DeepCopy -func (in *JSONSchemaProps) DeepCopy() *JSONSchemaProps { - if in == nil { - return nil - } - out := new(JSONSchemaProps) - *out = *in - - if in.Ref != nil { - in, out := &in.Ref, &out.Ref - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in - } - } - - if in.Maximum != nil { - in, out := &in.Maximum, &out.Maximum - if *in == nil { - *out = nil - } else { - *out = new(float64) - **out = **in - } - } - - if in.Minimum != nil { - in, out := &in.Minimum, &out.Minimum - if *in == nil { - *out = nil - } else { - *out = new(float64) - **out = **in - } - } - - if in.MaxLength != nil { - in, out := &in.MaxLength, &out.MaxLength - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - - if in.MinLength != nil { - in, out := &in.MinLength, &out.MinLength - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - if in.MaxItems != nil { - in, out := &in.MaxItems, &out.MaxItems - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - - if in.MinItems != nil { - in, out := &in.MinItems, &out.MinItems - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - - if in.MultipleOf != nil { - in, out := &in.MultipleOf, &out.MultipleOf - if *in == nil { - *out = nil - } else { - *out = new(float64) - **out = **in - } - } - - if in.MaxProperties != nil { - in, out := &in.MaxProperties, &out.MaxProperties - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - - if in.MinProperties != nil { - in, out := &in.MinProperties, &out.MinProperties - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - - if in.Required != nil { - in, out := &in.Required, &out.Required - *out = make([]string, len(*in)) - copy(*out, *in) - } - - if in.Items != nil { - in, out := &in.Items, &out.Items - if *in == nil { - *out = nil - } else { - *out = new(JSONSchemaPropsOrArray) - (*in).DeepCopyInto(*out) - } - } - - if in.AllOf != nil { - in, out := &in.AllOf, &out.AllOf - *out = make([]JSONSchemaProps, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - - if in.OneOf != nil { - in, out := &in.OneOf, &out.OneOf - *out = make([]JSONSchemaProps, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.AnyOf != nil { - in, out := &in.AnyOf, &out.AnyOf - *out = make([]JSONSchemaProps, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - - if in.Not != nil { - in, out := &in.Not, &out.Not - if *in == nil { - *out = nil - } else { - *out = new(JSONSchemaProps) - (*in).DeepCopyInto(*out) - } - } - - if in.Properties != nil { - in, out := &in.Properties, &out.Properties - *out = make(map[string]JSONSchemaProps, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - - if in.AdditionalProperties != nil { - in, out := &in.AdditionalProperties, &out.AdditionalProperties - if *in == nil { - *out = nil - } else { - *out = new(JSONSchemaPropsOrBool) - (*in).DeepCopyInto(*out) - } - } - - if in.PatternProperties != nil { - in, out := &in.PatternProperties, &out.PatternProperties - *out = make(map[string]JSONSchemaProps, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - - if in.Dependencies != nil { - in, out := &in.Dependencies, &out.Dependencies - *out = make(JSONSchemaDependencies, len(*in)) - for key, val := 
range *in { - (*out)[key] = *val.DeepCopy() - } - } - - if in.AdditionalItems != nil { - in, out := &in.AdditionalItems, &out.AdditionalItems - if *in == nil { - *out = nil - } else { - *out = new(JSONSchemaPropsOrBool) - (*in).DeepCopyInto(*out) - } - } - - if in.Definitions != nil { - in, out := &in.Definitions, &out.Definitions - *out = make(JSONSchemaDefinitions, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - - if in.ExternalDocs != nil { - in, out := &in.ExternalDocs, &out.ExternalDocs - if *in == nil { - *out = nil - } else { - *out = new(ExternalDocumentation) - (*in).DeepCopyInto(*out) - } - } - - if in.XPreserveUnknownFields != nil { - in, out := &in.XPreserveUnknownFields, &out.XPreserveUnknownFields - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - - if in.XListMapKeys != nil { - in, out := &in.XListMapKeys, &out.XListMapKeys - *out = make([]string, len(*in)) - copy(*out, *in) - } - - if in.XListType != nil { - in, out := &in.XListType, &out.XListType - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in - } - } - - if in.XMapType != nil { - in, out := &in.XMapType, &out.XMapType - *out = new(string) - **out = **in - } - - if in.XValidations != nil { - in, out := &in.XValidations, &out.XValidations - *out = make([]ValidationRule, len(*in)) - copy(*out, *in) - } - - return out -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go deleted file mode 100644 index 1a9c2238e..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "strings" - - "k8s.io/apimachinery/pkg/runtime" - utilpointer "k8s.io/utils/pointer" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return RegisterDefaults(scheme) -} - -func SetDefaults_CustomResourceDefinition(obj *CustomResourceDefinition) { - SetDefaults_CustomResourceDefinitionSpec(&obj.Spec) - if len(obj.Status.StoredVersions) == 0 { - for _, v := range obj.Spec.Versions { - if v.Storage { - obj.Status.StoredVersions = append(obj.Status.StoredVersions, v.Name) - break - } - } - } -} - -func SetDefaults_CustomResourceDefinitionSpec(obj *CustomResourceDefinitionSpec) { - if len(obj.Scope) == 0 { - obj.Scope = NamespaceScoped - } - if len(obj.Names.Singular) == 0 { - obj.Names.Singular = strings.ToLower(obj.Names.Kind) - } - if len(obj.Names.ListKind) == 0 && len(obj.Names.Kind) > 0 { - obj.Names.ListKind = obj.Names.Kind + "List" - } - // If there is no list of versions, create on using deprecated Version field. 
- if len(obj.Versions) == 0 && len(obj.Version) != 0 { - obj.Versions = []CustomResourceDefinitionVersion{{ - Name: obj.Version, - Storage: true, - Served: true, - }} - } - // For backward compatibility set the version field to the first item in versions list. - if len(obj.Version) == 0 && len(obj.Versions) != 0 { - obj.Version = obj.Versions[0].Name - } - if obj.Conversion == nil { - obj.Conversion = &CustomResourceConversion{ - Strategy: NoneConverter, - } - } - if obj.Conversion.Strategy == WebhookConverter && len(obj.Conversion.ConversionReviewVersions) == 0 { - obj.Conversion.ConversionReviewVersions = []string{SchemeGroupVersion.Version} - } - if obj.PreserveUnknownFields == nil { - obj.PreserveUnknownFields = utilpointer.BoolPtr(true) - } -} - -// SetDefaults_ServiceReference sets defaults for Webhook's ServiceReference -func SetDefaults_ServiceReference(obj *ServiceReference) { - if obj.Port == nil { - obj.Port = utilpointer.Int32Ptr(443) - } -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go deleted file mode 100644 index 7a92cb8b0..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package -// +k8s:conversion-gen=k8s.io/apiextensions-apiserver/pkg/apis/apiextensions -// +k8s:defaulter-gen=TypeMeta -// +k8s:openapi-gen=true -// +k8s:prerelease-lifecycle-gen=true -// +groupName=apiextensions.k8s.io - -// Package v1beta1 is the v1beta1 version of the API. -package v1beta1 // import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go deleted file mode 100644 index 6ab500667..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go +++ /dev/null @@ -1,9297 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto - -package v1beta1 - -import ( - encoding_binary "encoding/binary" - fmt "fmt" - - io "io" - - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - runtime "k8s.io/apimachinery/pkg/runtime" - - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" - - k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *ConversionRequest) Reset() { *m = ConversionRequest{} } -func (*ConversionRequest) ProtoMessage() {} -func (*ConversionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{0} -} -func (m *ConversionRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConversionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ConversionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConversionRequest.Merge(m, src) -} -func (m *ConversionRequest) XXX_Size() int { - return m.Size() -} -func (m *ConversionRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ConversionRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ConversionRequest proto.InternalMessageInfo - -func (m *ConversionResponse) Reset() { *m = ConversionResponse{} } -func (*ConversionResponse) ProtoMessage() {} -func (*ConversionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{1} -} -func (m *ConversionResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConversionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ConversionResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConversionResponse.Merge(m, src) -} -func (m *ConversionResponse) XXX_Size() int { - return m.Size() -} -func (m *ConversionResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ConversionResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ConversionResponse proto.InternalMessageInfo - -func (m *ConversionReview) Reset() { *m = ConversionReview{} } -func (*ConversionReview) ProtoMessage() {} -func (*ConversionReview) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{2} -} -func (m *ConversionReview) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConversionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ConversionReview) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConversionReview.Merge(m, src) -} -func (m *ConversionReview) XXX_Size() int { - return m.Size() -} -func (m *ConversionReview) XXX_DiscardUnknown() { - xxx_messageInfo_ConversionReview.DiscardUnknown(m) -} - -var 
xxx_messageInfo_ConversionReview proto.InternalMessageInfo - -func (m *CustomResourceColumnDefinition) Reset() { *m = CustomResourceColumnDefinition{} } -func (*CustomResourceColumnDefinition) ProtoMessage() {} -func (*CustomResourceColumnDefinition) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{3} -} -func (m *CustomResourceColumnDefinition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomResourceColumnDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceColumnDefinition) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceColumnDefinition.Merge(m, src) -} -func (m *CustomResourceColumnDefinition) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceColumnDefinition) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceColumnDefinition.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceColumnDefinition proto.InternalMessageInfo - -func (m *CustomResourceConversion) Reset() { *m = CustomResourceConversion{} } -func (*CustomResourceConversion) ProtoMessage() {} -func (*CustomResourceConversion) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{4} -} -func (m *CustomResourceConversion) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomResourceConversion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceConversion) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceConversion.Merge(m, src) -} -func (m *CustomResourceConversion) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceConversion) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceConversion.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceConversion proto.InternalMessageInfo - -func (m *CustomResourceDefinition) Reset() { *m = CustomResourceDefinition{} } -func (*CustomResourceDefinition) ProtoMessage() {} -func (*CustomResourceDefinition) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{5} -} -func (m *CustomResourceDefinition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomResourceDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceDefinition) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceDefinition.Merge(m, src) -} -func (m *CustomResourceDefinition) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceDefinition) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceDefinition.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceDefinition proto.InternalMessageInfo - -func (m *CustomResourceDefinitionCondition) Reset() { *m = CustomResourceDefinitionCondition{} } -func (*CustomResourceDefinitionCondition) ProtoMessage() {} -func (*CustomResourceDefinitionCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{6} -} -func (m *CustomResourceDefinitionCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomResourceDefinitionCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceDefinitionCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceDefinitionCondition.Merge(m, src) -} -func (m *CustomResourceDefinitionCondition) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceDefinitionCondition) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceDefinitionCondition.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceDefinitionCondition proto.InternalMessageInfo - -func (m *CustomResourceDefinitionList) Reset() { *m = CustomResourceDefinitionList{} } -func (*CustomResourceDefinitionList) ProtoMessage() {} -func (*CustomResourceDefinitionList) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{7} -} -func (m *CustomResourceDefinitionList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomResourceDefinitionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceDefinitionList) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceDefinitionList.Merge(m, src) -} -func (m *CustomResourceDefinitionList) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceDefinitionList) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceDefinitionList.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceDefinitionList proto.InternalMessageInfo - -func (m *CustomResourceDefinitionNames) Reset() { *m = CustomResourceDefinitionNames{} } -func (*CustomResourceDefinitionNames) ProtoMessage() {} -func (*CustomResourceDefinitionNames) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{8} -} -func (m *CustomResourceDefinitionNames) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomResourceDefinitionNames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceDefinitionNames) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceDefinitionNames.Merge(m, src) -} -func (m *CustomResourceDefinitionNames) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceDefinitionNames) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceDefinitionNames.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceDefinitionNames proto.InternalMessageInfo - -func (m *CustomResourceDefinitionSpec) Reset() { *m = CustomResourceDefinitionSpec{} } -func (*CustomResourceDefinitionSpec) ProtoMessage() {} -func (*CustomResourceDefinitionSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{9} -} -func (m *CustomResourceDefinitionSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomResourceDefinitionSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceDefinitionSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceDefinitionSpec.Merge(m, src) -} -func (m *CustomResourceDefinitionSpec) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceDefinitionSpec) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceDefinitionSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceDefinitionSpec 
proto.InternalMessageInfo - -func (m *CustomResourceDefinitionStatus) Reset() { *m = CustomResourceDefinitionStatus{} } -func (*CustomResourceDefinitionStatus) ProtoMessage() {} -func (*CustomResourceDefinitionStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{10} -} -func (m *CustomResourceDefinitionStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomResourceDefinitionStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceDefinitionStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceDefinitionStatus.Merge(m, src) -} -func (m *CustomResourceDefinitionStatus) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceDefinitionStatus) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceDefinitionStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceDefinitionStatus proto.InternalMessageInfo - -func (m *CustomResourceDefinitionVersion) Reset() { *m = CustomResourceDefinitionVersion{} } -func (*CustomResourceDefinitionVersion) ProtoMessage() {} -func (*CustomResourceDefinitionVersion) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{11} -} -func (m *CustomResourceDefinitionVersion) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomResourceDefinitionVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceDefinitionVersion) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceDefinitionVersion.Merge(m, src) -} -func (m *CustomResourceDefinitionVersion) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceDefinitionVersion) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceDefinitionVersion.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceDefinitionVersion proto.InternalMessageInfo - -func (m *CustomResourceSubresourceScale) Reset() { *m = CustomResourceSubresourceScale{} } -func (*CustomResourceSubresourceScale) ProtoMessage() {} -func (*CustomResourceSubresourceScale) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{12} -} -func (m *CustomResourceSubresourceScale) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomResourceSubresourceScale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceSubresourceScale) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceSubresourceScale.Merge(m, src) -} -func (m *CustomResourceSubresourceScale) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceSubresourceScale) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceSubresourceScale.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceSubresourceScale proto.InternalMessageInfo - -func (m *CustomResourceSubresourceStatus) Reset() { *m = CustomResourceSubresourceStatus{} } -func (*CustomResourceSubresourceStatus) ProtoMessage() {} -func (*CustomResourceSubresourceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{13} -} -func (m *CustomResourceSubresourceStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m 
*CustomResourceSubresourceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceSubresourceStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceSubresourceStatus.Merge(m, src) -} -func (m *CustomResourceSubresourceStatus) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceSubresourceStatus) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceSubresourceStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceSubresourceStatus proto.InternalMessageInfo - -func (m *CustomResourceSubresources) Reset() { *m = CustomResourceSubresources{} } -func (*CustomResourceSubresources) ProtoMessage() {} -func (*CustomResourceSubresources) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{14} -} -func (m *CustomResourceSubresources) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomResourceSubresources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceSubresources) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceSubresources.Merge(m, src) -} -func (m *CustomResourceSubresources) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceSubresources) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceSubresources.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceSubresources proto.InternalMessageInfo - -func (m *CustomResourceValidation) Reset() { *m = CustomResourceValidation{} } -func (*CustomResourceValidation) ProtoMessage() {} -func (*CustomResourceValidation) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{15} -} -func (m *CustomResourceValidation) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomResourceValidation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomResourceValidation) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomResourceValidation.Merge(m, src) -} -func (m *CustomResourceValidation) XXX_Size() int { - return m.Size() -} -func (m *CustomResourceValidation) XXX_DiscardUnknown() { - xxx_messageInfo_CustomResourceValidation.DiscardUnknown(m) -} - -var xxx_messageInfo_CustomResourceValidation proto.InternalMessageInfo - -func (m *ExternalDocumentation) Reset() { *m = ExternalDocumentation{} } -func (*ExternalDocumentation) ProtoMessage() {} -func (*ExternalDocumentation) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{16} -} -func (m *ExternalDocumentation) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExternalDocumentation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ExternalDocumentation) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExternalDocumentation.Merge(m, src) -} -func (m *ExternalDocumentation) XXX_Size() int { - return m.Size() -} -func (m *ExternalDocumentation) XXX_DiscardUnknown() { - xxx_messageInfo_ExternalDocumentation.DiscardUnknown(m) -} - -var xxx_messageInfo_ExternalDocumentation proto.InternalMessageInfo - -func (m *JSON) Reset() { *m 
= JSON{} } -func (*JSON) ProtoMessage() {} -func (*JSON) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{17} -} -func (m *JSON) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JSON) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *JSON) XXX_Merge(src proto.Message) { - xxx_messageInfo_JSON.Merge(m, src) -} -func (m *JSON) XXX_Size() int { - return m.Size() -} -func (m *JSON) XXX_DiscardUnknown() { - xxx_messageInfo_JSON.DiscardUnknown(m) -} - -var xxx_messageInfo_JSON proto.InternalMessageInfo - -func (m *JSONSchemaProps) Reset() { *m = JSONSchemaProps{} } -func (*JSONSchemaProps) ProtoMessage() {} -func (*JSONSchemaProps) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{18} -} -func (m *JSONSchemaProps) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JSONSchemaProps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *JSONSchemaProps) XXX_Merge(src proto.Message) { - xxx_messageInfo_JSONSchemaProps.Merge(m, src) -} -func (m *JSONSchemaProps) XXX_Size() int { - return m.Size() -} -func (m *JSONSchemaProps) XXX_DiscardUnknown() { - xxx_messageInfo_JSONSchemaProps.DiscardUnknown(m) -} - -var xxx_messageInfo_JSONSchemaProps proto.InternalMessageInfo - -func (m *JSONSchemaPropsOrArray) Reset() { *m = JSONSchemaPropsOrArray{} } -func (*JSONSchemaPropsOrArray) ProtoMessage() {} -func (*JSONSchemaPropsOrArray) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{19} -} -func (m *JSONSchemaPropsOrArray) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JSONSchemaPropsOrArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *JSONSchemaPropsOrArray) XXX_Merge(src proto.Message) { - xxx_messageInfo_JSONSchemaPropsOrArray.Merge(m, src) -} -func (m *JSONSchemaPropsOrArray) XXX_Size() int { - return m.Size() -} -func (m *JSONSchemaPropsOrArray) XXX_DiscardUnknown() { - xxx_messageInfo_JSONSchemaPropsOrArray.DiscardUnknown(m) -} - -var xxx_messageInfo_JSONSchemaPropsOrArray proto.InternalMessageInfo - -func (m *JSONSchemaPropsOrBool) Reset() { *m = JSONSchemaPropsOrBool{} } -func (*JSONSchemaPropsOrBool) ProtoMessage() {} -func (*JSONSchemaPropsOrBool) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{20} -} -func (m *JSONSchemaPropsOrBool) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JSONSchemaPropsOrBool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *JSONSchemaPropsOrBool) XXX_Merge(src proto.Message) { - xxx_messageInfo_JSONSchemaPropsOrBool.Merge(m, src) -} -func (m *JSONSchemaPropsOrBool) XXX_Size() int { - return m.Size() -} -func (m *JSONSchemaPropsOrBool) XXX_DiscardUnknown() { - xxx_messageInfo_JSONSchemaPropsOrBool.DiscardUnknown(m) -} - -var xxx_messageInfo_JSONSchemaPropsOrBool proto.InternalMessageInfo - -func (m *JSONSchemaPropsOrStringArray) Reset() { *m = JSONSchemaPropsOrStringArray{} } -func (*JSONSchemaPropsOrStringArray) ProtoMessage() {} 
-func (*JSONSchemaPropsOrStringArray) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{21} -} -func (m *JSONSchemaPropsOrStringArray) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JSONSchemaPropsOrStringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *JSONSchemaPropsOrStringArray) XXX_Merge(src proto.Message) { - xxx_messageInfo_JSONSchemaPropsOrStringArray.Merge(m, src) -} -func (m *JSONSchemaPropsOrStringArray) XXX_Size() int { - return m.Size() -} -func (m *JSONSchemaPropsOrStringArray) XXX_DiscardUnknown() { - xxx_messageInfo_JSONSchemaPropsOrStringArray.DiscardUnknown(m) -} - -var xxx_messageInfo_JSONSchemaPropsOrStringArray proto.InternalMessageInfo - -func (m *ServiceReference) Reset() { *m = ServiceReference{} } -func (*ServiceReference) ProtoMessage() {} -func (*ServiceReference) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{22} -} -func (m *ServiceReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ServiceReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceReference.Merge(m, src) -} -func (m *ServiceReference) XXX_Size() int { - return m.Size() -} -func (m *ServiceReference) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceReference.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceReference proto.InternalMessageInfo - -func (m *ValidationRule) Reset() { *m = ValidationRule{} } -func (*ValidationRule) ProtoMessage() {} -func (*ValidationRule) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{23} -} -func (m *ValidationRule) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ValidationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ValidationRule) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidationRule.Merge(m, src) -} -func (m *ValidationRule) XXX_Size() int { - return m.Size() -} -func (m *ValidationRule) XXX_DiscardUnknown() { - xxx_messageInfo_ValidationRule.DiscardUnknown(m) -} - -var xxx_messageInfo_ValidationRule proto.InternalMessageInfo - -func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } -func (*WebhookClientConfig) ProtoMessage() {} -func (*WebhookClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_98a4cc6918394e53, []int{24} -} -func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_WebhookClientConfig.Merge(m, src) -} -func (m *WebhookClientConfig) XXX_Size() int { - return m.Size() -} -func (m *WebhookClientConfig) XXX_DiscardUnknown() { - xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ConversionRequest)(nil), 
"k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ConversionRequest") - proto.RegisterType((*ConversionResponse)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ConversionResponse") - proto.RegisterType((*ConversionReview)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ConversionReview") - proto.RegisterType((*CustomResourceColumnDefinition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceColumnDefinition") - proto.RegisterType((*CustomResourceConversion)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceConversion") - proto.RegisterType((*CustomResourceDefinition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinition") - proto.RegisterType((*CustomResourceDefinitionCondition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionCondition") - proto.RegisterType((*CustomResourceDefinitionList)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionList") - proto.RegisterType((*CustomResourceDefinitionNames)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionNames") - proto.RegisterType((*CustomResourceDefinitionSpec)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionSpec") - proto.RegisterType((*CustomResourceDefinitionStatus)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionStatus") - proto.RegisterType((*CustomResourceDefinitionVersion)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionVersion") - proto.RegisterType((*CustomResourceSubresourceScale)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceSubresourceScale") - proto.RegisterType((*CustomResourceSubresourceStatus)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceSubresourceStatus") - proto.RegisterType((*CustomResourceSubresources)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceSubresources") - proto.RegisterType((*CustomResourceValidation)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceValidation") - proto.RegisterType((*ExternalDocumentation)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ExternalDocumentation") - proto.RegisterType((*JSON)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSON") - proto.RegisterType((*JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps") - proto.RegisterMapType((JSONSchemaDefinitions)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps.DefinitionsEntry") - proto.RegisterMapType((JSONSchemaDependencies)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps.DependenciesEntry") - proto.RegisterMapType((map[string]JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps.PatternPropertiesEntry") - proto.RegisterMapType((map[string]JSONSchemaProps)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps.PropertiesEntry") - proto.RegisterType((*JSONSchemaPropsOrArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrArray") - proto.RegisterType((*JSONSchemaPropsOrBool)(nil), 
"k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrBool") - proto.RegisterType((*JSONSchemaPropsOrStringArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrStringArray") - proto.RegisterType((*ServiceReference)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ServiceReference") - proto.RegisterType((*ValidationRule)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.ValidationRule") - proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.WebhookClientConfig") -} - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto", fileDescriptor_98a4cc6918394e53) -} - -var fileDescriptor_98a4cc6918394e53 = []byte{ - // 3079 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcf, 0x73, 0x23, 0x47, - 0xf5, 0xdf, 0x91, 0x2d, 0x5b, 0x6e, 0xdb, 0x6b, 0xbb, 0x77, 0xed, 0xcc, 0x3a, 0x1b, 0xcb, 0x56, - 0xbe, 0xd9, 0xaf, 0x93, 0x6c, 0xe4, 0xc4, 0x24, 0x24, 0xa4, 0x48, 0x51, 0x96, 0xed, 0x0d, 0x4e, - 0xd6, 0x3f, 0x68, 0xed, 0x26, 0x86, 0xfc, 0x6c, 0x6b, 0xda, 0xf2, 0xc4, 0xa3, 0x99, 0xd9, 0xe9, - 0x19, 0xd9, 0xae, 0x00, 0xc5, 0x8f, 0x4a, 0x41, 0x51, 0x40, 0x28, 0x92, 0x0b, 0x05, 0x1c, 0x02, - 0xc5, 0x85, 0x03, 0x1c, 0xe0, 0x06, 0x7f, 0x40, 0x8e, 0x29, 0x8a, 0x43, 0x0e, 0x94, 0x20, 0xe2, - 0xca, 0x91, 0x2a, 0xaa, 0x7c, 0xa2, 0xfa, 0xc7, 0xf4, 0xb4, 0x46, 0xd2, 0xee, 0x56, 0x2c, 0x65, - 0xb9, 0x69, 0xde, 0xaf, 0xcf, 0x9b, 0xd7, 0xaf, 0x5f, 0xbf, 0x7e, 0x23, 0xb0, 0x7f, 0xf8, 0x0c, - 0x2d, 0xda, 0xde, 0xd2, 0x61, 0xb4, 0x47, 0x02, 0x97, 0x84, 0x84, 0x2e, 0xd5, 0x89, 0x6b, 0x79, - 0xc1, 0x92, 0x64, 0x60, 0xdf, 0x26, 0xc7, 0x21, 0x71, 0xa9, 0xed, 0xb9, 0xf4, 0x31, 0xec, 0xdb, - 0x94, 0x04, 0x75, 0x12, 0x2c, 0xf9, 0x87, 0x55, 0xc6, 0xa3, 0xad, 0x02, 0x4b, 0xf5, 0x27, 0xf6, - 0x48, 0x88, 0x9f, 0x58, 0xaa, 0x12, 0x97, 0x04, 0x38, 0x24, 0x56, 0xd1, 0x0f, 0xbc, 0xd0, 0x83, - 0xcf, 0x09, 0x73, 0xc5, 0x16, 0xe9, 0x37, 0x94, 0xb9, 0xa2, 0x7f, 0x58, 0x65, 0x3c, 0xda, 0x2a, - 0x50, 0x94, 0xe6, 0x66, 0x1f, 0xab, 0xda, 0xe1, 0x41, 0xb4, 0x57, 0xac, 0x78, 0xb5, 0xa5, 0xaa, - 0x57, 0xf5, 0x96, 0xb8, 0xd5, 0xbd, 0x68, 0x9f, 0x3f, 0xf1, 0x07, 0xfe, 0x4b, 0xa0, 0xcd, 0x3e, - 0x99, 0x38, 0x5f, 0xc3, 0x95, 0x03, 0xdb, 0x25, 0xc1, 0x49, 0xe2, 0x71, 0x8d, 0x84, 0x78, 0xa9, - 0xde, 0xe6, 0xe3, 0xec, 0x52, 0x37, 0xad, 0x20, 0x72, 0x43, 0xbb, 0x46, 0xda, 0x14, 0x3e, 0x7f, - 0x27, 0x05, 0x5a, 0x39, 0x20, 0x35, 0x9c, 0xd6, 0x2b, 0x9c, 0x1a, 0x60, 0x6a, 0xd5, 0x73, 0xeb, - 0x24, 0x60, 0x6f, 0x89, 0xc8, 0xad, 0x88, 0xd0, 0x10, 0x96, 0xc0, 0x40, 0x64, 0x5b, 0xa6, 0x31, - 0x6f, 0x2c, 0x8e, 0x94, 0x1e, 0xff, 0xb0, 0x91, 0x3f, 0xd7, 0x6c, 0xe4, 0x07, 0x6e, 0x6e, 0xac, - 0x9d, 0x36, 0xf2, 0x0b, 0xdd, 0x90, 0xc2, 0x13, 0x9f, 0xd0, 0xe2, 0xcd, 0x8d, 0x35, 0xc4, 0x94, - 0xe1, 0xf3, 0x60, 0xca, 0x22, 0xd4, 0x0e, 0x88, 0xb5, 0xb2, 0xb3, 0xf1, 0x92, 0xb0, 0x6f, 0x66, - 0xb8, 0xc5, 0x4b, 0xd2, 0xe2, 0xd4, 0x5a, 0x5a, 0x00, 0xb5, 0xeb, 0xc0, 0x5d, 0x30, 0xec, 0xed, - 0xbd, 0x45, 0x2a, 0x21, 0x35, 0x07, 0xe6, 0x07, 0x16, 0x47, 0x97, 0x1f, 0x2b, 0x26, 0x2b, 0xa8, - 0x5c, 0xe0, 0xcb, 0x26, 0x5f, 0xb6, 0x88, 0xf0, 0xd1, 0x7a, 0xbc, 0x72, 0xa5, 0x09, 0x89, 0x36, - 0xbc, 0x2d, 0xac, 0xa0, 0xd8, 0x5c, 0xe1, 0xd7, 0x19, 0x00, 0xf5, 0x97, 0xa7, 0xbe, 0xe7, 0x52, - 0xd2, 0x93, 0xb7, 0xa7, 0x60, 0xb2, 0xc2, 0x2d, 0x87, 0xc4, 0x92, 0xb8, 0x66, 0xe6, 0xd3, 
0x78, - 0x6f, 0x4a, 0xfc, 0xc9, 0xd5, 0x94, 0x39, 0xd4, 0x06, 0x00, 0x6f, 0x80, 0xa1, 0x80, 0xd0, 0xc8, - 0x09, 0xcd, 0x81, 0x79, 0x63, 0x71, 0x74, 0xf9, 0x6a, 0x57, 0x28, 0x9e, 0xdf, 0x2c, 0xf9, 0x8a, - 0xf5, 0x27, 0x8a, 0xe5, 0x10, 0x87, 0x11, 0x2d, 0x9d, 0x97, 0x48, 0x43, 0x88, 0xdb, 0x40, 0xd2, - 0x56, 0xe1, 0xfb, 0x19, 0x30, 0xa9, 0x47, 0xa9, 0x6e, 0x93, 0x23, 0x78, 0x04, 0x86, 0x03, 0x91, - 0x2c, 0x3c, 0x4e, 0xa3, 0xcb, 0x3b, 0xc5, 0x33, 0x6d, 0xab, 0x62, 0x5b, 0x12, 0x96, 0x46, 0xd9, - 0x9a, 0xc9, 0x07, 0x14, 0xa3, 0xc1, 0xb7, 0x41, 0x2e, 0x90, 0x0b, 0xc5, 0xb3, 0x69, 0x74, 0xf9, - 0x2b, 0x3d, 0x44, 0x16, 0x86, 0x4b, 0x63, 0xcd, 0x46, 0x3e, 0x17, 0x3f, 0x21, 0x05, 0x58, 0x78, - 0x2f, 0x03, 0xe6, 0x56, 0x23, 0x1a, 0x7a, 0x35, 0x44, 0xa8, 0x17, 0x05, 0x15, 0xb2, 0xea, 0x39, - 0x51, 0xcd, 0x5d, 0x23, 0xfb, 0xb6, 0x6b, 0x87, 0x2c, 0x5b, 0xe7, 0xc1, 0xa0, 0x8b, 0x6b, 0x44, - 0x66, 0xcf, 0x98, 0x8c, 0xe9, 0xe0, 0x16, 0xae, 0x11, 0xc4, 0x39, 0x4c, 0x82, 0x25, 0x8b, 0xdc, - 0x0b, 0x4a, 0xe2, 0xc6, 0x89, 0x4f, 0x10, 0xe7, 0xc0, 0x2b, 0x60, 0x68, 0xdf, 0x0b, 0x6a, 0x58, - 0xac, 0xe3, 0x48, 0xb2, 0x32, 0xd7, 0x38, 0x15, 0x49, 0x2e, 0x7c, 0x0a, 0x8c, 0x5a, 0x84, 0x56, - 0x02, 0xdb, 0x67, 0xd0, 0xe6, 0x20, 0x17, 0xbe, 0x20, 0x85, 0x47, 0xd7, 0x12, 0x16, 0xd2, 0xe5, - 0xe0, 0x55, 0x90, 0xf3, 0x03, 0xdb, 0x0b, 0xec, 0xf0, 0xc4, 0xcc, 0xce, 0x1b, 0x8b, 0xd9, 0xd2, - 0xa4, 0xd4, 0xc9, 0xed, 0x48, 0x3a, 0x52, 0x12, 0x70, 0x1e, 0xe4, 0x5e, 0x28, 0x6f, 0x6f, 0xed, - 0xe0, 0xf0, 0xc0, 0x1c, 0xe2, 0x08, 0x83, 0x4c, 0x1a, 0x29, 0x6a, 0xe1, 0x6f, 0x19, 0x60, 0xa6, - 0xa3, 0x12, 0x87, 0x14, 0x5e, 0x03, 0x39, 0x1a, 0xb2, 0x8a, 0x53, 0x3d, 0x91, 0x31, 0x79, 0x24, - 0x06, 0x2b, 0x4b, 0xfa, 0x69, 0x23, 0x3f, 0x93, 0x68, 0xc4, 0x54, 0x1e, 0x0f, 0xa5, 0x0b, 0x7f, - 0x69, 0x80, 0x0b, 0x47, 0x64, 0xef, 0xc0, 0xf3, 0x0e, 0x57, 0x1d, 0x9b, 0xb8, 0xe1, 0xaa, 0xe7, - 0xee, 0xdb, 0x55, 0x99, 0x03, 0xe8, 0x8c, 0x39, 0xf0, 0x72, 0xbb, 0xe5, 0xd2, 0x7d, 0xcd, 0x46, - 0xfe, 0x42, 0x07, 0x06, 0xea, 0xe4, 0x07, 0xdc, 0x05, 0x66, 0x25, 0xb5, 0x49, 0x64, 0x01, 0x13, - 0x65, 0x6b, 0xa4, 0x74, 0xb9, 0xd9, 0xc8, 0x9b, 0xab, 0x5d, 0x64, 0x50, 0x57, 0xed, 0xc2, 0x77, - 0x07, 0xd2, 0xe1, 0xd5, 0xd2, 0xed, 0x4d, 0x90, 0x63, 0xdb, 0xd8, 0xc2, 0x21, 0x96, 0x1b, 0xf1, - 0xf1, 0xbb, 0xdb, 0xf4, 0xa2, 0x66, 0x6c, 0x92, 0x10, 0x97, 0xa0, 0x5c, 0x10, 0x90, 0xd0, 0x90, - 0xb2, 0x0a, 0xbf, 0x01, 0x06, 0xa9, 0x4f, 0x2a, 0x32, 0xd0, 0xaf, 0x9c, 0x75, 0xb3, 0x75, 0x79, - 0x91, 0xb2, 0x4f, 0x2a, 0xc9, 0x5e, 0x60, 0x4f, 0x88, 0xc3, 0xc2, 0x77, 0x0c, 0x30, 0x44, 0x79, - 0x81, 0x92, 0x45, 0xed, 0xb5, 0x7e, 0x79, 0x90, 0xaa, 0x82, 0xe2, 0x19, 0x49, 0xf0, 0xc2, 0xbf, - 0x33, 0x60, 0xa1, 0x9b, 0xea, 0xaa, 0xe7, 0x5a, 0x62, 0x39, 0x36, 0xe4, 0xde, 0x16, 0x99, 0xfe, - 0x94, 0xbe, 0xb7, 0x4f, 0x1b, 0xf9, 0x87, 0xee, 0x68, 0x40, 0x2b, 0x02, 0x5f, 0x50, 0xef, 0x2d, - 0x0a, 0xc5, 0x42, 0xab, 0x63, 0xa7, 0x8d, 0xfc, 0x84, 0x52, 0x6b, 0xf5, 0x15, 0xd6, 0x01, 0x74, - 0x30, 0x0d, 0x6f, 0x04, 0xd8, 0xa5, 0xc2, 0xac, 0x5d, 0x23, 0x32, 0x7c, 0x8f, 0xdc, 0x5d, 0x7a, - 0x30, 0x8d, 0xd2, 0xac, 0x84, 0x84, 0xd7, 0xdb, 0xac, 0xa1, 0x0e, 0x08, 0xac, 0x6e, 0x05, 0x04, - 0x53, 0x55, 0x8a, 0xb4, 0x13, 0x85, 0x51, 0x91, 0xe4, 0xc2, 0x87, 0xc1, 0x70, 0x8d, 0x50, 0x8a, - 0xab, 0x84, 0xd7, 0x9f, 0x91, 0xe4, 0x88, 0xde, 0x14, 0x64, 0x14, 0xf3, 0x59, 0x7f, 0x72, 0xb9, - 0x5b, 0xd4, 0xae, 0xdb, 0x34, 0x84, 0xaf, 0xb6, 0x6d, 0x80, 0xe2, 0xdd, 0xbd, 0x21, 0xd3, 0xe6, - 0xe9, 0xaf, 0x8a, 0x5f, 0x4c, 0xd1, 0x92, 0xff, 0xeb, 0x20, 0x6b, 0x87, 0xa4, 0x16, 0x9f, 0xdd, - 0x2f, 0xf7, 0x29, 
0xf7, 0x4a, 0xe3, 0xd2, 0x87, 0xec, 0x06, 0x43, 0x43, 0x02, 0xb4, 0xf0, 0x9b, - 0x0c, 0x78, 0xa0, 0x9b, 0x0a, 0x3b, 0x50, 0x28, 0x8b, 0xb8, 0xef, 0x44, 0x01, 0x76, 0x64, 0xc6, - 0xa9, 0x88, 0xef, 0x70, 0x2a, 0x92, 0x5c, 0x56, 0xf2, 0xa9, 0xed, 0x56, 0x23, 0x07, 0x07, 0x32, - 0x9d, 0xd4, 0x5b, 0x97, 0x25, 0x1d, 0x29, 0x09, 0x58, 0x04, 0x80, 0x1e, 0x78, 0x41, 0xc8, 0x31, - 0x64, 0xf5, 0x3a, 0xcf, 0x0a, 0x44, 0x59, 0x51, 0x91, 0x26, 0xc1, 0x4e, 0xb4, 0x43, 0xdb, 0xb5, - 0xe4, 0xaa, 0xab, 0x5d, 0xfc, 0xa2, 0xed, 0x5a, 0x88, 0x73, 0x18, 0xbe, 0x63, 0xd3, 0x90, 0x51, - 0xe4, 0x92, 0xb7, 0x44, 0x9d, 0x4b, 0x2a, 0x09, 0x86, 0x5f, 0x61, 0x55, 0xdf, 0x0b, 0x6c, 0x42, - 0xcd, 0xa1, 0x04, 0x7f, 0x55, 0x51, 0x91, 0x26, 0x51, 0xf8, 0x57, 0xae, 0x7b, 0x92, 0xb0, 0x52, - 0x02, 0x1f, 0x04, 0xd9, 0x6a, 0xe0, 0x45, 0xbe, 0x8c, 0x92, 0x8a, 0xf6, 0xf3, 0x8c, 0x88, 0x04, - 0x8f, 0x65, 0x65, 0xbd, 0xa5, 0x4d, 0x55, 0x59, 0x19, 0x37, 0xa7, 0x31, 0x1f, 0x7e, 0xdb, 0x00, - 0x59, 0x57, 0x06, 0x87, 0xa5, 0xdc, 0xab, 0x7d, 0xca, 0x0b, 0x1e, 0xde, 0xc4, 0x5d, 0x11, 0x79, - 0x81, 0x0c, 0x9f, 0x04, 0x59, 0x5a, 0xf1, 0x7c, 0x22, 0xa3, 0x3e, 0x17, 0x0b, 0x95, 0x19, 0xf1, - 0xb4, 0x91, 0x1f, 0x8f, 0xcd, 0x71, 0x02, 0x12, 0xc2, 0xf0, 0x7b, 0x06, 0x00, 0x75, 0xec, 0xd8, - 0x16, 0xe6, 0x2d, 0x43, 0x96, 0xbb, 0xdf, 0xdb, 0xb4, 0x7e, 0x49, 0x99, 0x17, 0x8b, 0x96, 0x3c, - 0x23, 0x0d, 0x1a, 0xbe, 0x6b, 0x80, 0x31, 0x1a, 0xed, 0x05, 0x52, 0x8b, 0xf2, 0xe6, 0x62, 0x74, - 0xf9, 0xab, 0x3d, 0xf5, 0xa5, 0xac, 0x01, 0x94, 0x26, 0x9b, 0x8d, 0xfc, 0x98, 0x4e, 0x41, 0x2d, - 0x0e, 0xc0, 0x1f, 0x1a, 0x20, 0x57, 0x8f, 0xcf, 0xec, 0x61, 0xbe, 0xe1, 0x5f, 0xef, 0xd3, 0xc2, - 0xca, 0x8c, 0x4a, 0x76, 0x81, 0xea, 0x03, 0x94, 0x07, 0xf0, 0x4f, 0x06, 0x30, 0xb1, 0x25, 0x0a, - 0x3c, 0x76, 0x76, 0x02, 0xdb, 0x0d, 0x49, 0x20, 0xfa, 0x4d, 0x6a, 0xe6, 0xb8, 0x7b, 0xbd, 0x3d, - 0x0b, 0xd3, 0xbd, 0x6c, 0x69, 0x5e, 0x7a, 0x67, 0xae, 0x74, 0x71, 0x03, 0x75, 0x75, 0x90, 0x27, - 0x5a, 0xd2, 0xd2, 0x98, 0x23, 0x7d, 0x48, 0xb4, 0xa4, 0x97, 0x92, 0xd5, 0x21, 0xe9, 0xa0, 0x34, - 0x68, 0xb8, 0x0d, 0xa6, 0xfd, 0x80, 0x70, 0x80, 0x9b, 0xee, 0xa1, 0xeb, 0x1d, 0xb9, 0xd7, 0x6c, - 0xe2, 0x58, 0xd4, 0x04, 0xf3, 0xc6, 0x62, 0xae, 0x74, 0xa9, 0xd9, 0xc8, 0x4f, 0xef, 0x74, 0x12, - 0x40, 0x9d, 0xf5, 0x0a, 0xef, 0x0e, 0xa4, 0x6f, 0x01, 0xe9, 0x2e, 0x02, 0xbe, 0x2f, 0xde, 0x5e, - 0xc4, 0x86, 0x9a, 0x06, 0x5f, 0xad, 0x37, 0xfb, 0x94, 0x4c, 0xaa, 0x0d, 0x48, 0x3a, 0x39, 0x45, - 0xa2, 0x48, 0xf3, 0x03, 0xfe, 0xcc, 0x00, 0xe3, 0xb8, 0x52, 0x21, 0x7e, 0x48, 0x2c, 0x51, 0xdc, - 0x33, 0x9f, 0x41, 0xfd, 0x9a, 0x96, 0x5e, 0x8d, 0xaf, 0xe8, 0xd0, 0xa8, 0xd5, 0x13, 0xf8, 0x2c, - 0x38, 0x4f, 0x43, 0x2f, 0x20, 0x56, 0xaa, 0x6d, 0x86, 0xcd, 0x46, 0xfe, 0x7c, 0xb9, 0x85, 0x83, - 0x52, 0x92, 0x85, 0xbf, 0x67, 0x41, 0xfe, 0x0e, 0x5b, 0xed, 0x2e, 0x2e, 0x66, 0x57, 0xc0, 0x10, - 0x7f, 0x5d, 0x8b, 0x47, 0x25, 0xa7, 0xb5, 0x82, 0x9c, 0x8a, 0x24, 0x97, 0x1d, 0x14, 0x0c, 0x9f, - 0xb5, 0x2f, 0x03, 0x5c, 0x50, 0x1d, 0x14, 0x65, 0x41, 0x46, 0x31, 0x1f, 0x2e, 0x03, 0x60, 0x11, - 0x3f, 0x20, 0xec, 0xb0, 0xb2, 0xcc, 0x61, 0x2e, 0xad, 0x16, 0x69, 0x4d, 0x71, 0x90, 0x26, 0x05, - 0xaf, 0x01, 0x18, 0x3f, 0xd9, 0x9e, 0xfb, 0x32, 0x0e, 0x5c, 0xdb, 0xad, 0x9a, 0x39, 0xee, 0xf6, - 0x0c, 0xeb, 0xc6, 0xd6, 0xda, 0xb8, 0xa8, 0x83, 0x06, 0x7c, 0x1b, 0x0c, 0x89, 0xa1, 0x0f, 0x3f, - 0x21, 0xfa, 0x58, 0xe5, 0x01, 0x8f, 0x11, 0x87, 0x42, 0x12, 0xb2, 0xbd, 0xba, 0x67, 0xef, 0x75, - 0x75, 0xbf, 0x6d, 0x39, 0x1d, 0xfa, 0x1f, 0x2f, 0xa7, 0x85, 0xff, 0x18, 0xe9, 0x9a, 0xa3, 0xbd, - 0x6a, 0xb9, 0x82, 0x1d, 0x02, 0xd7, 0xc0, 
0x24, 0xbb, 0x31, 0x21, 0xe2, 0x3b, 0x76, 0x05, 0x53, - 0x7e, 0x61, 0x17, 0xc9, 0xae, 0x66, 0x48, 0xe5, 0x14, 0x1f, 0xb5, 0x69, 0xc0, 0x17, 0x00, 0x14, - 0xb7, 0x88, 0x16, 0x3b, 0xa2, 0x21, 0x52, 0xf7, 0x81, 0x72, 0x9b, 0x04, 0xea, 0xa0, 0x05, 0x57, - 0xc1, 0x94, 0x83, 0xf7, 0x88, 0x53, 0x26, 0x0e, 0xa9, 0x84, 0x5e, 0xc0, 0x4d, 0x89, 0x91, 0xc6, - 0x74, 0xb3, 0x91, 0x9f, 0xba, 0x9e, 0x66, 0xa2, 0x76, 0xf9, 0xc2, 0x42, 0x7a, 0x6b, 0xeb, 0x2f, - 0x2e, 0xee, 0x66, 0x1f, 0x64, 0xc0, 0x6c, 0xf7, 0xcc, 0x80, 0xdf, 0x49, 0xae, 0x90, 0xe2, 0x86, - 0xf0, 0x7a, 0xbf, 0xb2, 0x50, 0xde, 0x21, 0x41, 0xfb, 0xfd, 0x11, 0x7e, 0x93, 0xb5, 0x6b, 0xd8, - 0x89, 0x87, 0x56, 0xaf, 0xf5, 0xcd, 0x05, 0x06, 0x52, 0x1a, 0x11, 0x9d, 0x20, 0x76, 0x78, 0xe3, - 0x87, 0x1d, 0x52, 0xf8, 0xad, 0x91, 0x9e, 0x22, 0x24, 0x3b, 0x18, 0xfe, 0xc8, 0x00, 0x13, 0x9e, - 0x4f, 0xdc, 0x95, 0x9d, 0x8d, 0x97, 0x3e, 0x27, 0x76, 0xb2, 0x0c, 0xd5, 0xd6, 0x19, 0xfd, 0x7c, - 0xa1, 0xbc, 0xbd, 0x25, 0x0c, 0xee, 0x04, 0x9e, 0x4f, 0x4b, 0x17, 0x9a, 0x8d, 0xfc, 0xc4, 0x76, - 0x2b, 0x14, 0x4a, 0x63, 0x17, 0x6a, 0x60, 0x7a, 0xfd, 0x38, 0x24, 0x81, 0x8b, 0x9d, 0x35, 0xaf, - 0x12, 0xd5, 0x88, 0x1b, 0x0a, 0x47, 0x53, 0x13, 0x2f, 0xe3, 0x2e, 0x27, 0x5e, 0x0f, 0x80, 0x81, - 0x28, 0x70, 0x64, 0x16, 0x8f, 0xaa, 0x89, 0x2e, 0xba, 0x8e, 0x18, 0xbd, 0xb0, 0x00, 0x06, 0x99, - 0x9f, 0xf0, 0x12, 0x18, 0x08, 0xf0, 0x11, 0xb7, 0x3a, 0x56, 0x1a, 0x66, 0x22, 0x08, 0x1f, 0x21, - 0x46, 0x2b, 0xfc, 0x75, 0x01, 0x4c, 0xa4, 0xde, 0x05, 0xce, 0x82, 0x8c, 0x1a, 0x13, 0x03, 0x69, - 0x34, 0xb3, 0xb1, 0x86, 0x32, 0xb6, 0x05, 0x9f, 0x56, 0xc5, 0x57, 0x80, 0xe6, 0xd5, 0x59, 0xc2, - 0xa9, 0xac, 0x3f, 0x4f, 0xcc, 0x31, 0x47, 0xe2, 0xc2, 0xc9, 0x7c, 0x20, 0xfb, 0x72, 0x97, 0x08, - 0x1f, 0xc8, 0x3e, 0x62, 0xb4, 0x4f, 0x3b, 0xee, 0x8b, 0xe7, 0x8d, 0xd9, 0xbb, 0x98, 0x37, 0x0e, - 0xdd, 0x76, 0xde, 0xf8, 0x20, 0xc8, 0x86, 0x76, 0xe8, 0x10, 0x7e, 0x90, 0x69, 0xd7, 0xa8, 0x1b, - 0x8c, 0x88, 0x04, 0x0f, 0xbe, 0x05, 0x86, 0x2d, 0xb2, 0x8f, 0x23, 0x27, 0xe4, 0x67, 0xd6, 0xe8, - 0xf2, 0x6a, 0x0f, 0x52, 0x48, 0x0c, 0x83, 0xd7, 0x84, 0x5d, 0x14, 0x03, 0xc0, 0x87, 0xc0, 0x70, - 0x0d, 0x1f, 0xdb, 0xb5, 0xa8, 0xc6, 0x1b, 0x4c, 0x43, 0x88, 0x6d, 0x0a, 0x12, 0x8a, 0x79, 0xac, - 0x32, 0x92, 0xe3, 0x8a, 0x13, 0x51, 0xbb, 0x4e, 0x24, 0x53, 0x36, 0x7f, 0xaa, 0x32, 0xae, 0xa7, - 0xf8, 0xa8, 0x4d, 0x83, 0x83, 0xd9, 0x2e, 0x57, 0x1e, 0xd5, 0xc0, 0x04, 0x09, 0xc5, 0xbc, 0x56, - 0x30, 0x29, 0x3f, 0xd6, 0x0d, 0x4c, 0x2a, 0xb7, 0x69, 0xc0, 0x47, 0xc1, 0x48, 0x0d, 0x1f, 0x5f, - 0x27, 0x6e, 0x35, 0x3c, 0x30, 0xc7, 0xe7, 0x8d, 0xc5, 0x81, 0xd2, 0x78, 0xb3, 0x91, 0x1f, 0xd9, - 0x8c, 0x89, 0x28, 0xe1, 0x73, 0x61, 0xdb, 0x95, 0xc2, 0xe7, 0x35, 0xe1, 0x98, 0x88, 0x12, 0x3e, - 0xeb, 0x5e, 0x7c, 0x1c, 0xb2, 0xcd, 0x65, 0x4e, 0xb4, 0x5e, 0x73, 0x77, 0x04, 0x19, 0xc5, 0x7c, - 0xb8, 0x08, 0x72, 0x35, 0x7c, 0xcc, 0x47, 0x12, 0xe6, 0x24, 0x37, 0xcb, 0x07, 0xe3, 0x9b, 0x92, - 0x86, 0x14, 0x97, 0x4b, 0xda, 0xae, 0x90, 0x9c, 0xd2, 0x24, 0x25, 0x0d, 0x29, 0x2e, 0x4b, 0xe2, - 0xc8, 0xb5, 0x6f, 0x45, 0x44, 0x08, 0x43, 0x1e, 0x19, 0x95, 0xc4, 0x37, 0x13, 0x16, 0xd2, 0xe5, - 0x60, 0x11, 0x80, 0x5a, 0xe4, 0x84, 0xb6, 0xef, 0x90, 0xed, 0x7d, 0xf3, 0x02, 0x8f, 0x3f, 0x6f, - 0xfa, 0x37, 0x15, 0x15, 0x69, 0x12, 0x90, 0x80, 0x41, 0xe2, 0x46, 0x35, 0xf3, 0x22, 0x3f, 0xd8, - 0x7b, 0x92, 0x82, 0x6a, 0xe7, 0xac, 0xbb, 0x51, 0x0d, 0x71, 0xf3, 0xf0, 0x69, 0x30, 0x5e, 0xc3, - 0xc7, 0xac, 0x1c, 0x90, 0x20, 0xb4, 0x09, 0x35, 0xa7, 0xf9, 0xcb, 0x4f, 0xb1, 0x6e, 0x77, 0x53, - 0x67, 0xa0, 0x56, 0x39, 0xae, 0x68, 0xbb, 0x9a, 0xe2, 0x8c, 0xa6, 
0xa8, 0x33, 0x50, 0xab, 0x1c, - 0x8b, 0x74, 0x40, 0x6e, 0x45, 0x76, 0x40, 0x2c, 0xf3, 0x3e, 0xde, 0x20, 0xcb, 0x8f, 0x15, 0x82, - 0x86, 0x14, 0x17, 0xd6, 0xe3, 0xd9, 0x95, 0xc9, 0xb7, 0xe1, 0xcd, 0xde, 0x56, 0xf2, 0xed, 0x60, - 0x25, 0x08, 0xf0, 0x89, 0x38, 0x69, 0xf4, 0xa9, 0x15, 0xa4, 0x20, 0x8b, 0x1d, 0x67, 0x7b, 0xdf, - 0xbc, 0xc4, 0x63, 0xdf, 0xeb, 0x13, 0x44, 0x55, 0x9d, 0x15, 0x06, 0x82, 0x04, 0x16, 0x03, 0xf5, - 0x5c, 0x96, 0x1a, 0xb3, 0xfd, 0x05, 0xdd, 0x66, 0x20, 0x48, 0x60, 0xf1, 0x37, 0x75, 0x4f, 0xb6, - 0xf7, 0xcd, 0xfb, 0xfb, 0xfc, 0xa6, 0x0c, 0x04, 0x09, 0x2c, 0x68, 0x83, 0x01, 0xd7, 0x0b, 0xcd, - 0xcb, 0x7d, 0x39, 0x9e, 0xf9, 0x81, 0xb3, 0xe5, 0x85, 0x88, 0x61, 0xc0, 0x9f, 0x1a, 0x00, 0xf8, - 0x49, 0x8a, 0x3e, 0xd0, 0x93, 0x91, 0x48, 0x0a, 0xb2, 0x98, 0xe4, 0xf6, 0xba, 0x1b, 0x06, 0x27, - 0xc9, 0xf5, 0x48, 0xdb, 0x03, 0x9a, 0x17, 0xf0, 0x57, 0x06, 0xb8, 0xa8, 0xb7, 0xc9, 0xca, 0xbd, - 0x39, 0x1e, 0x91, 0x1b, 0xbd, 0x4e, 0xf3, 0x92, 0xe7, 0x39, 0x25, 0xb3, 0xd9, 0xc8, 0x5f, 0x5c, - 0xe9, 0x80, 0x8a, 0x3a, 0xfa, 0x02, 0x7f, 0x67, 0x80, 0x29, 0x59, 0x45, 0x35, 0x0f, 0xf3, 0x3c, - 0x80, 0xa4, 0xd7, 0x01, 0x4c, 0xe3, 0x88, 0x38, 0xaa, 0x8f, 0xec, 0x6d, 0x7c, 0xd4, 0xee, 0x1a, - 0xfc, 0xa3, 0x01, 0xc6, 0x2c, 0xe2, 0x13, 0xd7, 0x22, 0x6e, 0x85, 0xf9, 0x3a, 0xdf, 0x93, 0x91, - 0x45, 0xda, 0xd7, 0x35, 0x0d, 0x42, 0xb8, 0x59, 0x94, 0x6e, 0x8e, 0xe9, 0xac, 0xd3, 0x46, 0x7e, - 0x26, 0x51, 0xd5, 0x39, 0xa8, 0xc5, 0x4b, 0xf8, 0x9e, 0x01, 0x26, 0x92, 0x05, 0x10, 0x47, 0xca, - 0x42, 0x1f, 0xf3, 0x80, 0xb7, 0xaf, 0x2b, 0xad, 0x80, 0x28, 0xed, 0x01, 0xfc, 0xbd, 0xc1, 0x3a, - 0xb5, 0xf8, 0xde, 0x47, 0xcd, 0x02, 0x8f, 0xe5, 0x1b, 0x3d, 0x8f, 0xa5, 0x42, 0x10, 0xa1, 0xbc, - 0x9a, 0xb4, 0x82, 0x8a, 0x73, 0xda, 0xc8, 0x4f, 0xeb, 0x91, 0x54, 0x0c, 0xa4, 0x7b, 0x08, 0x7f, - 0x60, 0x80, 0x31, 0x92, 0x74, 0xdc, 0xd4, 0x7c, 0xb0, 0x27, 0x41, 0xec, 0xd8, 0xc4, 0x8b, 0x9b, - 0xba, 0xc6, 0xa2, 0xa8, 0x05, 0x9b, 0x75, 0x90, 0xe4, 0x18, 0xd7, 0x7c, 0x87, 0x98, 0xff, 0xd7, - 0xe3, 0x0e, 0x72, 0x5d, 0xd8, 0x45, 0x31, 0x00, 0xbc, 0x0a, 0x72, 0x6e, 0xe4, 0x38, 0x78, 0xcf, - 0x21, 0xe6, 0x43, 0xbc, 0x17, 0x51, 0x23, 0xd9, 0x2d, 0x49, 0x47, 0x4a, 0x02, 0xee, 0x83, 0xf9, - 0xe3, 0x17, 0xd5, 0xdf, 0x93, 0x3a, 0x0e, 0x0d, 0xcd, 0x2b, 0xdc, 0xca, 0x6c, 0xb3, 0x91, 0x9f, - 0xd9, 0xed, 0x3c, 0x56, 0xbc, 0xa3, 0x0d, 0xf8, 0x0a, 0xb8, 0x5f, 0x93, 0x59, 0xaf, 0xed, 0x11, - 0xcb, 0x22, 0x56, 0x7c, 0x71, 0x33, 0xff, 0x5f, 0x0c, 0x2e, 0xe3, 0x0d, 0xbe, 0x9b, 0x16, 0x40, - 0xb7, 0xd3, 0x86, 0xd7, 0xc1, 0x8c, 0xc6, 0xde, 0x70, 0xc3, 0xed, 0xa0, 0x1c, 0x06, 0xb6, 0x5b, - 0x35, 0x17, 0xb9, 0xdd, 0x8b, 0xf1, 0x8e, 0xdc, 0xd5, 0x78, 0xa8, 0x8b, 0x0e, 0xfc, 0x72, 0x8b, - 0x35, 0xfe, 0x09, 0x0d, 0xfb, 0x2f, 0x92, 0x13, 0x6a, 0x3e, 0xcc, 0xbb, 0x13, 0xbe, 0xd8, 0xbb, - 0x1a, 0x1d, 0x75, 0x91, 0x87, 0x5f, 0x02, 0x17, 0x52, 0x1c, 0x76, 0x45, 0x31, 0x1f, 0x11, 0x77, - 0x0d, 0xd6, 0xcf, 0xee, 0xc6, 0x44, 0xd4, 0x49, 0x12, 0x7e, 0x11, 0x40, 0x8d, 0xbc, 0x89, 0x7d, - 0xae, 0xff, 0xa8, 0xb8, 0xf6, 0xb0, 0x15, 0xdd, 0x95, 0x34, 0xd4, 0x41, 0x0e, 0xfe, 0xdc, 0x68, - 0x79, 0x93, 0xe4, 0x76, 0x4c, 0xcd, 0xab, 0x7c, 0xff, 0x6e, 0x9e, 0x31, 0x0b, 0xb5, 0xef, 0x20, - 0x91, 0x43, 0xb4, 0x30, 0x6b, 0x50, 0xa8, 0x8b, 0x0b, 0xb3, 0xec, 0x86, 0x9e, 0xaa, 0xf0, 0x70, - 0x12, 0x0c, 0x1c, 0x12, 0xf9, 0xaf, 0x0a, 0xc4, 0x7e, 0x42, 0x0b, 0x64, 0xeb, 0xd8, 0x89, 0xe2, - 0x21, 0x43, 0x8f, 0xbb, 0x03, 0x24, 0x8c, 0x3f, 0x9b, 0x79, 0xc6, 0x98, 0x7d, 0xdf, 0x00, 0x33, - 0x9d, 0x0f, 0x9e, 0x7b, 0xea, 0xd6, 0x2f, 0x0c, 0x30, 0xd5, 0x76, 0xc6, 0x74, 0xf0, 0xe8, 
0x56, - 0xab, 0x47, 0xaf, 0xf4, 0xfa, 0xb0, 0x10, 0x9b, 0x83, 0x77, 0xc8, 0xba, 0x7b, 0x3f, 0x36, 0xc0, - 0x64, 0xba, 0x6c, 0xdf, 0xcb, 0x78, 0x15, 0xde, 0xcf, 0x80, 0x99, 0xce, 0x8d, 0x3d, 0x0c, 0xd4, - 0x04, 0xa3, 0x3f, 0x93, 0xa0, 0x4e, 0x53, 0xe3, 0x77, 0x0c, 0x30, 0xfa, 0x96, 0x92, 0x8b, 0xbf, - 0xba, 0xf7, 0x7c, 0x06, 0x15, 0x9f, 0x93, 0x09, 0x83, 0x22, 0x1d, 0xb7, 0xf0, 0x07, 0x03, 0x4c, - 0x77, 0x6c, 0x00, 0xe0, 0x15, 0x30, 0x84, 0x1d, 0xc7, 0x3b, 0x12, 0xa3, 0x44, 0xed, 0x1b, 0xc1, - 0x0a, 0xa7, 0x22, 0xc9, 0xd5, 0xa2, 0x97, 0xf9, 0xac, 0xa2, 0x57, 0xf8, 0xb3, 0x01, 0x2e, 0xdf, - 0x2e, 0x13, 0xef, 0xc9, 0x92, 0x2e, 0x82, 0x9c, 0x6c, 0xde, 0x4f, 0xf8, 0x72, 0xca, 0x52, 0x2c, - 0x8b, 0x06, 0xff, 0xa3, 0x99, 0xf8, 0x55, 0xf8, 0xc0, 0x00, 0x93, 0x65, 0x12, 0xd4, 0xed, 0x0a, - 0x41, 0x64, 0x9f, 0x04, 0xc4, 0xad, 0x10, 0xb8, 0x04, 0x46, 0xf8, 0xe7, 0x6e, 0x1f, 0x57, 0xe2, - 0x4f, 0x37, 0x53, 0x32, 0xe4, 0x23, 0x5b, 0x31, 0x03, 0x25, 0x32, 0xea, 0x33, 0x4f, 0xa6, 0xeb, - 0x67, 0x9e, 0xcb, 0x60, 0xd0, 0x4f, 0x06, 0xd1, 0x39, 0xc6, 0xe5, 0xb3, 0x67, 0x4e, 0xe5, 0x5c, - 0x2f, 0x08, 0xf9, 0x74, 0x2d, 0x2b, 0xb9, 0x5e, 0x10, 0x22, 0x4e, 0x2d, 0xbc, 0x06, 0xce, 0xb7, - 0x96, 0x71, 0x86, 0x17, 0x44, 0x4e, 0xdb, 0x67, 0x25, 0xc6, 0x43, 0x9c, 0xa3, 0xff, 0xdb, 0x25, - 0x73, 0x87, 0x7f, 0xbb, 0xfc, 0xc5, 0x00, 0x9d, 0xfe, 0x71, 0x06, 0x2f, 0x89, 0xf9, 0xa5, 0x36, - 0x14, 0x8c, 0x67, 0x97, 0xb0, 0x0e, 0x86, 0xa9, 0x08, 0x9a, 0x5c, 0xd4, 0xed, 0x33, 0x2e, 0x6a, - 0x7a, 0x09, 0x44, 0xe3, 0x14, 0x53, 0x63, 0x30, 0xb6, 0xae, 0x15, 0x5c, 0x8a, 0x5c, 0x4b, 0x8e, - 0xb4, 0xc7, 0xc4, 0xba, 0xae, 0xae, 0x08, 0x1a, 0x52, 0xdc, 0x52, 0xe5, 0xc3, 0x4f, 0xe6, 0xce, - 0x7d, 0xf4, 0xc9, 0xdc, 0xb9, 0x8f, 0x3f, 0x99, 0x3b, 0xf7, 0xad, 0xe6, 0x9c, 0xf1, 0x61, 0x73, - 0xce, 0xf8, 0xa8, 0x39, 0x67, 0x7c, 0xdc, 0x9c, 0x33, 0xfe, 0xd1, 0x9c, 0x33, 0x7e, 0xf2, 0xcf, - 0xb9, 0x73, 0x5f, 0x7b, 0xee, 0x4c, 0x7f, 0xf2, 0xfe, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcb, - 0x02, 0xc9, 0x93, 0x3d, 0x2e, 0x00, 0x00, -} - -func (m *ConversionRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ConversionRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConversionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Objects) > 0 { - for iNdEx := len(m.Objects) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Objects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - i -= len(m.DesiredAPIVersion) - copy(dAtA[i:], m.DesiredAPIVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DesiredAPIVersion))) - i-- - dAtA[i] = 0x12 - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ConversionResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ConversionResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConversionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - 
i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if len(m.ConvertedObjects) > 0 { - for iNdEx := len(m.ConvertedObjects) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ConvertedObjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ConversionReview) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ConversionReview) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConversionReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Response != nil { - { - size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Request != nil { - { - size, err := m.Request.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CustomResourceColumnDefinition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceColumnDefinition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceColumnDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.JSONPath) - copy(dAtA[i:], m.JSONPath) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.JSONPath))) - i-- - dAtA[i] = 0x32 - i = encodeVarintGenerated(dAtA, i, uint64(m.Priority)) - i-- - dAtA[i] = 0x28 - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x22 - i -= len(m.Format) - copy(dAtA[i:], m.Format) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) - i-- - dAtA[i] = 0x1a - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CustomResourceConversion) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceConversion) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceConversion) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ConversionReviewVersions) > 0 { - for iNdEx := len(m.ConversionReviewVersions) - 1; 
iNdEx >= 0; iNdEx-- { - i -= len(m.ConversionReviewVersions[iNdEx]) - copy(dAtA[i:], m.ConversionReviewVersions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConversionReviewVersions[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if m.WebhookClientConfig != nil { - { - size, err := m.WebhookClientConfig.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.Strategy) - copy(dAtA[i:], m.Strategy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CustomResourceDefinition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceDefinition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CustomResourceDefinitionCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceDefinitionCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceDefinitionCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CustomResourceDefinitionList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceDefinitionList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceDefinitionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ 
= l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CustomResourceDefinitionNames) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceDefinitionNames) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceDefinitionNames) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Categories) > 0 { - for iNdEx := len(m.Categories) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Categories[iNdEx]) - copy(dAtA[i:], m.Categories[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Categories[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - i -= len(m.ListKind) - copy(dAtA[i:], m.ListKind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ListKind))) - i-- - dAtA[i] = 0x2a - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- - dAtA[i] = 0x22 - if len(m.ShortNames) > 0 { - for iNdEx := len(m.ShortNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ShortNames[iNdEx]) - copy(dAtA[i:], m.ShortNames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShortNames[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - i -= len(m.Singular) - copy(dAtA[i:], m.Singular) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Singular))) - i-- - dAtA[i] = 0x12 - i -= len(m.Plural) - copy(dAtA[i:], m.Plural) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Plural))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CustomResourceDefinitionSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceDefinitionSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceDefinitionSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.PreserveUnknownFields != nil { - i-- - if *m.PreserveUnknownFields { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x50 - } - if m.Conversion != nil { - { - size, err := m.Conversion.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - if len(m.AdditionalPrinterColumns) > 0 { - for iNdEx := len(m.AdditionalPrinterColumns) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AdditionalPrinterColumns[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - } - if len(m.Versions) > 0 { - for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } - if m.Subresources != nil { - { - size, err := m.Subresources.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.Validation != nil { - { - size, err := m.Validation.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - i -= len(m.Scope) - copy(dAtA[i:], m.Scope) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scope))) - i-- - dAtA[i] = 0x22 - { - size, err := m.Names.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x12 - i -= len(m.Group) - copy(dAtA[i:], m.Group) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CustomResourceDefinitionStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceDefinitionStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceDefinitionStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.StoredVersions) > 0 { - for iNdEx := len(m.StoredVersions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.StoredVersions[iNdEx]) - copy(dAtA[i:], m.StoredVersions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoredVersions[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - { - size, err := m.AcceptedNames.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *CustomResourceDefinitionVersion) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceDefinitionVersion) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceDefinitionVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DeprecationWarning != nil { - i -= len(*m.DeprecationWarning) - copy(dAtA[i:], *m.DeprecationWarning) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DeprecationWarning))) - i-- - dAtA[i] = 0x42 - } - i-- - if m.Deprecated { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 - if len(m.AdditionalPrinterColumns) > 0 { - for iNdEx := len(m.AdditionalPrinterColumns) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AdditionalPrinterColumns[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - } - if m.Subresources != nil { - { - size, err := m.Subresources.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.Schema != nil { - { - size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - i-- - if m.Storage { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - i-- - if m.Served { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CustomResourceSubresourceScale) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceSubresourceScale) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceSubresourceScale) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.LabelSelectorPath != nil { - i -= len(*m.LabelSelectorPath) - copy(dAtA[i:], *m.LabelSelectorPath) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.LabelSelectorPath))) - i-- - dAtA[i] = 0x1a - } - i -= len(m.StatusReplicasPath) - copy(dAtA[i:], m.StatusReplicasPath) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StatusReplicasPath))) - i-- - dAtA[i] = 0x12 - i -= len(m.SpecReplicasPath) - copy(dAtA[i:], m.SpecReplicasPath) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SpecReplicasPath))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CustomResourceSubresourceStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceSubresourceStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceSubresourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *CustomResourceSubresources) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceSubresources) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceSubresources) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Scale != nil { - { - size, err := m.Scale.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Status != nil { - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CustomResourceValidation) Marshal() (dAtA []byte, err error) { 
- size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomResourceValidation) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomResourceValidation) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.OpenAPIV3Schema != nil { - { - size, err := m.OpenAPIV3Schema.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ExternalDocumentation) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExternalDocumentation) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExternalDocumentation) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.URL) - copy(dAtA[i:], m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) - i-- - dAtA[i] = 0x12 - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *JSON) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *JSON) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *JSON) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Raw != nil { - i -= len(m.Raw) - copy(dAtA[i:], m.Raw) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *JSONSchemaProps) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *JSONSchemaProps) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.XValidations) > 0 { - for iNdEx := len(m.XValidations) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.XValidations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xe2 - } - } - if m.XMapType != nil { - i -= len(*m.XMapType) - copy(dAtA[i:], *m.XMapType) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.XMapType))) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xda - } - if m.XListType != nil { - i -= len(*m.XListType) - copy(dAtA[i:], *m.XListType) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.XListType))) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xd2 - } - if len(m.XListMapKeys) > 0 { - for iNdEx := len(m.XListMapKeys) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.XListMapKeys[iNdEx]) - copy(dAtA[i:], m.XListMapKeys[iNdEx]) - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.XListMapKeys[iNdEx]))) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xca - } - } - i-- - if m.XIntOrString { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xc0 - i-- - if m.XEmbeddedResource { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xb8 - if m.XPreserveUnknownFields != nil { - i-- - if *m.XPreserveUnknownFields { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xb0 - } - i-- - if m.Nullable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xa8 - if m.Example != nil { - { - size, err := m.Example.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xa2 - } - if m.ExternalDocs != nil { - { - size, err := m.ExternalDocs.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x9a - } - if len(m.Definitions) > 0 { - keysForDefinitions := make([]string, 0, len(m.Definitions)) - for k := range m.Definitions { - keysForDefinitions = append(keysForDefinitions, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefinitions) - for iNdEx := len(keysForDefinitions) - 1; iNdEx >= 0; iNdEx-- { - v := m.Definitions[string(keysForDefinitions[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForDefinitions[iNdEx]) - copy(dAtA[i:], keysForDefinitions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDefinitions[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x92 - } - } - if m.AdditionalItems != nil { - { - size, err := m.AdditionalItems.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x8a - } - if len(m.Dependencies) > 0 { - keysForDependencies := make([]string, 0, len(m.Dependencies)) - for k := range m.Dependencies { - keysForDependencies = append(keysForDependencies, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDependencies) - for iNdEx := len(keysForDependencies) - 1; iNdEx >= 0; iNdEx-- { - v := m.Dependencies[string(keysForDependencies[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForDependencies[iNdEx]) - copy(dAtA[i:], keysForDependencies[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDependencies[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x82 - } - } - if len(m.PatternProperties) > 0 { - keysForPatternProperties := make([]string, 0, len(m.PatternProperties)) - for k := range m.PatternProperties { - keysForPatternProperties = append(keysForPatternProperties, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForPatternProperties) - for iNdEx := len(keysForPatternProperties) - 1; iNdEx >= 0; iNdEx-- { - v := m.PatternProperties[string(keysForPatternProperties[iNdEx])] - baseI := i - { - size, err := 
(&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForPatternProperties[iNdEx]) - copy(dAtA[i:], keysForPatternProperties[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPatternProperties[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xfa - } - } - if m.AdditionalProperties != nil { - { - size, err := m.AdditionalProperties.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xf2 - } - if len(m.Properties) > 0 { - keysForProperties := make([]string, 0, len(m.Properties)) - for k := range m.Properties { - keysForProperties = append(keysForProperties, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForProperties) - for iNdEx := len(keysForProperties) - 1; iNdEx >= 0; iNdEx-- { - v := m.Properties[string(keysForProperties[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForProperties[iNdEx]) - copy(dAtA[i:], keysForProperties[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForProperties[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xea - } - } - if m.Not != nil { - { - size, err := m.Not.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xe2 - } - if len(m.AnyOf) > 0 { - for iNdEx := len(m.AnyOf) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AnyOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xda - } - } - if len(m.OneOf) > 0 { - for iNdEx := len(m.OneOf) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.OneOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xd2 - } - } - if len(m.AllOf) > 0 { - for iNdEx := len(m.AllOf) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllOf[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xca - } - } - if m.Items != nil { - { - size, err := m.Items.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc2 - } - if len(m.Required) > 0 { - for iNdEx := len(m.Required) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Required[iNdEx]) - copy(dAtA[i:], m.Required[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Required[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xba - } - } - if m.MinProperties != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.MinProperties)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb0 - } - if m.MaxProperties != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxProperties)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa8 - } - if len(m.Enum) > 0 { - for iNdEx := 
len(m.Enum) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Enum[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 - } - } - if m.MultipleOf != nil { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.MultipleOf)))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x99 - } - i-- - if m.UniqueItems { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x90 - if m.MinItems != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.MinItems)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x88 - } - if m.MaxItems != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxItems)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 - } - i -= len(m.Pattern) - copy(dAtA[i:], m.Pattern) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pattern))) - i-- - dAtA[i] = 0x7a - if m.MinLength != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.MinLength)) - i-- - dAtA[i] = 0x70 - } - if m.MaxLength != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxLength)) - i-- - dAtA[i] = 0x68 - } - i-- - if m.ExclusiveMinimum { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x60 - if m.Minimum != nil { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Minimum)))) - i-- - dAtA[i] = 0x59 - } - i-- - if m.ExclusiveMaximum { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x50 - if m.Maximum != nil { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Maximum)))) - i-- - dAtA[i] = 0x49 - } - if m.Default != nil { - { - size, err := m.Default.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - i -= len(m.Title) - copy(dAtA[i:], m.Title) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Title))) - i-- - dAtA[i] = 0x3a - i -= len(m.Format) - copy(dAtA[i:], m.Format) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) - i-- - dAtA[i] = 0x32 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0x2a - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x22 - if m.Ref != nil { - i -= len(*m.Ref) - copy(dAtA[i:], *m.Ref) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Ref))) - i-- - dAtA[i] = 0x1a - } - i -= len(m.Schema) - copy(dAtA[i:], m.Schema) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schema))) - i-- - dAtA[i] = 0x12 - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *JSONSchemaPropsOrArray) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *JSONSchemaPropsOrArray) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *JSONSchemaPropsOrArray) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.JSONSchemas) > 0 { - for iNdEx := len(m.JSONSchemas) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.JSONSchemas[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil 
{ - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.Schema != nil { - { - size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *JSONSchemaPropsOrBool) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *JSONSchemaPropsOrBool) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *JSONSchemaPropsOrBool) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Schema != nil { - { - size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i-- - if m.Allows { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *JSONSchemaPropsOrStringArray) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *JSONSchemaPropsOrStringArray) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *JSONSchemaPropsOrStringArray) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Property) > 0 { - for iNdEx := len(m.Property) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Property[iNdEx]) - copy(dAtA[i:], m.Property[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Property[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if m.Schema != nil { - { - size, err := m.Schema.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ServiceReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Port != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) - i-- - dAtA[i] = 0x20 - } - if m.Path != nil { - i -= len(*m.Path) - copy(dAtA[i:], *m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) - i-- - dAtA[i] = 0x1a - } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ValidationRule) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ValidationRule) MarshalTo(dAtA []byte) (int, error) { - size := 
m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ValidationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x12 - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.URL != nil { - i -= len(*m.URL) - copy(dAtA[i:], *m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) - i-- - dAtA[i] = 0x1a - } - if m.CABundle != nil { - i -= len(m.CABundle) - copy(dAtA[i:], m.CABundle) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) - i-- - dAtA[i] = 0x12 - } - if m.Service != nil { - { - size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ConversionRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DesiredAPIVersion) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Objects) > 0 { - for _, e := range m.Objects { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ConversionResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.ConvertedObjects) > 0 { - for _, e := range m.ConvertedObjects { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = m.Result.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ConversionReview) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Request != nil { - l = m.Request.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Response != nil { - l = m.Response.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *CustomResourceColumnDefinition) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Format) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Description) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Priority)) - l = len(m.JSONPath) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *CustomResourceConversion) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Strategy) - n += 1 + l + sovGenerated(uint64(l)) - if m.WebhookClientConfig != nil { - l = m.WebhookClientConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if 
len(m.ConversionReviewVersions) > 0 { - for _, s := range m.ConversionReviewVersions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *CustomResourceDefinition) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *CustomResourceDefinitionCondition) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *CustomResourceDefinitionList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *CustomResourceDefinitionNames) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Plural) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Singular) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.ShortNames) > 0 { - for _, s := range m.ShortNames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ListKind) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Categories) > 0 { - for _, s := range m.Categories { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *CustomResourceDefinitionSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Group) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Version) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Names.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Scope) - n += 1 + l + sovGenerated(uint64(l)) - if m.Validation != nil { - l = m.Validation.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Subresources != nil { - l = m.Subresources.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Versions) > 0 { - for _, e := range m.Versions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.AdditionalPrinterColumns) > 0 { - for _, e := range m.AdditionalPrinterColumns { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Conversion != nil { - l = m.Conversion.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.PreserveUnknownFields != nil { - n += 2 - } - return n -} - -func (m *CustomResourceDefinitionStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = m.AcceptedNames.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.StoredVersions) > 0 { - for _, s := range m.StoredVersions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *CustomResourceDefinitionVersion) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - n += 2 - if m.Schema != nil { - l = m.Schema.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Subresources != nil { - 
l = m.Subresources.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.AdditionalPrinterColumns) > 0 { - for _, e := range m.AdditionalPrinterColumns { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - n += 2 - if m.DeprecationWarning != nil { - l = len(*m.DeprecationWarning) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *CustomResourceSubresourceScale) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.SpecReplicasPath) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StatusReplicasPath) - n += 1 + l + sovGenerated(uint64(l)) - if m.LabelSelectorPath != nil { - l = len(*m.LabelSelectorPath) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *CustomResourceSubresourceStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *CustomResourceSubresources) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Status != nil { - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Scale != nil { - l = m.Scale.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *CustomResourceValidation) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.OpenAPIV3Schema != nil { - l = m.OpenAPIV3Schema.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ExternalDocumentation) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Description) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.URL) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *JSON) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Raw != nil { - l = len(m.Raw) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *JSONSchemaProps) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Schema) - n += 1 + l + sovGenerated(uint64(l)) - if m.Ref != nil { - l = len(*m.Ref) - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Description) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Format) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Title) - n += 1 + l + sovGenerated(uint64(l)) - if m.Default != nil { - l = m.Default.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Maximum != nil { - n += 9 - } - n += 2 - if m.Minimum != nil { - n += 9 - } - n += 2 - if m.MaxLength != nil { - n += 1 + sovGenerated(uint64(*m.MaxLength)) - } - if m.MinLength != nil { - n += 1 + sovGenerated(uint64(*m.MinLength)) - } - l = len(m.Pattern) - n += 1 + l + sovGenerated(uint64(l)) - if m.MaxItems != nil { - n += 2 + sovGenerated(uint64(*m.MaxItems)) - } - if m.MinItems != nil { - n += 2 + sovGenerated(uint64(*m.MinItems)) - } - n += 3 - if m.MultipleOf != nil { - n += 10 - } - if len(m.Enum) > 0 { - for _, e := range m.Enum { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.MaxProperties != nil { - n += 2 + sovGenerated(uint64(*m.MaxProperties)) - } - if m.MinProperties != nil { - n += 2 + sovGenerated(uint64(*m.MinProperties)) - } - if len(m.Required) > 0 { - for _, s := range m.Required { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.Items != nil { - l = m.Items.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if len(m.AllOf) > 0 { - for _, e := range m.AllOf { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.OneOf) > 0 { - for 
_, e := range m.OneOf { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.AnyOf) > 0 { - for _, e := range m.AnyOf { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.Not != nil { - l = m.Not.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if len(m.Properties) > 0 { - for k, v := range m.Properties { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.AdditionalProperties != nil { - l = m.AdditionalProperties.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if len(m.PatternProperties) > 0 { - for k, v := range m.PatternProperties { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Dependencies) > 0 { - for k, v := range m.Dependencies { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.AdditionalItems != nil { - l = m.AdditionalItems.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if len(m.Definitions) > 0 { - for k, v := range m.Definitions { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.ExternalDocs != nil { - l = m.ExternalDocs.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.Example != nil { - l = m.Example.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - n += 3 - if m.XPreserveUnknownFields != nil { - n += 3 - } - n += 3 - n += 3 - if len(m.XListMapKeys) > 0 { - for _, s := range m.XListMapKeys { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.XListType != nil { - l = len(*m.XListType) - n += 2 + l + sovGenerated(uint64(l)) - } - if m.XMapType != nil { - l = len(*m.XMapType) - n += 2 + l + sovGenerated(uint64(l)) - } - if len(m.XValidations) > 0 { - for _, e := range m.XValidations { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *JSONSchemaPropsOrArray) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Schema != nil { - l = m.Schema.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.JSONSchemas) > 0 { - for _, e := range m.JSONSchemas { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *JSONSchemaPropsOrBool) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 2 - if m.Schema != nil { - l = m.Schema.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *JSONSchemaPropsOrStringArray) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Schema != nil { - l = m.Schema.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Property) > 0 { - for _, s := range m.Property { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ServiceReference) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.Path != nil { - l = len(*m.Path) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Port != nil { - n += 1 + sovGenerated(uint64(*m.Port)) - } - return n -} - -func (m 
*ValidationRule) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *WebhookClientConfig) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Service != nil { - l = m.Service.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.CABundle != nil { - l = len(m.CABundle) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.URL != nil { - l = len(*m.URL) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ConversionRequest) String() string { - if this == nil { - return "nil" - } - repeatedStringForObjects := "[]RawExtension{" - for _, f := range this.Objects { - repeatedStringForObjects += fmt.Sprintf("%v", f) + "," - } - repeatedStringForObjects += "}" - s := strings.Join([]string{`&ConversionRequest{`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `DesiredAPIVersion:` + fmt.Sprintf("%v", this.DesiredAPIVersion) + `,`, - `Objects:` + repeatedStringForObjects + `,`, - `}`, - }, "") - return s -} -func (this *ConversionResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForConvertedObjects := "[]RawExtension{" - for _, f := range this.ConvertedObjects { - repeatedStringForConvertedObjects += fmt.Sprintf("%v", f) + "," - } - repeatedStringForConvertedObjects += "}" - s := strings.Join([]string{`&ConversionResponse{`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `ConvertedObjects:` + repeatedStringForConvertedObjects + `,`, - `Result:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "v1.Status", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ConversionReview) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ConversionReview{`, - `Request:` + strings.Replace(this.Request.String(), "ConversionRequest", "ConversionRequest", 1) + `,`, - `Response:` + strings.Replace(this.Response.String(), "ConversionResponse", "ConversionResponse", 1) + `,`, - `}`, - }, "") - return s -} -func (this *CustomResourceColumnDefinition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CustomResourceColumnDefinition{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Format:` + fmt.Sprintf("%v", this.Format) + `,`, - `Description:` + fmt.Sprintf("%v", this.Description) + `,`, - `Priority:` + fmt.Sprintf("%v", this.Priority) + `,`, - `JSONPath:` + fmt.Sprintf("%v", this.JSONPath) + `,`, - `}`, - }, "") - return s -} -func (this *CustomResourceConversion) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CustomResourceConversion{`, - `Strategy:` + fmt.Sprintf("%v", this.Strategy) + `,`, - `WebhookClientConfig:` + strings.Replace(this.WebhookClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1) + `,`, - `ConversionReviewVersions:` + fmt.Sprintf("%v", this.ConversionReviewVersions) + `,`, - `}`, - }, "") - return s -} -func (this *CustomResourceDefinition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CustomResourceDefinition{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", 
"v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CustomResourceDefinitionSpec", "CustomResourceDefinitionSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CustomResourceDefinitionStatus", "CustomResourceDefinitionStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CustomResourceDefinitionCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CustomResourceDefinitionCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *CustomResourceDefinitionList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]CustomResourceDefinition{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinition", "CustomResourceDefinition", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&CustomResourceDefinitionList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *CustomResourceDefinitionNames) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CustomResourceDefinitionNames{`, - `Plural:` + fmt.Sprintf("%v", this.Plural) + `,`, - `Singular:` + fmt.Sprintf("%v", this.Singular) + `,`, - `ShortNames:` + fmt.Sprintf("%v", this.ShortNames) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `ListKind:` + fmt.Sprintf("%v", this.ListKind) + `,`, - `Categories:` + fmt.Sprintf("%v", this.Categories) + `,`, - `}`, - }, "") - return s -} -func (this *CustomResourceDefinitionSpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForVersions := "[]CustomResourceDefinitionVersion{" - for _, f := range this.Versions { - repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinitionVersion", "CustomResourceDefinitionVersion", 1), `&`, ``, 1) + "," - } - repeatedStringForVersions += "}" - repeatedStringForAdditionalPrinterColumns := "[]CustomResourceColumnDefinition{" - for _, f := range this.AdditionalPrinterColumns { - repeatedStringForAdditionalPrinterColumns += strings.Replace(strings.Replace(f.String(), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 1), `&`, ``, 1) + "," - } - repeatedStringForAdditionalPrinterColumns += "}" - s := strings.Join([]string{`&CustomResourceDefinitionSpec{`, - `Group:` + fmt.Sprintf("%v", this.Group) + `,`, - `Version:` + fmt.Sprintf("%v", this.Version) + `,`, - `Names:` + strings.Replace(strings.Replace(this.Names.String(), "CustomResourceDefinitionNames", "CustomResourceDefinitionNames", 1), `&`, ``, 1) + `,`, - `Scope:` + fmt.Sprintf("%v", this.Scope) + `,`, - `Validation:` + strings.Replace(this.Validation.String(), "CustomResourceValidation", "CustomResourceValidation", 1) + `,`, - `Subresources:` + strings.Replace(this.Subresources.String(), "CustomResourceSubresources", "CustomResourceSubresources", 1) + `,`, - `Versions:` + 
repeatedStringForVersions + `,`, - `AdditionalPrinterColumns:` + repeatedStringForAdditionalPrinterColumns + `,`, - `Conversion:` + strings.Replace(this.Conversion.String(), "CustomResourceConversion", "CustomResourceConversion", 1) + `,`, - `PreserveUnknownFields:` + valueToStringGenerated(this.PreserveUnknownFields) + `,`, - `}`, - }, "") - return s -} -func (this *CustomResourceDefinitionStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]CustomResourceDefinitionCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinitionCondition", "CustomResourceDefinitionCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&CustomResourceDefinitionStatus{`, - `Conditions:` + repeatedStringForConditions + `,`, - `AcceptedNames:` + strings.Replace(strings.Replace(this.AcceptedNames.String(), "CustomResourceDefinitionNames", "CustomResourceDefinitionNames", 1), `&`, ``, 1) + `,`, - `StoredVersions:` + fmt.Sprintf("%v", this.StoredVersions) + `,`, - `}`, - }, "") - return s -} -func (this *CustomResourceDefinitionVersion) String() string { - if this == nil { - return "nil" - } - repeatedStringForAdditionalPrinterColumns := "[]CustomResourceColumnDefinition{" - for _, f := range this.AdditionalPrinterColumns { - repeatedStringForAdditionalPrinterColumns += strings.Replace(strings.Replace(f.String(), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 1), `&`, ``, 1) + "," - } - repeatedStringForAdditionalPrinterColumns += "}" - s := strings.Join([]string{`&CustomResourceDefinitionVersion{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Served:` + fmt.Sprintf("%v", this.Served) + `,`, - `Storage:` + fmt.Sprintf("%v", this.Storage) + `,`, - `Schema:` + strings.Replace(this.Schema.String(), "CustomResourceValidation", "CustomResourceValidation", 1) + `,`, - `Subresources:` + strings.Replace(this.Subresources.String(), "CustomResourceSubresources", "CustomResourceSubresources", 1) + `,`, - `AdditionalPrinterColumns:` + repeatedStringForAdditionalPrinterColumns + `,`, - `Deprecated:` + fmt.Sprintf("%v", this.Deprecated) + `,`, - `DeprecationWarning:` + valueToStringGenerated(this.DeprecationWarning) + `,`, - `}`, - }, "") - return s -} -func (this *CustomResourceSubresourceScale) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CustomResourceSubresourceScale{`, - `SpecReplicasPath:` + fmt.Sprintf("%v", this.SpecReplicasPath) + `,`, - `StatusReplicasPath:` + fmt.Sprintf("%v", this.StatusReplicasPath) + `,`, - `LabelSelectorPath:` + valueToStringGenerated(this.LabelSelectorPath) + `,`, - `}`, - }, "") - return s -} -func (this *CustomResourceSubresourceStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CustomResourceSubresourceStatus{`, - `}`, - }, "") - return s -} -func (this *CustomResourceSubresources) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CustomResourceSubresources{`, - `Status:` + strings.Replace(this.Status.String(), "CustomResourceSubresourceStatus", "CustomResourceSubresourceStatus", 1) + `,`, - `Scale:` + strings.Replace(this.Scale.String(), "CustomResourceSubresourceScale", "CustomResourceSubresourceScale", 1) + `,`, - `}`, - }, "") - return s -} -func (this *CustomResourceValidation) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&CustomResourceValidation{`, - `OpenAPIV3Schema:` + strings.Replace(this.OpenAPIV3Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ExternalDocumentation) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExternalDocumentation{`, - `Description:` + fmt.Sprintf("%v", this.Description) + `,`, - `URL:` + fmt.Sprintf("%v", this.URL) + `,`, - `}`, - }, "") - return s -} -func (this *JSON) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JSON{`, - `Raw:` + valueToStringGenerated(this.Raw) + `,`, - `}`, - }, "") - return s -} -func (this *JSONSchemaProps) String() string { - if this == nil { - return "nil" - } - repeatedStringForEnum := "[]JSON{" - for _, f := range this.Enum { - repeatedStringForEnum += strings.Replace(strings.Replace(f.String(), "JSON", "JSON", 1), `&`, ``, 1) + "," - } - repeatedStringForEnum += "}" - repeatedStringForAllOf := "[]JSONSchemaProps{" - for _, f := range this.AllOf { - repeatedStringForAllOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," - } - repeatedStringForAllOf += "}" - repeatedStringForOneOf := "[]JSONSchemaProps{" - for _, f := range this.OneOf { - repeatedStringForOneOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," - } - repeatedStringForOneOf += "}" - repeatedStringForAnyOf := "[]JSONSchemaProps{" - for _, f := range this.AnyOf { - repeatedStringForAnyOf += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", "JSONSchemaProps", 1), `&`, ``, 1) + "," - } - repeatedStringForAnyOf += "}" - repeatedStringForXValidations := "[]ValidationRule{" - for _, f := range this.XValidations { - repeatedStringForXValidations += strings.Replace(strings.Replace(f.String(), "ValidationRule", "ValidationRule", 1), `&`, ``, 1) + "," - } - repeatedStringForXValidations += "}" - keysForProperties := make([]string, 0, len(this.Properties)) - for k := range this.Properties { - keysForProperties = append(keysForProperties, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForProperties) - mapStringForProperties := "map[string]JSONSchemaProps{" - for _, k := range keysForProperties { - mapStringForProperties += fmt.Sprintf("%v: %v,", k, this.Properties[k]) - } - mapStringForProperties += "}" - keysForPatternProperties := make([]string, 0, len(this.PatternProperties)) - for k := range this.PatternProperties { - keysForPatternProperties = append(keysForPatternProperties, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForPatternProperties) - mapStringForPatternProperties := "map[string]JSONSchemaProps{" - for _, k := range keysForPatternProperties { - mapStringForPatternProperties += fmt.Sprintf("%v: %v,", k, this.PatternProperties[k]) - } - mapStringForPatternProperties += "}" - keysForDependencies := make([]string, 0, len(this.Dependencies)) - for k := range this.Dependencies { - keysForDependencies = append(keysForDependencies, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDependencies) - mapStringForDependencies := "JSONSchemaDependencies{" - for _, k := range keysForDependencies { - mapStringForDependencies += fmt.Sprintf("%v: %v,", k, this.Dependencies[k]) - } - mapStringForDependencies += "}" - keysForDefinitions := make([]string, 0, len(this.Definitions)) - for k := range this.Definitions { - keysForDefinitions = append(keysForDefinitions, k) - } - 
github_com_gogo_protobuf_sortkeys.Strings(keysForDefinitions) - mapStringForDefinitions := "JSONSchemaDefinitions{" - for _, k := range keysForDefinitions { - mapStringForDefinitions += fmt.Sprintf("%v: %v,", k, this.Definitions[k]) - } - mapStringForDefinitions += "}" - s := strings.Join([]string{`&JSONSchemaProps{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Schema:` + fmt.Sprintf("%v", this.Schema) + `,`, - `Ref:` + valueToStringGenerated(this.Ref) + `,`, - `Description:` + fmt.Sprintf("%v", this.Description) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Format:` + fmt.Sprintf("%v", this.Format) + `,`, - `Title:` + fmt.Sprintf("%v", this.Title) + `,`, - `Default:` + strings.Replace(this.Default.String(), "JSON", "JSON", 1) + `,`, - `Maximum:` + valueToStringGenerated(this.Maximum) + `,`, - `ExclusiveMaximum:` + fmt.Sprintf("%v", this.ExclusiveMaximum) + `,`, - `Minimum:` + valueToStringGenerated(this.Minimum) + `,`, - `ExclusiveMinimum:` + fmt.Sprintf("%v", this.ExclusiveMinimum) + `,`, - `MaxLength:` + valueToStringGenerated(this.MaxLength) + `,`, - `MinLength:` + valueToStringGenerated(this.MinLength) + `,`, - `Pattern:` + fmt.Sprintf("%v", this.Pattern) + `,`, - `MaxItems:` + valueToStringGenerated(this.MaxItems) + `,`, - `MinItems:` + valueToStringGenerated(this.MinItems) + `,`, - `UniqueItems:` + fmt.Sprintf("%v", this.UniqueItems) + `,`, - `MultipleOf:` + valueToStringGenerated(this.MultipleOf) + `,`, - `Enum:` + repeatedStringForEnum + `,`, - `MaxProperties:` + valueToStringGenerated(this.MaxProperties) + `,`, - `MinProperties:` + valueToStringGenerated(this.MinProperties) + `,`, - `Required:` + fmt.Sprintf("%v", this.Required) + `,`, - `Items:` + strings.Replace(this.Items.String(), "JSONSchemaPropsOrArray", "JSONSchemaPropsOrArray", 1) + `,`, - `AllOf:` + repeatedStringForAllOf + `,`, - `OneOf:` + repeatedStringForOneOf + `,`, - `AnyOf:` + repeatedStringForAnyOf + `,`, - `Not:` + strings.Replace(this.Not.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, - `Properties:` + mapStringForProperties + `,`, - `AdditionalProperties:` + strings.Replace(this.AdditionalProperties.String(), "JSONSchemaPropsOrBool", "JSONSchemaPropsOrBool", 1) + `,`, - `PatternProperties:` + mapStringForPatternProperties + `,`, - `Dependencies:` + mapStringForDependencies + `,`, - `AdditionalItems:` + strings.Replace(this.AdditionalItems.String(), "JSONSchemaPropsOrBool", "JSONSchemaPropsOrBool", 1) + `,`, - `Definitions:` + mapStringForDefinitions + `,`, - `ExternalDocs:` + strings.Replace(this.ExternalDocs.String(), "ExternalDocumentation", "ExternalDocumentation", 1) + `,`, - `Example:` + strings.Replace(this.Example.String(), "JSON", "JSON", 1) + `,`, - `Nullable:` + fmt.Sprintf("%v", this.Nullable) + `,`, - `XPreserveUnknownFields:` + valueToStringGenerated(this.XPreserveUnknownFields) + `,`, - `XEmbeddedResource:` + fmt.Sprintf("%v", this.XEmbeddedResource) + `,`, - `XIntOrString:` + fmt.Sprintf("%v", this.XIntOrString) + `,`, - `XListMapKeys:` + fmt.Sprintf("%v", this.XListMapKeys) + `,`, - `XListType:` + valueToStringGenerated(this.XListType) + `,`, - `XMapType:` + valueToStringGenerated(this.XMapType) + `,`, - `XValidations:` + repeatedStringForXValidations + `,`, - `}`, - }, "") - return s -} -func (this *JSONSchemaPropsOrArray) String() string { - if this == nil { - return "nil" - } - repeatedStringForJSONSchemas := "[]JSONSchemaProps{" - for _, f := range this.JSONSchemas { - repeatedStringForJSONSchemas += strings.Replace(strings.Replace(f.String(), "JSONSchemaProps", 
"JSONSchemaProps", 1), `&`, ``, 1) + "," - } - repeatedStringForJSONSchemas += "}" - s := strings.Join([]string{`&JSONSchemaPropsOrArray{`, - `Schema:` + strings.Replace(this.Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, - `JSONSchemas:` + repeatedStringForJSONSchemas + `,`, - `}`, - }, "") - return s -} -func (this *JSONSchemaPropsOrBool) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JSONSchemaPropsOrBool{`, - `Allows:` + fmt.Sprintf("%v", this.Allows) + `,`, - `Schema:` + strings.Replace(this.Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, - `}`, - }, "") - return s -} -func (this *JSONSchemaPropsOrStringArray) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JSONSchemaPropsOrStringArray{`, - `Schema:` + strings.Replace(this.Schema.String(), "JSONSchemaProps", "JSONSchemaProps", 1) + `,`, - `Property:` + fmt.Sprintf("%v", this.Property) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServiceReference{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Path:` + valueToStringGenerated(this.Path) + `,`, - `Port:` + valueToStringGenerated(this.Port) + `,`, - `}`, - }, "") - return s -} -func (this *ValidationRule) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ValidationRule{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *WebhookClientConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WebhookClientConfig{`, - `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, - `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, - `URL:` + valueToStringGenerated(this.URL) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ConversionRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConversionRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConversionRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredAPIVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DesiredAPIVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Objects", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Objects = append(m.Objects, runtime.RawExtension{}) - if err := m.Objects[len(m.Objects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConversionResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConversionResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConversionResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConvertedObjects", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { 
- return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ConvertedObjects = append(m.ConvertedObjects, runtime.RawExtension{}) - if err := m.ConvertedObjects[len(m.ConvertedObjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConversionReview) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConversionReview: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConversionReview: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Request == nil { - m.Request = &ConversionRequest{} - } - if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - if m.Response == nil { - m.Response = &ConversionResponse{} - } - if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomResourceColumnDefinition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceColumnDefinition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Format = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) - } - m.Priority = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Priority |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JSONPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.JSONPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomResourceConversion: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceConversion: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Strategy = ConversionStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WebhookClientConfig", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.WebhookClientConfig == nil { - m.WebhookClientConfig = &WebhookClientConfig{} - } - if err := m.WebhookClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConversionReviewVersions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ConversionReviewVersions = append(m.ConversionReviewVersions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomResourceDefinition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceDefinition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if 
postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomResourceDefinitionCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomResourceDefinitionCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceDefinitionCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = CustomResourceDefinitionConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomResourceDefinitionList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceDefinitionList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, CustomResourceDefinition{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomResourceDefinitionNames) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomResourceDefinitionNames: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceDefinitionNames: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Plural", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Plural = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Singular", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Singular = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShortNames", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ShortNames = append(m.ShortNames, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListKind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ListKind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Categories", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Categories = append(m.Categories, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomResourceDefinitionSpec: wiretype end group for non-group") - } - if fieldNum <= 
0 { - return fmt.Errorf("proto: CustomResourceDefinitionSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Group = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Names.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Scope = ResourceScope(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Validation", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Validation == nil { - m.Validation = &CustomResourceValidation{} - } - if err := 
m.Validation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subresources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Subresources == nil { - m.Subresources = &CustomResourceSubresources{} - } - if err := m.Subresources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Versions = append(m.Versions, CustomResourceDefinitionVersion{}) - if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdditionalPrinterColumns", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AdditionalPrinterColumns = append(m.AdditionalPrinterColumns, CustomResourceColumnDefinition{}) - if err := m.AdditionalPrinterColumns[len(m.AdditionalPrinterColumns)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conversion", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Conversion == nil { - m.Conversion = &CustomResourceConversion{} - } - if err := m.Conversion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PreserveUnknownFields", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.PreserveUnknownFields = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomResourceDefinitionStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceDefinitionStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, CustomResourceDefinitionCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AcceptedNames", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.AcceptedNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StoredVersions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StoredVersions = append(m.StoredVersions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return 
ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomResourceDefinitionVersion: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceDefinitionVersion: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Served", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Served = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Storage = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Schema == nil { - m.Schema = &CustomResourceValidation{} - } - if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subresources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } 
- postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Subresources == nil { - m.Subresources = &CustomResourceSubresources{} - } - if err := m.Subresources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdditionalPrinterColumns", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AdditionalPrinterColumns = append(m.AdditionalPrinterColumns, CustomResourceColumnDefinition{}) - if err := m.AdditionalPrinterColumns[len(m.AdditionalPrinterColumns)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Deprecated", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Deprecated = bool(v != 0) - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecationWarning", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.DeprecationWarning = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomResourceSubresourceScale) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomResourceSubresourceScale: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceSubresourceScale: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SpecReplicasPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SpecReplicasPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StatusReplicasPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StatusReplicasPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelSelectorPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.LabelSelectorPath = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomResourceSubresourceStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomResourceSubresourceStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceSubresourceStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomResourceSubresources) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire 
uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomResourceSubresources: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceSubresources: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Status == nil { - m.Status = &CustomResourceSubresourceStatus{} - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scale", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Scale == nil { - m.Scale = &CustomResourceSubresourceScale{} - } - if err := m.Scale.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomResourceValidation) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomResourceValidation: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomResourceValidation: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OpenAPIV3Schema", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.OpenAPIV3Schema == nil { - m.OpenAPIV3Schema = &JSONSchemaProps{} - } - if err := m.OpenAPIV3Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExternalDocumentation) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExternalDocumentation: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExternalDocumentation: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.URL = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JSON) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - 
wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JSON: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JSON: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) - if m.Raw == nil { - m.Raw = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JSONSchemaProps: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JSONSchemaProps: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Schema = JSONSchemaURL(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Ref = &s - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Format = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Title", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Title = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] 
- iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Default == nil { - m.Default = &JSON{} - } - if err := m.Default.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Maximum", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - v2 := float64(math.Float64frombits(v)) - m.Maximum = &v2 - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExclusiveMaximum", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ExclusiveMaximum = bool(v != 0) - case 11: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Minimum", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - v2 := float64(math.Float64frombits(v)) - m.Minimum = &v2 - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExclusiveMinimum", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ExclusiveMinimum = bool(v != 0) - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxLength", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.MaxLength = &v - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinLength", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.MinLength = &v - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pattern", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Pattern = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxItems", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.MaxItems = &v - case 17: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinItems", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.MinItems = &v - case 18: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UniqueItems", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.UniqueItems = bool(v != 0) - case 19: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field MultipleOf", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - v2 := float64(math.Float64frombits(v)) - m.MultipleOf = &v2 - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Enum", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Enum = append(m.Enum, JSON{}) - if err := m.Enum[len(m.Enum)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 21: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxProperties", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.MaxProperties = &v - case 22: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinProperties", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.MinProperties = &v - case 23: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Required", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Required = append(m.Required, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 24: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Items == nil { - m.Items = &JSONSchemaPropsOrArray{} - } - if err := m.Items.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 25: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllOf", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllOf = append(m.AllOf, JSONSchemaProps{}) - if err := m.AllOf[len(m.AllOf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 26: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OneOf", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OneOf = append(m.OneOf, JSONSchemaProps{}) - if err := m.OneOf[len(m.OneOf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 27: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AnyOf", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AnyOf = append(m.AnyOf, JSONSchemaProps{}) - if err := m.AnyOf[len(m.AnyOf)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 28: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Not", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Not == nil { - m.Not = &JSONSchemaProps{} - } - if err := 
m.Not.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 29: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Properties", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Properties == nil { - m.Properties = make(map[string]JSONSchemaProps) - } - var mapkey string - mapvalue := &JSONSchemaProps{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &JSONSchemaProps{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Properties[mapkey] = *mapvalue - iNdEx = postIndex - case 30: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdditionalProperties", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AdditionalProperties == nil { - m.AdditionalProperties = &JSONSchemaPropsOrBool{} - } - if err := 
m.AdditionalProperties.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 31: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PatternProperties", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PatternProperties == nil { - m.PatternProperties = make(map[string]JSONSchemaProps) - } - var mapkey string - mapvalue := &JSONSchemaProps{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &JSONSchemaProps{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.PatternProperties[mapkey] = *mapvalue - iNdEx = postIndex - case 32: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Dependencies", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Dependencies == nil { - m.Dependencies = make(JSONSchemaDependencies) - } - var mapkey string - 
mapvalue := &JSONSchemaPropsOrStringArray{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &JSONSchemaPropsOrStringArray{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Dependencies[mapkey] = *mapvalue - iNdEx = postIndex - case 33: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdditionalItems", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AdditionalItems == nil { - m.AdditionalItems = &JSONSchemaPropsOrBool{} - } - if err := m.AdditionalItems.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 34: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Definitions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Definitions == nil { - m.Definitions = make(JSONSchemaDefinitions) - } - var mapkey string - mapvalue := 
&JSONSchemaProps{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &JSONSchemaProps{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Definitions[mapkey] = *mapvalue - iNdEx = postIndex - case 35: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalDocs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ExternalDocs == nil { - m.ExternalDocs = &ExternalDocumentation{} - } - if err := m.ExternalDocs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 36: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Example", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Example == nil { - m.Example = &JSON{} - } - if err := m.Example.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 37: 
- if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Nullable", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Nullable = bool(v != 0) - case 38: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field XPreserveUnknownFields", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.XPreserveUnknownFields = &b - case 39: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field XEmbeddedResource", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.XEmbeddedResource = bool(v != 0) - case 40: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field XIntOrString", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.XIntOrString = bool(v != 0) - case 41: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field XListMapKeys", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.XListMapKeys = append(m.XListMapKeys, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 42: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field XListType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.XListType = &s - iNdEx = postIndex - case 43: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field XMapType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + 
intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.XMapType = &s - iNdEx = postIndex - case 44: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field XValidations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.XValidations = append(m.XValidations, ValidationRule{}) - if err := m.XValidations[len(m.XValidations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JSONSchemaPropsOrArray) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JSONSchemaPropsOrArray: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JSONSchemaPropsOrArray: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Schema == nil { - m.Schema = &JSONSchemaProps{} - } - if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JSONSchemas", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.JSONSchemas = append(m.JSONSchemas, JSONSchemaProps{}) - if err := m.JSONSchemas[len(m.JSONSchemas)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = 
preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JSONSchemaPropsOrBool) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JSONSchemaPropsOrBool: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JSONSchemaPropsOrBool: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Allows", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Allows = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Schema == nil { - m.Schema = &JSONSchemaProps{} - } - if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JSONSchemaPropsOrStringArray) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JSONSchemaPropsOrStringArray: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JSONSchemaPropsOrStringArray: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) 
<< shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Schema == nil { - m.Schema = &JSONSchemaProps{} - } - if err := m.Schema.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Property", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Property = append(m.Property, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Path = &s - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Port = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ValidationRule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ValidationRule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ValidationRule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != 
nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WebhookClientConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Service == nil { - m.Service = &ServiceReference{} - } - if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) 
- if m.CABundle == nil { - m.CABundle = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.URL = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto deleted file mode 100644 index 678e9b910..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto +++ /dev/null @@ -1,766 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = "proto2"; - -package k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"; - -// ConversionRequest describes the conversion request parameters. -message ConversionRequest { - // uid is an identifier for the individual request/response. It allows distinguishing instances of requests which are - // otherwise identical (parallel requests, etc). - // The UID is meant to track the round trip (request/response) between the Kubernetes API server and the webhook, not the user request. - // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. - optional string uid = 1; - - // desiredAPIVersion is the version to convert given objects to. e.g. "myapi.example.com/v1" - optional string desiredAPIVersion = 2; - - // objects is the list of custom resource objects to be converted. - repeated k8s.io.apimachinery.pkg.runtime.RawExtension objects = 3; -} - -// ConversionResponse describes a conversion response. -message ConversionResponse { - // uid is an identifier for the individual request/response. - // This should be copied over from the corresponding `request.uid`. - optional string uid = 1; - - // convertedObjects is the list of converted version of `request.objects` if the `result` is successful, otherwise empty. - // The webhook is expected to set `apiVersion` of these objects to the `request.desiredAPIVersion`. The list - // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). - // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. - repeated k8s.io.apimachinery.pkg.runtime.RawExtension convertedObjects = 2; - - // result contains the result of conversion with extra details if the conversion failed. `result.status` determines if - // the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the - // conversion. A successful conversion must set `result.status` to `Success`. A failed conversion must set - // `result.status` to `Failure` and provide more details in `result.message` and return http status 200. The `result.message` - // will be used to construct an error message for the end user. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Status result = 3; -} - -// ConversionReview describes a conversion request/response. -message ConversionReview { - // request describes the attributes for the conversion request. - // +optional - optional ConversionRequest request = 1; - - // response describes the attributes for the conversion response. 
- // +optional - optional ConversionResponse response = 2; -} - -// CustomResourceColumnDefinition specifies a column for server side printing. -message CustomResourceColumnDefinition { - // name is a human readable name for the column. - optional string name = 1; - - // type is an OpenAPI type definition for this column. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. - optional string type = 2; - - // format is an optional OpenAPI type definition for this column. The 'name' format is applied - // to the primary identifier column to assist in clients identifying column is the resource name. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. - // +optional - optional string format = 3; - - // description is a human readable description of this column. - // +optional - optional string description = 4; - - // priority is an integer defining the relative importance of this column compared to others. Lower - // numbers are considered higher priority. Columns that may be omitted in limited space scenarios - // should be given a priority greater than 0. - // +optional - optional int32 priority = 5; - - // JSONPath is a simple JSON path (i.e. with array notation) which is evaluated against - // each custom resource to produce the value for this column. - optional string JSONPath = 6; -} - -// CustomResourceConversion describes how to convert different versions of a CR. -message CustomResourceConversion { - // strategy specifies how custom resources are converted between versions. Allowed values are: - // - `None`: The converter only change the apiVersion and would not touch any other field in the custom resource. - // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information - // is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhookClientConfig to be set. - optional string strategy = 1; - - // webhookClientConfig is the instructions for how to call the webhook if strategy is `Webhook`. - // Required when `strategy` is set to `Webhook`. - // +optional - optional WebhookClientConfig webhookClientConfig = 2; - - // conversionReviewVersions is an ordered list of preferred `ConversionReview` - // versions the Webhook expects. The API server will use the first version in - // the list which it supports. If none of the versions specified in this list - // are supported by API server, conversion will fail for the custom resource. - // If a persisted Webhook configuration specifies allowed versions and does not - // include any versions known to the API Server, calls to the webhook will fail. - // Defaults to `["v1beta1"]`. - // +optional - repeated string conversionReviewVersions = 3; -} - -// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format -// <.spec.name>.<.spec.group>. -// Deprecated in v1.16, planned for removal in v1.22. Use apiextensions.k8s.io/v1 CustomResourceDefinition instead. 
-message CustomResourceDefinition { - // Standard object's metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec describes how the user wants the resources to appear - optional CustomResourceDefinitionSpec spec = 2; - - // status indicates the actual state of the CustomResourceDefinition - // +optional - optional CustomResourceDefinitionStatus status = 3; -} - -// CustomResourceDefinitionCondition contains details for the current condition of this pod. -message CustomResourceDefinitionCondition { - // type is the type of the condition. Types include Established, NamesAccepted and Terminating. - optional string type = 1; - - // status is the status of the condition. - // Can be True, False, Unknown. - optional string status = 2; - - // lastTransitionTime last time the condition transitioned from one status to another. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; - - // reason is a unique, one-word, CamelCase reason for the condition's last transition. - // +optional - optional string reason = 4; - - // message is a human-readable message indicating details about last transition. - // +optional - optional string message = 5; -} - -// CustomResourceDefinitionList is a list of CustomResourceDefinition objects. -message CustomResourceDefinitionList { - // Standard object's metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items list individual CustomResourceDefinition objects - repeated CustomResourceDefinition items = 2; -} - -// CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition -message CustomResourceDefinitionNames { - // plural is the plural name of the resource to serve. - // The custom resources are served under `/apis///.../`. - // Must match the name of the CustomResourceDefinition (in the form `.`). - // Must be all lowercase. - optional string plural = 1; - - // singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`. - // +optional - optional string singular = 2; - - // shortNames are short names for the resource, exposed in API discovery documents, - // and used by clients to support invocations like `kubectl get `. - // It must be all lowercase. - // +optional - repeated string shortNames = 3; - - // kind is the serialized kind of the resource. It is normally CamelCase and singular. - // Custom resource instances will use this value as the `kind` attribute in API calls. - optional string kind = 4; - - // listKind is the serialized kind of the list for this resource. Defaults to "`kind`List". - // +optional - optional string listKind = 5; - - // categories is a list of grouped resources this custom resource belongs to (e.g. 'all'). - // This is published in API discovery documents, and used by clients to support invocations like - // `kubectl get all`. - // +optional - repeated string categories = 6; -} - -// CustomResourceDefinitionSpec describes how a user wants their resource to appear -message CustomResourceDefinitionSpec { - // group is the API group of the defined custom resource. - // The custom resources are served under `/apis//...`. - // Must match the name of the CustomResourceDefinition (in the form `.`). 
- optional string group = 1; - - // version is the API version of the defined custom resource. - // The custom resources are served under `/apis///...`. - // Must match the name of the first item in the `versions` list if `version` and `versions` are both specified. - // Optional if `versions` is specified. - // Deprecated: use `versions` instead. - // +optional - optional string version = 2; - - // names specify the resource and kind names for the custom resource. - optional CustomResourceDefinitionNames names = 3; - - // scope indicates whether the defined custom resource is cluster- or namespace-scoped. - // Allowed values are `Cluster` and `Namespaced`. Default is `Namespaced`. - optional string scope = 4; - - // validation describes the schema used for validation and pruning of the custom resource. - // If present, this validation schema is used to validate all versions. - // Top-level and per-version schemas are mutually exclusive. - // +optional - optional CustomResourceValidation validation = 5; - - // subresources specify what subresources the defined custom resource has. - // If present, this field configures subresources for all versions. - // Top-level and per-version subresources are mutually exclusive. - // +optional - optional CustomResourceSubresources subresources = 6; - - // versions is the list of all API versions of the defined custom resource. - // Optional if `version` is specified. - // The name of the first item in the `versions` list must match the `version` field if `version` and `versions` are both specified. - // Version names are used to compute the order in which served versions are listed in API discovery. - // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered - // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), - // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first - // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing - // major version, then minor version. An example sorted list of versions: - // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. - // +optional - repeated CustomResourceDefinitionVersion versions = 7; - - // additionalPrinterColumns specifies additional columns returned in Table output. - // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. - // If present, this field configures columns for all versions. - // Top-level and per-version columns are mutually exclusive. - // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. - // +optional - repeated CustomResourceColumnDefinition additionalPrinterColumns = 8; - - // conversion defines conversion settings for the CRD. - // +optional - optional CustomResourceConversion conversion = 9; - - // preserveUnknownFields indicates that object fields which are not specified - // in the OpenAPI schema should be preserved when persisting to storage. - // apiVersion, kind, metadata and known fields inside metadata are always preserved. - // If false, schemas must be defined for all versions. - // Defaults to true in v1beta for backwards compatibility. - // Deprecated: will be required to be false in v1. 
Preservation of unknown fields can be specified - // in the validation schema using the `x-kubernetes-preserve-unknown-fields: true` extension. - // See https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#pruning-versus-preserving-unknown-fields for details. - // +optional - optional bool preserveUnknownFields = 10; -} - -// CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition -message CustomResourceDefinitionStatus { - // conditions indicate state for particular aspects of a CustomResourceDefinition - // +optional - // +listType=map - // +listMapKey=type - repeated CustomResourceDefinitionCondition conditions = 1; - - // acceptedNames are the names that are actually being used to serve discovery. - // They may be different than the names in spec. - // +optional - optional CustomResourceDefinitionNames acceptedNames = 2; - - // storedVersions lists all versions of CustomResources that were ever persisted. Tracking these - // versions allows a migration path for stored versions in etcd. The field is mutable - // so a migration controller can finish a migration to another version (ensuring - // no old objects are left in storage), and then remove the rest of the - // versions from this list. - // Versions may not be removed from `spec.versions` while they exist in this list. - // +optional - repeated string storedVersions = 3; -} - -// CustomResourceDefinitionVersion describes a version for CRD. -message CustomResourceDefinitionVersion { - // name is the version name, e.g. “v1”, “v2beta1”, etc. - // The custom resources are served under this version at `/apis///...` if `served` is true. - optional string name = 1; - - // served is a flag enabling/disabling this version from being served via REST APIs - optional bool served = 2; - - // storage indicates this version should be used when persisting custom resources to storage. - // There must be exactly one version with storage=true. - optional bool storage = 3; - - // deprecated indicates this version of the custom resource API is deprecated. - // When set to true, API requests to this version receive a warning header in the server response. - // Defaults to false. - // +optional - optional bool deprecated = 7; - - // deprecationWarning overrides the default warning returned to API clients. - // May only be set when `deprecated` is true. - // The default warning indicates this version is deprecated and recommends use - // of the newest served version of equal or greater stability, if one exists. - // +optional - optional string deprecationWarning = 8; - - // schema describes the schema used for validation and pruning of this version of the custom resource. - // Top-level and per-version schemas are mutually exclusive. - // Per-version schemas must not all be set to identical values (top-level validation schema should be used instead). - // +optional - optional CustomResourceValidation schema = 4; - - // subresources specify what subresources this version of the defined custom resource have. - // Top-level and per-version subresources are mutually exclusive. - // Per-version subresources must not all be set to identical values (top-level subresources should be used instead). - // +optional - optional CustomResourceSubresources subresources = 5; - - // additionalPrinterColumns specifies additional columns returned in Table output. - // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. 
- // Top-level and per-version columns are mutually exclusive. - // Per-version columns must not all be set to identical values (top-level columns should be used instead). - // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. - // +optional - repeated CustomResourceColumnDefinition additionalPrinterColumns = 6; -} - -// CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. -message CustomResourceSubresourceScale { - // specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. - // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under `.spec`. - // If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET. - optional string specReplicasPath = 1; - - // statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. - // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under `.status`. - // If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource - // will default to 0. - optional string statusReplicasPath = 2; - - // labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. - // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under `.status` or `.spec`. - // Must be set to work with HorizontalPodAutoscaler. - // The field pointed by this JSON path must be a string field (not a complex selector struct) - // which contains a serialized label selector in string form. - // More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource - // If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` - // subresource will default to the empty string. - // +optional - optional string labelSelectorPath = 3; -} - -// CustomResourceSubresourceStatus defines how to serve the status subresource for CustomResources. -// Status is represented by the `.status` JSON path inside of a CustomResource. When set, -// * exposes a /status subresource for the custom resource -// * PUT requests to the /status subresource take a custom resource object, and ignore changes to anything except the status stanza -// * PUT/POST/PATCH requests to the custom resource ignore changes to the status stanza -message CustomResourceSubresourceStatus { -} - -// CustomResourceSubresources defines the status and scale subresources for CustomResources. -message CustomResourceSubresources { - // status indicates the custom resource should serve a `/status` subresource. - // When enabled: - // 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. - // 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. - // +optional - optional CustomResourceSubresourceStatus status = 1; - - // scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object. - // +optional - optional CustomResourceSubresourceScale scale = 2; -} - -// CustomResourceValidation is a list of validation methods for CustomResources. 
-message CustomResourceValidation { - // openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning. - // +optional - optional JSONSchemaProps openAPIV3Schema = 1; -} - -// ExternalDocumentation allows referencing an external resource for extended documentation. -message ExternalDocumentation { - optional string description = 1; - - optional string url = 2; -} - -// JSON represents any valid JSON value. -// These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil. -message JSON { - optional bytes raw = 1; -} - -// JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/). -message JSONSchemaProps { - optional string id = 1; - - optional string schema = 2; - - optional string ref = 3; - - optional string description = 4; - - optional string type = 5; - - // format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated: - // - // - bsonobjectid: a bson object ID, i.e. a 24 characters hex string - // - uri: an URI as parsed by Golang net/url.ParseRequestURI - // - email: an email address as parsed by Golang net/mail.ParseAddress - // - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. - // - ipv4: an IPv4 IP as parsed by Golang net.ParseIP - // - ipv6: an IPv6 IP as parsed by Golang net.ParseIP - // - cidr: a CIDR as parsed by Golang net.ParseCIDR - // - mac: a MAC address as parsed by Golang net.ParseMAC - // - uuid: an UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - // - uuid3: an UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - // - uuid4: an UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - // - uuid5: an UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - // - isbn: an ISBN10 or ISBN13 number string like "0321751043" or "978-0321751041" - // - isbn10: an ISBN10 number string like "0321751043" - // - isbn13: an ISBN13 number string like "978-0321751041" - // - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$ with any non digit characters mixed in - // - ssn: a U.S. social security number following the regex ^\\d{3}[- ]?\\d{2}[- ]?\\d{4}$ - // - hexcolor: an hexadecimal color code like "#FFFFFF: following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$ - // - rgbcolor: an RGB color code like rgb like "rgb(255,255,2559" - // - byte: base64 encoded binary data - // - password: any kind of string - // - date: a date string like "2006-01-02" as defined by full-date in RFC3339 - // - duration: a duration string like "22 ns" as parsed by Golang time.ParseDuration or compatible with Scala duration format - // - datetime: a date time string like "2014-12-15T19:30:20.000Z" as defined by date-time in RFC3339. - optional string format = 6; - - optional string title = 7; - - // default is a default value for undefined object fields. - // Defaulting is a beta feature under the CustomResourceDefaulting feature gate. - // CustomResourceDefinitions with defaults must be created using the v1 (or newer) CustomResourceDefinition API. 
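A minimal Go sketch of a validation schema using one of the format strings listed above together with a default value (carried as raw JSON bytes); the property names are hypothetical:

package main

import (
	"fmt"

	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

func main() {
	// Defaults are carried as raw JSON in the JSON wrapper type.
	defaultEndpoint := apiextv1beta1.JSON{Raw: []byte(`"https://example.com"`)}

	schema := &apiextv1beta1.JSONSchemaProps{
		Type: "object",
		Properties: map[string]apiextv1beta1.JSONSchemaProps{
			// "uri" is one of the validated format strings listed above;
			// unknown formats would simply be ignored.
			"endpoint": {Type: "string", Format: "uri", Default: &defaultEndpoint},
			"contact":  {Type: "string", Format: "email"},
		},
	}

	validation := &apiextv1beta1.CustomResourceValidation{OpenAPIV3Schema: schema}
	fmt.Println(len(validation.OpenAPIV3Schema.Properties), "properties defined")
}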
- optional JSON default = 8; - - optional double maximum = 9; - - optional bool exclusiveMaximum = 10; - - optional double minimum = 11; - - optional bool exclusiveMinimum = 12; - - optional int64 maxLength = 13; - - optional int64 minLength = 14; - - optional string pattern = 15; - - optional int64 maxItems = 16; - - optional int64 minItems = 17; - - optional bool uniqueItems = 18; - - optional double multipleOf = 19; - - repeated JSON enum = 20; - - optional int64 maxProperties = 21; - - optional int64 minProperties = 22; - - repeated string required = 23; - - optional JSONSchemaPropsOrArray items = 24; - - repeated JSONSchemaProps allOf = 25; - - repeated JSONSchemaProps oneOf = 26; - - repeated JSONSchemaProps anyOf = 27; - - optional JSONSchemaProps not = 28; - - map properties = 29; - - optional JSONSchemaPropsOrBool additionalProperties = 30; - - map patternProperties = 31; - - map dependencies = 32; - - optional JSONSchemaPropsOrBool additionalItems = 33; - - map definitions = 34; - - optional ExternalDocumentation externalDocs = 35; - - optional JSON example = 36; - - optional bool nullable = 37; - - // x-kubernetes-preserve-unknown-fields stops the API server - // decoding step from pruning fields which are not specified - // in the validation schema. This affects fields recursively, - // but switches back to normal pruning behaviour if nested - // properties or additionalProperties are specified in the schema. - // This can either be true or undefined. False is forbidden. - optional bool xKubernetesPreserveUnknownFields = 38; - - // x-kubernetes-embedded-resource defines that the value is an - // embedded Kubernetes runtime.Object, with TypeMeta and - // ObjectMeta. The type must be object. It is allowed to further - // restrict the embedded object. kind, apiVersion and metadata - // are validated automatically. x-kubernetes-preserve-unknown-fields - // is allowed to be true, but does not have to be if the object - // is fully specified (up to kind, apiVersion, metadata). - optional bool xKubernetesEmbeddedResource = 39; - - // x-kubernetes-int-or-string specifies that this value is - // either an integer or a string. If this is true, an empty - // type is allowed and type as child of anyOf is permitted - // if following one of the following patterns: - // - // 1) anyOf: - // - type: integer - // - type: string - // 2) allOf: - // - anyOf: - // - type: integer - // - type: string - // - ... zero or more - optional bool xKubernetesIntOrString = 40; - - // x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used - // as the index of the map. - // - // This tag MUST only be used on lists that have the "x-kubernetes-list-type" - // extension set to "map". Also, the values specified for this attribute must - // be a scalar typed field of the child structure (no nesting is supported). - // - // The properties specified must either be required or have a default value, - // to ensure those properties are present for all list items. - // - // +optional - repeated string xKubernetesListMapKeys = 41; - - // x-kubernetes-list-type annotates an array to further describe its topology. - // This extension must only be used on lists and may have 3 possible values: - // - // 1) `atomic`: the list is treated as a single entity, like a scalar. - // Atomic lists will be entirely replaced when updated. This extension - // may be used on any type of list (struct, scalar, ...). 
- // 2) `set`: - // Sets are lists that must not have multiple items with the same value. Each - // value must be a scalar, an object with x-kubernetes-map-type `atomic` or an - // array with x-kubernetes-list-type `atomic`. - // 3) `map`: - // These lists are like maps in that their elements have a non-index key - // used to identify them. Order is preserved upon merge. The map tag - // must only be used on a list with elements of type object. - // Defaults to atomic for arrays. - // +optional - optional string xKubernetesListType = 42; - - // x-kubernetes-map-type annotates an object to further describe its topology. - // This extension must only be used when type is object and may have 2 possible values: - // - // 1) `granular`: - // These maps are actual maps (key-value pairs) and each fields are independent - // from each other (they can each be manipulated by separate actors). This is - // the default behaviour for all maps. - // 2) `atomic`: the list is treated as a single entity, like a scalar. - // Atomic maps will be entirely replaced when updated. - // +optional - optional string xKubernetesMapType = 43; - - // x-kubernetes-validations describes a list of validation rules written in the CEL expression language. - // This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled. - // +patchMergeKey=rule - // +patchStrategy=merge - // +listType=map - // +listMapKey=rule - repeated ValidationRule xKubernetesValidations = 44; -} - -// JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps -// or an array of JSONSchemaProps. Mainly here for serialization purposes. -message JSONSchemaPropsOrArray { - optional JSONSchemaProps schema = 1; - - repeated JSONSchemaProps jSONSchemas = 2; -} - -// JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. -// Defaults to true for the boolean property. -message JSONSchemaPropsOrBool { - optional bool allows = 1; - - optional JSONSchemaProps schema = 2; -} - -// JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array. -message JSONSchemaPropsOrStringArray { - optional JSONSchemaProps schema = 1; - - repeated string property = 2; -} - -// ServiceReference holds a reference to Service.legacy.k8s.io -message ServiceReference { - // namespace is the namespace of the service. - // Required - optional string namespace = 1; - - // name is the name of the service. - // Required - optional string name = 2; - - // path is an optional URL path at which the webhook will be contacted. - // +optional - optional string path = 3; - - // port is an optional service port at which the webhook will be contacted. - // `port` should be a valid port number (1-65535, inclusive). - // Defaults to 443 for backward compatibility. - // +optional - optional int32 port = 4; -} - -// ValidationRule describes a validation rule written in the CEL expression language. -message ValidationRule { - // Rule represents the expression which will be evaluated by CEL. - // ref: https://github.com/google/cel-spec - // The Rule is scoped to the location of the x-kubernetes-validations extension in the schema. - // The `self` variable in the CEL expression is bound to the scoped value. 
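A minimal Go sketch of the list-type extensions described above: an array of objects annotated as a `map` keyed by a required `name` property, so merges match items by key rather than by index; the schema itself is hypothetical:

package main

import (
	"fmt"

	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

func main() {
	listTypeMap := "map"

	ports := apiextv1beta1.JSONSchemaProps{
		Type:         "array",
		XListType:    &listTypeMap,
		XListMapKeys: []string{"name"},
		Items: &apiextv1beta1.JSONSchemaPropsOrArray{
			Schema: &apiextv1beta1.JSONSchemaProps{
				Type: "object",
				// The map key must be required (or defaulted) on every item.
				Required: []string{"name"},
				Properties: map[string]apiextv1beta1.JSONSchemaProps{
					"name": {Type: "string"},
					"port": {Type: "integer"},
				},
			},
		},
	}
	fmt.Println("list keyed by:", ports.XListMapKeys)
}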
- // Example: - // - Rule scoped to the root of a resource with a status subresource: {"rule": "self.status.actual <= self.spec.maxDesired"} - // - // If the Rule is scoped to an object with properties, the accessible properties of the object are field selectable - // via `self.field` and field presence can be checked via `has(self.field)`. Null valued fields are treated as - // absent fields in CEL expressions. - // If the Rule is scoped to an object with additionalProperties (i.e. a map) the value of the map - // are accessible via `self[mapKey]`, map containment can be checked via `mapKey in self` and all entries of the map - // are accessible via CEL macros and functions such as `self.all(...)`. - // If the Rule is scoped to an array, the elements of the array are accessible via `self[i]` and also by macros and - // functions. - // If the Rule is scoped to a scalar, `self` is bound to the scalar value. - // Examples: - // - Rule scoped to a map of objects: {"rule": "self.components['Widget'].priority < 10"} - // - Rule scoped to a list of integers: {"rule": "self.values.all(value, value >= 0 && value < 100)"} - // - Rule scoped to a string value: {"rule": "self.startsWith('kube')"} - // - // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the - // object and from any x-kubernetes-embedded-resource annotated objects. No other metadata properties are accessible. - // - // Unknown data preserved in custom resources via x-kubernetes-preserve-unknown-fields is not accessible in CEL - // expressions. This includes: - // - Unknown field values that are preserved by object schemas with x-kubernetes-preserve-unknown-fields. - // - Object properties where the property schema is of an "unknown type". An "unknown type" is recursively defined as: - // - A schema with no type and x-kubernetes-preserve-unknown-fields set to true - // - An array where the items schema is of an "unknown type" - // - An object where the additionalProperties schema is of an "unknown type" - // - // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. - // Accessible property names are escaped according to the following rules when accessed in the expression: - // - '__' escapes to '__underscores__' - // - '.' escapes to '__dot__' - // - '-' escapes to '__dash__' - // - '/' escapes to '__slash__' - // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: - // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if", - // "import", "let", "loop", "package", "namespace", "return". - // Examples: - // - Rule accessing a property named "namespace": {"rule": "self.__namespace__ > 0"} - // - Rule accessing a property named "x-prop": {"rule": "self.x__dash__prop > 0"} - // - Rule accessing a property named "redact__d": {"rule": "self.redact__underscores__d > 0"} - // - // Equality on arrays with x-kubernetes-list-type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. - // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: - // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and - // non-intersecting elements in `Y` are appended, retaining their partial order. - // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values - // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. 
Elements in `Y` with - // non-intersecting keys are appended, retaining their partial order. - optional string rule = 1; - - // Message represents the message displayed when validation fails. The message is required if the Rule contains - // line breaks. The message must not contain line breaks. - // If unset, the message is "failed rule: {Rule}". - // e.g. "must be a URL with the host matching spec.host" - optional string message = 2; -} - -// WebhookClientConfig contains the information to make a TLS connection with the webhook. -message WebhookClientConfig { - // url gives the location of the webhook, in standard URL form - // (`scheme://host:port/path`). Exactly one of `url` or `service` - // must be specified. - // - // The `host` should not refer to a service running in the cluster; use - // the `service` field instead. The host might be resolved via external - // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve - // in-cluster DNS as that would be a layering violation). `host` may - // also be an IP address. - // - // Please note that using `localhost` or `127.0.0.1` as a `host` is - // risky unless you take great care to run this webhook on all hosts - // which run an apiserver which might need to make calls to this - // webhook. Such installs are likely to be non-portable, i.e., not easy - // to turn up in a new cluster. - // - // The scheme must be "https"; the URL must begin with "https://". - // - // A path is optional, and if present may be any string permissible in - // a URL. You may use the path to pass an arbitrary string to the - // webhook, for example, a cluster identifier. - // - // Attempting to use a user or basic auth e.g. "user:password@" is not - // allowed. Fragments ("#...") and query parameters ("?...") are not - // allowed, either. - // - // +optional - optional string url = 3; - - // service is a reference to the service for this webhook. Either - // service or url must be specified. - // - // If the webhook is running within the cluster, then you should use `service`. - // - // +optional - optional ServiceReference service = 1; - - // caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. - // If unspecified, system trust roots on the apiserver are used. - // +optional - optional bytes caBundle = 2; -} - diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go deleted file mode 100644 index 44941d82e..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go +++ /dev/null @@ -1,136 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -import ( - "bytes" - "errors" - - "k8s.io/apimachinery/pkg/util/json" -) - -var jsTrue = []byte("true") -var jsFalse = []byte("false") - -func (s JSONSchemaPropsOrBool) MarshalJSON() ([]byte, error) { - if s.Schema != nil { - return json.Marshal(s.Schema) - } - - if s.Schema == nil && !s.Allows { - return jsFalse, nil - } - return jsTrue, nil -} - -func (s *JSONSchemaPropsOrBool) UnmarshalJSON(data []byte) error { - var nw JSONSchemaPropsOrBool - switch { - case len(data) == 0: - case data[0] == '{': - var sch JSONSchemaProps - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Allows = true - nw.Schema = &sch - case len(data) == 4 && string(data) == "true": - nw.Allows = true - case len(data) == 5 && string(data) == "false": - nw.Allows = false - default: - return errors.New("boolean or JSON schema expected") - } - *s = nw - return nil -} - -func (s JSONSchemaPropsOrStringArray) MarshalJSON() ([]byte, error) { - if len(s.Property) > 0 { - return json.Marshal(s.Property) - } - if s.Schema != nil { - return json.Marshal(s.Schema) - } - return []byte("null"), nil -} - -func (s *JSONSchemaPropsOrStringArray) UnmarshalJSON(data []byte) error { - var first byte - if len(data) > 1 { - first = data[0] - } - var nw JSONSchemaPropsOrStringArray - if first == '{' { - var sch JSONSchemaProps - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - if first == '[' { - if err := json.Unmarshal(data, &nw.Property); err != nil { - return err - } - } - *s = nw - return nil -} - -func (s JSONSchemaPropsOrArray) MarshalJSON() ([]byte, error) { - if len(s.JSONSchemas) > 0 { - return json.Marshal(s.JSONSchemas) - } - return json.Marshal(s.Schema) -} - -func (s *JSONSchemaPropsOrArray) UnmarshalJSON(data []byte) error { - var nw JSONSchemaPropsOrArray - var first byte - if len(data) > 1 { - first = data[0] - } - if first == '{' { - var sch JSONSchemaProps - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - if first == '[' { - if err := json.Unmarshal(data, &nw.JSONSchemas); err != nil { - return err - } - } - *s = nw - return nil -} - -func (s JSON) MarshalJSON() ([]byte, error) { - if len(s.Raw) > 0 { - return s.Raw, nil - } - return []byte("null"), nil - -} - -func (s *JSON) UnmarshalJSON(data []byte) error { - if len(data) > 0 && !bytes.Equal(data, nullLiteral) { - s.Raw = data - } - return nil -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/register.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/register.go deleted file mode 100644 index ac807211b..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/register.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -const GroupName = "apiextensions.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns back a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &CustomResourceDefinition{}, - &CustomResourceDefinitionList{}, - &ConversionReview{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addDefaultingFuncs) -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go deleted file mode 100644 index 61ebdb63f..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go +++ /dev/null @@ -1,531 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" -) - -// ConversionStrategyType describes different conversion types. -type ConversionStrategyType string - -const ( - // KubeAPIApprovedAnnotation is an annotation that must be set to create a CRD for the k8s.io, *.k8s.io, kubernetes.io, or *.kubernetes.io namespaces. - // The value should be a link to a URL where the current spec was approved, so updates to the spec should also update the URL. - // If the API is unapproved, you may set the annotation to a string starting with `"unapproved"`. For instance, `"unapproved, temporarily squatting"` or `"unapproved, experimental-only"`. This is discouraged. - KubeAPIApprovedAnnotation = "api-approved.kubernetes.io" - - // NoneConverter is a converter that only sets apiversion of the CR and leave everything else unchanged. - NoneConverter ConversionStrategyType = "None" - // WebhookConverter is a converter that calls to an external webhook to convert the CR. 
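A minimal Go sketch of how the scheme registration above is typically consumed: build a runtime.Scheme, call AddToScheme, and the kinds added by addKnownTypes become resolvable. It assumes only the apimachinery packages already vendored in this repository:

package main

import (
	"fmt"

	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()

	// Registers CustomResourceDefinition, CustomResourceDefinitionList and
	// ConversionReview under apiextensions.k8s.io/v1beta1 (see addKnownTypes).
	if err := apiextv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	gvk := apiextv1beta1.SchemeGroupVersion.WithKind("CustomResourceDefinition")
	fmt.Println("scheme recognizes CRD kind:", scheme.Recognizes(gvk))
}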
- WebhookConverter ConversionStrategyType = "Webhook" -) - -// CustomResourceDefinitionSpec describes how a user wants their resource to appear -type CustomResourceDefinitionSpec struct { - // group is the API group of the defined custom resource. - // The custom resources are served under `/apis//...`. - // Must match the name of the CustomResourceDefinition (in the form `.`). - Group string `json:"group" protobuf:"bytes,1,opt,name=group"` - // version is the API version of the defined custom resource. - // The custom resources are served under `/apis///...`. - // Must match the name of the first item in the `versions` list if `version` and `versions` are both specified. - // Optional if `versions` is specified. - // Deprecated: use `versions` instead. - // +optional - Version string `json:"version,omitempty" protobuf:"bytes,2,opt,name=version"` - // names specify the resource and kind names for the custom resource. - Names CustomResourceDefinitionNames `json:"names" protobuf:"bytes,3,opt,name=names"` - // scope indicates whether the defined custom resource is cluster- or namespace-scoped. - // Allowed values are `Cluster` and `Namespaced`. Default is `Namespaced`. - Scope ResourceScope `json:"scope" protobuf:"bytes,4,opt,name=scope,casttype=ResourceScope"` - // validation describes the schema used for validation and pruning of the custom resource. - // If present, this validation schema is used to validate all versions. - // Top-level and per-version schemas are mutually exclusive. - // +optional - Validation *CustomResourceValidation `json:"validation,omitempty" protobuf:"bytes,5,opt,name=validation"` - // subresources specify what subresources the defined custom resource has. - // If present, this field configures subresources for all versions. - // Top-level and per-version subresources are mutually exclusive. - // +optional - Subresources *CustomResourceSubresources `json:"subresources,omitempty" protobuf:"bytes,6,opt,name=subresources"` - // versions is the list of all API versions of the defined custom resource. - // Optional if `version` is specified. - // The name of the first item in the `versions` list must match the `version` field if `version` and `versions` are both specified. - // Version names are used to compute the order in which served versions are listed in API discovery. - // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered - // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), - // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first - // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing - // major version, then minor version. An example sorted list of versions: - // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. - // +optional - Versions []CustomResourceDefinitionVersion `json:"versions,omitempty" protobuf:"bytes,7,rep,name=versions"` - // additionalPrinterColumns specifies additional columns returned in Table output. - // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. - // If present, this field configures columns for all versions. - // Top-level and per-version columns are mutually exclusive. - // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. 
- // +optional - AdditionalPrinterColumns []CustomResourceColumnDefinition `json:"additionalPrinterColumns,omitempty" protobuf:"bytes,8,rep,name=additionalPrinterColumns"` - - // conversion defines conversion settings for the CRD. - // +optional - Conversion *CustomResourceConversion `json:"conversion,omitempty" protobuf:"bytes,9,opt,name=conversion"` - - // preserveUnknownFields indicates that object fields which are not specified - // in the OpenAPI schema should be preserved when persisting to storage. - // apiVersion, kind, metadata and known fields inside metadata are always preserved. - // If false, schemas must be defined for all versions. - // Defaults to true in v1beta for backwards compatibility. - // Deprecated: will be required to be false in v1. Preservation of unknown fields can be specified - // in the validation schema using the `x-kubernetes-preserve-unknown-fields: true` extension. - // See https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#pruning-versus-preserving-unknown-fields for details. - // +optional - PreserveUnknownFields *bool `json:"preserveUnknownFields,omitempty" protobuf:"varint,10,opt,name=preserveUnknownFields"` -} - -// CustomResourceConversion describes how to convert different versions of a CR. -type CustomResourceConversion struct { - // strategy specifies how custom resources are converted between versions. Allowed values are: - // - `None`: The converter only change the apiVersion and would not touch any other field in the custom resource. - // - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information - // is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhookClientConfig to be set. - Strategy ConversionStrategyType `json:"strategy" protobuf:"bytes,1,name=strategy"` - - // webhookClientConfig is the instructions for how to call the webhook if strategy is `Webhook`. - // Required when `strategy` is set to `Webhook`. - // +optional - WebhookClientConfig *WebhookClientConfig `json:"webhookClientConfig,omitempty" protobuf:"bytes,2,name=webhookClientConfig"` - - // conversionReviewVersions is an ordered list of preferred `ConversionReview` - // versions the Webhook expects. The API server will use the first version in - // the list which it supports. If none of the versions specified in this list - // are supported by API server, conversion will fail for the custom resource. - // If a persisted Webhook configuration specifies allowed versions and does not - // include any versions known to the API Server, calls to the webhook will fail. - // Defaults to `["v1beta1"]`. - // +optional - ConversionReviewVersions []string `json:"conversionReviewVersions,omitempty" protobuf:"bytes,3,rep,name=conversionReviewVersions"` -} - -// WebhookClientConfig contains the information to make a TLS connection with the webhook. -type WebhookClientConfig struct { - // url gives the location of the webhook, in standard URL form - // (`scheme://host:port/path`). Exactly one of `url` or `service` - // must be specified. - // - // The `host` should not refer to a service running in the cluster; use - // the `service` field instead. The host might be resolved via external - // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve - // in-cluster DNS as that would be a layering violation). `host` may - // also be an IP address. 
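A minimal Go sketch of the conversion settings described here, using a ServiceReference (whose fields are documented just below) rather than a URL for an in-cluster webhook; the namespace, service name, path, and CA bundle are hypothetical placeholders:

package main

import (
	"fmt"

	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

func main() {
	path := "/convert"
	port := int32(443)

	conversion := &apiextv1beta1.CustomResourceConversion{
		Strategy: apiextv1beta1.WebhookConverter,
		// Exactly one of url or service may be set; an in-cluster webhook
		// should be addressed through a ServiceReference, not a URL.
		WebhookClientConfig: &apiextv1beta1.WebhookClientConfig{
			Service: &apiextv1beta1.ServiceReference{
				Namespace: "widgets-system",
				Name:      "widget-conversion-webhook",
				Path:      &path,
				Port:      &port,
			},
			CABundle: []byte("-----BEGIN CERTIFICATE-----\n..."),
		},
		// The API server uses the first review version it supports.
		ConversionReviewVersions: []string{"v1beta1"},
	}
	fmt.Println("conversion strategy:", conversion.Strategy)
}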
- // - // Please note that using `localhost` or `127.0.0.1` as a `host` is - // risky unless you take great care to run this webhook on all hosts - // which run an apiserver which might need to make calls to this - // webhook. Such installs are likely to be non-portable, i.e., not easy - // to turn up in a new cluster. - // - // The scheme must be "https"; the URL must begin with "https://". - // - // A path is optional, and if present may be any string permissible in - // a URL. You may use the path to pass an arbitrary string to the - // webhook, for example, a cluster identifier. - // - // Attempting to use a user or basic auth e.g. "user:password@" is not - // allowed. Fragments ("#...") and query parameters ("?...") are not - // allowed, either. - // - // +optional - URL *string `json:"url,omitempty" protobuf:"bytes,3,opt,name=url"` - - // service is a reference to the service for this webhook. Either - // service or url must be specified. - // - // If the webhook is running within the cluster, then you should use `service`. - // - // +optional - Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"` - - // caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. - // If unspecified, system trust roots on the apiserver are used. - // +optional - CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"` -} - -// ServiceReference holds a reference to Service.legacy.k8s.io -type ServiceReference struct { - // namespace is the namespace of the service. - // Required - Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` - // name is the name of the service. - // Required - Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - - // path is an optional URL path at which the webhook will be contacted. - // +optional - Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` - - // port is an optional service port at which the webhook will be contacted. - // `port` should be a valid port number (1-65535, inclusive). - // Defaults to 443 for backward compatibility. - // +optional - Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` -} - -// CustomResourceDefinitionVersion describes a version for CRD. -type CustomResourceDefinitionVersion struct { - // name is the version name, e.g. "v1", "v2beta1", etc. - // The custom resources are served under this version at `/apis///...` if `served` is true. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // served is a flag enabling/disabling this version from being served via REST APIs - Served bool `json:"served" protobuf:"varint,2,opt,name=served"` - // storage indicates this version should be used when persisting custom resources to storage. - // There must be exactly one version with storage=true. - Storage bool `json:"storage" protobuf:"varint,3,opt,name=storage"` - // deprecated indicates this version of the custom resource API is deprecated. - // When set to true, API requests to this version receive a warning header in the server response. - // Defaults to false. - // +optional - Deprecated bool `json:"deprecated,omitempty" protobuf:"varint,7,opt,name=deprecated"` - // deprecationWarning overrides the default warning returned to API clients. - // May only be set when `deprecated` is true. - // The default warning indicates this version is deprecated and recommends use - // of the newest served version of equal or greater stability, if one exists.
- // +optional - DeprecationWarning *string `json:"deprecationWarning,omitempty" protobuf:"bytes,8,opt,name=deprecationWarning"` - // schema describes the schema used for validation and pruning of this version of the custom resource. - // Top-level and per-version schemas are mutually exclusive. - // Per-version schemas must not all be set to identical values (top-level validation schema should be used instead). - // +optional - Schema *CustomResourceValidation `json:"schema,omitempty" protobuf:"bytes,4,opt,name=schema"` - // subresources specify what subresources this version of the defined custom resource have. - // Top-level and per-version subresources are mutually exclusive. - // Per-version subresources must not all be set to identical values (top-level subresources should be used instead). - // +optional - Subresources *CustomResourceSubresources `json:"subresources,omitempty" protobuf:"bytes,5,opt,name=subresources"` - // additionalPrinterColumns specifies additional columns returned in Table output. - // See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. - // Top-level and per-version columns are mutually exclusive. - // Per-version columns must not all be set to identical values (top-level columns should be used instead). - // If no top-level or per-version columns are specified, a single column displaying the age of the custom resource is used. - // +optional - AdditionalPrinterColumns []CustomResourceColumnDefinition `json:"additionalPrinterColumns,omitempty" protobuf:"bytes,6,rep,name=additionalPrinterColumns"` -} - -// CustomResourceColumnDefinition specifies a column for server side printing. -type CustomResourceColumnDefinition struct { - // name is a human readable name for the column. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // type is an OpenAPI type definition for this column. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. - Type string `json:"type" protobuf:"bytes,2,opt,name=type"` - // format is an optional OpenAPI type definition for this column. The 'name' format is applied - // to the primary identifier column to assist in clients identifying column is the resource name. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details. - // +optional - Format string `json:"format,omitempty" protobuf:"bytes,3,opt,name=format"` - // description is a human readable description of this column. - // +optional - Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - // priority is an integer defining the relative importance of this column compared to others. Lower - // numbers are considered higher priority. Columns that may be omitted in limited space scenarios - // should be given a priority greater than 0. - // +optional - Priority int32 `json:"priority,omitempty" protobuf:"bytes,5,opt,name=priority"` - // JSONPath is a simple JSON path (i.e. with array notation) which is evaluated against - // each custom resource to produce the value for this column. - JSONPath string `json:"JSONPath" protobuf:"bytes,6,opt,name=JSONPath"` -} - -// CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition -type CustomResourceDefinitionNames struct { - // plural is the plural name of the resource to serve. - // The custom resources are served under `/apis///.../`. - // Must match the name of the CustomResourceDefinition (in the form `.`). 
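A minimal Go sketch of the printer-column definition above; the column names and JSON paths are hypothetical:

package main

import (
	"fmt"

	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

func main() {
	columns := []apiextv1beta1.CustomResourceColumnDefinition{
		{
			Name:        "Replicas",
			Type:        "integer",
			Description: "Desired number of replicas",
			JSONPath:    ".spec.replicas",
		},
		{
			Name:     "Age",
			Type:     "date",
			JSONPath: ".metadata.creationTimestamp",
		},
		{
			// Priority > 0 hides the column in space-constrained output
			// such as the default `kubectl get` view.
			Name:     "Selector",
			Type:     "string",
			Priority: 1,
			JSONPath: ".status.selector",
		},
	}
	fmt.Println(len(columns), "printer columns defined")
}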
- // Must be all lowercase. - Plural string `json:"plural" protobuf:"bytes,1,opt,name=plural"` - // singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`. - // +optional - Singular string `json:"singular,omitempty" protobuf:"bytes,2,opt,name=singular"` - // shortNames are short names for the resource, exposed in API discovery documents, - // and used by clients to support invocations like `kubectl get `. - // It must be all lowercase. - // +optional - ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,3,opt,name=shortNames"` - // kind is the serialized kind of the resource. It is normally CamelCase and singular. - // Custom resource instances will use this value as the `kind` attribute in API calls. - Kind string `json:"kind" protobuf:"bytes,4,opt,name=kind"` - // listKind is the serialized kind of the list for this resource. Defaults to "`kind`List". - // +optional - ListKind string `json:"listKind,omitempty" protobuf:"bytes,5,opt,name=listKind"` - // categories is a list of grouped resources this custom resource belongs to (e.g. 'all'). - // This is published in API discovery documents, and used by clients to support invocations like - // `kubectl get all`. - // +optional - Categories []string `json:"categories,omitempty" protobuf:"bytes,6,rep,name=categories"` -} - -// ResourceScope is an enum defining the different scopes available to a custom resource -type ResourceScope string - -const ( - ClusterScoped ResourceScope = "Cluster" - NamespaceScoped ResourceScope = "Namespaced" -) - -type ConditionStatus string - -// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. -// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes -// can't decide if a resource is in the condition or not. In the future, we could add other -// intermediate conditions, e.g. ConditionDegraded. -const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" - ConditionUnknown ConditionStatus = "Unknown" -) - -// CustomResourceDefinitionConditionType is a valid value for CustomResourceDefinitionCondition.Type -type CustomResourceDefinitionConditionType string - -const ( - // Established means that the resource has become active. A resource is established when all names are - // accepted without a conflict for the first time. A resource stays established until deleted, even during - // a later NamesAccepted due to changed names. Note that not all names can be changed. - Established CustomResourceDefinitionConditionType = "Established" - // NamesAccepted means the names chosen for this CustomResourceDefinition do not conflict with others in - // the group and are therefore accepted. - NamesAccepted CustomResourceDefinitionConditionType = "NamesAccepted" - // NonStructuralSchema means that one or more OpenAPI schema is not structural. - // - // A schema is structural if it specifies types for all values, with the only exceptions of those with - // - x-kubernetes-int-or-string: true — for fields which can be integer or string - // - x-kubernetes-preserve-unknown-fields: true — for raw, unspecified JSON values - // and there is no type, additionalProperties, default, nullable or x-kubernetes-* vendor extenions - // specified under allOf, anyOf, oneOf or not. - // - // Non-structural schemas will not be allowed anymore in v1 API groups. 
Moreover, new features will not be - // available for non-structural CRDs: - // - pruning - // - defaulting - // - read-only - // - OpenAPI publishing - // - webhook conversion - NonStructuralSchema CustomResourceDefinitionConditionType = "NonStructuralSchema" - // Terminating means that the CustomResourceDefinition has been deleted and is cleaning up. - Terminating CustomResourceDefinitionConditionType = "Terminating" - // KubernetesAPIApprovalPolicyConformant indicates that an API in *.k8s.io or *.kubernetes.io is or is not approved. For CRDs - // outside those groups, this condition will not be set. For CRDs inside those groups, the condition will - // be true if .metadata.annotations["api-approved.kubernetes.io"] is set to a URL, otherwise it will be false. - // See https://github.com/kubernetes/enhancements/pull/1111 for more details. - KubernetesAPIApprovalPolicyConformant CustomResourceDefinitionConditionType = "KubernetesAPIApprovalPolicyConformant" -) - -// CustomResourceDefinitionCondition contains details for the current condition of this pod. -type CustomResourceDefinitionCondition struct { - // type is the type of the condition. Types include Established, NamesAccepted and Terminating. - Type CustomResourceDefinitionConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=CustomResourceDefinitionConditionType"` - // status is the status of the condition. - // Can be True, False, Unknown. - Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` - // lastTransitionTime last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // reason is a unique, one-word, CamelCase reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // message is a human-readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` -} - -// CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition -type CustomResourceDefinitionStatus struct { - // conditions indicate state for particular aspects of a CustomResourceDefinition - // +optional - // +listType=map - // +listMapKey=type - Conditions []CustomResourceDefinitionCondition `json:"conditions" protobuf:"bytes,1,opt,name=conditions"` - - // acceptedNames are the names that are actually being used to serve discovery. - // They may be different than the names in spec. - // +optional - AcceptedNames CustomResourceDefinitionNames `json:"acceptedNames" protobuf:"bytes,2,opt,name=acceptedNames"` - - // storedVersions lists all versions of CustomResources that were ever persisted. Tracking these - // versions allows a migration path for stored versions in etcd. The field is mutable - // so a migration controller can finish a migration to another version (ensuring - // no old objects are left in storage), and then remove the rest of the - // versions from this list. - // Versions may not be removed from `spec.versions` while they exist in this list. 
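A minimal Go sketch of reading the conditions described above, for example to wait until a CRD is Established before creating instances of it; the helper name and sample status are illustrative:

package main

import (
	"fmt"

	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
)

// isEstablished reports whether the Established condition is True, the usual
// readiness signal before serving or creating instances of a new CRD.
func isEstablished(status apiextv1beta1.CustomResourceDefinitionStatus) bool {
	for _, cond := range status.Conditions {
		if cond.Type == apiextv1beta1.Established && cond.Status == apiextv1beta1.ConditionTrue {
			return true
		}
	}
	return false
}

func main() {
	status := apiextv1beta1.CustomResourceDefinitionStatus{
		Conditions: []apiextv1beta1.CustomResourceDefinitionCondition{
			{Type: apiextv1beta1.NamesAccepted, Status: apiextv1beta1.ConditionTrue, Reason: "NoConflicts"},
			{Type: apiextv1beta1.Established, Status: apiextv1beta1.ConditionTrue, Reason: "InitialNamesAccepted"},
		},
	}
	fmt.Println("established:", isEstablished(status))
}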
- // +optional - StoredVersions []string `json:"storedVersions" protobuf:"bytes,3,rep,name=storedVersions"` -} - -// CustomResourceCleanupFinalizer is the name of the finalizer which will delete instances of -// a CustomResourceDefinition -const CustomResourceCleanupFinalizer = "customresourcecleanup.apiextensions.k8s.io" - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.7 -// +k8s:prerelease-lifecycle-gen:deprecated=1.16 -// +k8s:prerelease-lifecycle-gen:removed=1.22 -// +k8s:prerelease-lifecycle-gen:replacement=apiextensions.k8s.io,v1,CustomResourceDefinition - -// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format -// <.spec.name>.<.spec.group>. -// Deprecated in v1.16, planned for removal in v1.22. Use apiextensions.k8s.io/v1 CustomResourceDefinition instead. -type CustomResourceDefinition struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec describes how the user wants the resources to appear - Spec CustomResourceDefinitionSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // status indicates the actual state of the CustomResourceDefinition - // +optional - Status CustomResourceDefinitionStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.7 -// +k8s:prerelease-lifecycle-gen:deprecated=1.16 -// +k8s:prerelease-lifecycle-gen:removed=1.22 -// +k8s:prerelease-lifecycle-gen:replacement=apiextensions.k8s.io,v1,CustomResourceDefinitionList - -// CustomResourceDefinitionList is a list of CustomResourceDefinition objects. -type CustomResourceDefinitionList struct { - metav1.TypeMeta `json:",inline"` - - // Standard object's metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items list individual CustomResourceDefinition objects - Items []CustomResourceDefinition `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// CustomResourceValidation is a list of validation methods for CustomResources. -type CustomResourceValidation struct { - // openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning. - // +optional - OpenAPIV3Schema *JSONSchemaProps `json:"openAPIV3Schema,omitempty" protobuf:"bytes,1,opt,name=openAPIV3Schema"` -} - -// CustomResourceSubresources defines the status and scale subresources for CustomResources. -type CustomResourceSubresources struct { - // status indicates the custom resource should serve a `/status` subresource. - // When enabled: - // 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. - // 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. - // +optional - Status *CustomResourceSubresourceStatus `json:"status,omitempty" protobuf:"bytes,1,opt,name=status"` - // scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object. 
- // +optional - Scale *CustomResourceSubresourceScale `json:"scale,omitempty" protobuf:"bytes,2,opt,name=scale"` -} - -// CustomResourceSubresourceStatus defines how to serve the status subresource for CustomResources. -// Status is represented by the `.status` JSON path inside of a CustomResource. When set, -// * exposes a /status subresource for the custom resource -// * PUT requests to the /status subresource take a custom resource object, and ignore changes to anything except the status stanza -// * PUT/POST/PATCH requests to the custom resource ignore changes to the status stanza -type CustomResourceSubresourceStatus struct{} - -// CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. -type CustomResourceSubresourceScale struct { - // specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. - // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under `.spec`. - // If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET. - SpecReplicasPath string `json:"specReplicasPath" protobuf:"bytes,1,name=specReplicasPath"` - // statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. - // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under `.status`. - // If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource - // will default to 0. - StatusReplicasPath string `json:"statusReplicasPath" protobuf:"bytes,2,opt,name=statusReplicasPath"` - // labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. - // Only JSON paths without the array notation are allowed. - // Must be a JSON Path under `.status` or `.spec`. - // Must be set to work with HorizontalPodAutoscaler. - // The field pointed by this JSON path must be a string field (not a complex selector struct) - // which contains a serialized label selector in string form. - // More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource - // If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` - // subresource will default to the empty string. - // +optional - LabelSelectorPath *string `json:"labelSelectorPath,omitempty" protobuf:"bytes,3,opt,name=labelSelectorPath"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.13 -// +k8s:prerelease-lifecycle-gen:deprecated=1.19 -// This API is never served. It is used for outbound requests from apiservers. This will ensure it never gets served accidentally -// and having the generator against this group will protect future APIs which may be served. -// +k8s:prerelease-lifecycle-gen:replacement=apiextensions.k8s.io,v1,ConversionReview - -// ConversionReview describes a conversion request/response. -type ConversionReview struct { - metav1.TypeMeta `json:",inline"` - // request describes the attributes for the conversion request. - // +optional - Request *ConversionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"` - // response describes the attributes for the conversion response. 
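A minimal Go sketch of the ConversionReview round trip, whose request and response fields are documented just below: the webhook must echo the request UID, return objects in the desired apiVersion in the same order, and report Success or Failure. The handler here only passes objects through unchanged, so it stands in for real conversion logic purely as an illustration; the UID and versions are hypothetical:

package main

import (
	"fmt"

	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// convert fills in review.Response for the given review.Request.
func convert(review *apiextv1beta1.ConversionReview) {
	response := &apiextv1beta1.ConversionResponse{
		// The response UID must be copied from the request.
		UID:    review.Request.UID,
		Result: metav1.Status{Status: metav1.StatusSuccess},
	}
	// Real conversion logic would rewrite each object to
	// review.Request.DesiredAPIVersion here.
	response.ConvertedObjects = review.Request.Objects
	review.Response = response
}

func main() {
	review := &apiextv1beta1.ConversionReview{
		Request: &apiextv1beta1.ConversionRequest{
			UID:               "705ab4f5-6393-11e8-b7cc-42010a800002",
			DesiredAPIVersion: "example.com/v1",
		},
	}
	convert(review)
	fmt.Println("responded to request:", review.Response.UID)
}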
- // +optional - Response *ConversionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"` -} - -// ConversionRequest describes the conversion request parameters. -type ConversionRequest struct { - // uid is an identifier for the individual request/response. It allows distinguishing instances of requests which are - // otherwise identical (parallel requests, etc). - // The UID is meant to track the round trip (request/response) between the Kubernetes API server and the webhook, not the user request. - // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. - UID types.UID `json:"uid" protobuf:"bytes,1,name=uid"` - // desiredAPIVersion is the version to convert given objects to. e.g. "myapi.example.com/v1" - DesiredAPIVersion string `json:"desiredAPIVersion" protobuf:"bytes,2,name=desiredAPIVersion"` - // objects is the list of custom resource objects to be converted. - Objects []runtime.RawExtension `json:"objects" protobuf:"bytes,3,rep,name=objects"` -} - -// ConversionResponse describes a conversion response. -type ConversionResponse struct { - // uid is an identifier for the individual request/response. - // This should be copied over from the corresponding `request.uid`. - UID types.UID `json:"uid" protobuf:"bytes,1,name=uid"` - // convertedObjects is the list of converted version of `request.objects` if the `result` is successful, otherwise empty. - // The webhook is expected to set `apiVersion` of these objects to the `request.desiredAPIVersion`. The list - // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). - // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. - ConvertedObjects []runtime.RawExtension `json:"convertedObjects" protobuf:"bytes,2,rep,name=convertedObjects"` - // result contains the result of conversion with extra details if the conversion failed. `result.status` determines if - // the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the - // conversion. A successful conversion must set `result.status` to `Success`. A failed conversion must set - // `result.status` to `Failure` and provide more details in `result.message` and return http status 200. The `result.message` - // will be used to construct an error message for the end user. - Result metav1.Status `json:"result" protobuf:"bytes,3,name=result"` -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go deleted file mode 100644 index c9d943c9a..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go +++ /dev/null @@ -1,331 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -// JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/). -type JSONSchemaProps struct { - ID string `json:"id,omitempty" protobuf:"bytes,1,opt,name=id"` - Schema JSONSchemaURL `json:"$schema,omitempty" protobuf:"bytes,2,opt,name=schema"` - Ref *string `json:"$ref,omitempty" protobuf:"bytes,3,opt,name=ref"` - Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - Type string `json:"type,omitempty" protobuf:"bytes,5,opt,name=type"` - - // format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated: - // - // - bsonobjectid: a bson object ID, i.e. a 24 characters hex string - // - uri: an URI as parsed by Golang net/url.ParseRequestURI - // - email: an email address as parsed by Golang net/mail.ParseAddress - // - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. - // - ipv4: an IPv4 IP as parsed by Golang net.ParseIP - // - ipv6: an IPv6 IP as parsed by Golang net.ParseIP - // - cidr: a CIDR as parsed by Golang net.ParseCIDR - // - mac: a MAC address as parsed by Golang net.ParseMAC - // - uuid: an UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - // - uuid3: an UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - // - uuid4: an UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - // - uuid5: an UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - // - isbn: an ISBN10 or ISBN13 number string like "0321751043" or "978-0321751041" - // - isbn10: an ISBN10 number string like "0321751043" - // - isbn13: an ISBN13 number string like "978-0321751041" - // - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$ with any non digit characters mixed in - // - ssn: a U.S. social security number following the regex ^\\d{3}[- ]?\\d{2}[- ]?\\d{4}$ - // - hexcolor: an hexadecimal color code like "#FFFFFF: following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$ - // - rgbcolor: an RGB color code like rgb like "rgb(255,255,2559" - // - byte: base64 encoded binary data - // - password: any kind of string - // - date: a date string like "2006-01-02" as defined by full-date in RFC3339 - // - duration: a duration string like "22 ns" as parsed by Golang time.ParseDuration or compatible with Scala duration format - // - datetime: a date time string like "2014-12-15T19:30:20.000Z" as defined by date-time in RFC3339. - Format string `json:"format,omitempty" protobuf:"bytes,6,opt,name=format"` - - Title string `json:"title,omitempty" protobuf:"bytes,7,opt,name=title"` - // default is a default value for undefined object fields. - // Defaulting is a beta feature under the CustomResourceDefaulting feature gate. - // CustomResourceDefinitions with defaults must be created using the v1 (or newer) CustomResourceDefinition API. 
- Default *JSON `json:"default,omitempty" protobuf:"bytes,8,opt,name=default"` - Maximum *float64 `json:"maximum,omitempty" protobuf:"bytes,9,opt,name=maximum"` - ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty" protobuf:"bytes,10,opt,name=exclusiveMaximum"` - Minimum *float64 `json:"minimum,omitempty" protobuf:"bytes,11,opt,name=minimum"` - ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty" protobuf:"bytes,12,opt,name=exclusiveMinimum"` - MaxLength *int64 `json:"maxLength,omitempty" protobuf:"bytes,13,opt,name=maxLength"` - MinLength *int64 `json:"minLength,omitempty" protobuf:"bytes,14,opt,name=minLength"` - Pattern string `json:"pattern,omitempty" protobuf:"bytes,15,opt,name=pattern"` - MaxItems *int64 `json:"maxItems,omitempty" protobuf:"bytes,16,opt,name=maxItems"` - MinItems *int64 `json:"minItems,omitempty" protobuf:"bytes,17,opt,name=minItems"` - UniqueItems bool `json:"uniqueItems,omitempty" protobuf:"bytes,18,opt,name=uniqueItems"` - MultipleOf *float64 `json:"multipleOf,omitempty" protobuf:"bytes,19,opt,name=multipleOf"` - Enum []JSON `json:"enum,omitempty" protobuf:"bytes,20,rep,name=enum"` - MaxProperties *int64 `json:"maxProperties,omitempty" protobuf:"bytes,21,opt,name=maxProperties"` - MinProperties *int64 `json:"minProperties,omitempty" protobuf:"bytes,22,opt,name=minProperties"` - Required []string `json:"required,omitempty" protobuf:"bytes,23,rep,name=required"` - Items *JSONSchemaPropsOrArray `json:"items,omitempty" protobuf:"bytes,24,opt,name=items"` - AllOf []JSONSchemaProps `json:"allOf,omitempty" protobuf:"bytes,25,rep,name=allOf"` - OneOf []JSONSchemaProps `json:"oneOf,omitempty" protobuf:"bytes,26,rep,name=oneOf"` - AnyOf []JSONSchemaProps `json:"anyOf,omitempty" protobuf:"bytes,27,rep,name=anyOf"` - Not *JSONSchemaProps `json:"not,omitempty" protobuf:"bytes,28,opt,name=not"` - Properties map[string]JSONSchemaProps `json:"properties,omitempty" protobuf:"bytes,29,rep,name=properties"` - AdditionalProperties *JSONSchemaPropsOrBool `json:"additionalProperties,omitempty" protobuf:"bytes,30,opt,name=additionalProperties"` - PatternProperties map[string]JSONSchemaProps `json:"patternProperties,omitempty" protobuf:"bytes,31,rep,name=patternProperties"` - Dependencies JSONSchemaDependencies `json:"dependencies,omitempty" protobuf:"bytes,32,opt,name=dependencies"` - AdditionalItems *JSONSchemaPropsOrBool `json:"additionalItems,omitempty" protobuf:"bytes,33,opt,name=additionalItems"` - Definitions JSONSchemaDefinitions `json:"definitions,omitempty" protobuf:"bytes,34,opt,name=definitions"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty" protobuf:"bytes,35,opt,name=externalDocs"` - Example *JSON `json:"example,omitempty" protobuf:"bytes,36,opt,name=example"` - Nullable bool `json:"nullable,omitempty" protobuf:"bytes,37,opt,name=nullable"` - - // x-kubernetes-preserve-unknown-fields stops the API server - // decoding step from pruning fields which are not specified - // in the validation schema. This affects fields recursively, - // but switches back to normal pruning behaviour if nested - // properties or additionalProperties are specified in the schema. - // This can either be true or undefined. False is forbidden. - XPreserveUnknownFields *bool `json:"x-kubernetes-preserve-unknown-fields,omitempty" protobuf:"bytes,38,opt,name=xKubernetesPreserveUnknownFields"` - - // x-kubernetes-embedded-resource defines that the value is an - // embedded Kubernetes runtime.Object, with TypeMeta and - // ObjectMeta. The type must be object. 
It is allowed to further - // restrict the embedded object. kind, apiVersion and metadata - // are validated automatically. x-kubernetes-preserve-unknown-fields - // is allowed to be true, but does not have to be if the object - // is fully specified (up to kind, apiVersion, metadata). - XEmbeddedResource bool `json:"x-kubernetes-embedded-resource,omitempty" protobuf:"bytes,39,opt,name=xKubernetesEmbeddedResource"` - - // x-kubernetes-int-or-string specifies that this value is - // either an integer or a string. If this is true, an empty - // type is allowed and type as child of anyOf is permitted - // if following one of the following patterns: - // - // 1) anyOf: - // - type: integer - // - type: string - // 2) allOf: - // - anyOf: - // - type: integer - // - type: string - // - ... zero or more - XIntOrString bool `json:"x-kubernetes-int-or-string,omitempty" protobuf:"bytes,40,opt,name=xKubernetesIntOrString"` - - // x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used - // as the index of the map. - // - // This tag MUST only be used on lists that have the "x-kubernetes-list-type" - // extension set to "map". Also, the values specified for this attribute must - // be a scalar typed field of the child structure (no nesting is supported). - // - // The properties specified must either be required or have a default value, - // to ensure those properties are present for all list items. - // - // +optional - XListMapKeys []string `json:"x-kubernetes-list-map-keys,omitempty" protobuf:"bytes,41,rep,name=xKubernetesListMapKeys"` - - // x-kubernetes-list-type annotates an array to further describe its topology. - // This extension must only be used on lists and may have 3 possible values: - // - // 1) `atomic`: the list is treated as a single entity, like a scalar. - // Atomic lists will be entirely replaced when updated. This extension - // may be used on any type of list (struct, scalar, ...). - // 2) `set`: - // Sets are lists that must not have multiple items with the same value. Each - // value must be a scalar, an object with x-kubernetes-map-type `atomic` or an - // array with x-kubernetes-list-type `atomic`. - // 3) `map`: - // These lists are like maps in that their elements have a non-index key - // used to identify them. Order is preserved upon merge. The map tag - // must only be used on a list with elements of type object. - // Defaults to atomic for arrays. - // +optional - XListType *string `json:"x-kubernetes-list-type,omitempty" protobuf:"bytes,42,opt,name=xKubernetesListType"` - - // x-kubernetes-map-type annotates an object to further describe its topology. - // This extension must only be used when type is object and may have 2 possible values: - // - // 1) `granular`: - // These maps are actual maps (key-value pairs) and each fields are independent - // from each other (they can each be manipulated by separate actors). This is - // the default behaviour for all maps. - // 2) `atomic`: the list is treated as a single entity, like a scalar. - // Atomic maps will be entirely replaced when updated. - // +optional - XMapType *string `json:"x-kubernetes-map-type,omitempty" protobuf:"bytes,43,opt,name=xKubernetesMapType"` - - // x-kubernetes-validations describes a list of validation rules written in the CEL expression language. - // This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled. 
- // +patchMergeKey=rule - // +patchStrategy=merge - // +listType=map - // +listMapKey=rule - XValidations ValidationRules `json:"x-kubernetes-validations,omitempty" patchStrategy:"merge" patchMergeKey:"rule" protobuf:"bytes,44,rep,name=xKubernetesValidations"` -} - -// ValidationRules describes a list of validation rules written in the CEL expression language. -type ValidationRules []ValidationRule - -// ValidationRule describes a validation rule written in the CEL expression language. -type ValidationRule struct { - // Rule represents the expression which will be evaluated by CEL. - // ref: https://github.com/google/cel-spec - // The Rule is scoped to the location of the x-kubernetes-validations extension in the schema. - // The `self` variable in the CEL expression is bound to the scoped value. - // Example: - // - Rule scoped to the root of a resource with a status subresource: {"rule": "self.status.actual <= self.spec.maxDesired"} - // - // If the Rule is scoped to an object with properties, the accessible properties of the object are field selectable - // via `self.field` and field presence can be checked via `has(self.field)`. Null valued fields are treated as - // absent fields in CEL expressions. - // If the Rule is scoped to an object with additionalProperties (i.e. a map) the value of the map - // are accessible via `self[mapKey]`, map containment can be checked via `mapKey in self` and all entries of the map - // are accessible via CEL macros and functions such as `self.all(...)`. - // If the Rule is scoped to an array, the elements of the array are accessible via `self[i]` and also by macros and - // functions. - // If the Rule is scoped to a scalar, `self` is bound to the scalar value. - // Examples: - // - Rule scoped to a map of objects: {"rule": "self.components['Widget'].priority < 10"} - // - Rule scoped to a list of integers: {"rule": "self.values.all(value, value >= 0 && value < 100)"} - // - Rule scoped to a string value: {"rule": "self.startsWith('kube')"} - // - // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the - // object and from any x-kubernetes-embedded-resource annotated objects. No other metadata properties are accessible. - // - // Unknown data preserved in custom resources via x-kubernetes-preserve-unknown-fields is not accessible in CEL - // expressions. This includes: - // - Unknown field values that are preserved by object schemas with x-kubernetes-preserve-unknown-fields. - // - Object properties where the property schema is of an "unknown type". An "unknown type" is recursively defined as: - // - A schema with no type and x-kubernetes-preserve-unknown-fields set to true - // - An array where the items schema is of an "unknown type" - // - An object where the additionalProperties schema is of an "unknown type" - // - // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. - // Accessible property names are escaped according to the following rules when accessed in the expression: - // - '__' escapes to '__underscores__' - // - '.' escapes to '__dot__' - // - '-' escapes to '__dash__' - // - '/' escapes to '__slash__' - // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: - // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if", - // "import", "let", "loop", "package", "namespace", "return". 
- // Examples: - // - Rule accessing a property named "namespace": {"rule": "self.__namespace__ > 0"} - // - Rule accessing a property named "x-prop": {"rule": "self.x__dash__prop > 0"} - // - Rule accessing a property named "redact__d": {"rule": "self.redact__underscores__d > 0"} - // - // Equality on arrays with x-kubernetes-list-type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. - // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: - // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and - // non-intersecting elements in `Y` are appended, retaining their partial order. - // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values - // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with - // non-intersecting keys are appended, retaining their partial order. - Rule string `json:"rule" protobuf:"bytes,1,opt,name=rule"` - // Message represents the message displayed when validation fails. The message is required if the Rule contains - // line breaks. The message must not contain line breaks. - // If unset, the message is "failed rule: {Rule}". - // e.g. "must be a URL with the host matching spec.host" - Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` -} - -// JSON represents any valid JSON value. -// These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil. -type JSON struct { - Raw []byte `protobuf:"bytes,1,opt,name=raw"` -} - -// OpenAPISchemaType is used by the kube-openapi generator when constructing -// the OpenAPI spec of this type. -// -// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators -func (_ JSON) OpenAPISchemaType() []string { - // TODO: return actual types when anyOf is supported - return nil -} - -// OpenAPISchemaFormat is used by the kube-openapi generator when constructing -// the OpenAPI spec of this type. -func (_ JSON) OpenAPISchemaFormat() string { return "" } - -// JSONSchemaURL represents a schema url. -type JSONSchemaURL string - -// JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps -// or an array of JSONSchemaProps. Mainly here for serialization purposes. -type JSONSchemaPropsOrArray struct { - Schema *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema"` - JSONSchemas []JSONSchemaProps `protobuf:"bytes,2,rep,name=jSONSchemas"` -} - -// OpenAPISchemaType is used by the kube-openapi generator when constructing -// the OpenAPI spec of this type. -// -// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators -func (_ JSONSchemaPropsOrArray) OpenAPISchemaType() []string { - // TODO: return actual types when anyOf is supported - return nil -} - -// OpenAPISchemaFormat is used by the kube-openapi generator when constructing -// the OpenAPI spec of this type. -func (_ JSONSchemaPropsOrArray) OpenAPISchemaFormat() string { return "" } - -// JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. -// Defaults to true for the boolean property. -type JSONSchemaPropsOrBool struct { - Allows bool `protobuf:"varint,1,opt,name=allows"` - Schema *JSONSchemaProps `protobuf:"bytes,2,opt,name=schema"` -} - -// OpenAPISchemaType is used by the kube-openapi generator when constructing -// the OpenAPI spec of this type. 
-// -// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators -func (_ JSONSchemaPropsOrBool) OpenAPISchemaType() []string { - // TODO: return actual types when anyOf is supported - return nil -} - -// OpenAPISchemaFormat is used by the kube-openapi generator when constructing -// the OpenAPI spec of this type. -func (_ JSONSchemaPropsOrBool) OpenAPISchemaFormat() string { return "" } - -// JSONSchemaDependencies represent a dependencies property. -type JSONSchemaDependencies map[string]JSONSchemaPropsOrStringArray - -// JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array. -type JSONSchemaPropsOrStringArray struct { - Schema *JSONSchemaProps `protobuf:"bytes,1,opt,name=schema"` - Property []string `protobuf:"bytes,2,rep,name=property"` -} - -// OpenAPISchemaType is used by the kube-openapi generator when constructing -// the OpenAPI spec of this type. -// -// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators -func (_ JSONSchemaPropsOrStringArray) OpenAPISchemaType() []string { - // TODO: return actual types when anyOf is supported - return nil -} - -// OpenAPISchemaFormat is used by the kube-openapi generator when constructing -// the OpenAPI spec of this type. -func (_ JSONSchemaPropsOrStringArray) OpenAPISchemaFormat() string { return "" } - -// JSONSchemaDefinitions contains the models explicitly defined in this spec. -type JSONSchemaDefinitions map[string]JSONSchemaProps - -// ExternalDocumentation allows referencing an external resource for extended documentation. -type ExternalDocumentation struct { - Description string `json:"description,omitempty" protobuf:"bytes,1,opt,name=description"` - URL string `json:"url,omitempty" protobuf:"bytes,2,opt,name=url"` -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go deleted file mode 100644 index 54cae3cfd..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go +++ /dev/null @@ -1,1366 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. - -package v1beta1 - -import ( - unsafe "unsafe" - - apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. 
-func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*CustomResourceColumnDefinition)(nil), (*apiextensions.CustomResourceColumnDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(a.(*CustomResourceColumnDefinition), b.(*apiextensions.CustomResourceColumnDefinition), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceColumnDefinition)(nil), (*CustomResourceColumnDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResourceColumnDefinition(a.(*apiextensions.CustomResourceColumnDefinition), b.(*CustomResourceColumnDefinition), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*CustomResourceConversion)(nil), (*apiextensions.CustomResourceConversion)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(a.(*CustomResourceConversion), b.(*apiextensions.CustomResourceConversion), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceConversion)(nil), (*CustomResourceConversion)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion(a.(*apiextensions.CustomResourceConversion), b.(*CustomResourceConversion), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*CustomResourceDefinition)(nil), (*apiextensions.CustomResourceDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(a.(*CustomResourceDefinition), b.(*apiextensions.CustomResourceDefinition), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinition)(nil), (*CustomResourceDefinition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceDefinition_To_v1beta1_CustomResourceDefinition(a.(*apiextensions.CustomResourceDefinition), b.(*CustomResourceDefinition), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionCondition)(nil), (*apiextensions.CustomResourceDefinitionCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(a.(*CustomResourceDefinitionCondition), b.(*apiextensions.CustomResourceDefinitionCondition), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionCondition)(nil), (*CustomResourceDefinitionCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceDefinitionCondition_To_v1beta1_CustomResourceDefinitionCondition(a.(*apiextensions.CustomResourceDefinitionCondition), b.(*CustomResourceDefinitionCondition), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionList)(nil), (*apiextensions.CustomResourceDefinitionList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1beta1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(a.(*CustomResourceDefinitionList), b.(*apiextensions.CustomResourceDefinitionList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionList)(nil), (*CustomResourceDefinitionList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceDefinitionList_To_v1beta1_CustomResourceDefinitionList(a.(*apiextensions.CustomResourceDefinitionList), b.(*CustomResourceDefinitionList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionNames)(nil), (*apiextensions.CustomResourceDefinitionNames)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(a.(*CustomResourceDefinitionNames), b.(*apiextensions.CustomResourceDefinitionNames), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionNames)(nil), (*CustomResourceDefinitionNames)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceDefinitionNames_To_v1beta1_CustomResourceDefinitionNames(a.(*apiextensions.CustomResourceDefinitionNames), b.(*CustomResourceDefinitionNames), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionSpec)(nil), (*apiextensions.CustomResourceDefinitionSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(a.(*CustomResourceDefinitionSpec), b.(*apiextensions.CustomResourceDefinitionSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionSpec)(nil), (*CustomResourceDefinitionSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomResourceDefinitionSpec(a.(*apiextensions.CustomResourceDefinitionSpec), b.(*CustomResourceDefinitionSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionStatus)(nil), (*apiextensions.CustomResourceDefinitionStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(a.(*CustomResourceDefinitionStatus), b.(*apiextensions.CustomResourceDefinitionStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionStatus)(nil), (*CustomResourceDefinitionStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceDefinitionStatus_To_v1beta1_CustomResourceDefinitionStatus(a.(*apiextensions.CustomResourceDefinitionStatus), b.(*CustomResourceDefinitionStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitionVersion)(nil), (*apiextensions.CustomResourceDefinitionVersion)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(a.(*CustomResourceDefinitionVersion), b.(*apiextensions.CustomResourceDefinitionVersion), scope) - }); err != 
nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceDefinitionVersion)(nil), (*CustomResourceDefinitionVersion)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_CustomResourceDefinitionVersion(a.(*apiextensions.CustomResourceDefinitionVersion), b.(*CustomResourceDefinitionVersion), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*CustomResourceSubresourceScale)(nil), (*apiextensions.CustomResourceSubresourceScale)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(a.(*CustomResourceSubresourceScale), b.(*apiextensions.CustomResourceSubresourceScale), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceSubresourceScale)(nil), (*CustomResourceSubresourceScale)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceSubresourceScale_To_v1beta1_CustomResourceSubresourceScale(a.(*apiextensions.CustomResourceSubresourceScale), b.(*CustomResourceSubresourceScale), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*CustomResourceSubresourceStatus)(nil), (*apiextensions.CustomResourceSubresourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(a.(*CustomResourceSubresourceStatus), b.(*apiextensions.CustomResourceSubresourceStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceSubresourceStatus)(nil), (*CustomResourceSubresourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceSubresourceStatus_To_v1beta1_CustomResourceSubresourceStatus(a.(*apiextensions.CustomResourceSubresourceStatus), b.(*CustomResourceSubresourceStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*CustomResourceSubresources)(nil), (*apiextensions.CustomResourceSubresources)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(a.(*CustomResourceSubresources), b.(*apiextensions.CustomResourceSubresources), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceSubresources)(nil), (*CustomResourceSubresources)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_CustomResourceSubresources_To_v1beta1_CustomResourceSubresources(a.(*apiextensions.CustomResourceSubresources), b.(*CustomResourceSubresources), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*CustomResourceValidation)(nil), (*apiextensions.CustomResourceValidation)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(a.(*CustomResourceValidation), b.(*apiextensions.CustomResourceValidation), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.CustomResourceValidation)(nil), (*CustomResourceValidation)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_apiextensions_CustomResourceValidation_To_v1beta1_CustomResourceValidation(a.(*apiextensions.CustomResourceValidation), b.(*CustomResourceValidation), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*ExternalDocumentation)(nil), (*apiextensions.ExternalDocumentation)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(a.(*ExternalDocumentation), b.(*apiextensions.ExternalDocumentation), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.ExternalDocumentation)(nil), (*ExternalDocumentation)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_ExternalDocumentation_To_v1beta1_ExternalDocumentation(a.(*apiextensions.ExternalDocumentation), b.(*ExternalDocumentation), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*JSONSchemaProps)(nil), (*apiextensions.JSONSchemaProps)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(a.(*JSONSchemaProps), b.(*apiextensions.JSONSchemaProps), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*JSONSchemaPropsOrArray)(nil), (*apiextensions.JSONSchemaPropsOrArray)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(a.(*JSONSchemaPropsOrArray), b.(*apiextensions.JSONSchemaPropsOrArray), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.JSONSchemaPropsOrArray)(nil), (*JSONSchemaPropsOrArray)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_JSONSchemaPropsOrArray_To_v1beta1_JSONSchemaPropsOrArray(a.(*apiextensions.JSONSchemaPropsOrArray), b.(*JSONSchemaPropsOrArray), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*JSONSchemaPropsOrBool)(nil), (*apiextensions.JSONSchemaPropsOrBool)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(a.(*JSONSchemaPropsOrBool), b.(*apiextensions.JSONSchemaPropsOrBool), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.JSONSchemaPropsOrBool)(nil), (*JSONSchemaPropsOrBool)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_JSONSchemaPropsOrBool_To_v1beta1_JSONSchemaPropsOrBool(a.(*apiextensions.JSONSchemaPropsOrBool), b.(*JSONSchemaPropsOrBool), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*JSONSchemaPropsOrStringArray)(nil), (*apiextensions.JSONSchemaPropsOrStringArray)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(a.(*JSONSchemaPropsOrStringArray), b.(*apiextensions.JSONSchemaPropsOrStringArray), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.JSONSchemaPropsOrStringArray)(nil), (*JSONSchemaPropsOrStringArray)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1beta1_JSONSchemaPropsOrStringArray(a.(*apiextensions.JSONSchemaPropsOrStringArray), 
b.(*JSONSchemaPropsOrStringArray), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*ServiceReference)(nil), (*apiextensions.ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(a.(*ServiceReference), b.(*apiextensions.ServiceReference), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.ServiceReference)(nil), (*ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(a.(*apiextensions.ServiceReference), b.(*ServiceReference), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*ValidationRule)(nil), (*apiextensions.ValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ValidationRule_To_apiextensions_ValidationRule(a.(*ValidationRule), b.(*apiextensions.ValidationRule), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.ValidationRule)(nil), (*ValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_ValidationRule_To_v1beta1_ValidationRule(a.(*apiextensions.ValidationRule), b.(*ValidationRule), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*WebhookClientConfig)(nil), (*apiextensions.WebhookClientConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(a.(*WebhookClientConfig), b.(*apiextensions.WebhookClientConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiextensions.WebhookClientConfig)(nil), (*WebhookClientConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig(a.(*apiextensions.WebhookClientConfig), b.(*WebhookClientConfig), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*apiextensions.JSONSchemaProps)(nil), (*JSONSchemaProps)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(a.(*apiextensions.JSONSchemaProps), b.(*JSONSchemaProps), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*apiextensions.JSON)(nil), (*JSON)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiextensions_JSON_To_v1beta1_JSON(a.(*apiextensions.JSON), b.(*JSON), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*JSON)(nil), (*apiextensions.JSON)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_JSON_To_apiextensions_JSON(a.(*JSON), b.(*apiextensions.JSON), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1beta1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in *CustomResourceColumnDefinition, out *apiextensions.CustomResourceColumnDefinition, s conversion.Scope) error { - out.Name = in.Name - out.Type = in.Type - out.Format = in.Format - out.Description = in.Description - out.Priority = in.Priority - out.JSONPath = in.JSONPath - return nil -} - -// Convert_v1beta1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition is an autogenerated conversion function. 
-func Convert_v1beta1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in *CustomResourceColumnDefinition, out *apiextensions.CustomResourceColumnDefinition, s conversion.Scope) error { - return autoConvert_v1beta1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in, out, s) -} - -func autoConvert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResourceColumnDefinition(in *apiextensions.CustomResourceColumnDefinition, out *CustomResourceColumnDefinition, s conversion.Scope) error { - out.Name = in.Name - out.Type = in.Type - out.Format = in.Format - out.Description = in.Description - out.Priority = in.Priority - out.JSONPath = in.JSONPath - return nil -} - -// Convert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResourceColumnDefinition is an autogenerated conversion function. -func Convert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResourceColumnDefinition(in *apiextensions.CustomResourceColumnDefinition, out *CustomResourceColumnDefinition, s conversion.Scope) error { - return autoConvert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResourceColumnDefinition(in, out, s) -} - -func autoConvert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in *CustomResourceConversion, out *apiextensions.CustomResourceConversion, s conversion.Scope) error { - out.Strategy = apiextensions.ConversionStrategyType(in.Strategy) - if in.WebhookClientConfig != nil { - in, out := &in.WebhookClientConfig, &out.WebhookClientConfig - *out = new(apiextensions.WebhookClientConfig) - if err := Convert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(*in, *out, s); err != nil { - return err - } - } else { - out.WebhookClientConfig = nil - } - out.ConversionReviewVersions = *(*[]string)(unsafe.Pointer(&in.ConversionReviewVersions)) - return nil -} - -// Convert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion is an autogenerated conversion function. -func Convert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in *CustomResourceConversion, out *apiextensions.CustomResourceConversion, s conversion.Scope) error { - return autoConvert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in, out, s) -} - -func autoConvert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion(in *apiextensions.CustomResourceConversion, out *CustomResourceConversion, s conversion.Scope) error { - out.Strategy = ConversionStrategyType(in.Strategy) - if in.WebhookClientConfig != nil { - in, out := &in.WebhookClientConfig, &out.WebhookClientConfig - *out = new(WebhookClientConfig) - if err := Convert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig(*in, *out, s); err != nil { - return err - } - } else { - out.WebhookClientConfig = nil - } - out.ConversionReviewVersions = *(*[]string)(unsafe.Pointer(&in.ConversionReviewVersions)) - return nil -} - -// Convert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion is an autogenerated conversion function. 
-func Convert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion(in *apiextensions.CustomResourceConversion, out *CustomResourceConversion, s conversion.Scope) error { - return autoConvert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion(in, out, s) -} - -func autoConvert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(in *CustomResourceDefinition, out *apiextensions.CustomResourceDefinition, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition is an autogenerated conversion function. -func Convert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(in *CustomResourceDefinition, out *apiextensions.CustomResourceDefinition, s conversion.Scope) error { - return autoConvert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(in, out, s) -} - -func autoConvert_apiextensions_CustomResourceDefinition_To_v1beta1_CustomResourceDefinition(in *apiextensions.CustomResourceDefinition, out *CustomResourceDefinition, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomResourceDefinitionSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_apiextensions_CustomResourceDefinitionStatus_To_v1beta1_CustomResourceDefinitionStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_apiextensions_CustomResourceDefinition_To_v1beta1_CustomResourceDefinition is an autogenerated conversion function. -func Convert_apiextensions_CustomResourceDefinition_To_v1beta1_CustomResourceDefinition(in *apiextensions.CustomResourceDefinition, out *CustomResourceDefinition, s conversion.Scope) error { - return autoConvert_apiextensions_CustomResourceDefinition_To_v1beta1_CustomResourceDefinition(in, out, s) -} - -func autoConvert_v1beta1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(in *CustomResourceDefinitionCondition, out *apiextensions.CustomResourceDefinitionCondition, s conversion.Scope) error { - out.Type = apiextensions.CustomResourceDefinitionConditionType(in.Type) - out.Status = apiextensions.ConditionStatus(in.Status) - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_v1beta1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition is an autogenerated conversion function. 
-func Convert_v1beta1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(in *CustomResourceDefinitionCondition, out *apiextensions.CustomResourceDefinitionCondition, s conversion.Scope) error { - return autoConvert_v1beta1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition(in, out, s) -} - -func autoConvert_apiextensions_CustomResourceDefinitionCondition_To_v1beta1_CustomResourceDefinitionCondition(in *apiextensions.CustomResourceDefinitionCondition, out *CustomResourceDefinitionCondition, s conversion.Scope) error { - out.Type = CustomResourceDefinitionConditionType(in.Type) - out.Status = ConditionStatus(in.Status) - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_apiextensions_CustomResourceDefinitionCondition_To_v1beta1_CustomResourceDefinitionCondition is an autogenerated conversion function. -func Convert_apiextensions_CustomResourceDefinitionCondition_To_v1beta1_CustomResourceDefinitionCondition(in *apiextensions.CustomResourceDefinitionCondition, out *CustomResourceDefinitionCondition, s conversion.Scope) error { - return autoConvert_apiextensions_CustomResourceDefinitionCondition_To_v1beta1_CustomResourceDefinitionCondition(in, out, s) -} - -func autoConvert_v1beta1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(in *CustomResourceDefinitionList, out *apiextensions.CustomResourceDefinitionList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]apiextensions.CustomResourceDefinition, len(*in)) - for i := range *in { - if err := Convert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1beta1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList is an autogenerated conversion function. -func Convert_v1beta1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(in *CustomResourceDefinitionList, out *apiextensions.CustomResourceDefinitionList, s conversion.Scope) error { - return autoConvert_v1beta1_CustomResourceDefinitionList_To_apiextensions_CustomResourceDefinitionList(in, out, s) -} - -func autoConvert_apiextensions_CustomResourceDefinitionList_To_v1beta1_CustomResourceDefinitionList(in *apiextensions.CustomResourceDefinitionList, out *CustomResourceDefinitionList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CustomResourceDefinition, len(*in)) - for i := range *in { - if err := Convert_apiextensions_CustomResourceDefinition_To_v1beta1_CustomResourceDefinition(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_apiextensions_CustomResourceDefinitionList_To_v1beta1_CustomResourceDefinitionList is an autogenerated conversion function. 
-func Convert_apiextensions_CustomResourceDefinitionList_To_v1beta1_CustomResourceDefinitionList(in *apiextensions.CustomResourceDefinitionList, out *CustomResourceDefinitionList, s conversion.Scope) error { - return autoConvert_apiextensions_CustomResourceDefinitionList_To_v1beta1_CustomResourceDefinitionList(in, out, s) -} - -func autoConvert_v1beta1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(in *CustomResourceDefinitionNames, out *apiextensions.CustomResourceDefinitionNames, s conversion.Scope) error { - out.Plural = in.Plural - out.Singular = in.Singular - out.ShortNames = *(*[]string)(unsafe.Pointer(&in.ShortNames)) - out.Kind = in.Kind - out.ListKind = in.ListKind - out.Categories = *(*[]string)(unsafe.Pointer(&in.Categories)) - return nil -} - -// Convert_v1beta1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames is an autogenerated conversion function. -func Convert_v1beta1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(in *CustomResourceDefinitionNames, out *apiextensions.CustomResourceDefinitionNames, s conversion.Scope) error { - return autoConvert_v1beta1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(in, out, s) -} - -func autoConvert_apiextensions_CustomResourceDefinitionNames_To_v1beta1_CustomResourceDefinitionNames(in *apiextensions.CustomResourceDefinitionNames, out *CustomResourceDefinitionNames, s conversion.Scope) error { - out.Plural = in.Plural - out.Singular = in.Singular - out.ShortNames = *(*[]string)(unsafe.Pointer(&in.ShortNames)) - out.Kind = in.Kind - out.ListKind = in.ListKind - out.Categories = *(*[]string)(unsafe.Pointer(&in.Categories)) - return nil -} - -// Convert_apiextensions_CustomResourceDefinitionNames_To_v1beta1_CustomResourceDefinitionNames is an autogenerated conversion function. 
-func Convert_apiextensions_CustomResourceDefinitionNames_To_v1beta1_CustomResourceDefinitionNames(in *apiextensions.CustomResourceDefinitionNames, out *CustomResourceDefinitionNames, s conversion.Scope) error { - return autoConvert_apiextensions_CustomResourceDefinitionNames_To_v1beta1_CustomResourceDefinitionNames(in, out, s) -} - -func autoConvert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in *CustomResourceDefinitionSpec, out *apiextensions.CustomResourceDefinitionSpec, s conversion.Scope) error { - out.Group = in.Group - out.Version = in.Version - if err := Convert_v1beta1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(&in.Names, &out.Names, s); err != nil { - return err - } - out.Scope = apiextensions.ResourceScope(in.Scope) - if in.Validation != nil { - in, out := &in.Validation, &out.Validation - *out = new(apiextensions.CustomResourceValidation) - if err := Convert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(*in, *out, s); err != nil { - return err - } - } else { - out.Validation = nil - } - out.Subresources = (*apiextensions.CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = make([]apiextensions.CustomResourceDefinitionVersion, len(*in)) - for i := range *in { - if err := Convert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Versions = nil - } - out.AdditionalPrinterColumns = *(*[]apiextensions.CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) - if in.Conversion != nil { - in, out := &in.Conversion, &out.Conversion - *out = new(apiextensions.CustomResourceConversion) - if err := Convert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(*in, *out, s); err != nil { - return err - } - } else { - out.Conversion = nil - } - out.PreserveUnknownFields = (*bool)(unsafe.Pointer(in.PreserveUnknownFields)) - return nil -} - -// Convert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec is an autogenerated conversion function. 
-func Convert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in *CustomResourceDefinitionSpec, out *apiextensions.CustomResourceDefinitionSpec, s conversion.Scope) error { - return autoConvert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in, out, s) -} - -func autoConvert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomResourceDefinitionSpec(in *apiextensions.CustomResourceDefinitionSpec, out *CustomResourceDefinitionSpec, s conversion.Scope) error { - out.Group = in.Group - out.Version = in.Version - if err := Convert_apiextensions_CustomResourceDefinitionNames_To_v1beta1_CustomResourceDefinitionNames(&in.Names, &out.Names, s); err != nil { - return err - } - out.Scope = ResourceScope(in.Scope) - if in.Validation != nil { - in, out := &in.Validation, &out.Validation - *out = new(CustomResourceValidation) - if err := Convert_apiextensions_CustomResourceValidation_To_v1beta1_CustomResourceValidation(*in, *out, s); err != nil { - return err - } - } else { - out.Validation = nil - } - out.Subresources = (*CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = make([]CustomResourceDefinitionVersion, len(*in)) - for i := range *in { - if err := Convert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_CustomResourceDefinitionVersion(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Versions = nil - } - out.AdditionalPrinterColumns = *(*[]CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) - if in.Conversion != nil { - in, out := &in.Conversion, &out.Conversion - *out = new(CustomResourceConversion) - if err := Convert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion(*in, *out, s); err != nil { - return err - } - } else { - out.Conversion = nil - } - out.PreserveUnknownFields = (*bool)(unsafe.Pointer(in.PreserveUnknownFields)) - return nil -} - -// Convert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomResourceDefinitionSpec is an autogenerated conversion function. -func Convert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomResourceDefinitionSpec(in *apiextensions.CustomResourceDefinitionSpec, out *CustomResourceDefinitionSpec, s conversion.Scope) error { - return autoConvert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomResourceDefinitionSpec(in, out, s) -} - -func autoConvert_v1beta1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(in *CustomResourceDefinitionStatus, out *apiextensions.CustomResourceDefinitionStatus, s conversion.Scope) error { - out.Conditions = *(*[]apiextensions.CustomResourceDefinitionCondition)(unsafe.Pointer(&in.Conditions)) - if err := Convert_v1beta1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(&in.AcceptedNames, &out.AcceptedNames, s); err != nil { - return err - } - out.StoredVersions = *(*[]string)(unsafe.Pointer(&in.StoredVersions)) - return nil -} - -// Convert_v1beta1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus is an autogenerated conversion function. 
-func Convert_v1beta1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(in *CustomResourceDefinitionStatus, out *apiextensions.CustomResourceDefinitionStatus, s conversion.Scope) error { - return autoConvert_v1beta1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus(in, out, s) -} - -func autoConvert_apiextensions_CustomResourceDefinitionStatus_To_v1beta1_CustomResourceDefinitionStatus(in *apiextensions.CustomResourceDefinitionStatus, out *CustomResourceDefinitionStatus, s conversion.Scope) error { - out.Conditions = *(*[]CustomResourceDefinitionCondition)(unsafe.Pointer(&in.Conditions)) - if err := Convert_apiextensions_CustomResourceDefinitionNames_To_v1beta1_CustomResourceDefinitionNames(&in.AcceptedNames, &out.AcceptedNames, s); err != nil { - return err - } - out.StoredVersions = *(*[]string)(unsafe.Pointer(&in.StoredVersions)) - return nil -} - -// Convert_apiextensions_CustomResourceDefinitionStatus_To_v1beta1_CustomResourceDefinitionStatus is an autogenerated conversion function. -func Convert_apiextensions_CustomResourceDefinitionStatus_To_v1beta1_CustomResourceDefinitionStatus(in *apiextensions.CustomResourceDefinitionStatus, out *CustomResourceDefinitionStatus, s conversion.Scope) error { - return autoConvert_apiextensions_CustomResourceDefinitionStatus_To_v1beta1_CustomResourceDefinitionStatus(in, out, s) -} - -func autoConvert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(in *CustomResourceDefinitionVersion, out *apiextensions.CustomResourceDefinitionVersion, s conversion.Scope) error { - out.Name = in.Name - out.Served = in.Served - out.Storage = in.Storage - out.Deprecated = in.Deprecated - out.DeprecationWarning = (*string)(unsafe.Pointer(in.DeprecationWarning)) - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = new(apiextensions.CustomResourceValidation) - if err := Convert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(*in, *out, s); err != nil { - return err - } - } else { - out.Schema = nil - } - out.Subresources = (*apiextensions.CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) - out.AdditionalPrinterColumns = *(*[]apiextensions.CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) - return nil -} - -// Convert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion is an autogenerated conversion function. 
-func Convert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(in *CustomResourceDefinitionVersion, out *apiextensions.CustomResourceDefinitionVersion, s conversion.Scope) error { - return autoConvert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(in, out, s) -} - -func autoConvert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_CustomResourceDefinitionVersion(in *apiextensions.CustomResourceDefinitionVersion, out *CustomResourceDefinitionVersion, s conversion.Scope) error { - out.Name = in.Name - out.Served = in.Served - out.Storage = in.Storage - out.Deprecated = in.Deprecated - out.DeprecationWarning = (*string)(unsafe.Pointer(in.DeprecationWarning)) - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = new(CustomResourceValidation) - if err := Convert_apiextensions_CustomResourceValidation_To_v1beta1_CustomResourceValidation(*in, *out, s); err != nil { - return err - } - } else { - out.Schema = nil - } - out.Subresources = (*CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) - out.AdditionalPrinterColumns = *(*[]CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) - return nil -} - -// Convert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_CustomResourceDefinitionVersion is an autogenerated conversion function. -func Convert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_CustomResourceDefinitionVersion(in *apiextensions.CustomResourceDefinitionVersion, out *CustomResourceDefinitionVersion, s conversion.Scope) error { - return autoConvert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_CustomResourceDefinitionVersion(in, out, s) -} - -func autoConvert_v1beta1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(in *CustomResourceSubresourceScale, out *apiextensions.CustomResourceSubresourceScale, s conversion.Scope) error { - out.SpecReplicasPath = in.SpecReplicasPath - out.StatusReplicasPath = in.StatusReplicasPath - out.LabelSelectorPath = (*string)(unsafe.Pointer(in.LabelSelectorPath)) - return nil -} - -// Convert_v1beta1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale is an autogenerated conversion function. -func Convert_v1beta1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(in *CustomResourceSubresourceScale, out *apiextensions.CustomResourceSubresourceScale, s conversion.Scope) error { - return autoConvert_v1beta1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(in, out, s) -} - -func autoConvert_apiextensions_CustomResourceSubresourceScale_To_v1beta1_CustomResourceSubresourceScale(in *apiextensions.CustomResourceSubresourceScale, out *CustomResourceSubresourceScale, s conversion.Scope) error { - out.SpecReplicasPath = in.SpecReplicasPath - out.StatusReplicasPath = in.StatusReplicasPath - out.LabelSelectorPath = (*string)(unsafe.Pointer(in.LabelSelectorPath)) - return nil -} - -// Convert_apiextensions_CustomResourceSubresourceScale_To_v1beta1_CustomResourceSubresourceScale is an autogenerated conversion function. 
-func Convert_apiextensions_CustomResourceSubresourceScale_To_v1beta1_CustomResourceSubresourceScale(in *apiextensions.CustomResourceSubresourceScale, out *CustomResourceSubresourceScale, s conversion.Scope) error { - return autoConvert_apiextensions_CustomResourceSubresourceScale_To_v1beta1_CustomResourceSubresourceScale(in, out, s) -} - -func autoConvert_v1beta1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(in *CustomResourceSubresourceStatus, out *apiextensions.CustomResourceSubresourceStatus, s conversion.Scope) error { - return nil -} - -// Convert_v1beta1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus is an autogenerated conversion function. -func Convert_v1beta1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(in *CustomResourceSubresourceStatus, out *apiextensions.CustomResourceSubresourceStatus, s conversion.Scope) error { - return autoConvert_v1beta1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus(in, out, s) -} - -func autoConvert_apiextensions_CustomResourceSubresourceStatus_To_v1beta1_CustomResourceSubresourceStatus(in *apiextensions.CustomResourceSubresourceStatus, out *CustomResourceSubresourceStatus, s conversion.Scope) error { - return nil -} - -// Convert_apiextensions_CustomResourceSubresourceStatus_To_v1beta1_CustomResourceSubresourceStatus is an autogenerated conversion function. -func Convert_apiextensions_CustomResourceSubresourceStatus_To_v1beta1_CustomResourceSubresourceStatus(in *apiextensions.CustomResourceSubresourceStatus, out *CustomResourceSubresourceStatus, s conversion.Scope) error { - return autoConvert_apiextensions_CustomResourceSubresourceStatus_To_v1beta1_CustomResourceSubresourceStatus(in, out, s) -} - -func autoConvert_v1beta1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(in *CustomResourceSubresources, out *apiextensions.CustomResourceSubresources, s conversion.Scope) error { - out.Status = (*apiextensions.CustomResourceSubresourceStatus)(unsafe.Pointer(in.Status)) - out.Scale = (*apiextensions.CustomResourceSubresourceScale)(unsafe.Pointer(in.Scale)) - return nil -} - -// Convert_v1beta1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources is an autogenerated conversion function. -func Convert_v1beta1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(in *CustomResourceSubresources, out *apiextensions.CustomResourceSubresources, s conversion.Scope) error { - return autoConvert_v1beta1_CustomResourceSubresources_To_apiextensions_CustomResourceSubresources(in, out, s) -} - -func autoConvert_apiextensions_CustomResourceSubresources_To_v1beta1_CustomResourceSubresources(in *apiextensions.CustomResourceSubresources, out *CustomResourceSubresources, s conversion.Scope) error { - out.Status = (*CustomResourceSubresourceStatus)(unsafe.Pointer(in.Status)) - out.Scale = (*CustomResourceSubresourceScale)(unsafe.Pointer(in.Scale)) - return nil -} - -// Convert_apiextensions_CustomResourceSubresources_To_v1beta1_CustomResourceSubresources is an autogenerated conversion function. 
-func Convert_apiextensions_CustomResourceSubresources_To_v1beta1_CustomResourceSubresources(in *apiextensions.CustomResourceSubresources, out *CustomResourceSubresources, s conversion.Scope) error { - return autoConvert_apiextensions_CustomResourceSubresources_To_v1beta1_CustomResourceSubresources(in, out, s) -} - -func autoConvert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(in *CustomResourceValidation, out *apiextensions.CustomResourceValidation, s conversion.Scope) error { - if in.OpenAPIV3Schema != nil { - in, out := &in.OpenAPIV3Schema, &out.OpenAPIV3Schema - *out = new(apiextensions.JSONSchemaProps) - if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { - return err - } - } else { - out.OpenAPIV3Schema = nil - } - return nil -} - -// Convert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation is an autogenerated conversion function. -func Convert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(in *CustomResourceValidation, out *apiextensions.CustomResourceValidation, s conversion.Scope) error { - return autoConvert_v1beta1_CustomResourceValidation_To_apiextensions_CustomResourceValidation(in, out, s) -} - -func autoConvert_apiextensions_CustomResourceValidation_To_v1beta1_CustomResourceValidation(in *apiextensions.CustomResourceValidation, out *CustomResourceValidation, s conversion.Scope) error { - if in.OpenAPIV3Schema != nil { - in, out := &in.OpenAPIV3Schema, &out.OpenAPIV3Schema - *out = new(JSONSchemaProps) - if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(*in, *out, s); err != nil { - return err - } - } else { - out.OpenAPIV3Schema = nil - } - return nil -} - -// Convert_apiextensions_CustomResourceValidation_To_v1beta1_CustomResourceValidation is an autogenerated conversion function. -func Convert_apiextensions_CustomResourceValidation_To_v1beta1_CustomResourceValidation(in *apiextensions.CustomResourceValidation, out *CustomResourceValidation, s conversion.Scope) error { - return autoConvert_apiextensions_CustomResourceValidation_To_v1beta1_CustomResourceValidation(in, out, s) -} - -func autoConvert_v1beta1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(in *ExternalDocumentation, out *apiextensions.ExternalDocumentation, s conversion.Scope) error { - out.Description = in.Description - out.URL = in.URL - return nil -} - -// Convert_v1beta1_ExternalDocumentation_To_apiextensions_ExternalDocumentation is an autogenerated conversion function. -func Convert_v1beta1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(in *ExternalDocumentation, out *apiextensions.ExternalDocumentation, s conversion.Scope) error { - return autoConvert_v1beta1_ExternalDocumentation_To_apiextensions_ExternalDocumentation(in, out, s) -} - -func autoConvert_apiextensions_ExternalDocumentation_To_v1beta1_ExternalDocumentation(in *apiextensions.ExternalDocumentation, out *ExternalDocumentation, s conversion.Scope) error { - out.Description = in.Description - out.URL = in.URL - return nil -} - -// Convert_apiextensions_ExternalDocumentation_To_v1beta1_ExternalDocumentation is an autogenerated conversion function. 
-func Convert_apiextensions_ExternalDocumentation_To_v1beta1_ExternalDocumentation(in *apiextensions.ExternalDocumentation, out *ExternalDocumentation, s conversion.Scope) error { - return autoConvert_apiextensions_ExternalDocumentation_To_v1beta1_ExternalDocumentation(in, out, s) -} - -func autoConvert_v1beta1_JSON_To_apiextensions_JSON(in *JSON, out *apiextensions.JSON, s conversion.Scope) error { - // WARNING: in.Raw requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_apiextensions_JSON_To_v1beta1_JSON(in *apiextensions.JSON, out *JSON, s conversion.Scope) error { - // FIXME: Type apiextensions.JSON is unsupported. - return nil -} - -func autoConvert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in *JSONSchemaProps, out *apiextensions.JSONSchemaProps, s conversion.Scope) error { - out.ID = in.ID - out.Schema = apiextensions.JSONSchemaURL(in.Schema) - out.Ref = (*string)(unsafe.Pointer(in.Ref)) - out.Description = in.Description - out.Type = in.Type - out.Format = in.Format - out.Title = in.Title - if in.Default != nil { - in, out := &in.Default, &out.Default - *out = new(apiextensions.JSON) - if err := Convert_v1beta1_JSON_To_apiextensions_JSON(*in, *out, s); err != nil { - return err - } - } else { - out.Default = nil - } - out.Maximum = (*float64)(unsafe.Pointer(in.Maximum)) - out.ExclusiveMaximum = in.ExclusiveMaximum - out.Minimum = (*float64)(unsafe.Pointer(in.Minimum)) - out.ExclusiveMinimum = in.ExclusiveMinimum - out.MaxLength = (*int64)(unsafe.Pointer(in.MaxLength)) - out.MinLength = (*int64)(unsafe.Pointer(in.MinLength)) - out.Pattern = in.Pattern - out.MaxItems = (*int64)(unsafe.Pointer(in.MaxItems)) - out.MinItems = (*int64)(unsafe.Pointer(in.MinItems)) - out.UniqueItems = in.UniqueItems - out.MultipleOf = (*float64)(unsafe.Pointer(in.MultipleOf)) - if in.Enum != nil { - in, out := &in.Enum, &out.Enum - *out = make([]apiextensions.JSON, len(*in)) - for i := range *in { - if err := Convert_v1beta1_JSON_To_apiextensions_JSON(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Enum = nil - } - out.MaxProperties = (*int64)(unsafe.Pointer(in.MaxProperties)) - out.MinProperties = (*int64)(unsafe.Pointer(in.MinProperties)) - out.Required = *(*[]string)(unsafe.Pointer(&in.Required)) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = new(apiextensions.JSONSchemaPropsOrArray) - if err := Convert_v1beta1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(*in, *out, s); err != nil { - return err - } - } else { - out.Items = nil - } - if in.AllOf != nil { - in, out := &in.AllOf, &out.AllOf - *out = make([]apiextensions.JSONSchemaProps, len(*in)) - for i := range *in { - if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.AllOf = nil - } - if in.OneOf != nil { - in, out := &in.OneOf, &out.OneOf - *out = make([]apiextensions.JSONSchemaProps, len(*in)) - for i := range *in { - if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.OneOf = nil - } - if in.AnyOf != nil { - in, out := &in.AnyOf, &out.AnyOf - *out = make([]apiextensions.JSONSchemaProps, len(*in)) - for i := range *in { - if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.AnyOf = nil - } - if in.Not != nil { - in, out := &in.Not, 
&out.Not - *out = new(apiextensions.JSONSchemaProps) - if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { - return err - } - } else { - out.Not = nil - } - if in.Properties != nil { - in, out := &in.Properties, &out.Properties - *out = make(map[string]apiextensions.JSONSchemaProps, len(*in)) - for key, val := range *in { - newVal := new(apiextensions.JSONSchemaProps) - if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&val, newVal, s); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Properties = nil - } - if in.AdditionalProperties != nil { - in, out := &in.AdditionalProperties, &out.AdditionalProperties - *out = new(apiextensions.JSONSchemaPropsOrBool) - if err := Convert_v1beta1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(*in, *out, s); err != nil { - return err - } - } else { - out.AdditionalProperties = nil - } - if in.PatternProperties != nil { - in, out := &in.PatternProperties, &out.PatternProperties - *out = make(map[string]apiextensions.JSONSchemaProps, len(*in)) - for key, val := range *in { - newVal := new(apiextensions.JSONSchemaProps) - if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&val, newVal, s); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.PatternProperties = nil - } - if in.Dependencies != nil { - in, out := &in.Dependencies, &out.Dependencies - *out = make(apiextensions.JSONSchemaDependencies, len(*in)) - for key, val := range *in { - newVal := new(apiextensions.JSONSchemaPropsOrStringArray) - if err := Convert_v1beta1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(&val, newVal, s); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Dependencies = nil - } - if in.AdditionalItems != nil { - in, out := &in.AdditionalItems, &out.AdditionalItems - *out = new(apiextensions.JSONSchemaPropsOrBool) - if err := Convert_v1beta1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(*in, *out, s); err != nil { - return err - } - } else { - out.AdditionalItems = nil - } - if in.Definitions != nil { - in, out := &in.Definitions, &out.Definitions - *out = make(apiextensions.JSONSchemaDefinitions, len(*in)) - for key, val := range *in { - newVal := new(apiextensions.JSONSchemaProps) - if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&val, newVal, s); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Definitions = nil - } - out.ExternalDocs = (*apiextensions.ExternalDocumentation)(unsafe.Pointer(in.ExternalDocs)) - if in.Example != nil { - in, out := &in.Example, &out.Example - *out = new(apiextensions.JSON) - if err := Convert_v1beta1_JSON_To_apiextensions_JSON(*in, *out, s); err != nil { - return err - } - } else { - out.Example = nil - } - out.Nullable = in.Nullable - out.XPreserveUnknownFields = (*bool)(unsafe.Pointer(in.XPreserveUnknownFields)) - out.XEmbeddedResource = in.XEmbeddedResource - out.XIntOrString = in.XIntOrString - out.XListMapKeys = *(*[]string)(unsafe.Pointer(&in.XListMapKeys)) - out.XListType = (*string)(unsafe.Pointer(in.XListType)) - out.XMapType = (*string)(unsafe.Pointer(in.XMapType)) - out.XValidations = *(*apiextensions.ValidationRules)(unsafe.Pointer(&in.XValidations)) - return nil -} - -// Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps is an autogenerated conversion function. 
-func Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in *JSONSchemaProps, out *apiextensions.JSONSchemaProps, s conversion.Scope) error { - return autoConvert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in, out, s) -} - -func autoConvert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(in *apiextensions.JSONSchemaProps, out *JSONSchemaProps, s conversion.Scope) error { - out.ID = in.ID - out.Schema = JSONSchemaURL(in.Schema) - out.Ref = (*string)(unsafe.Pointer(in.Ref)) - out.Description = in.Description - out.Type = in.Type - out.Nullable = in.Nullable - out.Format = in.Format - out.Title = in.Title - if in.Default != nil { - in, out := &in.Default, &out.Default - *out = new(JSON) - if err := Convert_apiextensions_JSON_To_v1beta1_JSON(*in, *out, s); err != nil { - return err - } - } else { - out.Default = nil - } - out.Maximum = (*float64)(unsafe.Pointer(in.Maximum)) - out.ExclusiveMaximum = in.ExclusiveMaximum - out.Minimum = (*float64)(unsafe.Pointer(in.Minimum)) - out.ExclusiveMinimum = in.ExclusiveMinimum - out.MaxLength = (*int64)(unsafe.Pointer(in.MaxLength)) - out.MinLength = (*int64)(unsafe.Pointer(in.MinLength)) - out.Pattern = in.Pattern - out.MaxItems = (*int64)(unsafe.Pointer(in.MaxItems)) - out.MinItems = (*int64)(unsafe.Pointer(in.MinItems)) - out.UniqueItems = in.UniqueItems - out.MultipleOf = (*float64)(unsafe.Pointer(in.MultipleOf)) - if in.Enum != nil { - in, out := &in.Enum, &out.Enum - *out = make([]JSON, len(*in)) - for i := range *in { - if err := Convert_apiextensions_JSON_To_v1beta1_JSON(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Enum = nil - } - out.MaxProperties = (*int64)(unsafe.Pointer(in.MaxProperties)) - out.MinProperties = (*int64)(unsafe.Pointer(in.MinProperties)) - out.Required = *(*[]string)(unsafe.Pointer(&in.Required)) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = new(JSONSchemaPropsOrArray) - if err := Convert_apiextensions_JSONSchemaPropsOrArray_To_v1beta1_JSONSchemaPropsOrArray(*in, *out, s); err != nil { - return err - } - } else { - out.Items = nil - } - if in.AllOf != nil { - in, out := &in.AllOf, &out.AllOf - *out = make([]JSONSchemaProps, len(*in)) - for i := range *in { - if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.AllOf = nil - } - if in.OneOf != nil { - in, out := &in.OneOf, &out.OneOf - *out = make([]JSONSchemaProps, len(*in)) - for i := range *in { - if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.OneOf = nil - } - if in.AnyOf != nil { - in, out := &in.AnyOf, &out.AnyOf - *out = make([]JSONSchemaProps, len(*in)) - for i := range *in { - if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.AnyOf = nil - } - if in.Not != nil { - in, out := &in.Not, &out.Not - *out = new(JSONSchemaProps) - if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(*in, *out, s); err != nil { - return err - } - } else { - out.Not = nil - } - if in.Properties != nil { - in, out := &in.Properties, &out.Properties - *out = make(map[string]JSONSchemaProps, len(*in)) - for key, val := range *in { - newVal := new(JSONSchemaProps) - if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(&val, newVal, s); err != nil { - return err - 
} - (*out)[key] = *newVal - } - } else { - out.Properties = nil - } - if in.AdditionalProperties != nil { - in, out := &in.AdditionalProperties, &out.AdditionalProperties - *out = new(JSONSchemaPropsOrBool) - if err := Convert_apiextensions_JSONSchemaPropsOrBool_To_v1beta1_JSONSchemaPropsOrBool(*in, *out, s); err != nil { - return err - } - } else { - out.AdditionalProperties = nil - } - if in.PatternProperties != nil { - in, out := &in.PatternProperties, &out.PatternProperties - *out = make(map[string]JSONSchemaProps, len(*in)) - for key, val := range *in { - newVal := new(JSONSchemaProps) - if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(&val, newVal, s); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.PatternProperties = nil - } - if in.Dependencies != nil { - in, out := &in.Dependencies, &out.Dependencies - *out = make(JSONSchemaDependencies, len(*in)) - for key, val := range *in { - newVal := new(JSONSchemaPropsOrStringArray) - if err := Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1beta1_JSONSchemaPropsOrStringArray(&val, newVal, s); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Dependencies = nil - } - if in.AdditionalItems != nil { - in, out := &in.AdditionalItems, &out.AdditionalItems - *out = new(JSONSchemaPropsOrBool) - if err := Convert_apiextensions_JSONSchemaPropsOrBool_To_v1beta1_JSONSchemaPropsOrBool(*in, *out, s); err != nil { - return err - } - } else { - out.AdditionalItems = nil - } - if in.Definitions != nil { - in, out := &in.Definitions, &out.Definitions - *out = make(JSONSchemaDefinitions, len(*in)) - for key, val := range *in { - newVal := new(JSONSchemaProps) - if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(&val, newVal, s); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Definitions = nil - } - out.ExternalDocs = (*ExternalDocumentation)(unsafe.Pointer(in.ExternalDocs)) - if in.Example != nil { - in, out := &in.Example, &out.Example - *out = new(JSON) - if err := Convert_apiextensions_JSON_To_v1beta1_JSON(*in, *out, s); err != nil { - return err - } - } else { - out.Example = nil - } - out.XPreserveUnknownFields = (*bool)(unsafe.Pointer(in.XPreserveUnknownFields)) - out.XEmbeddedResource = in.XEmbeddedResource - out.XIntOrString = in.XIntOrString - out.XListMapKeys = *(*[]string)(unsafe.Pointer(&in.XListMapKeys)) - out.XListType = (*string)(unsafe.Pointer(in.XListType)) - out.XMapType = (*string)(unsafe.Pointer(in.XMapType)) - out.XValidations = *(*ValidationRules)(unsafe.Pointer(&in.XValidations)) - return nil -} - -func autoConvert_v1beta1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(in *JSONSchemaPropsOrArray, out *apiextensions.JSONSchemaPropsOrArray, s conversion.Scope) error { - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = new(apiextensions.JSONSchemaProps) - if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { - return err - } - } else { - out.Schema = nil - } - if in.JSONSchemas != nil { - in, out := &in.JSONSchemas, &out.JSONSchemas - *out = make([]apiextensions.JSONSchemaProps, len(*in)) - for i := range *in { - if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.JSONSchemas = nil - } - return nil -} - -// Convert_v1beta1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray is an autogenerated conversion 
function. -func Convert_v1beta1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(in *JSONSchemaPropsOrArray, out *apiextensions.JSONSchemaPropsOrArray, s conversion.Scope) error { - return autoConvert_v1beta1_JSONSchemaPropsOrArray_To_apiextensions_JSONSchemaPropsOrArray(in, out, s) -} - -func autoConvert_apiextensions_JSONSchemaPropsOrArray_To_v1beta1_JSONSchemaPropsOrArray(in *apiextensions.JSONSchemaPropsOrArray, out *JSONSchemaPropsOrArray, s conversion.Scope) error { - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = new(JSONSchemaProps) - if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(*in, *out, s); err != nil { - return err - } - } else { - out.Schema = nil - } - if in.JSONSchemas != nil { - in, out := &in.JSONSchemas, &out.JSONSchemas - *out = make([]JSONSchemaProps, len(*in)) - for i := range *in { - if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.JSONSchemas = nil - } - return nil -} - -// Convert_apiextensions_JSONSchemaPropsOrArray_To_v1beta1_JSONSchemaPropsOrArray is an autogenerated conversion function. -func Convert_apiextensions_JSONSchemaPropsOrArray_To_v1beta1_JSONSchemaPropsOrArray(in *apiextensions.JSONSchemaPropsOrArray, out *JSONSchemaPropsOrArray, s conversion.Scope) error { - return autoConvert_apiextensions_JSONSchemaPropsOrArray_To_v1beta1_JSONSchemaPropsOrArray(in, out, s) -} - -func autoConvert_v1beta1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(in *JSONSchemaPropsOrBool, out *apiextensions.JSONSchemaPropsOrBool, s conversion.Scope) error { - out.Allows = in.Allows - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = new(apiextensions.JSONSchemaProps) - if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { - return err - } - } else { - out.Schema = nil - } - return nil -} - -// Convert_v1beta1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool is an autogenerated conversion function. -func Convert_v1beta1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(in *JSONSchemaPropsOrBool, out *apiextensions.JSONSchemaPropsOrBool, s conversion.Scope) error { - return autoConvert_v1beta1_JSONSchemaPropsOrBool_To_apiextensions_JSONSchemaPropsOrBool(in, out, s) -} - -func autoConvert_apiextensions_JSONSchemaPropsOrBool_To_v1beta1_JSONSchemaPropsOrBool(in *apiextensions.JSONSchemaPropsOrBool, out *JSONSchemaPropsOrBool, s conversion.Scope) error { - out.Allows = in.Allows - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = new(JSONSchemaProps) - if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(*in, *out, s); err != nil { - return err - } - } else { - out.Schema = nil - } - return nil -} - -// Convert_apiextensions_JSONSchemaPropsOrBool_To_v1beta1_JSONSchemaPropsOrBool is an autogenerated conversion function. 
-func Convert_apiextensions_JSONSchemaPropsOrBool_To_v1beta1_JSONSchemaPropsOrBool(in *apiextensions.JSONSchemaPropsOrBool, out *JSONSchemaPropsOrBool, s conversion.Scope) error { - return autoConvert_apiextensions_JSONSchemaPropsOrBool_To_v1beta1_JSONSchemaPropsOrBool(in, out, s) -} - -func autoConvert_v1beta1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(in *JSONSchemaPropsOrStringArray, out *apiextensions.JSONSchemaPropsOrStringArray, s conversion.Scope) error { - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = new(apiextensions.JSONSchemaProps) - if err := Convert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(*in, *out, s); err != nil { - return err - } - } else { - out.Schema = nil - } - out.Property = *(*[]string)(unsafe.Pointer(&in.Property)) - return nil -} - -// Convert_v1beta1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray is an autogenerated conversion function. -func Convert_v1beta1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(in *JSONSchemaPropsOrStringArray, out *apiextensions.JSONSchemaPropsOrStringArray, s conversion.Scope) error { - return autoConvert_v1beta1_JSONSchemaPropsOrStringArray_To_apiextensions_JSONSchemaPropsOrStringArray(in, out, s) -} - -func autoConvert_apiextensions_JSONSchemaPropsOrStringArray_To_v1beta1_JSONSchemaPropsOrStringArray(in *apiextensions.JSONSchemaPropsOrStringArray, out *JSONSchemaPropsOrStringArray, s conversion.Scope) error { - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = new(JSONSchemaProps) - if err := Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(*in, *out, s); err != nil { - return err - } - } else { - out.Schema = nil - } - out.Property = *(*[]string)(unsafe.Pointer(&in.Property)) - return nil -} - -// Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1beta1_JSONSchemaPropsOrStringArray is an autogenerated conversion function. -func Convert_apiextensions_JSONSchemaPropsOrStringArray_To_v1beta1_JSONSchemaPropsOrStringArray(in *apiextensions.JSONSchemaPropsOrStringArray, out *JSONSchemaPropsOrStringArray, s conversion.Scope) error { - return autoConvert_apiextensions_JSONSchemaPropsOrStringArray_To_v1beta1_JSONSchemaPropsOrStringArray(in, out, s) -} - -func autoConvert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(in *ServiceReference, out *apiextensions.ServiceReference, s conversion.Scope) error { - out.Namespace = in.Namespace - out.Name = in.Name - out.Path = (*string)(unsafe.Pointer(in.Path)) - if err := v1.Convert_Pointer_int32_To_int32(&in.Port, &out.Port, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta1_ServiceReference_To_apiextensions_ServiceReference is an autogenerated conversion function. -func Convert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(in *ServiceReference, out *apiextensions.ServiceReference, s conversion.Scope) error { - return autoConvert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(in, out, s) -} - -func autoConvert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(in *apiextensions.ServiceReference, out *ServiceReference, s conversion.Scope) error { - out.Namespace = in.Namespace - out.Name = in.Name - out.Path = (*string)(unsafe.Pointer(in.Path)) - if err := v1.Convert_int32_To_Pointer_int32(&in.Port, &out.Port, s); err != nil { - return err - } - return nil -} - -// Convert_apiextensions_ServiceReference_To_v1beta1_ServiceReference is an autogenerated conversion function. 
-func Convert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(in *apiextensions.ServiceReference, out *ServiceReference, s conversion.Scope) error { - return autoConvert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(in, out, s) -} - -func autoConvert_v1beta1_ValidationRule_To_apiextensions_ValidationRule(in *ValidationRule, out *apiextensions.ValidationRule, s conversion.Scope) error { - out.Rule = in.Rule - out.Message = in.Message - return nil -} - -// Convert_v1beta1_ValidationRule_To_apiextensions_ValidationRule is an autogenerated conversion function. -func Convert_v1beta1_ValidationRule_To_apiextensions_ValidationRule(in *ValidationRule, out *apiextensions.ValidationRule, s conversion.Scope) error { - return autoConvert_v1beta1_ValidationRule_To_apiextensions_ValidationRule(in, out, s) -} - -func autoConvert_apiextensions_ValidationRule_To_v1beta1_ValidationRule(in *apiextensions.ValidationRule, out *ValidationRule, s conversion.Scope) error { - out.Rule = in.Rule - out.Message = in.Message - return nil -} - -// Convert_apiextensions_ValidationRule_To_v1beta1_ValidationRule is an autogenerated conversion function. -func Convert_apiextensions_ValidationRule_To_v1beta1_ValidationRule(in *apiextensions.ValidationRule, out *ValidationRule, s conversion.Scope) error { - return autoConvert_apiextensions_ValidationRule_To_v1beta1_ValidationRule(in, out, s) -} - -func autoConvert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in *WebhookClientConfig, out *apiextensions.WebhookClientConfig, s conversion.Scope) error { - out.URL = (*string)(unsafe.Pointer(in.URL)) - if in.Service != nil { - in, out := &in.Service, &out.Service - *out = new(apiextensions.ServiceReference) - if err := Convert_v1beta1_ServiceReference_To_apiextensions_ServiceReference(*in, *out, s); err != nil { - return err - } - } else { - out.Service = nil - } - out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) - return nil -} - -// Convert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig is an autogenerated conversion function. -func Convert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in *WebhookClientConfig, out *apiextensions.WebhookClientConfig, s conversion.Scope) error { - return autoConvert_v1beta1_WebhookClientConfig_To_apiextensions_WebhookClientConfig(in, out, s) -} - -func autoConvert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig(in *apiextensions.WebhookClientConfig, out *WebhookClientConfig, s conversion.Scope) error { - out.URL = (*string)(unsafe.Pointer(in.URL)) - if in.Service != nil { - in, out := &in.Service, &out.Service - *out = new(ServiceReference) - if err := Convert_apiextensions_ServiceReference_To_v1beta1_ServiceReference(*in, *out, s); err != nil { - return err - } - } else { - out.Service = nil - } - out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) - return nil -} - -// Convert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig is an autogenerated conversion function. 
-func Convert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig(in *apiextensions.WebhookClientConfig, out *WebhookClientConfig, s conversion.Scope) error { - return autoConvert_apiextensions_WebhookClientConfig_To_v1beta1_WebhookClientConfig(in, out, s) -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index 26a9d4db9..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,704 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConversionRequest) DeepCopyInto(out *ConversionRequest) { - *out = *in - if in.Objects != nil { - in, out := &in.Objects, &out.Objects - *out = make([]runtime.RawExtension, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionRequest. -func (in *ConversionRequest) DeepCopy() *ConversionRequest { - if in == nil { - return nil - } - out := new(ConversionRequest) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConversionResponse) DeepCopyInto(out *ConversionResponse) { - *out = *in - if in.ConvertedObjects != nil { - in, out := &in.ConvertedObjects, &out.ConvertedObjects - *out = make([]runtime.RawExtension, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.Result.DeepCopyInto(&out.Result) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionResponse. -func (in *ConversionResponse) DeepCopy() *ConversionResponse { - if in == nil { - return nil - } - out := new(ConversionResponse) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConversionReview) DeepCopyInto(out *ConversionReview) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Request != nil { - in, out := &in.Request, &out.Request - *out = new(ConversionRequest) - (*in).DeepCopyInto(*out) - } - if in.Response != nil { - in, out := &in.Response, &out.Response - *out = new(ConversionResponse) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConversionReview. 
-func (in *ConversionReview) DeepCopy() *ConversionReview { - if in == nil { - return nil - } - out := new(ConversionReview) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ConversionReview) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceColumnDefinition) DeepCopyInto(out *CustomResourceColumnDefinition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceColumnDefinition. -func (in *CustomResourceColumnDefinition) DeepCopy() *CustomResourceColumnDefinition { - if in == nil { - return nil - } - out := new(CustomResourceColumnDefinition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceConversion) DeepCopyInto(out *CustomResourceConversion) { - *out = *in - if in.WebhookClientConfig != nil { - in, out := &in.WebhookClientConfig, &out.WebhookClientConfig - *out = new(WebhookClientConfig) - (*in).DeepCopyInto(*out) - } - if in.ConversionReviewVersions != nil { - in, out := &in.ConversionReviewVersions, &out.ConversionReviewVersions - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceConversion. -func (in *CustomResourceConversion) DeepCopy() *CustomResourceConversion { - if in == nil { - return nil - } - out := new(CustomResourceConversion) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceDefinition) DeepCopyInto(out *CustomResourceDefinition) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinition. -func (in *CustomResourceDefinition) DeepCopy() *CustomResourceDefinition { - if in == nil { - return nil - } - out := new(CustomResourceDefinition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CustomResourceDefinition) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceDefinitionCondition) DeepCopyInto(out *CustomResourceDefinitionCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionCondition. -func (in *CustomResourceDefinitionCondition) DeepCopy() *CustomResourceDefinitionCondition { - if in == nil { - return nil - } - out := new(CustomResourceDefinitionCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CustomResourceDefinitionList) DeepCopyInto(out *CustomResourceDefinitionList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CustomResourceDefinition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionList. -func (in *CustomResourceDefinitionList) DeepCopy() *CustomResourceDefinitionList { - if in == nil { - return nil - } - out := new(CustomResourceDefinitionList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CustomResourceDefinitionList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceDefinitionNames) DeepCopyInto(out *CustomResourceDefinitionNames) { - *out = *in - if in.ShortNames != nil { - in, out := &in.ShortNames, &out.ShortNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Categories != nil { - in, out := &in.Categories, &out.Categories - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionNames. -func (in *CustomResourceDefinitionNames) DeepCopy() *CustomResourceDefinitionNames { - if in == nil { - return nil - } - out := new(CustomResourceDefinitionNames) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceDefinitionSpec) DeepCopyInto(out *CustomResourceDefinitionSpec) { - *out = *in - in.Names.DeepCopyInto(&out.Names) - if in.Validation != nil { - in, out := &in.Validation, &out.Validation - *out = new(CustomResourceValidation) - (*in).DeepCopyInto(*out) - } - if in.Subresources != nil { - in, out := &in.Subresources, &out.Subresources - *out = new(CustomResourceSubresources) - (*in).DeepCopyInto(*out) - } - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = make([]CustomResourceDefinitionVersion, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.AdditionalPrinterColumns != nil { - in, out := &in.AdditionalPrinterColumns, &out.AdditionalPrinterColumns - *out = make([]CustomResourceColumnDefinition, len(*in)) - copy(*out, *in) - } - if in.Conversion != nil { - in, out := &in.Conversion, &out.Conversion - *out = new(CustomResourceConversion) - (*in).DeepCopyInto(*out) - } - if in.PreserveUnknownFields != nil { - in, out := &in.PreserveUnknownFields, &out.PreserveUnknownFields - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionSpec. -func (in *CustomResourceDefinitionSpec) DeepCopy() *CustomResourceDefinitionSpec { - if in == nil { - return nil - } - out := new(CustomResourceDefinitionSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CustomResourceDefinitionStatus) DeepCopyInto(out *CustomResourceDefinitionStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]CustomResourceDefinitionCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.AcceptedNames.DeepCopyInto(&out.AcceptedNames) - if in.StoredVersions != nil { - in, out := &in.StoredVersions, &out.StoredVersions - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionStatus. -func (in *CustomResourceDefinitionStatus) DeepCopy() *CustomResourceDefinitionStatus { - if in == nil { - return nil - } - out := new(CustomResourceDefinitionStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceDefinitionVersion) DeepCopyInto(out *CustomResourceDefinitionVersion) { - *out = *in - if in.DeprecationWarning != nil { - in, out := &in.DeprecationWarning, &out.DeprecationWarning - *out = new(string) - **out = **in - } - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = new(CustomResourceValidation) - (*in).DeepCopyInto(*out) - } - if in.Subresources != nil { - in, out := &in.Subresources, &out.Subresources - *out = new(CustomResourceSubresources) - (*in).DeepCopyInto(*out) - } - if in.AdditionalPrinterColumns != nil { - in, out := &in.AdditionalPrinterColumns, &out.AdditionalPrinterColumns - *out = make([]CustomResourceColumnDefinition, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionVersion. -func (in *CustomResourceDefinitionVersion) DeepCopy() *CustomResourceDefinitionVersion { - if in == nil { - return nil - } - out := new(CustomResourceDefinitionVersion) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceSubresourceScale) DeepCopyInto(out *CustomResourceSubresourceScale) { - *out = *in - if in.LabelSelectorPath != nil { - in, out := &in.LabelSelectorPath, &out.LabelSelectorPath - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresourceScale. -func (in *CustomResourceSubresourceScale) DeepCopy() *CustomResourceSubresourceScale { - if in == nil { - return nil - } - out := new(CustomResourceSubresourceScale) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceSubresourceStatus) DeepCopyInto(out *CustomResourceSubresourceStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresourceStatus. -func (in *CustomResourceSubresourceStatus) DeepCopy() *CustomResourceSubresourceStatus { - if in == nil { - return nil - } - out := new(CustomResourceSubresourceStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CustomResourceSubresources) DeepCopyInto(out *CustomResourceSubresources) { - *out = *in - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(CustomResourceSubresourceStatus) - **out = **in - } - if in.Scale != nil { - in, out := &in.Scale, &out.Scale - *out = new(CustomResourceSubresourceScale) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresources. -func (in *CustomResourceSubresources) DeepCopy() *CustomResourceSubresources { - if in == nil { - return nil - } - out := new(CustomResourceSubresources) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceValidation) DeepCopyInto(out *CustomResourceValidation) { - *out = *in - if in.OpenAPIV3Schema != nil { - in, out := &in.OpenAPIV3Schema, &out.OpenAPIV3Schema - *out = (*in).DeepCopy() - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceValidation. -func (in *CustomResourceValidation) DeepCopy() *CustomResourceValidation { - if in == nil { - return nil - } - out := new(CustomResourceValidation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExternalDocumentation) DeepCopyInto(out *ExternalDocumentation) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalDocumentation. -func (in *ExternalDocumentation) DeepCopy() *ExternalDocumentation { - if in == nil { - return nil - } - out := new(ExternalDocumentation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JSON) DeepCopyInto(out *JSON) { - *out = *in - if in.Raw != nil { - in, out := &in.Raw, &out.Raw - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSON. -func (in *JSON) DeepCopy() *JSON { - if in == nil { - return nil - } - out := new(JSON) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in JSONSchemaDefinitions) DeepCopyInto(out *JSONSchemaDefinitions) { - { - in := &in - *out = make(JSONSchemaDefinitions, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaDefinitions. -func (in JSONSchemaDefinitions) DeepCopy() JSONSchemaDefinitions { - if in == nil { - return nil - } - out := new(JSONSchemaDefinitions) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in JSONSchemaDependencies) DeepCopyInto(out *JSONSchemaDependencies) { - { - in := &in - *out = make(JSONSchemaDependencies, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaDependencies. 
-func (in JSONSchemaDependencies) DeepCopy() JSONSchemaDependencies { - if in == nil { - return nil - } - out := new(JSONSchemaDependencies) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JSONSchemaProps) DeepCopyInto(out *JSONSchemaProps) { - clone := in.DeepCopy() - *out = *clone - return -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JSONSchemaPropsOrArray) DeepCopyInto(out *JSONSchemaPropsOrArray) { - *out = *in - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = (*in).DeepCopy() - } - if in.JSONSchemas != nil { - in, out := &in.JSONSchemas, &out.JSONSchemas - *out = make([]JSONSchemaProps, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrArray. -func (in *JSONSchemaPropsOrArray) DeepCopy() *JSONSchemaPropsOrArray { - if in == nil { - return nil - } - out := new(JSONSchemaPropsOrArray) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JSONSchemaPropsOrBool) DeepCopyInto(out *JSONSchemaPropsOrBool) { - *out = *in - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = (*in).DeepCopy() - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrBool. -func (in *JSONSchemaPropsOrBool) DeepCopy() *JSONSchemaPropsOrBool { - if in == nil { - return nil - } - out := new(JSONSchemaPropsOrBool) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JSONSchemaPropsOrStringArray) DeepCopyInto(out *JSONSchemaPropsOrStringArray) { - *out = *in - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = (*in).DeepCopy() - } - if in.Property != nil { - in, out := &in.Property, &out.Property - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrStringArray. -func (in *JSONSchemaPropsOrStringArray) DeepCopy() *JSONSchemaPropsOrStringArray { - if in == nil { - return nil - } - out := new(JSONSchemaPropsOrStringArray) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { - *out = *in - if in.Path != nil { - in, out := &in.Path, &out.Path - *out = new(string) - **out = **in - } - if in.Port != nil { - in, out := &in.Port, &out.Port - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. -func (in *ServiceReference) DeepCopy() *ServiceReference { - if in == nil { - return nil - } - out := new(ServiceReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ValidationRule) DeepCopyInto(out *ValidationRule) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationRule. -func (in *ValidationRule) DeepCopy() *ValidationRule { - if in == nil { - return nil - } - out := new(ValidationRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in ValidationRules) DeepCopyInto(out *ValidationRules) { - { - in := &in - *out = make(ValidationRules, len(*in)) - copy(*out, *in) - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationRules. -func (in ValidationRules) DeepCopy() ValidationRules { - if in == nil { - return nil - } - out := new(ValidationRules) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { - *out = *in - if in.URL != nil { - in, out := &in.URL, &out.URL - *out = new(string) - **out = **in - } - if in.Service != nil { - in, out := &in.Service, &out.Service - *out = new(ServiceReference) - (*in).DeepCopyInto(*out) - } - if in.CABundle != nil { - in, out := &in.CABundle, &out.CABundle - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. -func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { - if in == nil { - return nil - } - out := new(WebhookClientConfig) - in.DeepCopyInto(out) - return out -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go deleted file mode 100644 index 225c6ff51..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go +++ /dev/null @@ -1,56 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by defaulter-gen. DO NOT EDIT. - -package v1beta1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. 
-func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&CustomResourceDefinition{}, func(obj interface{}) { SetObjectDefaults_CustomResourceDefinition(obj.(*CustomResourceDefinition)) }) - scheme.AddTypeDefaultingFunc(&CustomResourceDefinitionList{}, func(obj interface{}) { - SetObjectDefaults_CustomResourceDefinitionList(obj.(*CustomResourceDefinitionList)) - }) - return nil -} - -func SetObjectDefaults_CustomResourceDefinition(in *CustomResourceDefinition) { - SetDefaults_CustomResourceDefinition(in) - SetDefaults_CustomResourceDefinitionSpec(&in.Spec) - if in.Spec.Conversion != nil { - if in.Spec.Conversion.WebhookClientConfig != nil { - if in.Spec.Conversion.WebhookClientConfig.Service != nil { - SetDefaults_ServiceReference(in.Spec.Conversion.WebhookClientConfig.Service) - } - } - } -} - -func SetObjectDefaults_CustomResourceDefinitionList(in *CustomResourceDefinitionList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_CustomResourceDefinition(a) - } -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.prerelease-lifecycle.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.prerelease-lifecycle.go deleted file mode 100644 index 9c22ae5c1..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.prerelease-lifecycle.go +++ /dev/null @@ -1,98 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. - -package v1beta1 - -import ( - schema "k8s.io/apimachinery/pkg/runtime/schema" -) - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *ConversionReview) APILifecycleIntroduced() (major, minor int) { - return 1, 13 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *ConversionReview) APILifecycleDeprecated() (major, minor int) { - return 1, 19 -} - -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. 
-func (in *ConversionReview) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "apiextensions.k8s.io", Version: "v1", Kind: "ConversionReview"} -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *ConversionReview) APILifecycleRemoved() (major, minor int) { - return 1, 22 -} - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *CustomResourceDefinition) APILifecycleIntroduced() (major, minor int) { - return 1, 7 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *CustomResourceDefinition) APILifecycleDeprecated() (major, minor int) { - return 1, 16 -} - -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. -func (in *CustomResourceDefinition) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "apiextensions.k8s.io", Version: "v1", Kind: "CustomResourceDefinition"} -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *CustomResourceDefinition) APILifecycleRemoved() (major, minor int) { - return 1, 22 -} - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *CustomResourceDefinitionList) APILifecycleIntroduced() (major, minor int) { - return 1, 7 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *CustomResourceDefinitionList) APILifecycleDeprecated() (major, minor int) { - return 1, 16 -} - -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. 
-func (in *CustomResourceDefinitionList) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "apiextensions.k8s.io", Version: "v1", Kind: "CustomResourceDefinitionList"} -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *CustomResourceDefinitionList) APILifecycleRemoved() (major, minor int) { - return 1, 22 -} diff --git a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go b/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go deleted file mode 100644 index 998c9dbe2..000000000 --- a/src/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go +++ /dev/null @@ -1,596 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package apiextensions - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceColumnDefinition) DeepCopyInto(out *CustomResourceColumnDefinition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceColumnDefinition. -func (in *CustomResourceColumnDefinition) DeepCopy() *CustomResourceColumnDefinition { - if in == nil { - return nil - } - out := new(CustomResourceColumnDefinition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceConversion) DeepCopyInto(out *CustomResourceConversion) { - *out = *in - if in.WebhookClientConfig != nil { - in, out := &in.WebhookClientConfig, &out.WebhookClientConfig - *out = new(WebhookClientConfig) - (*in).DeepCopyInto(*out) - } - if in.ConversionReviewVersions != nil { - in, out := &in.ConversionReviewVersions, &out.ConversionReviewVersions - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceConversion. -func (in *CustomResourceConversion) DeepCopy() *CustomResourceConversion { - if in == nil { - return nil - } - out := new(CustomResourceConversion) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CustomResourceDefinition) DeepCopyInto(out *CustomResourceDefinition) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinition. -func (in *CustomResourceDefinition) DeepCopy() *CustomResourceDefinition { - if in == nil { - return nil - } - out := new(CustomResourceDefinition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CustomResourceDefinition) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceDefinitionCondition) DeepCopyInto(out *CustomResourceDefinitionCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionCondition. -func (in *CustomResourceDefinitionCondition) DeepCopy() *CustomResourceDefinitionCondition { - if in == nil { - return nil - } - out := new(CustomResourceDefinitionCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceDefinitionList) DeepCopyInto(out *CustomResourceDefinitionList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CustomResourceDefinition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionList. -func (in *CustomResourceDefinitionList) DeepCopy() *CustomResourceDefinitionList { - if in == nil { - return nil - } - out := new(CustomResourceDefinitionList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CustomResourceDefinitionList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceDefinitionNames) DeepCopyInto(out *CustomResourceDefinitionNames) { - *out = *in - if in.ShortNames != nil { - in, out := &in.ShortNames, &out.ShortNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Categories != nil { - in, out := &in.Categories, &out.Categories - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionNames. -func (in *CustomResourceDefinitionNames) DeepCopy() *CustomResourceDefinitionNames { - if in == nil { - return nil - } - out := new(CustomResourceDefinitionNames) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CustomResourceDefinitionSpec) DeepCopyInto(out *CustomResourceDefinitionSpec) { - *out = *in - in.Names.DeepCopyInto(&out.Names) - if in.Validation != nil { - in, out := &in.Validation, &out.Validation - *out = new(CustomResourceValidation) - (*in).DeepCopyInto(*out) - } - if in.Subresources != nil { - in, out := &in.Subresources, &out.Subresources - *out = new(CustomResourceSubresources) - (*in).DeepCopyInto(*out) - } - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = make([]CustomResourceDefinitionVersion, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.AdditionalPrinterColumns != nil { - in, out := &in.AdditionalPrinterColumns, &out.AdditionalPrinterColumns - *out = make([]CustomResourceColumnDefinition, len(*in)) - copy(*out, *in) - } - if in.Conversion != nil { - in, out := &in.Conversion, &out.Conversion - *out = new(CustomResourceConversion) - (*in).DeepCopyInto(*out) - } - if in.PreserveUnknownFields != nil { - in, out := &in.PreserveUnknownFields, &out.PreserveUnknownFields - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionSpec. -func (in *CustomResourceDefinitionSpec) DeepCopy() *CustomResourceDefinitionSpec { - if in == nil { - return nil - } - out := new(CustomResourceDefinitionSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceDefinitionStatus) DeepCopyInto(out *CustomResourceDefinitionStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]CustomResourceDefinitionCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.AcceptedNames.DeepCopyInto(&out.AcceptedNames) - if in.StoredVersions != nil { - in, out := &in.StoredVersions, &out.StoredVersions - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionStatus. -func (in *CustomResourceDefinitionStatus) DeepCopy() *CustomResourceDefinitionStatus { - if in == nil { - return nil - } - out := new(CustomResourceDefinitionStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceDefinitionVersion) DeepCopyInto(out *CustomResourceDefinitionVersion) { - *out = *in - if in.DeprecationWarning != nil { - in, out := &in.DeprecationWarning, &out.DeprecationWarning - *out = new(string) - **out = **in - } - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = new(CustomResourceValidation) - (*in).DeepCopyInto(*out) - } - if in.Subresources != nil { - in, out := &in.Subresources, &out.Subresources - *out = new(CustomResourceSubresources) - (*in).DeepCopyInto(*out) - } - if in.AdditionalPrinterColumns != nil { - in, out := &in.AdditionalPrinterColumns, &out.AdditionalPrinterColumns - *out = make([]CustomResourceColumnDefinition, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionVersion. 
-func (in *CustomResourceDefinitionVersion) DeepCopy() *CustomResourceDefinitionVersion { - if in == nil { - return nil - } - out := new(CustomResourceDefinitionVersion) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceSubresourceScale) DeepCopyInto(out *CustomResourceSubresourceScale) { - *out = *in - if in.LabelSelectorPath != nil { - in, out := &in.LabelSelectorPath, &out.LabelSelectorPath - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresourceScale. -func (in *CustomResourceSubresourceScale) DeepCopy() *CustomResourceSubresourceScale { - if in == nil { - return nil - } - out := new(CustomResourceSubresourceScale) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceSubresourceStatus) DeepCopyInto(out *CustomResourceSubresourceStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresourceStatus. -func (in *CustomResourceSubresourceStatus) DeepCopy() *CustomResourceSubresourceStatus { - if in == nil { - return nil - } - out := new(CustomResourceSubresourceStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceSubresources) DeepCopyInto(out *CustomResourceSubresources) { - *out = *in - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(CustomResourceSubresourceStatus) - **out = **in - } - if in.Scale != nil { - in, out := &in.Scale, &out.Scale - *out = new(CustomResourceSubresourceScale) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceSubresources. -func (in *CustomResourceSubresources) DeepCopy() *CustomResourceSubresources { - if in == nil { - return nil - } - out := new(CustomResourceSubresources) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceValidation) DeepCopyInto(out *CustomResourceValidation) { - *out = *in - if in.OpenAPIV3Schema != nil { - in, out := &in.OpenAPIV3Schema, &out.OpenAPIV3Schema - *out = (*in).DeepCopy() - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceValidation. -func (in *CustomResourceValidation) DeepCopy() *CustomResourceValidation { - if in == nil { - return nil - } - out := new(CustomResourceValidation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExternalDocumentation) DeepCopyInto(out *ExternalDocumentation) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalDocumentation. -func (in *ExternalDocumentation) DeepCopy() *ExternalDocumentation { - if in == nil { - return nil - } - out := new(ExternalDocumentation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in JSONSchemaDefinitions) DeepCopyInto(out *JSONSchemaDefinitions) { - { - in := &in - *out = make(JSONSchemaDefinitions, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaDefinitions. -func (in JSONSchemaDefinitions) DeepCopy() JSONSchemaDefinitions { - if in == nil { - return nil - } - out := new(JSONSchemaDefinitions) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in JSONSchemaDependencies) DeepCopyInto(out *JSONSchemaDependencies) { - { - in := &in - *out = make(JSONSchemaDependencies, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaDependencies. -func (in JSONSchemaDependencies) DeepCopy() JSONSchemaDependencies { - if in == nil { - return nil - } - out := new(JSONSchemaDependencies) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JSONSchemaProps) DeepCopyInto(out *JSONSchemaProps) { - clone := in.DeepCopy() - *out = *clone - return -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JSONSchemaPropsOrArray) DeepCopyInto(out *JSONSchemaPropsOrArray) { - *out = *in - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = (*in).DeepCopy() - } - if in.JSONSchemas != nil { - in, out := &in.JSONSchemas, &out.JSONSchemas - *out = make([]JSONSchemaProps, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrArray. -func (in *JSONSchemaPropsOrArray) DeepCopy() *JSONSchemaPropsOrArray { - if in == nil { - return nil - } - out := new(JSONSchemaPropsOrArray) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JSONSchemaPropsOrBool) DeepCopyInto(out *JSONSchemaPropsOrBool) { - *out = *in - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = (*in).DeepCopy() - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrBool. -func (in *JSONSchemaPropsOrBool) DeepCopy() *JSONSchemaPropsOrBool { - if in == nil { - return nil - } - out := new(JSONSchemaPropsOrBool) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JSONSchemaPropsOrStringArray) DeepCopyInto(out *JSONSchemaPropsOrStringArray) { - *out = *in - if in.Schema != nil { - in, out := &in.Schema, &out.Schema - *out = (*in).DeepCopy() - } - if in.Property != nil { - in, out := &in.Property, &out.Property - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONSchemaPropsOrStringArray. 
-func (in *JSONSchemaPropsOrStringArray) DeepCopy() *JSONSchemaPropsOrStringArray { - if in == nil { - return nil - } - out := new(JSONSchemaPropsOrStringArray) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { - *out = *in - if in.Path != nil { - in, out := &in.Path, &out.Path - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. -func (in *ServiceReference) DeepCopy() *ServiceReference { - if in == nil { - return nil - } - out := new(ServiceReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ValidationRule) DeepCopyInto(out *ValidationRule) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationRule. -func (in *ValidationRule) DeepCopy() *ValidationRule { - if in == nil { - return nil - } - out := new(ValidationRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in ValidationRules) DeepCopyInto(out *ValidationRules) { - { - in := &in - *out = make(ValidationRules, len(*in)) - copy(*out, *in) - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationRules. -func (in ValidationRules) DeepCopy() ValidationRules { - if in == nil { - return nil - } - out := new(ValidationRules) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { - *out = *in - if in.URL != nil { - in, out := &in.URL, &out.URL - *out = new(string) - **out = **in - } - if in.Service != nil { - in, out := &in.Service, &out.Service - *out = new(ServiceReference) - (*in).DeepCopyInto(*out) - } - if in.CABundle != nil { - in, out := &in.CABundle, &out.CABundle - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. -func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { - if in == nil { - return nil - } - out := new(WebhookClientConfig) - in.DeepCopyInto(out) - return out -} diff --git a/src/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go b/src/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go deleted file mode 100644 index f02fa8e43..000000000 --- a/src/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package equality - -import ( - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" -) - -// Semantic can do semantic deep equality checks for api objects. -// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true -var Semantic = conversion.EqualitiesOrDie( - func(a, b resource.Quantity) bool { - // Ignore formatting, only care that numeric value stayed the same. - // TODO: if we decide it's important, it should be safe to start comparing the format. - // - // Uninitialized quantities are equivalent to 0 quantities. - return a.Cmp(b) == 0 - }, - func(a, b metav1.MicroTime) bool { - return a.UTC() == b.UTC() - }, - func(a, b metav1.Time) bool { - return a.UTC() == b.UTC() - }, - func(a, b labels.Selector) bool { - return a.String() == b.String() - }, - func(a, b fields.Selector) bool { - return a.String() == b.String() - }, -) diff --git a/src/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS b/src/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS deleted file mode 100644 index 1e1330fff..000000000 --- a/src/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS +++ /dev/null @@ -1,14 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -reviewers: - - thockin - - smarterclayton - - wojtek-t - - deads2k - - derekwaynecarr - - caesarxuchao - - mikedanese - - liggitt - - janetkuo - - ncdc - - dims diff --git a/src/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go b/src/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go deleted file mode 100644 index 60c8209de..000000000 --- a/src/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// SetStatusCondition sets the corresponding condition in conditions to newCondition. -// conditions must be non-nil. -// 1. if the condition of the specified type already exists (all fields of the existing condition are updated to -// newCondition, LastTransitionTime is set to now if the new status differs from the old status) -// 2. 
if a condition of the specified type does not exist (LastTransitionTime is set to now() if unset, and newCondition is appended) -func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) { - if conditions == nil { - return - } - existingCondition := FindStatusCondition(*conditions, newCondition.Type) - if existingCondition == nil { - if newCondition.LastTransitionTime.IsZero() { - newCondition.LastTransitionTime = metav1.NewTime(time.Now()) - } - *conditions = append(*conditions, newCondition) - return - } - - if existingCondition.Status != newCondition.Status { - existingCondition.Status = newCondition.Status - if !newCondition.LastTransitionTime.IsZero() { - existingCondition.LastTransitionTime = newCondition.LastTransitionTime - } else { - existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) - } - } - - existingCondition.Reason = newCondition.Reason - existingCondition.Message = newCondition.Message - existingCondition.ObservedGeneration = newCondition.ObservedGeneration -} - -// RemoveStatusCondition removes the corresponding conditionType from conditions. -// conditions must be non-nil. -func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) { - if conditions == nil || len(*conditions) == 0 { - return - } - newConditions := make([]metav1.Condition, 0, len(*conditions)-1) - for _, condition := range *conditions { - if condition.Type != conditionType { - newConditions = append(newConditions, condition) - } - } - - *conditions = newConditions -} - -// FindStatusCondition finds the conditionType in conditions. -func FindStatusCondition(conditions []metav1.Condition, conditionType string) *metav1.Condition { - for i := range conditions { - if conditions[i].Type == conditionType { - return &conditions[i] - } - } - - return nil -} - -// IsStatusConditionTrue returns true when the conditionType is present and set to `metav1.ConditionTrue` -func IsStatusConditionTrue(conditions []metav1.Condition, conditionType string) bool { - return IsStatusConditionPresentAndEqual(conditions, conditionType, metav1.ConditionTrue) -} - -// IsStatusConditionFalse returns true when the conditionType is present and set to `metav1.ConditionFalse` -func IsStatusConditionFalse(conditions []metav1.Condition, conditionType string) bool { - return IsStatusConditionPresentAndEqual(conditions, conditionType, metav1.ConditionFalse) -} - -// IsStatusConditionPresentAndEqual returns true when conditionType is present and equal to status. -func IsStatusConditionPresentAndEqual(conditions []metav1.Condition, conditionType string, status metav1.ConditionStatus) bool { - for _, condition := range conditions { - if condition.Type == conditionType { - return condition.Status == status - } - } - return false -} diff --git a/src/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go b/src/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go deleted file mode 100644 index b6d42acf8..000000000 --- a/src/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package meta provides functions for retrieving API metadata from objects -// belonging to the Kubernetes API -package meta // import "k8s.io/apimachinery/pkg/api/meta" diff --git a/src/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go b/src/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go deleted file mode 100644 index cbf5d0263..000000000 --- a/src/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go +++ /dev/null @@ -1,121 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" -) - -// AmbiguousResourceError is returned if the RESTMapper finds multiple matches for a resource -type AmbiguousResourceError struct { - PartialResource schema.GroupVersionResource - - MatchingResources []schema.GroupVersionResource - MatchingKinds []schema.GroupVersionKind -} - -func (e *AmbiguousResourceError) Error() string { - switch { - case len(e.MatchingKinds) > 0 && len(e.MatchingResources) > 0: - return fmt.Sprintf("%v matches multiple resources %v and kinds %v", e.PartialResource, e.MatchingResources, e.MatchingKinds) - case len(e.MatchingKinds) > 0: - return fmt.Sprintf("%v matches multiple kinds %v", e.PartialResource, e.MatchingKinds) - case len(e.MatchingResources) > 0: - return fmt.Sprintf("%v matches multiple resources %v", e.PartialResource, e.MatchingResources) - } - return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialResource) -} - -// AmbiguousKindError is returned if the RESTMapper finds multiple matches for a kind -type AmbiguousKindError struct { - PartialKind schema.GroupVersionKind - - MatchingResources []schema.GroupVersionResource - MatchingKinds []schema.GroupVersionKind -} - -func (e *AmbiguousKindError) Error() string { - switch { - case len(e.MatchingKinds) > 0 && len(e.MatchingResources) > 0: - return fmt.Sprintf("%v matches multiple resources %v and kinds %v", e.PartialKind, e.MatchingResources, e.MatchingKinds) - case len(e.MatchingKinds) > 0: - return fmt.Sprintf("%v matches multiple kinds %v", e.PartialKind, e.MatchingKinds) - case len(e.MatchingResources) > 0: - return fmt.Sprintf("%v matches multiple resources %v", e.PartialKind, e.MatchingResources) - } - return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialKind) -} - -func IsAmbiguousError(err error) bool { - if err == nil { - return false - } - switch err.(type) { - case *AmbiguousResourceError, *AmbiguousKindError: - return true - default: - return false - } -} - -// NoResourceMatchError is returned if the RESTMapper can't find any match for a resource -type NoResourceMatchError struct { - PartialResource schema.GroupVersionResource -} - -func (e *NoResourceMatchError) Error() string { - return fmt.Sprintf("no matches for %v", e.PartialResource) -} - -// NoKindMatchError is returned if the RESTMapper can't find any match for a kind -type 
NoKindMatchError struct { - // GroupKind is the API group and kind that was searched - GroupKind schema.GroupKind - // SearchedVersions is the optional list of versions the search was restricted to - SearchedVersions []string -} - -func (e *NoKindMatchError) Error() string { - searchedVersions := sets.NewString() - for _, v := range e.SearchedVersions { - searchedVersions.Insert(schema.GroupVersion{Group: e.GroupKind.Group, Version: v}.String()) - } - - switch len(searchedVersions) { - case 0: - return fmt.Sprintf("no matches for kind %q in group %q", e.GroupKind.Kind, e.GroupKind.Group) - case 1: - return fmt.Sprintf("no matches for kind %q in version %q", e.GroupKind.Kind, searchedVersions.List()[0]) - default: - return fmt.Sprintf("no matches for kind %q in versions %q", e.GroupKind.Kind, searchedVersions.List()) - } -} - -func IsNoMatchError(err error) bool { - if err == nil { - return false - } - switch err.(type) { - case *NoResourceMatchError, *NoKindMatchError: - return true - default: - return false - } -} diff --git a/src/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go b/src/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go deleted file mode 100644 index 1bc816fe3..000000000 --- a/src/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go +++ /dev/null @@ -1,105 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/runtime/schema" - utilerrors "k8s.io/apimachinery/pkg/util/errors" -) - -var ( - _ ResettableRESTMapper = &FirstHitRESTMapper{} -) - -// FirstHitRESTMapper is a wrapper for multiple RESTMappers which returns the -// first successful result for the singular requests -type FirstHitRESTMapper struct { - MultiRESTMapper -} - -func (m FirstHitRESTMapper) String() string { - return fmt.Sprintf("FirstHitRESTMapper{\n\t%v\n}", m.MultiRESTMapper) -} - -func (m FirstHitRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { - errors := []error{} - for _, t := range m.MultiRESTMapper { - ret, err := t.ResourceFor(resource) - if err == nil { - return ret, nil - } - errors = append(errors, err) - } - - return schema.GroupVersionResource{}, collapseAggregateErrors(errors) -} - -func (m FirstHitRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - errors := []error{} - for _, t := range m.MultiRESTMapper { - ret, err := t.KindFor(resource) - if err == nil { - return ret, nil - } - errors = append(errors, err) - } - - return schema.GroupVersionKind{}, collapseAggregateErrors(errors) -} - -// RESTMapping provides the REST mapping for the resource based on the -// kind and version. This implementation supports multiple REST schemas and -// return the first match. -func (m FirstHitRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { - errors := []error{} - for _, t := range m.MultiRESTMapper { - ret, err := t.RESTMapping(gk, versions...) 
- if err == nil { - return ret, nil - } - errors = append(errors, err) - } - - return nil, collapseAggregateErrors(errors) -} - -func (m FirstHitRESTMapper) Reset() { - m.MultiRESTMapper.Reset() -} - -// collapseAggregateErrors returns the minimal errors. it handles empty as nil, handles one item in a list -// by returning the item, and collapses all NoMatchErrors to a single one (since they should all be the same) -func collapseAggregateErrors(errors []error) error { - if len(errors) == 0 { - return nil - } - if len(errors) == 1 { - return errors[0] - } - - allNoMatchErrors := true - for _, err := range errors { - allNoMatchErrors = allNoMatchErrors && IsNoMatchError(err) - } - if allNoMatchErrors { - return errors[0] - } - - return utilerrors.NewAggregate(errors) -} diff --git a/src/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/src/vendor/k8s.io/apimachinery/pkg/api/meta/help.go deleted file mode 100644 index 899d3e8a6..000000000 --- a/src/vendor/k8s.io/apimachinery/pkg/api/meta/help.go +++ /dev/null @@ -1,265 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - "errors" - "fmt" - "reflect" - "sync" - - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" -) - -var ( - // isListCache maintains a cache of types that are checked for lists - // which is used by IsListType. - // TODO: remove and replace with an interface check - isListCache = struct { - lock sync.RWMutex - byType map[reflect.Type]bool - }{ - byType: make(map[reflect.Type]bool, 1024), - } -) - -// IsListType returns true if the provided Object has a slice called Items. -// TODO: Replace the code in this check with an interface comparison by -// -// creating and enforcing that lists implement a list accessor. -func IsListType(obj runtime.Object) bool { - switch t := obj.(type) { - case runtime.Unstructured: - return t.IsList() - } - t := reflect.TypeOf(obj) - - isListCache.lock.RLock() - ok, exists := isListCache.byType[t] - isListCache.lock.RUnlock() - - if !exists { - _, err := getItemsPtr(obj) - ok = err == nil - - // cache only the first 1024 types - isListCache.lock.Lock() - if len(isListCache.byType) < 1024 { - isListCache.byType[t] = ok - } - isListCache.lock.Unlock() - } - - return ok -} - -var ( - errExpectFieldItems = errors.New("no Items field in this object") - errExpectSliceItems = errors.New("Items field must be a slice of objects") -) - -// GetItemsPtr returns a pointer to the list object's Items member. -// If 'list' doesn't have an Items member, it's not really a list type -// and an error will be returned. -// This function will either return a pointer to a slice, or an error, but not both. -// TODO: this will be replaced with an interface in the future -func GetItemsPtr(list runtime.Object) (interface{}, error) { - obj, err := getItemsPtr(list) - if err != nil { - return nil, fmt.Errorf("%T is not a list: %v", list, err) - } - return obj, nil -} - -// getItemsPtr returns a pointer to the list object's Items member or an error. 
-func getItemsPtr(list runtime.Object) (interface{}, error) { - v, err := conversion.EnforcePtr(list) - if err != nil { - return nil, err - } - - items := v.FieldByName("Items") - if !items.IsValid() { - return nil, errExpectFieldItems - } - switch items.Kind() { - case reflect.Interface, reflect.Pointer: - target := reflect.TypeOf(items.Interface()).Elem() - if target.Kind() != reflect.Slice { - return nil, errExpectSliceItems - } - return items.Interface(), nil - case reflect.Slice: - return items.Addr().Interface(), nil - default: - return nil, errExpectSliceItems - } -} - -// EachListItem invokes fn on each runtime.Object in the list. Any error immediately terminates -// the loop. -func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error { - if unstructured, ok := obj.(runtime.Unstructured); ok { - return unstructured.EachListItem(fn) - } - // TODO: Change to an interface call? - itemsPtr, err := GetItemsPtr(obj) - if err != nil { - return err - } - items, err := conversion.EnforcePtr(itemsPtr) - if err != nil { - return err - } - len := items.Len() - if len == 0 { - return nil - } - takeAddr := false - if elemType := items.Type().Elem(); elemType.Kind() != reflect.Pointer && elemType.Kind() != reflect.Interface { - if !items.Index(0).CanAddr() { - return fmt.Errorf("unable to take address of items in %T for EachListItem", obj) - } - takeAddr = true - } - - for i := 0; i < len; i++ { - raw := items.Index(i) - if takeAddr { - raw = raw.Addr() - } - switch item := raw.Interface().(type) { - case *runtime.RawExtension: - if err := fn(item.Object); err != nil { - return err - } - case runtime.Object: - if err := fn(item); err != nil { - return err - } - default: - obj, ok := item.(runtime.Object) - if !ok { - return fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind()) - } - if err := fn(obj); err != nil { - return err - } - } - } - return nil -} - -// ExtractList returns obj's Items element as an array of runtime.Objects. -// Returns an error if obj is not a List type (does not have an Items member). -func ExtractList(obj runtime.Object) ([]runtime.Object, error) { - itemsPtr, err := GetItemsPtr(obj) - if err != nil { - return nil, err - } - items, err := conversion.EnforcePtr(itemsPtr) - if err != nil { - return nil, err - } - list := make([]runtime.Object, items.Len()) - for i := range list { - raw := items.Index(i) - switch item := raw.Interface().(type) { - case runtime.RawExtension: - switch { - case item.Object != nil: - list[i] = item.Object - case item.Raw != nil: - // TODO: Set ContentEncoding and ContentType correctly. - list[i] = &runtime.Unknown{Raw: item.Raw} - default: - list[i] = nil - } - case runtime.Object: - list[i] = item - default: - var found bool - if list[i], found = raw.Addr().Interface().(runtime.Object); !found { - return nil, fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind()) - } - } - } - return list, nil -} - -// objectSliceType is the type of a slice of Objects -var objectSliceType = reflect.TypeOf([]runtime.Object{}) - -// LenList returns the length of this list or 0 if it is not a list. -func LenList(list runtime.Object) int { - itemsPtr, err := GetItemsPtr(list) - if err != nil { - return 0 - } - items, err := conversion.EnforcePtr(itemsPtr) - if err != nil { - return 0 - } - return items.Len() -} - -// SetList sets the given list object's Items member have the elements given in -// objects. 
-// Returns an error if list is not a List type (does not have an Items member), -// or if any of the objects are not of the right type. -func SetList(list runtime.Object, objects []runtime.Object) error { - itemsPtr, err := GetItemsPtr(list) - if err != nil { - return err - } - items, err := conversion.EnforcePtr(itemsPtr) - if err != nil { - return err - } - if items.Type() == objectSliceType { - items.Set(reflect.ValueOf(objects)) - return nil - } - slice := reflect.MakeSlice(items.Type(), len(objects), len(objects)) - for i := range objects { - dest := slice.Index(i) - if dest.Type() == reflect.TypeOf(runtime.RawExtension{}) { - dest = dest.FieldByName("Object") - } - - // check to see if you're directly assignable - if reflect.TypeOf(objects[i]).AssignableTo(dest.Type()) { - dest.Set(reflect.ValueOf(objects[i])) - continue - } - - src, err := conversion.EnforcePtr(objects[i]) - if err != nil { - return err - } - if src.Type().AssignableTo(dest.Type()) { - dest.Set(src) - } else if src.Type().ConvertibleTo(dest.Type()) { - dest.Set(src.Convert(dest.Type())) - } else { - return fmt.Errorf("item[%d]: can't assign or convert %v into %v", i, src.Type(), dest.Type()) - } - } - items.Set(slice) - return nil -} diff --git a/src/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go b/src/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go deleted file mode 100644 index a35ce3bd0..000000000 --- a/src/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go +++ /dev/null @@ -1,143 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" -) - -type ListMetaAccessor interface { - GetListMeta() List -} - -// List lets you work with list metadata from any of the versioned or -// internal API objects. Attempting to set or retrieve a field on an object that does -// not support that field will be a no-op and return a default value. -type List metav1.ListInterface - -// Type exposes the type and APIVersion of versioned or internal API objects. -type Type metav1.Type - -// MetadataAccessor lets you work with object and list metadata from any of the versioned or -// internal API objects. Attempting to set or retrieve a field on an object that does -// not support that field (Name, UID, Namespace on lists) will be a no-op and return -// a default value. -// -// MetadataAccessor exposes Interface in a way that can be used with multiple objects. 
-type MetadataAccessor interface { - APIVersion(obj runtime.Object) (string, error) - SetAPIVersion(obj runtime.Object, version string) error - - Kind(obj runtime.Object) (string, error) - SetKind(obj runtime.Object, kind string) error - - Namespace(obj runtime.Object) (string, error) - SetNamespace(obj runtime.Object, namespace string) error - - Name(obj runtime.Object) (string, error) - SetName(obj runtime.Object, name string) error - - GenerateName(obj runtime.Object) (string, error) - SetGenerateName(obj runtime.Object, name string) error - - UID(obj runtime.Object) (types.UID, error) - SetUID(obj runtime.Object, uid types.UID) error - - SelfLink(obj runtime.Object) (string, error) - SetSelfLink(obj runtime.Object, selfLink string) error - - Labels(obj runtime.Object) (map[string]string, error) - SetLabels(obj runtime.Object, labels map[string]string) error - - Annotations(obj runtime.Object) (map[string]string, error) - SetAnnotations(obj runtime.Object, annotations map[string]string) error - - Continue(obj runtime.Object) (string, error) - SetContinue(obj runtime.Object, c string) error - - runtime.ResourceVersioner -} - -type RESTScopeName string - -const ( - RESTScopeNameNamespace RESTScopeName = "namespace" - RESTScopeNameRoot RESTScopeName = "root" -) - -// RESTScope contains the information needed to deal with REST resources that are in a resource hierarchy -type RESTScope interface { - // Name of the scope - Name() RESTScopeName -} - -// RESTMapping contains the information needed to deal with objects of a specific -// resource and kind in a RESTful manner. -type RESTMapping struct { - // Resource is the GroupVersionResource (location) for this endpoint - Resource schema.GroupVersionResource - - // GroupVersionKind is the GroupVersionKind (data format) to submit to this endpoint - GroupVersionKind schema.GroupVersionKind - - // Scope contains the information needed to deal with REST Resources that are in a resource hierarchy - Scope RESTScope -} - -// RESTMapper allows clients to map resources to kind, and map kind and version -// to interfaces for manipulating those objects. It is primarily intended for -// consumers of Kubernetes compatible REST APIs as defined in docs/devel/api-conventions.md. -// -// The Kubernetes API provides versioned resources and object kinds which are scoped -// to API groups. In other words, kinds and resources should not be assumed to be -// unique across groups. -// -// TODO: split into sub-interfaces -type RESTMapper interface { - // KindFor takes a partial resource and returns the single match. Returns an error if there are multiple matches - KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) - - // KindsFor takes a partial resource and returns the list of potential kinds in priority order - KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) - - // ResourceFor takes a partial resource and returns the single match. Returns an error if there are multiple matches - ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) - - // ResourcesFor takes a partial resource and returns the list of potential resource in priority order - ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) - - // RESTMapping identifies a preferred resource mapping for the provided group kind. 
- RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) - // RESTMappings returns all resource mappings for the provided group kind if no - // version search is provided. Otherwise identifies a preferred resource mapping for - // the provided version(s). - RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) - - ResourceSingularizer(resource string) (singular string, err error) -} - -// ResettableRESTMapper is a RESTMapper which is capable of resetting itself -// from discovery. -// All rest mappers that delegate to other rest mappers must implement this interface and dynamically -// check if the delegate mapper supports the Reset() operation. -type ResettableRESTMapper interface { - RESTMapper - Reset() -} diff --git a/src/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go b/src/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go deleted file mode 100644 index a4298114b..000000000 --- a/src/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go +++ /dev/null @@ -1,112 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - "sync" - - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// lazyObject defers loading the mapper and typer until necessary. -type lazyObject struct { - loader func() (RESTMapper, error) - - lock sync.Mutex - loaded bool - err error - mapper RESTMapper -} - -// NewLazyRESTMapperLoader handles unrecoverable errors when creating a RESTMapper / ObjectTyper by -// returning those initialization errors when the interface methods are invoked. This defers the -// initialization and any server calls until a client actually needs to perform the action. -func NewLazyRESTMapperLoader(fn func() (RESTMapper, error)) RESTMapper { - obj := &lazyObject{loader: fn} - return obj -} - -// init lazily loads the mapper and typer, returning an error if initialization has failed. 
-func (o *lazyObject) init() error { - o.lock.Lock() - defer o.lock.Unlock() - if o.loaded { - return o.err - } - o.mapper, o.err = o.loader() - o.loaded = true - return o.err -} - -var _ ResettableRESTMapper = &lazyObject{} - -func (o *lazyObject) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - if err := o.init(); err != nil { - return schema.GroupVersionKind{}, err - } - return o.mapper.KindFor(resource) -} - -func (o *lazyObject) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { - if err := o.init(); err != nil { - return []schema.GroupVersionKind{}, err - } - return o.mapper.KindsFor(resource) -} - -func (o *lazyObject) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { - if err := o.init(); err != nil { - return schema.GroupVersionResource{}, err - } - return o.mapper.ResourceFor(input) -} - -func (o *lazyObject) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { - if err := o.init(); err != nil { - return []schema.GroupVersionResource{}, err - } - return o.mapper.ResourcesFor(input) -} - -func (o *lazyObject) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { - if err := o.init(); err != nil { - return nil, err - } - return o.mapper.RESTMapping(gk, versions...) -} - -func (o *lazyObject) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { - if err := o.init(); err != nil { - return nil, err - } - return o.mapper.RESTMappings(gk, versions...) -} - -func (o *lazyObject) ResourceSingularizer(resource string) (singular string, err error) { - if err := o.init(); err != nil { - return "", err - } - return o.mapper.ResourceSingularizer(resource) -} - -func (o *lazyObject) Reset() { - o.lock.Lock() - defer o.lock.Unlock() - if o.loaded && o.err == nil { - MaybeResetRESTMapper(o.mapper) - } -} diff --git a/src/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/src/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go deleted file mode 100644 index 2551f07f5..000000000 --- a/src/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go +++ /dev/null @@ -1,643 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - "fmt" - "reflect" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/klog/v2" -) - -// errNotList is returned when an object implements the Object style interfaces but not the List style -// interfaces. -var errNotList = fmt.Errorf("object does not implement the List interfaces") - -var errNotCommon = fmt.Errorf("object does not implement the common interface for accessing the SelfLink") - -// CommonAccessor returns a Common interface for the provided object or an error if the object does -// not provide List. 
-func CommonAccessor(obj interface{}) (metav1.Common, error) { - switch t := obj.(type) { - case List: - return t, nil - case ListMetaAccessor: - if m := t.GetListMeta(); m != nil { - return m, nil - } - return nil, errNotCommon - case metav1.ListMetaAccessor: - if m := t.GetListMeta(); m != nil { - return m, nil - } - return nil, errNotCommon - case metav1.Object: - return t, nil - case metav1.ObjectMetaAccessor: - if m := t.GetObjectMeta(); m != nil { - return m, nil - } - return nil, errNotCommon - default: - return nil, errNotCommon - } -} - -// ListAccessor returns a List interface for the provided object or an error if the object does -// not provide List. -// IMPORTANT: Objects are NOT a superset of lists. Do not use this check to determine whether an -// object *is* a List. -func ListAccessor(obj interface{}) (List, error) { - switch t := obj.(type) { - case List: - return t, nil - case ListMetaAccessor: - if m := t.GetListMeta(); m != nil { - return m, nil - } - return nil, errNotList - case metav1.ListMetaAccessor: - if m := t.GetListMeta(); m != nil { - return m, nil - } - return nil, errNotList - default: - return nil, errNotList - } -} - -// errNotObject is returned when an object implements the List style interfaces but not the Object style -// interfaces. -var errNotObject = fmt.Errorf("object does not implement the Object interfaces") - -// Accessor takes an arbitrary object pointer and returns meta.Interface. -// obj must be a pointer to an API type. An error is returned if the minimum -// required fields are missing. Fields that are not required return the default -// value and are a no-op if set. -func Accessor(obj interface{}) (metav1.Object, error) { - switch t := obj.(type) { - case metav1.Object: - return t, nil - case metav1.ObjectMetaAccessor: - if m := t.GetObjectMeta(); m != nil { - return m, nil - } - return nil, errNotObject - default: - return nil, errNotObject - } -} - -// AsPartialObjectMetadata takes the metav1 interface and returns a partial object. -// TODO: consider making this solely a conversion action. -func AsPartialObjectMetadata(m metav1.Object) *metav1.PartialObjectMetadata { - switch t := m.(type) { - case *metav1.ObjectMeta: - return &metav1.PartialObjectMetadata{ObjectMeta: *t} - default: - return &metav1.PartialObjectMetadata{ - ObjectMeta: metav1.ObjectMeta{ - Name: m.GetName(), - GenerateName: m.GetGenerateName(), - Namespace: m.GetNamespace(), - SelfLink: m.GetSelfLink(), - UID: m.GetUID(), - ResourceVersion: m.GetResourceVersion(), - Generation: m.GetGeneration(), - CreationTimestamp: m.GetCreationTimestamp(), - DeletionTimestamp: m.GetDeletionTimestamp(), - DeletionGracePeriodSeconds: m.GetDeletionGracePeriodSeconds(), - Labels: m.GetLabels(), - Annotations: m.GetAnnotations(), - OwnerReferences: m.GetOwnerReferences(), - Finalizers: m.GetFinalizers(), - ManagedFields: m.GetManagedFields(), - }, - } - } -} - -// TypeAccessor returns an interface that allows retrieving and modifying the APIVersion -// and Kind of an in-memory internal object. -// TODO: this interface is used to test code that does not have ObjectMeta or ListMeta -// in round tripping (objects which can use apiVersion/kind, but do not fit the Kube -// api conventions). 
-func TypeAccessor(obj interface{}) (Type, error) { - if typed, ok := obj.(runtime.Object); ok { - return objectAccessor{typed}, nil - } - v, err := conversion.EnforcePtr(obj) - if err != nil { - return nil, err - } - t := v.Type() - if v.Kind() != reflect.Struct { - return nil, fmt.Errorf("expected struct, but got %v: %v (%#v)", v.Kind(), t, v.Interface()) - } - - typeMeta := v.FieldByName("TypeMeta") - if !typeMeta.IsValid() { - return nil, fmt.Errorf("struct %v lacks embedded TypeMeta type", t) - } - a := &genericAccessor{} - if err := extractFromTypeMeta(typeMeta, a); err != nil { - return nil, fmt.Errorf("unable to find type fields on %#v: %v", typeMeta, err) - } - return a, nil -} - -type objectAccessor struct { - runtime.Object -} - -func (obj objectAccessor) GetKind() string { - return obj.GetObjectKind().GroupVersionKind().Kind -} - -func (obj objectAccessor) SetKind(kind string) { - gvk := obj.GetObjectKind().GroupVersionKind() - gvk.Kind = kind - obj.GetObjectKind().SetGroupVersionKind(gvk) -} - -func (obj objectAccessor) GetAPIVersion() string { - return obj.GetObjectKind().GroupVersionKind().GroupVersion().String() -} - -func (obj objectAccessor) SetAPIVersion(version string) { - gvk := obj.GetObjectKind().GroupVersionKind() - gv, err := schema.ParseGroupVersion(version) - if err != nil { - gv = schema.GroupVersion{Version: version} - } - gvk.Group, gvk.Version = gv.Group, gv.Version - obj.GetObjectKind().SetGroupVersionKind(gvk) -} - -// NewAccessor returns a MetadataAccessor that can retrieve -// or manipulate resource version on objects derived from core API -// metadata concepts. -func NewAccessor() MetadataAccessor { - return resourceAccessor{} -} - -// resourceAccessor implements ResourceVersioner and SelfLinker. -type resourceAccessor struct{} - -func (resourceAccessor) Kind(obj runtime.Object) (string, error) { - return objectAccessor{obj}.GetKind(), nil -} - -func (resourceAccessor) SetKind(obj runtime.Object, kind string) error { - objectAccessor{obj}.SetKind(kind) - return nil -} - -func (resourceAccessor) APIVersion(obj runtime.Object) (string, error) { - return objectAccessor{obj}.GetAPIVersion(), nil -} - -func (resourceAccessor) SetAPIVersion(obj runtime.Object, version string) error { - objectAccessor{obj}.SetAPIVersion(version) - return nil -} - -func (resourceAccessor) Namespace(obj runtime.Object) (string, error) { - accessor, err := Accessor(obj) - if err != nil { - return "", err - } - return accessor.GetNamespace(), nil -} - -func (resourceAccessor) SetNamespace(obj runtime.Object, namespace string) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetNamespace(namespace) - return nil -} - -func (resourceAccessor) Name(obj runtime.Object) (string, error) { - accessor, err := Accessor(obj) - if err != nil { - return "", err - } - return accessor.GetName(), nil -} - -func (resourceAccessor) SetName(obj runtime.Object, name string) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetName(name) - return nil -} - -func (resourceAccessor) GenerateName(obj runtime.Object) (string, error) { - accessor, err := Accessor(obj) - if err != nil { - return "", err - } - return accessor.GetGenerateName(), nil -} - -func (resourceAccessor) SetGenerateName(obj runtime.Object, name string) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetGenerateName(name) - return nil -} - -func (resourceAccessor) UID(obj runtime.Object) (types.UID, error) { - accessor, err 
:= Accessor(obj) - if err != nil { - return "", err - } - return accessor.GetUID(), nil -} - -func (resourceAccessor) SetUID(obj runtime.Object, uid types.UID) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetUID(uid) - return nil -} - -func (resourceAccessor) SelfLink(obj runtime.Object) (string, error) { - accessor, err := CommonAccessor(obj) - if err != nil { - return "", err - } - return accessor.GetSelfLink(), nil -} - -func (resourceAccessor) SetSelfLink(obj runtime.Object, selfLink string) error { - accessor, err := CommonAccessor(obj) - if err != nil { - return err - } - accessor.SetSelfLink(selfLink) - return nil -} - -func (resourceAccessor) Labels(obj runtime.Object) (map[string]string, error) { - accessor, err := Accessor(obj) - if err != nil { - return nil, err - } - return accessor.GetLabels(), nil -} - -func (resourceAccessor) SetLabels(obj runtime.Object, labels map[string]string) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetLabels(labels) - return nil -} - -func (resourceAccessor) Annotations(obj runtime.Object) (map[string]string, error) { - accessor, err := Accessor(obj) - if err != nil { - return nil, err - } - return accessor.GetAnnotations(), nil -} - -func (resourceAccessor) SetAnnotations(obj runtime.Object, annotations map[string]string) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetAnnotations(annotations) - return nil -} - -func (resourceAccessor) ResourceVersion(obj runtime.Object) (string, error) { - accessor, err := CommonAccessor(obj) - if err != nil { - return "", err - } - return accessor.GetResourceVersion(), nil -} - -func (resourceAccessor) SetResourceVersion(obj runtime.Object, version string) error { - accessor, err := CommonAccessor(obj) - if err != nil { - return err - } - accessor.SetResourceVersion(version) - return nil -} - -func (resourceAccessor) Continue(obj runtime.Object) (string, error) { - accessor, err := ListAccessor(obj) - if err != nil { - return "", err - } - return accessor.GetContinue(), nil -} - -func (resourceAccessor) SetContinue(obj runtime.Object, version string) error { - accessor, err := ListAccessor(obj) - if err != nil { - return err - } - accessor.SetContinue(version) - return nil -} - -// extractFromOwnerReference extracts v to o. v is the OwnerReferences field of an object. -func extractFromOwnerReference(v reflect.Value, o *metav1.OwnerReference) error { - if err := runtime.Field(v, "APIVersion", &o.APIVersion); err != nil { - return err - } - if err := runtime.Field(v, "Kind", &o.Kind); err != nil { - return err - } - if err := runtime.Field(v, "Name", &o.Name); err != nil { - return err - } - if err := runtime.Field(v, "UID", &o.UID); err != nil { - return err - } - var controllerPtr *bool - if err := runtime.Field(v, "Controller", &controllerPtr); err != nil { - return err - } - if controllerPtr != nil { - controller := *controllerPtr - o.Controller = &controller - } - var blockOwnerDeletionPtr *bool - if err := runtime.Field(v, "BlockOwnerDeletion", &blockOwnerDeletionPtr); err != nil { - return err - } - if blockOwnerDeletionPtr != nil { - block := *blockOwnerDeletionPtr - o.BlockOwnerDeletion = &block - } - return nil -} - -// setOwnerReference sets v to o. v is the OwnerReferences field of an object. 
-func setOwnerReference(v reflect.Value, o *metav1.OwnerReference) error { - if err := runtime.SetField(o.APIVersion, v, "APIVersion"); err != nil { - return err - } - if err := runtime.SetField(o.Kind, v, "Kind"); err != nil { - return err - } - if err := runtime.SetField(o.Name, v, "Name"); err != nil { - return err - } - if err := runtime.SetField(o.UID, v, "UID"); err != nil { - return err - } - if o.Controller != nil { - controller := *(o.Controller) - if err := runtime.SetField(&controller, v, "Controller"); err != nil { - return err - } - } - if o.BlockOwnerDeletion != nil { - block := *(o.BlockOwnerDeletion) - if err := runtime.SetField(&block, v, "BlockOwnerDeletion"); err != nil { - return err - } - } - return nil -} - -// genericAccessor contains pointers to strings that can modify an arbitrary -// struct and implements the Accessor interface. -type genericAccessor struct { - namespace *string - name *string - generateName *string - uid *types.UID - apiVersion *string - kind *string - resourceVersion *string - selfLink *string - creationTimestamp *metav1.Time - deletionTimestamp **metav1.Time - labels *map[string]string - annotations *map[string]string - ownerReferences reflect.Value - finalizers *[]string -} - -func (a genericAccessor) GetNamespace() string { - if a.namespace == nil { - return "" - } - return *a.namespace -} - -func (a genericAccessor) SetNamespace(namespace string) { - if a.namespace == nil { - return - } - *a.namespace = namespace -} - -func (a genericAccessor) GetName() string { - if a.name == nil { - return "" - } - return *a.name -} - -func (a genericAccessor) SetName(name string) { - if a.name == nil { - return - } - *a.name = name -} - -func (a genericAccessor) GetGenerateName() string { - if a.generateName == nil { - return "" - } - return *a.generateName -} - -func (a genericAccessor) SetGenerateName(generateName string) { - if a.generateName == nil { - return - } - *a.generateName = generateName -} - -func (a genericAccessor) GetUID() types.UID { - if a.uid == nil { - return "" - } - return *a.uid -} - -func (a genericAccessor) SetUID(uid types.UID) { - if a.uid == nil { - return - } - *a.uid = uid -} - -func (a genericAccessor) GetAPIVersion() string { - return *a.apiVersion -} - -func (a genericAccessor) SetAPIVersion(version string) { - *a.apiVersion = version -} - -func (a genericAccessor) GetKind() string { - return *a.kind -} - -func (a genericAccessor) SetKind(kind string) { - *a.kind = kind -} - -func (a genericAccessor) GetResourceVersion() string { - return *a.resourceVersion -} - -func (a genericAccessor) SetResourceVersion(version string) { - *a.resourceVersion = version -} - -func (a genericAccessor) GetSelfLink() string { - return *a.selfLink -} - -func (a genericAccessor) SetSelfLink(selfLink string) { - *a.selfLink = selfLink -} - -func (a genericAccessor) GetCreationTimestamp() metav1.Time { - return *a.creationTimestamp -} - -func (a genericAccessor) SetCreationTimestamp(timestamp metav1.Time) { - *a.creationTimestamp = timestamp -} - -func (a genericAccessor) GetDeletionTimestamp() *metav1.Time { - return *a.deletionTimestamp -} - -func (a genericAccessor) SetDeletionTimestamp(timestamp *metav1.Time) { - *a.deletionTimestamp = timestamp -} - -func (a genericAccessor) GetLabels() map[string]string { - if a.labels == nil { - return nil - } - return *a.labels -} - -func (a genericAccessor) SetLabels(labels map[string]string) { - *a.labels = labels -} - -func (a genericAccessor) GetAnnotations() map[string]string { - if a.annotations == 
nil { - return nil - } - return *a.annotations -} - -func (a genericAccessor) SetAnnotations(annotations map[string]string) { - if a.annotations == nil { - emptyAnnotations := make(map[string]string) - a.annotations = &emptyAnnotations - } - *a.annotations = annotations -} - -func (a genericAccessor) GetFinalizers() []string { - if a.finalizers == nil { - return nil - } - return *a.finalizers -} - -func (a genericAccessor) SetFinalizers(finalizers []string) { - *a.finalizers = finalizers -} - -func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { - var ret []metav1.OwnerReference - s := a.ownerReferences - if s.Kind() != reflect.Pointer || s.Elem().Kind() != reflect.Slice { - klog.Errorf("expect %v to be a pointer to slice", s) - return ret - } - s = s.Elem() - // Set the capacity to one element greater to avoid copy if the caller later append an element. - ret = make([]metav1.OwnerReference, s.Len(), s.Len()+1) - for i := 0; i < s.Len(); i++ { - if err := extractFromOwnerReference(s.Index(i), &ret[i]); err != nil { - klog.Errorf("extractFromOwnerReference failed: %v", err) - return ret - } - } - return ret -} - -func (a genericAccessor) SetOwnerReferences(references []metav1.OwnerReference) { - s := a.ownerReferences - if s.Kind() != reflect.Pointer || s.Elem().Kind() != reflect.Slice { - klog.Errorf("expect %v to be a pointer to slice", s) - } - s = s.Elem() - newReferences := reflect.MakeSlice(s.Type(), len(references), len(references)) - for i := 0; i < len(references); i++ { - if err := setOwnerReference(newReferences.Index(i), &references[i]); err != nil { - klog.Errorf("setOwnerReference failed: %v", err) - return - } - } - s.Set(newReferences) -} - -// extractFromTypeMeta extracts pointers to version and kind fields from an object -func extractFromTypeMeta(v reflect.Value, a *genericAccessor) error { - if err := runtime.FieldPtr(v, "APIVersion", &a.apiVersion); err != nil { - return err - } - if err := runtime.FieldPtr(v, "Kind", &a.kind); err != nil { - return err - } - return nil -} diff --git a/src/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go b/src/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go deleted file mode 100644 index b7e971250..000000000 --- a/src/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go +++ /dev/null @@ -1,220 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - "fmt" - "strings" - - "k8s.io/apimachinery/pkg/runtime/schema" - utilerrors "k8s.io/apimachinery/pkg/util/errors" -) - -var ( - _ ResettableRESTMapper = MultiRESTMapper{} -) - -// MultiRESTMapper is a wrapper for multiple RESTMappers. 
-type MultiRESTMapper []RESTMapper - -func (m MultiRESTMapper) String() string { - nested := make([]string, 0, len(m)) - for _, t := range m { - currString := fmt.Sprintf("%v", t) - splitStrings := strings.Split(currString, "\n") - nested = append(nested, strings.Join(splitStrings, "\n\t")) - } - - return fmt.Sprintf("MultiRESTMapper{\n\t%s\n}", strings.Join(nested, "\n\t")) -} - -// ResourceSingularizer converts a REST resource name from plural to singular (e.g., from pods to pod) -// This implementation supports multiple REST schemas and return the first match. -func (m MultiRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { - for _, t := range m { - singular, err = t.ResourceSingularizer(resource) - if err == nil { - return - } - } - return -} - -func (m MultiRESTMapper) ResourcesFor(resource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { - allGVRs := []schema.GroupVersionResource{} - for _, t := range m { - gvrs, err := t.ResourcesFor(resource) - // ignore "no match" errors, but any other error percolates back up - if IsNoMatchError(err) { - continue - } - if err != nil { - return nil, err - } - - // walk the existing values to de-dup - for _, curr := range gvrs { - found := false - for _, existing := range allGVRs { - if curr == existing { - found = true - break - } - } - - if !found { - allGVRs = append(allGVRs, curr) - } - } - } - - if len(allGVRs) == 0 { - return nil, &NoResourceMatchError{PartialResource: resource} - } - - return allGVRs, nil -} - -func (m MultiRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvk []schema.GroupVersionKind, err error) { - allGVKs := []schema.GroupVersionKind{} - for _, t := range m { - gvks, err := t.KindsFor(resource) - // ignore "no match" errors, but any other error percolates back up - if IsNoMatchError(err) { - continue - } - if err != nil { - return nil, err - } - - // walk the existing values to de-dup - for _, curr := range gvks { - found := false - for _, existing := range allGVKs { - if curr == existing { - found = true - break - } - } - - if !found { - allGVKs = append(allGVKs, curr) - } - } - } - - if len(allGVKs) == 0 { - return nil, &NoResourceMatchError{PartialResource: resource} - } - - return allGVKs, nil -} - -func (m MultiRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { - resources, err := m.ResourcesFor(resource) - if err != nil { - return schema.GroupVersionResource{}, err - } - if len(resources) == 1 { - return resources[0], nil - } - - return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources} -} - -func (m MultiRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - kinds, err := m.KindsFor(resource) - if err != nil { - return schema.GroupVersionKind{}, err - } - if len(kinds) == 1 { - return kinds[0], nil - } - - return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds} -} - -// RESTMapping provides the REST mapping for the resource based on the -// kind and version. This implementation supports multiple REST schemas and -// return the first match. -func (m MultiRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { - allMappings := []*RESTMapping{} - errors := []error{} - - for _, t := range m { - currMapping, err := t.RESTMapping(gk, versions...) 
- // ignore "no match" errors, but any other error percolates back up - if IsNoMatchError(err) { - continue - } - if err != nil { - errors = append(errors, err) - continue - } - - allMappings = append(allMappings, currMapping) - } - - // if we got exactly one mapping, then use it even if other requested failed - if len(allMappings) == 1 { - return allMappings[0], nil - } - if len(allMappings) > 1 { - var kinds []schema.GroupVersionKind - for _, m := range allMappings { - kinds = append(kinds, m.GroupVersionKind) - } - return nil, &AmbiguousKindError{PartialKind: gk.WithVersion(""), MatchingKinds: kinds} - } - if len(errors) > 0 { - return nil, utilerrors.NewAggregate(errors) - } - return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions} -} - -// RESTMappings returns all possible RESTMappings for the provided group kind, or an error -// if the type is not recognized. -func (m MultiRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { - var allMappings []*RESTMapping - var errors []error - - for _, t := range m { - currMappings, err := t.RESTMappings(gk, versions...) - // ignore "no match" errors, but any other error percolates back up - if IsNoMatchError(err) { - continue - } - if err != nil { - errors = append(errors, err) - continue - } - allMappings = append(allMappings, currMappings...) - } - if len(errors) > 0 { - return nil, utilerrors.NewAggregate(errors) - } - if len(allMappings) == 0 { - return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions} - } - return allMappings, nil -} - -func (m MultiRESTMapper) Reset() { - for _, t := range m { - MaybeResetRESTMapper(t) - } -} diff --git a/src/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go b/src/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go deleted file mode 100644 index 4f097c9c9..000000000 --- a/src/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go +++ /dev/null @@ -1,230 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/runtime/schema" -) - -const ( - AnyGroup = "*" - AnyVersion = "*" - AnyResource = "*" - AnyKind = "*" -) - -var ( - _ ResettableRESTMapper = PriorityRESTMapper{} -) - -// PriorityRESTMapper is a wrapper for automatically choosing a particular Resource or Kind -// when multiple matches are possible -type PriorityRESTMapper struct { - // Delegate is the RESTMapper to use to locate all the Kind and Resource matches - Delegate RESTMapper - - // ResourcePriority is a list of priority patterns to apply to matching resources. - // The list of all matching resources is narrowed based on the patterns until only one remains. - // A pattern with no matches is skipped. A pattern with more than one match uses its - // matches as the list to continue matching against. - ResourcePriority []schema.GroupVersionResource - - // KindPriority is a list of priority patterns to apply to matching kinds. 
- // The list of all matching kinds is narrowed based on the patterns until only one remains. - // A pattern with no matches is skipped. A pattern with more than one match uses its - // matches as the list to continue matching against. - KindPriority []schema.GroupVersionKind -} - -func (m PriorityRESTMapper) String() string { - return fmt.Sprintf("PriorityRESTMapper{\n\t%v\n\t%v\n\t%v\n}", m.ResourcePriority, m.KindPriority, m.Delegate) -} - -// ResourceFor finds all resources, then passes them through the ResourcePriority patterns to find a single matching hit. -func (m PriorityRESTMapper) ResourceFor(partiallySpecifiedResource schema.GroupVersionResource) (schema.GroupVersionResource, error) { - originalGVRs, originalErr := m.Delegate.ResourcesFor(partiallySpecifiedResource) - if originalErr != nil && len(originalGVRs) == 0 { - return schema.GroupVersionResource{}, originalErr - } - if len(originalGVRs) == 1 { - return originalGVRs[0], originalErr - } - - remainingGVRs := append([]schema.GroupVersionResource{}, originalGVRs...) - for _, pattern := range m.ResourcePriority { - matchedGVRs := []schema.GroupVersionResource{} - for _, gvr := range remainingGVRs { - if resourceMatches(pattern, gvr) { - matchedGVRs = append(matchedGVRs, gvr) - } - } - - switch len(matchedGVRs) { - case 0: - // if you have no matches, then nothing matched this pattern just move to the next - continue - case 1: - // one match, return - return matchedGVRs[0], originalErr - default: - // more than one match, use the matched hits as the list moving to the next pattern. - // this way you can have a series of selection criteria - remainingGVRs = matchedGVRs - } - } - - return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingResources: originalGVRs} -} - -// KindFor finds all kinds, then passes them through the KindPriority patterns to find a single matching hit. -func (m PriorityRESTMapper) KindFor(partiallySpecifiedResource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - originalGVKs, originalErr := m.Delegate.KindsFor(partiallySpecifiedResource) - if originalErr != nil && len(originalGVKs) == 0 { - return schema.GroupVersionKind{}, originalErr - } - if len(originalGVKs) == 1 { - return originalGVKs[0], originalErr - } - - remainingGVKs := append([]schema.GroupVersionKind{}, originalGVKs...) - for _, pattern := range m.KindPriority { - matchedGVKs := []schema.GroupVersionKind{} - for _, gvr := range remainingGVKs { - if kindMatches(pattern, gvr) { - matchedGVKs = append(matchedGVKs, gvr) - } - } - - switch len(matchedGVKs) { - case 0: - // if you have no matches, then nothing matched this pattern just move to the next - continue - case 1: - // one match, return - return matchedGVKs[0], originalErr - default: - // more than one match, use the matched hits as the list moving to the next pattern. 
- // this way you can have a series of selection criteria - remainingGVKs = matchedGVKs - } - } - - return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingKinds: originalGVKs} -} - -func resourceMatches(pattern schema.GroupVersionResource, resource schema.GroupVersionResource) bool { - if pattern.Group != AnyGroup && pattern.Group != resource.Group { - return false - } - if pattern.Version != AnyVersion && pattern.Version != resource.Version { - return false - } - if pattern.Resource != AnyResource && pattern.Resource != resource.Resource { - return false - } - - return true -} - -func kindMatches(pattern schema.GroupVersionKind, kind schema.GroupVersionKind) bool { - if pattern.Group != AnyGroup && pattern.Group != kind.Group { - return false - } - if pattern.Version != AnyVersion && pattern.Version != kind.Version { - return false - } - if pattern.Kind != AnyKind && pattern.Kind != kind.Kind { - return false - } - - return true -} - -func (m PriorityRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (mapping *RESTMapping, err error) { - mappings, originalErr := m.Delegate.RESTMappings(gk, versions...) - if originalErr != nil && len(mappings) == 0 { - return nil, originalErr - } - - // any versions the user provides take priority - priorities := m.KindPriority - if len(versions) > 0 { - priorities = make([]schema.GroupVersionKind, 0, len(m.KindPriority)+len(versions)) - for _, version := range versions { - gv := schema.GroupVersion{ - Version: version, - Group: gk.Group, - } - priorities = append(priorities, gv.WithKind(AnyKind)) - } - priorities = append(priorities, m.KindPriority...) - } - - remaining := append([]*RESTMapping{}, mappings...) - for _, pattern := range priorities { - var matching []*RESTMapping - for _, m := range remaining { - if kindMatches(pattern, m.GroupVersionKind) { - matching = append(matching, m) - } - } - - switch len(matching) { - case 0: - // if you have no matches, then nothing matched this pattern just move to the next - continue - case 1: - // one match, return - return matching[0], originalErr - default: - // more than one match, use the matched hits as the list moving to the next pattern. - // this way you can have a series of selection criteria - remaining = matching - } - } - if len(remaining) == 1 { - return remaining[0], originalErr - } - - var kinds []schema.GroupVersionKind - for _, m := range mappings { - kinds = append(kinds, m.GroupVersionKind) - } - return nil, &AmbiguousKindError{PartialKind: gk.WithVersion(""), MatchingKinds: kinds} -} - -func (m PriorityRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { - return m.Delegate.RESTMappings(gk, versions...) 
-} - -func (m PriorityRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { - return m.Delegate.ResourceSingularizer(resource) -} - -func (m PriorityRESTMapper) ResourcesFor(partiallySpecifiedResource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { - return m.Delegate.ResourcesFor(partiallySpecifiedResource) -} - -func (m PriorityRESTMapper) KindsFor(partiallySpecifiedResource schema.GroupVersionResource) (gvk []schema.GroupVersionKind, err error) { - return m.Delegate.KindsFor(partiallySpecifiedResource) -} - -func (m PriorityRESTMapper) Reset() { - MaybeResetRESTMapper(m.Delegate) -} diff --git a/src/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go b/src/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go deleted file mode 100644 index 91cb98cae..000000000 --- a/src/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go +++ /dev/null @@ -1,529 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// TODO: move everything in this file to pkg/api/rest -package meta - -import ( - "fmt" - "sort" - "strings" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// Implements RESTScope interface -type restScope struct { - name RESTScopeName -} - -func (r *restScope) Name() RESTScopeName { - return r.name -} - -var RESTScopeNamespace = &restScope{ - name: RESTScopeNameNamespace, -} - -var RESTScopeRoot = &restScope{ - name: RESTScopeNameRoot, -} - -// DefaultRESTMapper exposes mappings between the types defined in a -// runtime.Scheme. It assumes that all types defined the provided scheme -// can be mapped with the provided MetadataAccessor and Codec interfaces. -// -// The resource name of a Kind is defined as the lowercase, -// English-plural version of the Kind string. -// When converting from resource to Kind, the singular version of the -// resource name is also accepted for convenience. -// -// TODO: Only accept plural for some operations for increased control? -// (`get pod bar` vs `get pods bar`) -type DefaultRESTMapper struct { - defaultGroupVersions []schema.GroupVersion - - resourceToKind map[schema.GroupVersionResource]schema.GroupVersionKind - kindToPluralResource map[schema.GroupVersionKind]schema.GroupVersionResource - kindToScope map[schema.GroupVersionKind]RESTScope - singularToPlural map[schema.GroupVersionResource]schema.GroupVersionResource - pluralToSingular map[schema.GroupVersionResource]schema.GroupVersionResource -} - -func (m *DefaultRESTMapper) String() string { - if m == nil { - return "" - } - return fmt.Sprintf("DefaultRESTMapper{kindToPluralResource=%v}", m.kindToPluralResource) -} - -var _ RESTMapper = &DefaultRESTMapper{} - -// NewDefaultRESTMapper initializes a mapping between Kind and APIVersion -// to a resource name and back based on the objects in a runtime.Scheme -// and the Kubernetes API conventions. 
Takes a group name, a priority list of the versions -// to search when an object has no default version (set empty to return an error), -// and a function that retrieves the correct metadata for a given version. -func NewDefaultRESTMapper(defaultGroupVersions []schema.GroupVersion) *DefaultRESTMapper { - resourceToKind := make(map[schema.GroupVersionResource]schema.GroupVersionKind) - kindToPluralResource := make(map[schema.GroupVersionKind]schema.GroupVersionResource) - kindToScope := make(map[schema.GroupVersionKind]RESTScope) - singularToPlural := make(map[schema.GroupVersionResource]schema.GroupVersionResource) - pluralToSingular := make(map[schema.GroupVersionResource]schema.GroupVersionResource) - // TODO: verify name mappings work correctly when versions differ - - return &DefaultRESTMapper{ - resourceToKind: resourceToKind, - kindToPluralResource: kindToPluralResource, - kindToScope: kindToScope, - defaultGroupVersions: defaultGroupVersions, - singularToPlural: singularToPlural, - pluralToSingular: pluralToSingular, - } -} - -func (m *DefaultRESTMapper) Add(kind schema.GroupVersionKind, scope RESTScope) { - plural, singular := UnsafeGuessKindToResource(kind) - m.AddSpecific(kind, plural, singular, scope) -} - -func (m *DefaultRESTMapper) AddSpecific(kind schema.GroupVersionKind, plural, singular schema.GroupVersionResource, scope RESTScope) { - m.singularToPlural[singular] = plural - m.pluralToSingular[plural] = singular - - m.resourceToKind[singular] = kind - m.resourceToKind[plural] = kind - - m.kindToPluralResource[kind] = plural - m.kindToScope[kind] = scope -} - -// unpluralizedSuffixes is a list of resource suffixes that are the same plural and singular -// This is only is only necessary because some bits of code are lazy and don't actually use the RESTMapper like they should. -// TODO eliminate this so that different callers can correctly map to resources. This probably means updating all -// callers to use the RESTMapper they mean. -var unpluralizedSuffixes = []string{ - "endpoints", -} - -// UnsafeGuessKindToResource converts Kind to a resource name. -// Broken. This method only "sort of" works when used outside of this package. It assumes that Kinds and Resources match -// and they aren't guaranteed to do so. 
-func UnsafeGuessKindToResource(kind schema.GroupVersionKind) ( /*plural*/ schema.GroupVersionResource /*singular*/, schema.GroupVersionResource) { - kindName := kind.Kind - if len(kindName) == 0 { - return schema.GroupVersionResource{}, schema.GroupVersionResource{} - } - singularName := strings.ToLower(kindName) - singular := kind.GroupVersion().WithResource(singularName) - - for _, skip := range unpluralizedSuffixes { - if strings.HasSuffix(singularName, skip) { - return singular, singular - } - } - - switch string(singularName[len(singularName)-1]) { - case "s": - return kind.GroupVersion().WithResource(singularName + "es"), singular - case "y": - return kind.GroupVersion().WithResource(strings.TrimSuffix(singularName, "y") + "ies"), singular - } - - return kind.GroupVersion().WithResource(singularName + "s"), singular -} - -// ResourceSingularizer implements RESTMapper -// It converts a resource name from plural to singular (e.g., from pods to pod) -func (m *DefaultRESTMapper) ResourceSingularizer(resourceType string) (string, error) { - partialResource := schema.GroupVersionResource{Resource: resourceType} - resources, err := m.ResourcesFor(partialResource) - if err != nil { - return resourceType, err - } - - singular := schema.GroupVersionResource{} - for _, curr := range resources { - currSingular, ok := m.pluralToSingular[curr] - if !ok { - continue - } - if singular.Empty() { - singular = currSingular - continue - } - - if currSingular.Resource != singular.Resource { - return resourceType, fmt.Errorf("multiple possible singular resources (%v) found for %v", resources, resourceType) - } - } - - if singular.Empty() { - return resourceType, fmt.Errorf("no singular of resource %v has been defined", resourceType) - } - - return singular.Resource, nil -} - -// coerceResourceForMatching makes the resource lower case and converts internal versions to unspecified (legacy behavior) -func coerceResourceForMatching(resource schema.GroupVersionResource) schema.GroupVersionResource { - resource.Resource = strings.ToLower(resource.Resource) - if resource.Version == runtime.APIVersionInternal { - resource.Version = "" - } - - return resource -} - -func (m *DefaultRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { - resource := coerceResourceForMatching(input) - - hasResource := len(resource.Resource) > 0 - hasGroup := len(resource.Group) > 0 - hasVersion := len(resource.Version) > 0 - - if !hasResource { - return nil, fmt.Errorf("a resource must be present, got: %v", resource) - } - - ret := []schema.GroupVersionResource{} - switch { - case hasGroup && hasVersion: - // fully qualified. Find the exact match - for plural, singular := range m.pluralToSingular { - if singular == resource { - ret = append(ret, plural) - break - } - if plural == resource { - ret = append(ret, plural) - break - } - } - - case hasGroup: - // given a group, prefer an exact match. If you don't find one, resort to a prefix match on group - foundExactMatch := false - requestedGroupResource := resource.GroupResource() - for plural, singular := range m.pluralToSingular { - if singular.GroupResource() == requestedGroupResource { - foundExactMatch = true - ret = append(ret, plural) - } - if plural.GroupResource() == requestedGroupResource { - foundExactMatch = true - ret = append(ret, plural) - } - } - - // if you didn't find an exact match, match on group prefixing. 
This allows storageclass.storage to match - // storageclass.storage.k8s.io - if !foundExactMatch { - for plural, singular := range m.pluralToSingular { - if !strings.HasPrefix(plural.Group, requestedGroupResource.Group) { - continue - } - if singular.Resource == requestedGroupResource.Resource { - ret = append(ret, plural) - } - if plural.Resource == requestedGroupResource.Resource { - ret = append(ret, plural) - } - } - - } - - case hasVersion: - for plural, singular := range m.pluralToSingular { - if singular.Version == resource.Version && singular.Resource == resource.Resource { - ret = append(ret, plural) - } - if plural.Version == resource.Version && plural.Resource == resource.Resource { - ret = append(ret, plural) - } - } - - default: - for plural, singular := range m.pluralToSingular { - if singular.Resource == resource.Resource { - ret = append(ret, plural) - } - if plural.Resource == resource.Resource { - ret = append(ret, plural) - } - } - } - - if len(ret) == 0 { - return nil, &NoResourceMatchError{PartialResource: resource} - } - - sort.Sort(resourceByPreferredGroupVersion{ret, m.defaultGroupVersions}) - return ret, nil -} - -func (m *DefaultRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { - resources, err := m.ResourcesFor(resource) - if err != nil { - return schema.GroupVersionResource{}, err - } - if len(resources) == 1 { - return resources[0], nil - } - - return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources} -} - -func (m *DefaultRESTMapper) KindsFor(input schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { - resource := coerceResourceForMatching(input) - - hasResource := len(resource.Resource) > 0 - hasGroup := len(resource.Group) > 0 - hasVersion := len(resource.Version) > 0 - - if !hasResource { - return nil, fmt.Errorf("a resource must be present, got: %v", resource) - } - - ret := []schema.GroupVersionKind{} - switch { - // fully qualified. Find the exact match - case hasGroup && hasVersion: - kind, exists := m.resourceToKind[resource] - if exists { - ret = append(ret, kind) - } - - case hasGroup: - foundExactMatch := false - requestedGroupResource := resource.GroupResource() - for currResource, currKind := range m.resourceToKind { - if currResource.GroupResource() == requestedGroupResource { - foundExactMatch = true - ret = append(ret, currKind) - } - } - - // if you didn't find an exact match, match on group prefixing. 
This allows storageclass.storage to match - // storageclass.storage.k8s.io - if !foundExactMatch { - for currResource, currKind := range m.resourceToKind { - if !strings.HasPrefix(currResource.Group, requestedGroupResource.Group) { - continue - } - if currResource.Resource == requestedGroupResource.Resource { - ret = append(ret, currKind) - } - } - - } - - case hasVersion: - for currResource, currKind := range m.resourceToKind { - if currResource.Version == resource.Version && currResource.Resource == resource.Resource { - ret = append(ret, currKind) - } - } - - default: - for currResource, currKind := range m.resourceToKind { - if currResource.Resource == resource.Resource { - ret = append(ret, currKind) - } - } - } - - if len(ret) == 0 { - return nil, &NoResourceMatchError{PartialResource: input} - } - - sort.Sort(kindByPreferredGroupVersion{ret, m.defaultGroupVersions}) - return ret, nil -} - -func (m *DefaultRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - kinds, err := m.KindsFor(resource) - if err != nil { - return schema.GroupVersionKind{}, err - } - if len(kinds) == 1 { - return kinds[0], nil - } - - return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds} -} - -type kindByPreferredGroupVersion struct { - list []schema.GroupVersionKind - sortOrder []schema.GroupVersion -} - -func (o kindByPreferredGroupVersion) Len() int { return len(o.list) } -func (o kindByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] } -func (o kindByPreferredGroupVersion) Less(i, j int) bool { - lhs := o.list[i] - rhs := o.list[j] - if lhs == rhs { - return false - } - - if lhs.GroupVersion() == rhs.GroupVersion() { - return lhs.Kind < rhs.Kind - } - - // otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order - lhsIndex := -1 - rhsIndex := -1 - - for i := range o.sortOrder { - if o.sortOrder[i] == lhs.GroupVersion() { - lhsIndex = i - } - if o.sortOrder[i] == rhs.GroupVersion() { - rhsIndex = i - } - } - - if rhsIndex == -1 { - return true - } - - return lhsIndex < rhsIndex -} - -type resourceByPreferredGroupVersion struct { - list []schema.GroupVersionResource - sortOrder []schema.GroupVersion -} - -func (o resourceByPreferredGroupVersion) Len() int { return len(o.list) } -func (o resourceByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] } -func (o resourceByPreferredGroupVersion) Less(i, j int) bool { - lhs := o.list[i] - rhs := o.list[j] - if lhs == rhs { - return false - } - - if lhs.GroupVersion() == rhs.GroupVersion() { - return lhs.Resource < rhs.Resource - } - - // otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order - lhsIndex := -1 - rhsIndex := -1 - - for i := range o.sortOrder { - if o.sortOrder[i] == lhs.GroupVersion() { - lhsIndex = i - } - if o.sortOrder[i] == rhs.GroupVersion() { - rhsIndex = i - } - } - - if rhsIndex == -1 { - return true - } - - return lhsIndex < rhsIndex -} - -// RESTMapping returns a struct representing the resource path and conversion interfaces a -// RESTClient should use to operate on the provided group/kind in order of versions. If a version search -// order is not provided, the search order provided to DefaultRESTMapper will be used to resolve which -// version should be used to access the named group/kind. 
-func (m *DefaultRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { - mappings, err := m.RESTMappings(gk, versions...) - if err != nil { - return nil, err - } - if len(mappings) == 0 { - return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions} - } - // since we rely on RESTMappings method - // take the first match and return to the caller - // as this was the existing behavior. - return mappings[0], nil -} - -// RESTMappings returns the RESTMappings for the provided group kind. If a version search order -// is not provided, the search order provided to DefaultRESTMapper will be used. -func (m *DefaultRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { - mappings := make([]*RESTMapping, 0) - potentialGVK := make([]schema.GroupVersionKind, 0) - hadVersion := false - - // Pick an appropriate version - for _, version := range versions { - if len(version) == 0 || version == runtime.APIVersionInternal { - continue - } - currGVK := gk.WithVersion(version) - hadVersion = true - if _, ok := m.kindToPluralResource[currGVK]; ok { - potentialGVK = append(potentialGVK, currGVK) - break - } - } - // Use the default preferred versions - if !hadVersion && len(potentialGVK) == 0 { - for _, gv := range m.defaultGroupVersions { - if gv.Group != gk.Group { - continue - } - potentialGVK = append(potentialGVK, gk.WithVersion(gv.Version)) - } - } - - if len(potentialGVK) == 0 { - return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions} - } - - for _, gvk := range potentialGVK { - //Ensure we have a REST mapping - res, ok := m.kindToPluralResource[gvk] - if !ok { - continue - } - - // Ensure we have a REST scope - scope, ok := m.kindToScope[gvk] - if !ok { - return nil, fmt.Errorf("the provided version %q and kind %q cannot be mapped to a supported scope", gvk.GroupVersion(), gvk.Kind) - } - - mappings = append(mappings, &RESTMapping{ - Resource: res, - GroupVersionKind: gvk, - Scope: scope, - }) - } - - if len(mappings) == 0 { - return nil, &NoResourceMatchError{PartialResource: schema.GroupVersionResource{Group: gk.Group, Resource: gk.Kind}} - } - return mappings, nil -} - -// MaybeResetRESTMapper calls Reset() on the mapper if it is a ResettableRESTMapper. -func MaybeResetRESTMapper(mapper RESTMapper) { - m, ok := mapper.(ResettableRESTMapper) - if ok { - m.Reset() - } -} diff --git a/src/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme/scheme.go b/src/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme/scheme.go deleted file mode 100644 index cff91d3da..000000000 --- a/src/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme/scheme.go +++ /dev/null @@ -1,129 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package unstructuredscheme - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer/json" - "k8s.io/apimachinery/pkg/runtime/serializer/versioning" -) - -var scheme = runtime.NewScheme() - -// NewUnstructuredNegotiatedSerializer returns a simple, negotiated serializer -func NewUnstructuredNegotiatedSerializer() runtime.NegotiatedSerializer { - return unstructuredNegotiatedSerializer{ - scheme: scheme, - typer: NewUnstructuredObjectTyper(), - creator: NewUnstructuredCreator(), - } -} - -type unstructuredNegotiatedSerializer struct { - scheme *runtime.Scheme - typer runtime.ObjectTyper - creator runtime.ObjectCreater -} - -func (s unstructuredNegotiatedSerializer) SupportedMediaTypes() []runtime.SerializerInfo { - return []runtime.SerializerInfo{ - { - MediaType: "application/json", - MediaTypeType: "application", - MediaTypeSubType: "json", - EncodesAsText: true, - Serializer: json.NewSerializer(json.DefaultMetaFactory, s.creator, s.typer, false), - PrettySerializer: json.NewSerializer(json.DefaultMetaFactory, s.creator, s.typer, true), - StreamSerializer: &runtime.StreamSerializerInfo{ - EncodesAsText: true, - Serializer: json.NewSerializer(json.DefaultMetaFactory, s.creator, s.typer, false), - Framer: json.Framer, - }, - }, - { - MediaType: "application/yaml", - MediaTypeType: "application", - MediaTypeSubType: "yaml", - EncodesAsText: true, - Serializer: json.NewYAMLSerializer(json.DefaultMetaFactory, s.creator, s.typer), - }, - } -} - -func (s unstructuredNegotiatedSerializer) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder { - return versioning.NewDefaultingCodecForScheme(s.scheme, encoder, nil, gv, nil) -} - -func (s unstructuredNegotiatedSerializer) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder { - return versioning.NewDefaultingCodecForScheme(s.scheme, nil, decoder, nil, gv) -} - -type unstructuredObjectTyper struct { -} - -// NewUnstructuredObjectTyper returns an object typer that can deal with unstructured things -func NewUnstructuredObjectTyper() runtime.ObjectTyper { - return unstructuredObjectTyper{} -} - -func (t unstructuredObjectTyper) ObjectKinds(obj runtime.Object) ([]schema.GroupVersionKind, bool, error) { - // Delegate for things other than Unstructured. 
- if _, ok := obj.(runtime.Unstructured); !ok { - return nil, false, fmt.Errorf("cannot type %T", obj) - } - gvk := obj.GetObjectKind().GroupVersionKind() - if len(gvk.Kind) == 0 { - return nil, false, runtime.NewMissingKindErr("object has no kind field ") - } - if len(gvk.Version) == 0 { - return nil, false, runtime.NewMissingVersionErr("object has no apiVersion field") - } - - return []schema.GroupVersionKind{obj.GetObjectKind().GroupVersionKind()}, false, nil -} - -func (t unstructuredObjectTyper) Recognizes(gvk schema.GroupVersionKind) bool { - return true -} - -type unstructuredCreator struct{} - -// NewUnstructuredCreator returns a simple object creator that always returns an unstructured -func NewUnstructuredCreator() runtime.ObjectCreater { - return unstructuredCreator{} -} - -func (c unstructuredCreator) New(kind schema.GroupVersionKind) (runtime.Object, error) { - ret := &unstructured.Unstructured{} - ret.SetGroupVersionKind(kind) - return ret, nil -} - -type unstructuredDefaulter struct { -} - -// NewUnstructuredDefaulter returns defaulter suitable for unstructured types that doesn't default anything -func NewUnstructuredDefaulter() runtime.ObjectDefaulter { - return unstructuredDefaulter{} -} - -func (d unstructuredDefaulter) Default(in runtime.Object) { -} diff --git a/src/vendor/k8s.io/apimachinery/pkg/util/duration/duration.go b/src/vendor/k8s.io/apimachinery/pkg/util/duration/duration.go deleted file mode 100644 index c31b2a0cb..000000000 --- a/src/vendor/k8s.io/apimachinery/pkg/util/duration/duration.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package duration - -import ( - "fmt" - "time" -) - -// ShortHumanDuration returns a succint representation of the provided duration -// with limited precision for consumption by humans. -func ShortHumanDuration(d time.Duration) string { - // Allow deviation no more than 2 seconds(excluded) to tolerate machine time - // inconsistence, it can be considered as almost now. - if seconds := int(d.Seconds()); seconds < -1 { - return "" - } else if seconds < 0 { - return "0s" - } else if seconds < 60 { - return fmt.Sprintf("%ds", seconds) - } else if minutes := int(d.Minutes()); minutes < 60 { - return fmt.Sprintf("%dm", minutes) - } else if hours := int(d.Hours()); hours < 24 { - return fmt.Sprintf("%dh", hours) - } else if hours < 24*365 { - return fmt.Sprintf("%dd", hours/24) - } - return fmt.Sprintf("%dy", int(d.Hours()/24/365)) -} - -// HumanDuration returns a succint representation of the provided duration -// with limited precision for consumption by humans. It provides ~2-3 significant -// figures of duration. -func HumanDuration(d time.Duration) string { - // Allow deviation no more than 2 seconds(excluded) to tolerate machine time - // inconsistence, it can be considered as almost now. 
- if seconds := int(d.Seconds()); seconds < -1 { - return "" - } else if seconds < 0 { - return "0s" - } else if seconds < 60*2 { - return fmt.Sprintf("%ds", seconds) - } - minutes := int(d / time.Minute) - if minutes < 10 { - s := int(d/time.Second) % 60 - if s == 0 { - return fmt.Sprintf("%dm", minutes) - } - return fmt.Sprintf("%dm%ds", minutes, s) - } else if minutes < 60*3 { - return fmt.Sprintf("%dm", minutes) - } - hours := int(d / time.Hour) - if hours < 8 { - m := int(d/time.Minute) % 60 - if m == 0 { - return fmt.Sprintf("%dh", hours) - } - return fmt.Sprintf("%dh%dm", hours, m) - } else if hours < 48 { - return fmt.Sprintf("%dh", hours) - } else if hours < 24*8 { - h := hours % 24 - if h == 0 { - return fmt.Sprintf("%dd", hours/24) - } - return fmt.Sprintf("%dd%dh", hours/24, h) - } else if hours < 24*365*2 { - return fmt.Sprintf("%dd", hours/24) - } else if hours < 24*365*8 { - dy := int(hours/24) % 365 - if dy == 0 { - return fmt.Sprintf("%dy", hours/24/365) - } - return fmt.Sprintf("%dy%dd", hours/24/365, dy) - } - return fmt.Sprintf("%dy", int(hours/24/365)) -} diff --git a/src/vendor/k8s.io/cli-runtime/LICENSE b/src/vendor/k8s.io/cli-runtime/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/src/vendor/k8s.io/cli-runtime/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags.go b/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags.go deleted file mode 100644 index acf576a36..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags.go +++ /dev/null @@ -1,231 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package genericclioptions - -import ( - "github.com/spf13/pflag" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/cli-runtime/pkg/resource" -) - -// ResourceBuilderFlags are flags for finding resources -// TODO(juanvallejo): wire --local flag from commands through -type ResourceBuilderFlags struct { - FileNameFlags *FileNameFlags - - LabelSelector *string - FieldSelector *string - AllNamespaces *bool - All *bool - Local *bool - - Scheme *runtime.Scheme - Latest bool - StopOnFirstError bool -} - -// NewResourceBuilderFlags returns a default ResourceBuilderFlags -func NewResourceBuilderFlags() *ResourceBuilderFlags { - filenames := []string{} - - return &ResourceBuilderFlags{ - FileNameFlags: &FileNameFlags{ - Usage: "identifying the resource.", - Filenames: &filenames, - Recursive: boolPtr(true), - }, - } -} - -// WithFile sets the FileNameFlags. -// If recurse is set, it will process directory recursively. Useful when you want to manage related manifests -// organized within the same directory. -func (o *ResourceBuilderFlags) WithFile(recurse bool, files ...string) *ResourceBuilderFlags { - o.FileNameFlags = &FileNameFlags{ - Usage: "identifying the resource.", - Filenames: &files, - Recursive: boolPtr(recurse), - } - - return o -} - -// WithLabelSelector sets the LabelSelector flag -func (o *ResourceBuilderFlags) WithLabelSelector(selector string) *ResourceBuilderFlags { - o.LabelSelector = &selector - return o -} - -// WithFieldSelector sets the FieldSelector flag -func (o *ResourceBuilderFlags) WithFieldSelector(selector string) *ResourceBuilderFlags { - o.FieldSelector = &selector - return o -} - -// WithAllNamespaces sets the AllNamespaces flag -func (o *ResourceBuilderFlags) WithAllNamespaces(defaultVal bool) *ResourceBuilderFlags { - o.AllNamespaces = &defaultVal - return o -} - -// WithAll sets the All flag -func (o *ResourceBuilderFlags) WithAll(defaultVal bool) *ResourceBuilderFlags { - o.All = &defaultVal - return o -} - -// WithLocal sets the Local flag -func (o *ResourceBuilderFlags) WithLocal(defaultVal bool) *ResourceBuilderFlags { - o.Local = &defaultVal - return o -} - -// WithScheme sets the Scheme flag -func (o *ResourceBuilderFlags) WithScheme(scheme *runtime.Scheme) *ResourceBuilderFlags { - o.Scheme = scheme - return o -} - -// WithLatest sets the Latest flag -func (o *ResourceBuilderFlags) WithLatest() *ResourceBuilderFlags { - o.Latest = true - return o -} - -// StopOnError sets the StopOnFirstError flag -func (o *ResourceBuilderFlags) StopOnError() *ResourceBuilderFlags { - o.StopOnFirstError = true - return o -} - -// AddFlags registers flags for finding resources -func (o *ResourceBuilderFlags) AddFlags(flagset *pflag.FlagSet) { - o.FileNameFlags.AddFlags(flagset) - - if o.LabelSelector != nil { - flagset.StringVarP(o.LabelSelector, "selector", "l", *o.LabelSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)") - } - if o.FieldSelector != nil { - flagset.StringVar(o.FieldSelector, "field-selector", *o.FieldSelector, "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.") - } - if o.AllNamespaces != nil { - flagset.BoolVarP(o.AllNamespaces, "all-namespaces", "A", *o.AllNamespaces, "If present, list the requested object(s) across all namespaces. 
Namespace in current context is ignored even if specified with --namespace.") - } - if o.All != nil { - flagset.BoolVar(o.All, "all", *o.All, "Select all resources in the namespace of the specified resource types") - } - if o.Local != nil { - flagset.BoolVar(o.Local, "local", *o.Local, "If true, annotation will NOT contact api-server but run locally.") - } -} - -// ToBuilder gives you back a resource finder to visit resources that are located -func (o *ResourceBuilderFlags) ToBuilder(restClientGetter RESTClientGetter, resources []string) ResourceFinder { - namespace, enforceNamespace, namespaceErr := restClientGetter.ToRawKubeConfigLoader().Namespace() - - builder := resource.NewBuilder(restClientGetter). - NamespaceParam(namespace).DefaultNamespace() - - if o.AllNamespaces != nil { - builder.AllNamespaces(*o.AllNamespaces) - } - - if o.Scheme != nil { - builder.WithScheme(o.Scheme, o.Scheme.PrioritizedVersionsAllGroups()...) - } else { - builder.Unstructured() - } - - if o.FileNameFlags != nil { - opts := o.FileNameFlags.ToOptions() - builder.FilenameParam(enforceNamespace, &opts) - } - - if o.Local == nil || !*o.Local { - // resource type/name tuples only work non-local - if o.All != nil { - builder.ResourceTypeOrNameArgs(*o.All, resources...) - } else { - builder.ResourceTypeOrNameArgs(false, resources...) - } - // label selectors only work non-local (for now) - if o.LabelSelector != nil { - builder.LabelSelectorParam(*o.LabelSelector) - } - // field selectors only work non-local (forever) - if o.FieldSelector != nil { - builder.FieldSelectorParam(*o.FieldSelector) - } - // latest only works non-local (forever) - if o.Latest { - builder.Latest() - } - - } else { - builder.Local() - - if len(resources) > 0 { - builder.AddError(resource.LocalResourceError) - } - } - - if !o.StopOnFirstError { - builder.ContinueOnError() - } - - return &ResourceFindBuilderWrapper{ - builder: builder. - Flatten(). // I think we're going to recommend this everywhere - AddError(namespaceErr), - } -} - -// ResourceFindBuilderWrapper wraps a builder in an interface -type ResourceFindBuilderWrapper struct { - builder *resource.Builder -} - -// Do finds you resources to check -func (b *ResourceFindBuilderWrapper) Do() resource.Visitor { - return b.builder.Do() -} - -// ResourceFinder allows mocking the resource builder -// TODO resource builders needs to become more interfacey -type ResourceFinder interface { - Do() resource.Visitor -} - -// ResourceFinderFunc is a handy way to make a ResourceFinder -type ResourceFinderFunc func() resource.Visitor - -// Do implements ResourceFinder -func (fn ResourceFinderFunc) Do() resource.Visitor { - return fn() -} - -// ResourceFinderForResult skins a visitor for re-use as a ResourceFinder -func ResourceFinderForResult(result resource.Visitor) ResourceFinder { - return ResourceFinderFunc(func() resource.Visitor { - return result - }) -} - -func boolPtr(val bool) *bool { - return &val -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags_fake.go b/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags_fake.go deleted file mode 100644 index d43b0c25e..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags_fake.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package genericclioptions - -import ( - "k8s.io/cli-runtime/pkg/resource" -) - -// NewSimpleFakeResourceFinder builds a super simple ResourceFinder that just iterates over the objects you provided -func NewSimpleFakeResourceFinder(infos ...*resource.Info) ResourceFinder { - return &fakeResourceFinder{ - Infos: infos, - } -} - -type fakeResourceFinder struct { - Infos []*resource.Info -} - -// Do implements the interface -func (f *fakeResourceFinder) Do() resource.Visitor { - return &fakeResourceResult{ - Infos: f.Infos, - } -} - -type fakeResourceResult struct { - Infos []*resource.Info -} - -// Visit just iterates over info -func (r *fakeResourceResult) Visit(fn resource.VisitorFunc) error { - for _, info := range r.Infos { - err := fn(info, nil) - if err != nil { - return err - } - } - return nil -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/client_config.go b/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/client_config.go deleted file mode 100644 index 0e22d7140..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/client_config.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package genericclioptions - -import ( - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" -) - -var ( - // ErrEmptyConfig is the error message to be displayed if the configuration info is missing or incomplete - ErrEmptyConfig = clientcmd.NewEmptyConfigError(`Missing or incomplete configuration info. Please point to an existing, complete config file: - - - 1. Via the command-line flag --kubeconfig - 2. Via the KUBECONFIG environment variable - 3. 
In your home directory as ~/.kube/config - -To view or setup config directly use the 'config' command.`) -) - -var _ = clientcmd.ClientConfig(&clientConfig{}) - -type clientConfig struct { - defaultClientConfig clientcmd.ClientConfig -} - -func (c *clientConfig) RawConfig() (clientcmdapi.Config, error) { - config, err := c.defaultClientConfig.RawConfig() - // replace client-go's ErrEmptyConfig error with our custom, more verbose version - if clientcmd.IsEmptyConfig(err) { - return config, ErrEmptyConfig - } - return config, err -} - -func (c *clientConfig) ClientConfig() (*restclient.Config, error) { - config, err := c.defaultClientConfig.ClientConfig() - // replace client-go's ErrEmptyConfig error with our custom, more verbose version - if clientcmd.IsEmptyConfig(err) { - return config, ErrEmptyConfig - } - return config, err -} - -func (c *clientConfig) Namespace() (string, bool, error) { - namespace, ok, err := c.defaultClientConfig.Namespace() - // replace client-go's ErrEmptyConfig error with our custom, more verbose version - if clientcmd.IsEmptyConfig(err) { - return namespace, ok, ErrEmptyConfig - } - return namespace, ok, err -} - -func (c *clientConfig) ConfigAccess() clientcmd.ConfigAccess { - return c.defaultClientConfig.ConfigAccess() -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/command_headers.go b/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/command_headers.go deleted file mode 100644 index ed47e9994..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/command_headers.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package genericclioptions - -import ( - "net/http" - "strings" - - "github.com/google/uuid" - "github.com/spf13/cobra" -) - -const ( - kubectlCommandHeader = "Kubectl-Command" - kubectlSessionHeader = "Kubectl-Session" -) - -// CommandHeaderRoundTripper adds a layer around the standard -// round tripper to add Request headers before delegation. Implements -// the go standard library "http.RoundTripper" interface. -type CommandHeaderRoundTripper struct { - Delegate http.RoundTripper - Headers map[string]string -} - -// CommandHeaderRoundTripper adds Request headers before delegating to standard -// round tripper. These headers are kubectl command headers which -// detail the kubectl command. See SIG CLI KEP 859: -// -// https://github.com/kubernetes/enhancements/tree/master/keps/sig-cli/859-kubectl-headers -func (c *CommandHeaderRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - for header, value := range c.Headers { - req.Header.Set(header, value) - } - return c.Delegate.RoundTrip(req) -} - -// ParseCommandHeaders fills in a map of custom headers into the CommandHeaderRoundTripper. These -// headers are then filled into each request. 
For details on the custom headers see: -// -// https://github.com/kubernetes/enhancements/tree/master/keps/sig-cli/859-kubectl-headers -// -// Each call overwrites the previously parsed command headers (not additive). -// TODO(seans3): Parse/add flags removing PII from flag values. -func (c *CommandHeaderRoundTripper) ParseCommandHeaders(cmd *cobra.Command, args []string) { - if cmd == nil { - return - } - // Overwrites previously parsed command headers (headers not additive). - c.Headers = map[string]string{} - // Session identifier to aggregate multiple Requests from single kubectl command. - uid := uuid.New().String() - c.Headers[kubectlSessionHeader] = uid - // Iterate up the hierarchy of commands from the leaf command to create - // the full command string. Example: kubectl create secret generic - cmdStrs := []string{} - for cmd.HasParent() { - parent := cmd.Parent() - currName := strings.TrimSpace(cmd.Name()) - cmdStrs = append([]string{currName}, cmdStrs...) - cmd = parent - } - currName := strings.TrimSpace(cmd.Name()) - cmdStrs = append([]string{currName}, cmdStrs...) - if len(cmdStrs) > 0 { - c.Headers[kubectlCommandHeader] = strings.Join(cmdStrs, " ") - } -} - -// CancelRequest is propagated to the Delegate RoundTripper within -// if the wrapped RoundTripper implements this function. -func (c *CommandHeaderRoundTripper) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - // If possible, call "CancelRequest" on the wrapped Delegate RoundTripper. - if cr, ok := c.Delegate.(canceler); ok { - cr.CancelRequest(req) - } -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go b/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go deleted file mode 100644 index 754951fd6..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go +++ /dev/null @@ -1,466 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package genericclioptions - -import ( - "os" - "path/filepath" - "regexp" - "strings" - "sync" - "time" - - "github.com/spf13/pflag" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/client-go/discovery" - diskcached "k8s.io/client-go/discovery/cached/disk" - "k8s.io/client-go/rest" - "k8s.io/client-go/restmapper" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/util/homedir" - utilpointer "k8s.io/utils/pointer" -) - -const ( - flagClusterName = "cluster" - flagAuthInfoName = "user" - flagContext = "context" - flagNamespace = "namespace" - flagAPIServer = "server" - flagTLSServerName = "tls-server-name" - flagInsecure = "insecure-skip-tls-verify" - flagCertFile = "client-certificate" - flagKeyFile = "client-key" - flagCAFile = "certificate-authority" - flagBearerToken = "token" - flagImpersonate = "as" - flagImpersonateUID = "as-uid" - flagImpersonateGroup = "as-group" - flagUsername = "username" - flagPassword = "password" - flagTimeout = "request-timeout" - flagCacheDir = "cache-dir" -) - -// RESTClientGetter is an interface that the ConfigFlags describe to provide an easier way to mock for commands -// and eliminate the direct coupling to a struct type. Users may wish to duplicate this type in their own packages -// as per the golang type overlapping. -type RESTClientGetter interface { - // ToRESTConfig returns restconfig - ToRESTConfig() (*rest.Config, error) - // ToDiscoveryClient returns discovery client - ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) - // ToRESTMapper returns a restmapper - ToRESTMapper() (meta.RESTMapper, error) - // ToRawKubeConfigLoader return kubeconfig loader as-is - ToRawKubeConfigLoader() clientcmd.ClientConfig -} - -var _ RESTClientGetter = &ConfigFlags{} - -// ConfigFlags composes the set of values necessary -// for obtaining a REST client config -type ConfigFlags struct { - CacheDir *string - KubeConfig *string - - // config flags - ClusterName *string - AuthInfoName *string - Context *string - Namespace *string - APIServer *string - TLSServerName *string - Insecure *bool - CertFile *string - KeyFile *string - CAFile *string - BearerToken *string - Impersonate *string - ImpersonateUID *string - ImpersonateGroup *[]string - Username *string - Password *string - Timeout *string - // If non-nil, wrap config function can transform the Config - // before it is returned in ToRESTConfig function. - WrapConfigFn func(*rest.Config) *rest.Config - - clientConfig clientcmd.ClientConfig - clientConfigLock sync.Mutex - - restMapper meta.RESTMapper - restMapperLock sync.Mutex - - discoveryClient discovery.CachedDiscoveryInterface - discoveryClientLock sync.Mutex - - // If set to true, will use persistent client config, rest mapper, discovery client, and - // propagate them to the places that need them, rather than - // instantiating them multiple times. - usePersistentConfig bool - // Allows increasing burst used for discovery, this is useful - // in clusters with many registered resources - discoveryBurst int - // Allows increasing qps used for discovery, this is useful - // in clusters with many registered resources - discoveryQPS float32 -} - -// ToRESTConfig implements RESTClientGetter. -// Returns a REST client configuration based on a provided path -// to a .kubeconfig file, loading rules, and config flag overrides. -// Expects the AddFlags method to have been called. If WrapConfigFn -// is non-nil this function can transform config before return. 
-func (f *ConfigFlags) ToRESTConfig() (*rest.Config, error) { - c, err := f.ToRawKubeConfigLoader().ClientConfig() - if err != nil { - return nil, err - } - if f.WrapConfigFn != nil { - return f.WrapConfigFn(c), nil - } - return c, nil -} - -// ToRawKubeConfigLoader binds config flag values to config overrides -// Returns an interactive clientConfig if the password flag is enabled, -// or a non-interactive clientConfig otherwise. -func (f *ConfigFlags) ToRawKubeConfigLoader() clientcmd.ClientConfig { - if f.usePersistentConfig { - return f.toRawKubePersistentConfigLoader() - } - return f.toRawKubeConfigLoader() -} - -func (f *ConfigFlags) toRawKubeConfigLoader() clientcmd.ClientConfig { - loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() - // use the standard defaults for this client command - // DEPRECATED: remove and replace with something more accurate - loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig - - if f.KubeConfig != nil { - loadingRules.ExplicitPath = *f.KubeConfig - } - - overrides := &clientcmd.ConfigOverrides{ClusterDefaults: clientcmd.ClusterDefaults} - - // bind auth info flag values to overrides - if f.CertFile != nil { - overrides.AuthInfo.ClientCertificate = *f.CertFile - } - if f.KeyFile != nil { - overrides.AuthInfo.ClientKey = *f.KeyFile - } - if f.BearerToken != nil { - overrides.AuthInfo.Token = *f.BearerToken - } - if f.Impersonate != nil { - overrides.AuthInfo.Impersonate = *f.Impersonate - } - if f.ImpersonateUID != nil { - overrides.AuthInfo.ImpersonateUID = *f.ImpersonateUID - } - if f.ImpersonateGroup != nil { - overrides.AuthInfo.ImpersonateGroups = *f.ImpersonateGroup - } - if f.Username != nil { - overrides.AuthInfo.Username = *f.Username - } - if f.Password != nil { - overrides.AuthInfo.Password = *f.Password - } - - // bind cluster flags - if f.APIServer != nil { - overrides.ClusterInfo.Server = *f.APIServer - } - if f.TLSServerName != nil { - overrides.ClusterInfo.TLSServerName = *f.TLSServerName - } - if f.CAFile != nil { - overrides.ClusterInfo.CertificateAuthority = *f.CAFile - } - if f.Insecure != nil { - overrides.ClusterInfo.InsecureSkipTLSVerify = *f.Insecure - } - - // bind context flags - if f.Context != nil { - overrides.CurrentContext = *f.Context - } - if f.ClusterName != nil { - overrides.Context.Cluster = *f.ClusterName - } - if f.AuthInfoName != nil { - overrides.Context.AuthInfo = *f.AuthInfoName - } - if f.Namespace != nil { - overrides.Context.Namespace = *f.Namespace - } - - if f.Timeout != nil { - overrides.Timeout = *f.Timeout - } - - // we only have an interactive prompt when a password is allowed - if f.Password == nil { - return &clientConfig{clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides)} - } - return &clientConfig{clientcmd.NewInteractiveDeferredLoadingClientConfig(loadingRules, overrides, os.Stdin)} -} - -// toRawKubePersistentConfigLoader binds config flag values to config overrides -// Returns a persistent clientConfig for propagation. -func (f *ConfigFlags) toRawKubePersistentConfigLoader() clientcmd.ClientConfig { - f.clientConfigLock.Lock() - defer f.clientConfigLock.Unlock() - - if f.clientConfig == nil { - f.clientConfig = f.toRawKubeConfigLoader() - } - - return f.clientConfig -} - -// ToDiscoveryClient implements RESTClientGetter. -// Expects the AddFlags method to have been called. -// Returns a CachedDiscoveryInterface using a computed RESTConfig. 
-func (f *ConfigFlags) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { - if f.usePersistentConfig { - return f.toPersistentDiscoveryClient() - } - return f.toDiscoveryClient() -} - -func (f *ConfigFlags) toPersistentDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { - f.discoveryClientLock.Lock() - defer f.discoveryClientLock.Unlock() - - if f.discoveryClient == nil { - discoveryClient, err := f.toDiscoveryClient() - if err != nil { - return nil, err - } - f.discoveryClient = discoveryClient - } - return f.discoveryClient, nil -} - -func (f *ConfigFlags) toDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { - config, err := f.ToRESTConfig() - if err != nil { - return nil, err - } - - config.Burst = f.discoveryBurst - config.QPS = f.discoveryQPS - - cacheDir := getDefaultCacheDir() - - // retrieve a user-provided value for the "cache-dir" - // override httpCacheDir and discoveryCacheDir if user-value is given. - // user-provided value has higher precedence than default - // and KUBECACHEDIR environment variable. - if f.CacheDir != nil && *f.CacheDir != "" && *f.CacheDir != getDefaultCacheDir() { - cacheDir = *f.CacheDir - } - - httpCacheDir := filepath.Join(cacheDir, "http") - discoveryCacheDir := computeDiscoverCacheDir(filepath.Join(cacheDir, "discovery"), config.Host) - - return diskcached.NewCachedDiscoveryClientForConfig(config, discoveryCacheDir, httpCacheDir, time.Duration(6*time.Hour)) -} - -// getDefaultCacheDir returns default caching directory path. -// it first looks at KUBECACHEDIR env var if it is set, otherwise -// it returns standard kube cache dir. -func getDefaultCacheDir() string { - if kcd := os.Getenv("KUBECACHEDIR"); kcd != "" { - return kcd - } - - return filepath.Join(homedir.HomeDir(), ".kube", "cache") -} - -// ToRESTMapper returns a mapper. 
-func (f *ConfigFlags) ToRESTMapper() (meta.RESTMapper, error) { - if f.usePersistentConfig { - return f.toPersistentRESTMapper() - } - return f.toRESTMapper() -} - -func (f *ConfigFlags) toPersistentRESTMapper() (meta.RESTMapper, error) { - f.restMapperLock.Lock() - defer f.restMapperLock.Unlock() - - if f.restMapper == nil { - restMapper, err := f.toRESTMapper() - if err != nil { - return nil, err - } - f.restMapper = restMapper - } - return f.restMapper, nil -} - -func (f *ConfigFlags) toRESTMapper() (meta.RESTMapper, error) { - discoveryClient, err := f.ToDiscoveryClient() - if err != nil { - return nil, err - } - - mapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient) - expander := restmapper.NewShortcutExpander(mapper, discoveryClient) - return expander, nil -} - -// AddFlags binds client configuration flags to a given flagset -func (f *ConfigFlags) AddFlags(flags *pflag.FlagSet) { - if f.KubeConfig != nil { - flags.StringVar(f.KubeConfig, "kubeconfig", *f.KubeConfig, "Path to the kubeconfig file to use for CLI requests.") - } - if f.CacheDir != nil { - flags.StringVar(f.CacheDir, flagCacheDir, *f.CacheDir, "Default cache directory") - } - - // add config options - if f.CertFile != nil { - flags.StringVar(f.CertFile, flagCertFile, *f.CertFile, "Path to a client certificate file for TLS") - } - if f.KeyFile != nil { - flags.StringVar(f.KeyFile, flagKeyFile, *f.KeyFile, "Path to a client key file for TLS") - } - if f.BearerToken != nil { - flags.StringVar(f.BearerToken, flagBearerToken, *f.BearerToken, "Bearer token for authentication to the API server") - } - if f.Impersonate != nil { - flags.StringVar(f.Impersonate, flagImpersonate, *f.Impersonate, "Username to impersonate for the operation. User could be a regular user or a service account in a namespace.") - } - if f.ImpersonateUID != nil { - flags.StringVar(f.ImpersonateUID, flagImpersonateUID, *f.ImpersonateUID, "UID to impersonate for the operation.") - } - if f.ImpersonateGroup != nil { - flags.StringArrayVar(f.ImpersonateGroup, flagImpersonateGroup, *f.ImpersonateGroup, "Group to impersonate for the operation, this flag can be repeated to specify multiple groups.") - } - if f.Username != nil { - flags.StringVar(f.Username, flagUsername, *f.Username, "Username for basic authentication to the API server") - } - if f.Password != nil { - flags.StringVar(f.Password, flagPassword, *f.Password, "Password for basic authentication to the API server") - } - if f.ClusterName != nil { - flags.StringVar(f.ClusterName, flagClusterName, *f.ClusterName, "The name of the kubeconfig cluster to use") - } - if f.AuthInfoName != nil { - flags.StringVar(f.AuthInfoName, flagAuthInfoName, *f.AuthInfoName, "The name of the kubeconfig user to use") - } - if f.Namespace != nil { - flags.StringVarP(f.Namespace, flagNamespace, "n", *f.Namespace, "If present, the namespace scope for this CLI request") - } - if f.Context != nil { - flags.StringVar(f.Context, flagContext, *f.Context, "The name of the kubeconfig context to use") - } - - if f.APIServer != nil { - flags.StringVarP(f.APIServer, flagAPIServer, "s", *f.APIServer, "The address and port of the Kubernetes API server") - } - if f.TLSServerName != nil { - flags.StringVar(f.TLSServerName, flagTLSServerName, *f.TLSServerName, "Server name to use for server certificate validation. 
If it is not provided, the hostname used to contact the server is used") - } - if f.Insecure != nil { - flags.BoolVar(f.Insecure, flagInsecure, *f.Insecure, "If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure") - } - if f.CAFile != nil { - flags.StringVar(f.CAFile, flagCAFile, *f.CAFile, "Path to a cert file for the certificate authority") - } - if f.Timeout != nil { - flags.StringVar(f.Timeout, flagTimeout, *f.Timeout, "The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests.") - } -} - -// WithDeprecatedPasswordFlag enables the username and password config flags -func (f *ConfigFlags) WithDeprecatedPasswordFlag() *ConfigFlags { - f.Username = utilpointer.String("") - f.Password = utilpointer.String("") - return f -} - -// WithDiscoveryBurst sets the RESTClient burst for discovery. -func (f *ConfigFlags) WithDiscoveryBurst(discoveryBurst int) *ConfigFlags { - f.discoveryBurst = discoveryBurst - return f -} - -// WithDiscoveryQPS sets the RESTClient QPS for discovery. -func (f *ConfigFlags) WithDiscoveryQPS(discoveryQPS float32) *ConfigFlags { - f.discoveryQPS = discoveryQPS - return f -} - -// WithWrapConfigFn allows providing a wrapper function for the client Config. -func (f *ConfigFlags) WithWrapConfigFn(wrapConfigFn func(*rest.Config) *rest.Config) *ConfigFlags { - f.WrapConfigFn = wrapConfigFn - return f -} - -// NewConfigFlags returns ConfigFlags with default values set -func NewConfigFlags(usePersistentConfig bool) *ConfigFlags { - impersonateGroup := []string{} - insecure := false - - return &ConfigFlags{ - Insecure: &insecure, - Timeout: utilpointer.String("0"), - KubeConfig: utilpointer.String(""), - - CacheDir: utilpointer.String(getDefaultCacheDir()), - ClusterName: utilpointer.String(""), - AuthInfoName: utilpointer.String(""), - Context: utilpointer.String(""), - Namespace: utilpointer.String(""), - APIServer: utilpointer.String(""), - TLSServerName: utilpointer.String(""), - CertFile: utilpointer.String(""), - KeyFile: utilpointer.String(""), - CAFile: utilpointer.String(""), - BearerToken: utilpointer.String(""), - Impersonate: utilpointer.String(""), - ImpersonateUID: utilpointer.String(""), - ImpersonateGroup: &impersonateGroup, - - usePersistentConfig: usePersistentConfig, - // The more groups you have, the more discovery requests you need to make. - // with a burst of 300, we will not be rate-limiting for most clusters but - // the safeguard will still be here. This config is only used for discovery. - discoveryBurst: 300, - } -} - -// overlyCautiousIllegalFileCharacters matches characters that *might* not be supported. Windows is really restrictive, so this is really restrictive -var overlyCautiousIllegalFileCharacters = regexp.MustCompile(`[^(\w/.)]`) - -// computeDiscoverCacheDir takes the parentDir and the host and comes up with a "usually non-colliding" name. -func computeDiscoverCacheDir(parentDir, host string) string { - // strip the optional scheme from host if its there: - schemelessHost := strings.Replace(strings.Replace(host, "https://", "", 1), "http://", "", 1) - // now do a simple collapse of non-AZ09 characters. Collisions are possible but unlikely. 
Even if we do collide the problem is short lived - safeHost := overlyCautiousIllegalFileCharacters.ReplaceAllString(schemelessHost, "_") - return filepath.Join(parentDir, safeHost) -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags_fake.go b/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags_fake.go deleted file mode 100644 index 7a9648155..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags_fake.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package genericclioptions - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/client-go/discovery" - "k8s.io/client-go/rest" - "k8s.io/client-go/restmapper" - "k8s.io/client-go/tools/clientcmd" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" -) - -// TestConfigFlags contains clientConfig struct -// and interfaces that implements RESTClientGetter -type TestConfigFlags struct { - clientConfig clientcmd.ClientConfig - discoveryClient discovery.CachedDiscoveryInterface - restMapper meta.RESTMapper -} - -// ToRawKubeConfigLoader implements RESTClientGetter -// Returns a clientconfig if it's set -func (f *TestConfigFlags) ToRawKubeConfigLoader() clientcmd.ClientConfig { - if f.clientConfig == nil { - panic("attempt to obtain a test RawKubeConfigLoader with no clientConfig specified") - } - return f.clientConfig -} - -// ToRESTConfig implements RESTClientGetter. -// Returns a REST client configuration based on a provided path -// to a .kubeconfig file, loading rules, and config flag overrides. -// Expects the AddFlags method to have been called. -func (f *TestConfigFlags) ToRESTConfig() (*rest.Config, error) { - return f.ToRawKubeConfigLoader().ClientConfig() -} - -// ToDiscoveryClient implements RESTClientGetter. -// Returns a CachedDiscoveryInterface -func (f *TestConfigFlags) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { - return f.discoveryClient, nil -} - -// ToRESTMapper implements RESTClientGetter. -// Returns a mapper. 
-func (f *TestConfigFlags) ToRESTMapper() (meta.RESTMapper, error) { - if f.restMapper != nil { - return f.restMapper, nil - } - if f.discoveryClient != nil { - mapper := restmapper.NewDeferredDiscoveryRESTMapper(f.discoveryClient) - expander := restmapper.NewShortcutExpander(mapper, f.discoveryClient) - return expander, nil - } - return nil, fmt.Errorf("no restmapper") -} - -// WithClientConfig sets the clientConfig flag -func (f *TestConfigFlags) WithClientConfig(clientConfig clientcmd.ClientConfig) *TestConfigFlags { - f.clientConfig = clientConfig - return f -} - -// WithRESTMapper sets the restMapper flag -func (f *TestConfigFlags) WithRESTMapper(mapper meta.RESTMapper) *TestConfigFlags { - f.restMapper = mapper - return f -} - -// WithDiscoveryClient sets the discoveryClient flag -func (f *TestConfigFlags) WithDiscoveryClient(c discovery.CachedDiscoveryInterface) *TestConfigFlags { - f.discoveryClient = c - return f -} - -// WithNamespace sets the clientConfig flag by modifying delagate and namespace -func (f *TestConfigFlags) WithNamespace(ns string) *TestConfigFlags { - if f.clientConfig == nil { - panic("attempt to obtain a test RawKubeConfigLoader with no clientConfig specified") - } - f.clientConfig = &namespacedClientConfig{ - delegate: f.clientConfig, - namespace: ns, - } - return f -} - -// NewTestConfigFlags builds a TestConfigFlags struct to test ConfigFlags -func NewTestConfigFlags() *TestConfigFlags { - return &TestConfigFlags{} -} - -type namespacedClientConfig struct { - delegate clientcmd.ClientConfig - namespace string -} - -func (c *namespacedClientConfig) Namespace() (string, bool, error) { - return c.namespace, len(c.namespace) > 0, nil -} - -func (c *namespacedClientConfig) RawConfig() (clientcmdapi.Config, error) { - return c.delegate.RawConfig() -} -func (c *namespacedClientConfig) ClientConfig() (*rest.Config, error) { - return c.delegate.ClientConfig() -} -func (c *namespacedClientConfig) ConfigAccess() clientcmd.ConfigAccess { - return c.delegate.ConfigAccess() -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/doc.go b/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/doc.go deleted file mode 100644 index 303da330b..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package genericclioptions contains flags which can be added to your command, bound, completed, and produce -// useful helper functions. Nothing in this package can depend on kube/kube -package genericclioptions // import "k8s.io/cli-runtime/pkg/genericclioptions" diff --git a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/filename_flags.go b/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/filename_flags.go deleted file mode 100644 index 74259e417..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/filename_flags.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package genericclioptions - -import ( - "strings" - - "github.com/spf13/cobra" - "github.com/spf13/pflag" - - "k8s.io/cli-runtime/pkg/resource" -) - -// FileNameFlags are flags for processing files. -// Usage of this struct by itself is discouraged. -// These flags are composed by ResourceBuilderFlags -// which should be used instead. -type FileNameFlags struct { - Usage string - - Filenames *[]string - Kustomize *string - Recursive *bool -} - -// ToOptions creates a new FileNameOptions struct and sets FilenameOptions based on FileNameflags -func (o *FileNameFlags) ToOptions() resource.FilenameOptions { - options := resource.FilenameOptions{} - - if o == nil { - return options - } - - if o.Recursive != nil { - options.Recursive = *o.Recursive - } - if o.Filenames != nil { - options.Filenames = *o.Filenames - } - if o.Kustomize != nil { - options.Kustomize = *o.Kustomize - } - - return options -} - -// AddFlags binds file name flags to a given flagset -func (o *FileNameFlags) AddFlags(flags *pflag.FlagSet) { - if o == nil { - return - } - - if o.Recursive != nil { - flags.BoolVarP(o.Recursive, "recursive", "R", *o.Recursive, "Process the directory used in -f, --filename recursively. Useful when you want to manage related manifests organized within the same directory.") - } - if o.Filenames != nil { - flags.StringSliceVarP(o.Filenames, "filename", "f", *o.Filenames, o.Usage) - annotations := make([]string, 0, len(resource.FileExtensions)) - for _, ext := range resource.FileExtensions { - annotations = append(annotations, strings.TrimLeft(ext, ".")) - } - flags.SetAnnotation("filename", cobra.BashCompFilenameExt, annotations) - } - if o.Kustomize != nil { - flags.StringVarP(o.Kustomize, "kustomize", "k", *o.Kustomize, - "Process a kustomization directory. This flag can't be used together with -f or -R.") - } -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/io_options.go b/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/io_options.go deleted file mode 100644 index 4fc3a77b0..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/io_options.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package genericclioptions - -import ( - "bytes" - "io" - "io/ioutil" -) - -// IOStreams provides the standard names for iostreams. This is useful for embedding and for unit testing. 
-// Inconsistent and different names make it hard to read and review code -type IOStreams struct { - // In think, os.Stdin - In io.Reader - // Out think, os.Stdout - Out io.Writer - // ErrOut think, os.Stderr - ErrOut io.Writer -} - -// NewTestIOStreams returns a valid IOStreams and in, out, errout buffers for unit tests -func NewTestIOStreams() (IOStreams, *bytes.Buffer, *bytes.Buffer, *bytes.Buffer) { - in := &bytes.Buffer{} - out := &bytes.Buffer{} - errOut := &bytes.Buffer{} - - return IOStreams{ - In: in, - Out: out, - ErrOut: errOut, - }, in, out, errOut -} - -// NewTestIOStreamsDiscard returns a valid IOStreams that just discards -func NewTestIOStreamsDiscard() IOStreams { - in := &bytes.Buffer{} - return IOStreams{ - In: in, - Out: ioutil.Discard, - ErrOut: ioutil.Discard, - } -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/json_yaml_flags.go b/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/json_yaml_flags.go deleted file mode 100644 index ea8789614..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/json_yaml_flags.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package genericclioptions - -import ( - "strings" - - "github.com/spf13/cobra" - - "k8s.io/cli-runtime/pkg/printers" -) - -// AllowedFormats returns slice of string of allowed JSONYaml printing format -func (f *JSONYamlPrintFlags) AllowedFormats() []string { - if f == nil { - return []string{} - } - return []string{"json", "yaml"} -} - -// JSONYamlPrintFlags provides default flags necessary for json/yaml printing. -// Given the following flag values, a printer can be requested that knows -// how to handle printing based on these values. -type JSONYamlPrintFlags struct { - ShowManagedFields bool -} - -// ToPrinter receives an outputFormat and returns a printer capable of -// handling --output=(yaml|json) printing. -// Returns false if the specified outputFormat does not match a supported format. 
-// Supported Format types can be found in pkg/printers/printers.go -func (f *JSONYamlPrintFlags) ToPrinter(outputFormat string) (printers.ResourcePrinter, error) { - var printer printers.ResourcePrinter - - outputFormat = strings.ToLower(outputFormat) - switch outputFormat { - case "json": - printer = &printers.JSONPrinter{} - case "yaml": - printer = &printers.YAMLPrinter{} - default: - return nil, NoCompatiblePrinterError{OutputFormat: &outputFormat, AllowedFormats: f.AllowedFormats()} - } - - if !f.ShowManagedFields { - printer = &printers.OmitManagedFieldsPrinter{Delegate: printer} - } - return printer, nil -} - -// AddFlags receives a *cobra.Command reference and binds -// flags related to JSON or Yaml printing to it -func (f *JSONYamlPrintFlags) AddFlags(c *cobra.Command) { - if f == nil { - return - } - - c.Flags().BoolVar(&f.ShowManagedFields, "show-managed-fields", f.ShowManagedFields, "If true, keep the managedFields when printing objects in JSON or YAML format.") -} - -// NewJSONYamlPrintFlags returns flags associated with -// yaml or json printing, with default values set. -func NewJSONYamlPrintFlags() *JSONYamlPrintFlags { - return &JSONYamlPrintFlags{} -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go b/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go deleted file mode 100644 index 06bef474a..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go +++ /dev/null @@ -1,137 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package genericclioptions - -import ( - "fmt" - "io/ioutil" - "sort" - "strings" - - "github.com/spf13/cobra" - - "k8s.io/cli-runtime/pkg/printers" -) - -// templates are logically optional for specifying a format. -// this allows a user to specify a template format value -// as --output=jsonpath= -var jsonFormats = map[string]bool{ - "jsonpath": true, - "jsonpath-file": true, - "jsonpath-as-json": true, -} - -// JSONPathPrintFlags provides default flags necessary for template printing. -// Given the following flag values, a printer can be requested that knows -// how to handle printing based on these values. -type JSONPathPrintFlags struct { - // indicates if it is OK to ignore missing keys for rendering - // an output template. - AllowMissingKeys *bool - TemplateArgument *string -} - -// AllowedFormats returns slice of string of allowed JSONPath printing format -func (f *JSONPathPrintFlags) AllowedFormats() []string { - formats := make([]string, 0, len(jsonFormats)) - for format := range jsonFormats { - formats = append(formats, format) - } - sort.Strings(formats) - return formats -} - -// ToPrinter receives an templateFormat and returns a printer capable of -// handling --template format printing. -// Returns false if the specified templateFormat does not match a template format. 
-func (f *JSONPathPrintFlags) ToPrinter(templateFormat string) (printers.ResourcePrinter, error) { - if (f.TemplateArgument == nil || len(*f.TemplateArgument) == 0) && len(templateFormat) == 0 { - return nil, NoCompatiblePrinterError{Options: f, OutputFormat: &templateFormat} - } - - templateValue := "" - - if f.TemplateArgument == nil || len(*f.TemplateArgument) == 0 { - for format := range jsonFormats { - format = format + "=" - if strings.HasPrefix(templateFormat, format) { - templateValue = templateFormat[len(format):] - templateFormat = format[:len(format)-1] - break - } - } - } else { - templateValue = *f.TemplateArgument - } - - if _, supportedFormat := jsonFormats[templateFormat]; !supportedFormat { - return nil, NoCompatiblePrinterError{OutputFormat: &templateFormat, AllowedFormats: f.AllowedFormats()} - } - - if len(templateValue) == 0 { - return nil, fmt.Errorf("template format specified but no template given") - } - - if templateFormat == "jsonpath-file" { - data, err := ioutil.ReadFile(templateValue) - if err != nil { - return nil, fmt.Errorf("error reading --template %s, %v", templateValue, err) - } - - templateValue = string(data) - } - - p, err := printers.NewJSONPathPrinter(templateValue) - if err != nil { - return nil, fmt.Errorf("error parsing jsonpath %s, %v", templateValue, err) - } - - allowMissingKeys := true - if f.AllowMissingKeys != nil { - allowMissingKeys = *f.AllowMissingKeys - } - - p.AllowMissingKeys(allowMissingKeys) - - if templateFormat == "jsonpath-as-json" { - p.EnableJSONOutput(true) - } - - return p, nil -} - -// AddFlags receives a *cobra.Command reference and binds -// flags related to template printing to it -func (f *JSONPathPrintFlags) AddFlags(c *cobra.Command) { - if f.TemplateArgument != nil { - c.Flags().StringVar(f.TemplateArgument, "template", *f.TemplateArgument, "Template string or path to template file to use when --output=jsonpath, --output=jsonpath-file.") - c.MarkFlagFilename("template") - } - if f.AllowMissingKeys != nil { - c.Flags().BoolVar(f.AllowMissingKeys, "allow-missing-template-keys", *f.AllowMissingKeys, "If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats.") - } -} - -// NewJSONPathPrintFlags returns flags associated with -// --template printing, with default values set. -func NewJSONPathPrintFlags(templateValue string, allowMissingKeys bool) *JSONPathPrintFlags { - return &JSONPathPrintFlags{ - TemplateArgument: &templateValue, - AllowMissingKeys: &allowMissingKeys, - } -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/kube_template_flags.go b/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/kube_template_flags.go deleted file mode 100644 index 518a20ac6..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/kube_template_flags.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package genericclioptions - -import ( - "github.com/spf13/cobra" - - "k8s.io/cli-runtime/pkg/printers" -) - -// KubeTemplatePrintFlags composes print flags that provide both a JSONPath and a go-template printer. -// This is necessary if dealing with cases that require support both both printers, since both sets of flags -// require overlapping flags. -type KubeTemplatePrintFlags struct { - GoTemplatePrintFlags *GoTemplatePrintFlags - JSONPathPrintFlags *JSONPathPrintFlags - - AllowMissingKeys *bool - TemplateArgument *string -} - -// AllowedFormats returns slice of string of allowed GoTemplete and JSONPathPrint printing formats -func (f *KubeTemplatePrintFlags) AllowedFormats() []string { - if f == nil { - return []string{} - } - return append(f.GoTemplatePrintFlags.AllowedFormats(), f.JSONPathPrintFlags.AllowedFormats()...) -} - -// ToPrinter receives an outputFormat and returns a printer capable of -// handling --template printing. -// Returns false if the specified outputFormat does not match a supported format. -// Supported Format types can be found in pkg/printers/printers.go -func (f *KubeTemplatePrintFlags) ToPrinter(outputFormat string) (printers.ResourcePrinter, error) { - if f == nil { - return nil, NoCompatiblePrinterError{} - } - - if p, err := f.JSONPathPrintFlags.ToPrinter(outputFormat); !IsNoCompatiblePrinterError(err) { - return p, err - } - return f.GoTemplatePrintFlags.ToPrinter(outputFormat) -} - -// AddFlags receives a *cobra.Command reference and binds -// flags related to template printing to it -func (f *KubeTemplatePrintFlags) AddFlags(c *cobra.Command) { - if f == nil { - return - } - - if f.TemplateArgument != nil { - c.Flags().StringVar(f.TemplateArgument, "template", *f.TemplateArgument, "Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].") - c.MarkFlagFilename("template") - } - if f.AllowMissingKeys != nil { - c.Flags().BoolVar(f.AllowMissingKeys, "allow-missing-template-keys", *f.AllowMissingKeys, "If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats.") - } -} - -// NewKubeTemplatePrintFlags returns flags associated with -// --template printing, with default values set. -func NewKubeTemplatePrintFlags() *KubeTemplatePrintFlags { - allowMissingKeysPtr := true - templateArgPtr := "" - - return &KubeTemplatePrintFlags{ - GoTemplatePrintFlags: &GoTemplatePrintFlags{ - TemplateArgument: &templateArgPtr, - AllowMissingKeys: &allowMissingKeysPtr, - }, - JSONPathPrintFlags: &JSONPathPrintFlags{ - TemplateArgument: &templateArgPtr, - AllowMissingKeys: &allowMissingKeysPtr, - }, - - TemplateArgument: &templateArgPtr, - AllowMissingKeys: &allowMissingKeysPtr, - } -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/name_flags.go b/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/name_flags.go deleted file mode 100644 index 16bf42265..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/name_flags.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package genericclioptions - -import ( - "fmt" - "strings" - - "github.com/spf13/cobra" - - "k8s.io/cli-runtime/pkg/printers" -) - -// NamePrintFlags provides default flags necessary for printing -// a resource's fully-qualified Kind.group/name, or a successful -// message about that resource if an Operation is provided. -type NamePrintFlags struct { - // Operation describes the name of the action that - // took place on an object, to be included in the - // finalized "successful" message. - Operation string -} - -// Complete sets NamePrintFlags operation flag from successTemplate -func (f *NamePrintFlags) Complete(successTemplate string) error { - f.Operation = fmt.Sprintf(successTemplate, f.Operation) - return nil -} - -// AllowedFormats returns slice of string of allowed Name printing format -func (f *NamePrintFlags) AllowedFormats() []string { - if f == nil { - return []string{} - } - return []string{"name"} -} - -// ToPrinter receives an outputFormat and returns a printer capable of -// handling --output=name printing. -// Returns false if the specified outputFormat does not match a supported format. -// Supported format types can be found in pkg/printers/printers.go -func (f *NamePrintFlags) ToPrinter(outputFormat string) (printers.ResourcePrinter, error) { - namePrinter := &printers.NamePrinter{ - Operation: f.Operation, - } - - outputFormat = strings.ToLower(outputFormat) - switch outputFormat { - case "name": - namePrinter.ShortOutput = true - fallthrough - case "": - return namePrinter, nil - default: - return nil, NoCompatiblePrinterError{OutputFormat: &outputFormat, AllowedFormats: f.AllowedFormats()} - } -} - -// AddFlags receives a *cobra.Command reference and binds -// flags related to name printing to it -func (f *NamePrintFlags) AddFlags(c *cobra.Command) {} - -// NewNamePrintFlags returns flags associated with -// --name printing, with default values set. -func NewNamePrintFlags(operation string) *NamePrintFlags { - return &NamePrintFlags{ - Operation: operation, - } -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/print_flags.go b/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/print_flags.go deleted file mode 100644 index 815f19bbd..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/print_flags.go +++ /dev/null @@ -1,171 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package genericclioptions - -import ( - "fmt" - "sort" - "strings" - - "github.com/spf13/cobra" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/cli-runtime/pkg/printers" -) - -// NoCompatiblePrinterError is a struct that contains error information. -// It will be constructed when a invalid printing format is provided -type NoCompatiblePrinterError struct { - OutputFormat *string - AllowedFormats []string - Options interface{} -} - -func (e NoCompatiblePrinterError) Error() string { - output := "" - if e.OutputFormat != nil { - output = *e.OutputFormat - } - - sort.Strings(e.AllowedFormats) - return fmt.Sprintf("unable to match a printer suitable for the output format %q, allowed formats are: %s", output, strings.Join(e.AllowedFormats, ",")) -} - -// IsNoCompatiblePrinterError returns true if it is a not a compatible printer -// otherwise it will return false -func IsNoCompatiblePrinterError(err error) bool { - if err == nil { - return false - } - - _, ok := err.(NoCompatiblePrinterError) - return ok -} - -// PrintFlags composes common printer flag structs -// used across all commands, and provides a method -// of retrieving a known printer based on flag values provided. -type PrintFlags struct { - JSONYamlPrintFlags *JSONYamlPrintFlags - NamePrintFlags *NamePrintFlags - TemplatePrinterFlags *KubeTemplatePrintFlags - - TypeSetterPrinter *printers.TypeSetterPrinter - - OutputFormat *string - - // OutputFlagSpecified indicates whether the user specifically requested a certain kind of output. - // Using this function allows a sophisticated caller to change the flag binding logic if they so desire. - OutputFlagSpecified func() bool -} - -// Complete sets NamePrintFlags operation flag from successTemplate -func (f *PrintFlags) Complete(successTemplate string) error { - return f.NamePrintFlags.Complete(successTemplate) -} - -// AllowedFormats returns slice of string of allowed JSONYaml/Name/Template printing format -func (f *PrintFlags) AllowedFormats() []string { - ret := []string{} - ret = append(ret, f.JSONYamlPrintFlags.AllowedFormats()...) - ret = append(ret, f.NamePrintFlags.AllowedFormats()...) - ret = append(ret, f.TemplatePrinterFlags.AllowedFormats()...) - return ret -} - -// ToPrinter returns a printer capable of -// handling --output or --template printing. -// Returns false if the specified outputFormat does not match a supported format. -// Supported format types can be found in pkg/printers/printers.go -func (f *PrintFlags) ToPrinter() (printers.ResourcePrinter, error) { - outputFormat := "" - if f.OutputFormat != nil { - outputFormat = *f.OutputFormat - } - // For backwards compatibility we want to support a --template argument given, even when no --output format is provided. - // If no explicit output format has been provided via the --output flag, fallback - // to honoring the --template argument. 
- templateFlagSpecified := f.TemplatePrinterFlags != nil && - f.TemplatePrinterFlags.TemplateArgument != nil && - len(*f.TemplatePrinterFlags.TemplateArgument) > 0 - outputFlagSpecified := f.OutputFlagSpecified != nil && f.OutputFlagSpecified() - if templateFlagSpecified && !outputFlagSpecified { - outputFormat = "go-template" - } - - if f.JSONYamlPrintFlags != nil { - if p, err := f.JSONYamlPrintFlags.ToPrinter(outputFormat); !IsNoCompatiblePrinterError(err) { - return f.TypeSetterPrinter.WrapToPrinter(p, err) - } - } - - if f.NamePrintFlags != nil { - if p, err := f.NamePrintFlags.ToPrinter(outputFormat); !IsNoCompatiblePrinterError(err) { - return f.TypeSetterPrinter.WrapToPrinter(p, err) - } - } - - if f.TemplatePrinterFlags != nil { - if p, err := f.TemplatePrinterFlags.ToPrinter(outputFormat); !IsNoCompatiblePrinterError(err) { - return f.TypeSetterPrinter.WrapToPrinter(p, err) - } - } - - return nil, NoCompatiblePrinterError{OutputFormat: f.OutputFormat, AllowedFormats: f.AllowedFormats()} -} - -// AddFlags receives a *cobra.Command reference and binds -// flags related to JSON/Yaml/Name/Template printing to it -func (f *PrintFlags) AddFlags(cmd *cobra.Command) { - f.JSONYamlPrintFlags.AddFlags(cmd) - f.NamePrintFlags.AddFlags(cmd) - f.TemplatePrinterFlags.AddFlags(cmd) - - if f.OutputFormat != nil { - cmd.Flags().StringVarP(f.OutputFormat, "output", "o", *f.OutputFormat, fmt.Sprintf(`Output format. One of: (%s).`, strings.Join(f.AllowedFormats(), ", "))) - if f.OutputFlagSpecified == nil { - f.OutputFlagSpecified = func() bool { - return cmd.Flag("output").Changed - } - } - } -} - -// WithDefaultOutput sets a default output format if one is not provided through a flag value -func (f *PrintFlags) WithDefaultOutput(output string) *PrintFlags { - f.OutputFormat = &output - return f -} - -// WithTypeSetter sets a wrapper than will surround the returned printer with a printer to type resources -func (f *PrintFlags) WithTypeSetter(scheme *runtime.Scheme) *PrintFlags { - f.TypeSetterPrinter = printers.NewTypeSetter(scheme) - return f -} - -// NewPrintFlags returns a default *PrintFlags -func NewPrintFlags(operation string) *PrintFlags { - outputFormat := "" - - return &PrintFlags{ - OutputFormat: &outputFormat, - - JSONYamlPrintFlags: NewJSONYamlPrintFlags(), - NamePrintFlags: NewNamePrintFlags(operation), - TemplatePrinterFlags: NewKubeTemplatePrintFlags(), - } -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/record_flags.go b/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/record_flags.go deleted file mode 100644 index a5f307de5..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/record_flags.go +++ /dev/null @@ -1,201 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package genericclioptions - -import ( - "os" - "path/filepath" - "strings" - - jsonpatch "github.com/evanphx/json-patch" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/json" -) - -// ChangeCauseAnnotation is the annotation indicating a guess at "why" something was changed -const ChangeCauseAnnotation = "kubernetes.io/change-cause" - -// RecordFlags contains all flags associated with the "--record" operation -type RecordFlags struct { - // Record indicates the state of the recording flag. It is a pointer so a caller can opt out or rebind - Record *bool - - changeCause string -} - -// ToRecorder returns a ChangeCause recorder if --record=false was not -// explicitly given by the user -func (f *RecordFlags) ToRecorder() (Recorder, error) { - if f == nil { - return NoopRecorder{}, nil - } - - shouldRecord := false - if f.Record != nil { - shouldRecord = *f.Record - } - - // if flag was explicitly set to false by the user, - // do not record - if !shouldRecord { - return NoopRecorder{}, nil - } - - return &ChangeCauseRecorder{ - changeCause: f.changeCause, - }, nil -} - -// Complete is called before the command is run, but after it is invoked to finish the state of the struct before use. -func (f *RecordFlags) Complete(cmd *cobra.Command) error { - if f == nil { - return nil - } - - f.changeCause = parseCommandArguments(cmd) - return nil -} - -// CompleteWithChangeCause alters changeCause value with a new cause -func (f *RecordFlags) CompleteWithChangeCause(cause string) error { - if f == nil { - return nil - } - - f.changeCause = cause - return nil -} - -// AddFlags binds the requested flags to the provided flagset -// TODO have this only take a flagset -func (f *RecordFlags) AddFlags(cmd *cobra.Command) { - if f == nil { - return - } - - if f.Record != nil { - cmd.Flags().BoolVar(f.Record, "record", *f.Record, "Record current kubectl command in the resource annotation. If set to false, do not record the command. If set to true, record the command. If not set, default to updating the existing annotation value only if one already exists.") - cmd.Flags().MarkDeprecated("record", "--record will be removed in the future") - } -} - -// NewRecordFlags provides a RecordFlags with reasonable default values set for use -func NewRecordFlags() *RecordFlags { - record := false - - return &RecordFlags{ - Record: &record, - } -} - -// Recorder is used to record why a runtime.Object was changed in an annotation. -type Recorder interface { - // Record records why a runtime.Object was changed in an annotation. - Record(runtime.Object) error - MakeRecordMergePatch(runtime.Object) ([]byte, error) -} - -// NoopRecorder does nothing. It is a "do nothing" that can be returned so code doesn't switch on it. -type NoopRecorder struct{} - -// Record implements Recorder -func (r NoopRecorder) Record(obj runtime.Object) error { - return nil -} - -// MakeRecordMergePatch implements Recorder -func (r NoopRecorder) MakeRecordMergePatch(obj runtime.Object) ([]byte, error) { - return nil, nil -} - -// ChangeCauseRecorder annotates a "change-cause" to an input runtime object -type ChangeCauseRecorder struct { - changeCause string -} - -// Record annotates a "change-cause" to a given info if either "shouldRecord" is true, -// or the resource info previously contained a "change-cause" annotation. 
-func (r *ChangeCauseRecorder) Record(obj runtime.Object) error { - accessor, err := meta.Accessor(obj) - if err != nil { - return err - } - annotations := accessor.GetAnnotations() - if annotations == nil { - annotations = make(map[string]string) - } - annotations[ChangeCauseAnnotation] = r.changeCause - accessor.SetAnnotations(annotations) - return nil -} - -// MakeRecordMergePatch produces a merge patch for updating the recording annotation. -func (r *ChangeCauseRecorder) MakeRecordMergePatch(obj runtime.Object) ([]byte, error) { - // copy so we don't mess with the original - objCopy := obj.DeepCopyObject() - if err := r.Record(objCopy); err != nil { - return nil, err - } - - oldData, err := json.Marshal(obj) - if err != nil { - return nil, err - } - newData, err := json.Marshal(objCopy) - if err != nil { - return nil, err - } - - return jsonpatch.CreateMergePatch(oldData, newData) -} - -// parseCommandArguments will stringify and return all environment arguments ie. a command run by a client -// using the factory. -// Set showSecrets false to filter out stuff like secrets. -func parseCommandArguments(cmd *cobra.Command) string { - if len(os.Args) == 0 { - return "" - } - - flags := "" - parseFunc := func(flag *pflag.Flag, value string) error { - flags = flags + " --" + flag.Name - if set, ok := flag.Annotations["classified"]; !ok || len(set) == 0 { - flags = flags + "=" + value - } else { - flags = flags + "=CLASSIFIED" - } - return nil - } - var err error - err = cmd.Flags().ParseAll(os.Args[1:], parseFunc) - if err != nil || !cmd.Flags().Parsed() { - return "" - } - - args := "" - if arguments := cmd.Flags().Args(); len(arguments) > 0 { - args = " " + strings.Join(arguments, " ") - } - - base := filepath.Base(os.Args[0]) - return base + args + flags -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go b/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go deleted file mode 100644 index 4502061ec..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go +++ /dev/null @@ -1,136 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package genericclioptions - -import ( - "fmt" - "io/ioutil" - "sort" - "strings" - - "github.com/spf13/cobra" - - "k8s.io/cli-runtime/pkg/printers" -) - -// templates are logically optional for specifying a format. -// this allows a user to specify a template format value -// as --output=go-template= -var templateFormats = map[string]bool{ - "template": true, - "go-template": true, - "go-template-file": true, - "templatefile": true, -} - -// GoTemplatePrintFlags provides default flags necessary for template printing. -// Given the following flag values, a printer can be requested that knows -// how to handle printing based on these values. -type GoTemplatePrintFlags struct { - // indicates if it is OK to ignore missing keys for rendering - // an output template. 
- AllowMissingKeys *bool - TemplateArgument *string -} - -// AllowedFormats returns slice of string of allowed GoTemplatePrint printing format -func (f *GoTemplatePrintFlags) AllowedFormats() []string { - formats := make([]string, 0, len(templateFormats)) - for format := range templateFormats { - formats = append(formats, format) - } - sort.Strings(formats) - return formats -} - -// ToPrinter receives an templateFormat and returns a printer capable of -// handling --template format printing. -// Returns false if the specified templateFormat does not match a template format. -func (f *GoTemplatePrintFlags) ToPrinter(templateFormat string) (printers.ResourcePrinter, error) { - if (f.TemplateArgument == nil || len(*f.TemplateArgument) == 0) && len(templateFormat) == 0 { - return nil, NoCompatiblePrinterError{Options: f, OutputFormat: &templateFormat} - } - - templateValue := "" - - if f.TemplateArgument == nil || len(*f.TemplateArgument) == 0 { - for format := range templateFormats { - format = format + "=" - if strings.HasPrefix(templateFormat, format) { - templateValue = templateFormat[len(format):] - templateFormat = format[:len(format)-1] - break - } - } - } else { - templateValue = *f.TemplateArgument - } - - if _, supportedFormat := templateFormats[templateFormat]; !supportedFormat { - return nil, NoCompatiblePrinterError{OutputFormat: &templateFormat, AllowedFormats: f.AllowedFormats()} - } - - if len(templateValue) == 0 { - return nil, fmt.Errorf("template format specified but no template given") - } - - if templateFormat == "templatefile" || templateFormat == "go-template-file" { - data, err := ioutil.ReadFile(templateValue) - if err != nil { - return nil, fmt.Errorf("error reading --template %s, %v", templateValue, err) - } - - templateValue = string(data) - } - - p, err := printers.NewGoTemplatePrinter([]byte(templateValue)) - if err != nil { - return nil, fmt.Errorf("error parsing template %s, %v", templateValue, err) - } - - allowMissingKeys := true - if f.AllowMissingKeys != nil { - allowMissingKeys = *f.AllowMissingKeys - } - - p.AllowMissingKeys(allowMissingKeys) - return p, nil -} - -// AddFlags receives a *cobra.Command reference and binds -// flags related to template printing to it -func (f *GoTemplatePrintFlags) AddFlags(c *cobra.Command) { - if f.TemplateArgument != nil { - c.Flags().StringVar(f.TemplateArgument, "template", *f.TemplateArgument, "Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].") - c.MarkFlagFilename("template") - } - if f.AllowMissingKeys != nil { - c.Flags().BoolVar(f.AllowMissingKeys, "allow-missing-template-keys", *f.AllowMissingKeys, "If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats.") - } -} - -// NewGoTemplatePrintFlags returns flags associated with -// --template printing, with default values set. 
-func NewGoTemplatePrintFlags() *GoTemplatePrintFlags { - allowMissingKeysPtr := true - templateValuePtr := "" - - return &GoTemplatePrintFlags{ - TemplateArgument: &templateValuePtr, - AllowMissingKeys: &allowMissingKeysPtr, - } -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/printers/discard.go b/src/vendor/k8s.io/cli-runtime/pkg/printers/discard.go deleted file mode 100644 index cd934976d..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/printers/discard.go +++ /dev/null @@ -1,30 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package printers - -import ( - "io" - - "k8s.io/apimachinery/pkg/runtime" -) - -// NewDiscardingPrinter is a printer that discards all objects -func NewDiscardingPrinter() ResourcePrinterFunc { - return ResourcePrinterFunc(func(runtime.Object, io.Writer) error { - return nil - }) -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/printers/doc.go b/src/vendor/k8s.io/cli-runtime/pkg/printers/doc.go deleted file mode 100644 index ee205371d..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/printers/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package printers is helper for formatting and printing runtime objects into -// primitives io.writer. -package printers // import "k8s.io/cli-runtime/pkg/printers" diff --git a/src/vendor/k8s.io/cli-runtime/pkg/printers/interface.go b/src/vendor/k8s.io/cli-runtime/pkg/printers/interface.go deleted file mode 100644 index e88ff63ae..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/printers/interface.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package printers - -import ( - "io" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// ResourcePrinterFunc is a function that can print objects -type ResourcePrinterFunc func(runtime.Object, io.Writer) error - -// PrintObj implements ResourcePrinter -func (fn ResourcePrinterFunc) PrintObj(obj runtime.Object, w io.Writer) error { - return fn(obj, w) -} - -// ResourcePrinter is an interface that knows how to print runtime objects. -type ResourcePrinter interface { - // PrintObj receives a runtime object, formats it and prints it to a writer. - PrintObj(runtime.Object, io.Writer) error -} - -// PrintOptions struct defines a struct for various print options -type PrintOptions struct { - NoHeaders bool - WithNamespace bool - WithKind bool - Wide bool - ShowLabels bool - Kind schema.GroupKind - ColumnLabels []string - - SortBy string - - // indicates if it is OK to ignore missing keys for rendering an output template. - AllowMissingKeys bool -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/printers/json.go b/src/vendor/k8s.io/cli-runtime/pkg/printers/json.go deleted file mode 100644 index 8ab2235f8..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/printers/json.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package printers - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "reflect" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// JSONPrinter is an implementation of ResourcePrinter which outputs an object as JSON. -type JSONPrinter struct{} - -// PrintObj is an implementation of ResourcePrinter.PrintObj which simply writes the object to the Writer. -func (p *JSONPrinter) PrintObj(obj runtime.Object, w io.Writer) error { - // we use reflect.Indirect here in order to obtain the actual value from a pointer. - // we need an actual value in order to retrieve the package path for an object. - // using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers. 
- if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { - return fmt.Errorf(InternalObjectPrinterErr) - } - - switch obj := obj.(type) { - case *metav1.WatchEvent: - if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj.Object.Object)).Type().PkgPath()) { - return fmt.Errorf(InternalObjectPrinterErr) - } - data, err := json.Marshal(obj) - if err != nil { - return err - } - _, err = w.Write(data) - if err != nil { - return err - } - _, err = w.Write([]byte{'\n'}) - return err - case *runtime.Unknown: - var buf bytes.Buffer - err := json.Indent(&buf, obj.Raw, "", " ") - if err != nil { - return err - } - buf.WriteRune('\n') - _, err = buf.WriteTo(w) - return err - } - - if obj.GetObjectKind().GroupVersionKind().Empty() { - return fmt.Errorf("missing apiVersion or kind; try GetObjectKind().SetGroupVersionKind() if you know the type") - } - - data, err := json.MarshalIndent(obj, "", " ") - if err != nil { - return err - } - data = append(data, '\n') - _, err = w.Write(data) - return err -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/printers/jsonpath.go b/src/vendor/k8s.io/cli-runtime/pkg/printers/jsonpath.go deleted file mode 100644 index 769960d66..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/printers/jsonpath.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package printers - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "reflect" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/util/jsonpath" -) - -// exists returns true if it would be possible to call the index function -// with these arguments. -// -// TODO: how to document this for users? -// -// index returns the result of indexing its first argument by the following -// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each -// indexed item must be a map, slice, or array. 
-func exists(item interface{}, indices ...interface{}) bool { - v := reflect.ValueOf(item) - for _, i := range indices { - index := reflect.ValueOf(i) - var isNil bool - if v, isNil = indirect(v); isNil { - return false - } - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.String: - var x int64 - switch index.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x = index.Int() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - x = int64(index.Uint()) - default: - return false - } - if x < 0 || x >= int64(v.Len()) { - return false - } - v = v.Index(int(x)) - case reflect.Map: - if !index.IsValid() { - index = reflect.Zero(v.Type().Key()) - } - if !index.Type().AssignableTo(v.Type().Key()) { - return false - } - if x := v.MapIndex(index); x.IsValid() { - v = x - } else { - v = reflect.Zero(v.Type().Elem()) - } - default: - return false - } - } - if _, isNil := indirect(v); isNil { - return false - } - return true -} - -// stolen from text/template -// indirect returns the item at the end of indirection, and a bool to indicate if it's nil. -// We indirect through pointers and empty interfaces (only) because -// non-empty interfaces have methods we might need. -func indirect(v reflect.Value) (rv reflect.Value, isNil bool) { - for ; v.Kind() == reflect.Pointer || v.Kind() == reflect.Interface; v = v.Elem() { - if v.IsNil() { - return v, true - } - if v.Kind() == reflect.Interface && v.NumMethod() > 0 { - break - } - } - return v, false -} - -// JSONPathPrinter is an implementation of ResourcePrinter which formats data with jsonpath expression. -type JSONPathPrinter struct { - rawTemplate string - *jsonpath.JSONPath -} - -func NewJSONPathPrinter(tmpl string) (*JSONPathPrinter, error) { - j := jsonpath.New("out") - if err := j.Parse(tmpl); err != nil { - return nil, err - } - return &JSONPathPrinter{ - rawTemplate: tmpl, - JSONPath: j, - }, nil -} - -// PrintObj formats the obj with the JSONPath Template. -func (j *JSONPathPrinter) PrintObj(obj runtime.Object, w io.Writer) error { - // we use reflect.Indirect here in order to obtain the actual value from a pointer. - // we need an actual value in order to retrieve the package path for an object. - // using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers. - if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { - return fmt.Errorf(InternalObjectPrinterErr) - } - - var queryObj interface{} = obj - if unstructured, ok := obj.(runtime.Unstructured); ok { - queryObj = unstructured.UnstructuredContent() - } else { - data, err := json.Marshal(obj) - if err != nil { - return err - } - queryObj = map[string]interface{}{} - if err := json.Unmarshal(data, &queryObj); err != nil { - return err - } - } - - if err := j.JSONPath.Execute(w, queryObj); err != nil { - buf := bytes.NewBuffer(nil) - fmt.Fprintf(buf, "Error executing template: %v. 
Printing more information for debugging the template:\n", err) - fmt.Fprintf(buf, "\ttemplate was:\n\t\t%v\n", j.rawTemplate) - fmt.Fprintf(buf, "\tobject given to jsonpath engine was:\n\t\t%#v\n\n", queryObj) - return fmt.Errorf("error executing jsonpath %q: %v\n", j.rawTemplate, buf.String()) - } - return nil -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/printers/managedfields.go b/src/vendor/k8s.io/cli-runtime/pkg/printers/managedfields.go deleted file mode 100644 index cab54d058..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/printers/managedfields.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package printers - -import ( - "io" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" -) - -// OmitManagedFieldsPrinter wraps an existing printer and omits the managed fields from the object -// before printing it. -type OmitManagedFieldsPrinter struct { - Delegate ResourcePrinter -} - -var _ ResourcePrinter = (*OmitManagedFieldsPrinter)(nil) - -func omitManagedFields(o runtime.Object) runtime.Object { - a, err := meta.Accessor(o) - if err != nil { - // The object is not a `metav1.Object`, ignore it. - return o - } - a.SetManagedFields(nil) - return o -} - -// PrintObj copies the object and omits the managed fields from the copied object before printing it. -func (p *OmitManagedFieldsPrinter) PrintObj(obj runtime.Object, w io.Writer) error { - if obj == nil { - return p.Delegate.PrintObj(obj, w) - } - if meta.IsListType(obj) { - obj = obj.DeepCopyObject() - _ = meta.EachListItem(obj, func(item runtime.Object) error { - omitManagedFields(item) - return nil - }) - } else if _, err := meta.Accessor(obj); err == nil { - obj = omitManagedFields(obj.DeepCopyObject()) - } - return p.Delegate.PrintObj(obj, w) -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/printers/name.go b/src/vendor/k8s.io/cli-runtime/pkg/printers/name.go deleted file mode 100644 index 086166af2..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/printers/name.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package printers - -import ( - "fmt" - "io" - "reflect" - "strings" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// NamePrinter is an implementation of ResourcePrinter which outputs "resource/name" pair of an object. -type NamePrinter struct { - // ShortOutput indicates whether an operation should be - // printed along side the "resource/name" pair for an object. - ShortOutput bool - // Operation describes the name of the action that - // took place on an object, to be included in the - // finalized "successful" message. - Operation string -} - -// PrintObj is an implementation of ResourcePrinter.PrintObj which decodes the object -// and print "resource/name" pair. If the object is a List, print all items in it. -func (p *NamePrinter) PrintObj(obj runtime.Object, w io.Writer) error { - switch castObj := obj.(type) { - case *metav1.WatchEvent: - obj = castObj.Object.Object - } - - // we use reflect.Indirect here in order to obtain the actual value from a pointer. - // using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers. - // we need an actual value in order to retrieve the package path for an object. - if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { - return fmt.Errorf(InternalObjectPrinterErr) - } - - if meta.IsListType(obj) { - // we allow unstructured lists for now because they always contain the GVK information. We should chase down - // callers and stop them from passing unflattened lists - // TODO chase the caller that is setting this and remove it. - if _, ok := obj.(*unstructured.UnstructuredList); !ok { - return fmt.Errorf("list types are not supported by name printing: %T", obj) - } - - items, err := meta.ExtractList(obj) - if err != nil { - return err - } - for _, obj := range items { - if err := p.PrintObj(obj, w); err != nil { - return err - } - } - return nil - } - - if obj.GetObjectKind().GroupVersionKind().Empty() { - return fmt.Errorf("missing apiVersion or kind; try GetObjectKind().SetGroupVersionKind() if you know the type") - } - - name := "" - if acc, err := meta.Accessor(obj); err == nil { - if n := acc.GetName(); len(n) > 0 { - name = n - } - } - - return printObj(w, name, p.Operation, p.ShortOutput, GetObjectGroupKind(obj)) -} - -func GetObjectGroupKind(obj runtime.Object) schema.GroupKind { - if obj == nil { - return schema.GroupKind{Kind: ""} - } - groupVersionKind := obj.GetObjectKind().GroupVersionKind() - if len(groupVersionKind.Kind) > 0 { - return groupVersionKind.GroupKind() - } - - if uns, ok := obj.(*unstructured.Unstructured); ok { - if len(uns.GroupVersionKind().Kind) > 0 { - return uns.GroupVersionKind().GroupKind() - } - } - - return schema.GroupKind{Kind: ""} -} - -func printObj(w io.Writer, name string, operation string, shortOutput bool, groupKind schema.GroupKind) error { - if len(groupKind.Kind) == 0 { - return fmt.Errorf("missing kind for resource with name %v", name) - } - - if len(operation) > 0 { - operation = " " + operation - } - - if shortOutput { - operation = "" - } - - if len(groupKind.Group) == 0 { - fmt.Fprintf(w, "%s/%s%s\n", strings.ToLower(groupKind.Kind), name, operation) - return nil - } - - fmt.Fprintf(w, "%s.%s/%s%s\n", strings.ToLower(groupKind.Kind), groupKind.Group, name, operation) - return nil -} diff --git 
a/src/vendor/k8s.io/cli-runtime/pkg/printers/sourcechecker.go b/src/vendor/k8s.io/cli-runtime/pkg/printers/sourcechecker.go deleted file mode 100644 index e360c8fe0..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/printers/sourcechecker.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package printers - -import ( - "strings" -) - -var ( - InternalObjectPrinterErr = "a versioned object must be passed to a printer" - - // disallowedPackagePrefixes contains regular expression templates - // for object package paths that are not allowed by printers. - disallowedPackagePrefixes = []string{ - "k8s.io/kubernetes/pkg/apis/", - } -) - -var InternalObjectPreventer = &illegalPackageSourceChecker{disallowedPackagePrefixes} - -func IsInternalObjectError(err error) bool { - if err == nil { - return false - } - - return err.Error() == InternalObjectPrinterErr -} - -// illegalPackageSourceChecker compares a given -// object's package path, and determines if the -// object originates from a disallowed source. -type illegalPackageSourceChecker struct { - // disallowedPrefixes is a slice of disallowed package path - // prefixes for a given runtime.Object that we are printing. - disallowedPrefixes []string -} - -func (c *illegalPackageSourceChecker) IsForbidden(pkgPath string) bool { - for _, forbiddenPrefix := range c.disallowedPrefixes { - if strings.HasPrefix(pkgPath, forbiddenPrefix) || strings.Contains(pkgPath, "/vendor/"+forbiddenPrefix) { - return true - } - } - - return false -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/printers/tableprinter.go b/src/vendor/k8s.io/cli-runtime/pkg/printers/tableprinter.go deleted file mode 100644 index 87bc3f414..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/printers/tableprinter.go +++ /dev/null @@ -1,588 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package printers - -import ( - "fmt" - "io" - "reflect" - "strings" - "time" - - "github.com/liggitt/tabwriter" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/duration" - "k8s.io/apimachinery/pkg/watch" -) - -var _ ResourcePrinter = &HumanReadablePrinter{} - -type printHandler struct { - columnDefinitions []metav1.TableColumnDefinition - printFunc reflect.Value -} - -var ( - statusHandlerEntry = &printHandler{ - columnDefinitions: statusColumnDefinitions, - printFunc: reflect.ValueOf(printStatus), - } - - statusColumnDefinitions = []metav1.TableColumnDefinition{ - {Name: "Status", Type: "string"}, - {Name: "Reason", Type: "string"}, - {Name: "Message", Type: "string"}, - } - - defaultHandlerEntry = &printHandler{ - columnDefinitions: objectMetaColumnDefinitions, - printFunc: reflect.ValueOf(printObjectMeta), - } - - objectMetaColumnDefinitions = []metav1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - } - - withEventTypePrefixColumns = []string{"EVENT"} - withNamespacePrefixColumns = []string{"NAMESPACE"} // TODO(erictune): print cluster name too. -) - -// HumanReadablePrinter is an implementation of ResourcePrinter which attempts to provide -// more elegant output. It is not threadsafe, but you may call PrintObj repeatedly; headers -// will only be printed if the object type changes. This makes it useful for printing items -// received from watches. -type HumanReadablePrinter struct { - options PrintOptions - lastType interface{} - lastColumns []metav1.TableColumnDefinition - printedHeaders bool -} - -// NewTablePrinter creates a printer suitable for calling PrintObj(). -func NewTablePrinter(options PrintOptions) ResourcePrinter { - printer := &HumanReadablePrinter{ - options: options, - } - return printer -} - -func printHeader(columnNames []string, w io.Writer) error { - if _, err := fmt.Fprintf(w, "%s\n", strings.Join(columnNames, "\t")); err != nil { - return err - } - return nil -} - -// PrintObj prints the obj in a human-friendly format according to the type of the obj. -func (h *HumanReadablePrinter) PrintObj(obj runtime.Object, output io.Writer) error { - - if _, found := output.(*tabwriter.Writer); !found { - w := GetNewTabWriter(output) - output = w - defer w.Flush() - } - - var eventType string - if event, isEvent := obj.(*metav1.WatchEvent); isEvent { - eventType = event.Type - obj = event.Object.Object - } - - // Parameter "obj" is a table from server; print it. - // display tables following the rules of options - if table, ok := obj.(*metav1.Table); ok { - // Do not print headers if this table has no column definitions, or they are the same as the last ones we printed - localOptions := h.options - if h.printedHeaders && (len(table.ColumnDefinitions) == 0 || reflect.DeepEqual(table.ColumnDefinitions, h.lastColumns)) { - localOptions.NoHeaders = true - } - - if len(table.ColumnDefinitions) == 0 { - // If this table has no column definitions, use the columns from the last table we printed for decoration and layout. - // This is done when receiving tables in watch events to save bandwidth. 
- table.ColumnDefinitions = h.lastColumns - } else if !reflect.DeepEqual(table.ColumnDefinitions, h.lastColumns) { - // If this table has column definitions, remember them for future use. - h.lastColumns = table.ColumnDefinitions - h.printedHeaders = false - } - - if len(table.Rows) > 0 { - h.printedHeaders = true - } - - if err := decorateTable(table, localOptions); err != nil { - return err - } - if len(eventType) > 0 { - if err := addColumns(beginning, table, - []metav1.TableColumnDefinition{{Name: "Event", Type: "string"}}, - []cellValueFunc{func(metav1.TableRow) (interface{}, error) { return formatEventType(eventType), nil }}, - ); err != nil { - return err - } - } - return printTable(table, output, localOptions) - } - - // Could not find print handler for "obj"; use the default or status print handler. - // Print with the default or status handler, and use the columns from the last time - var handler *printHandler - if _, isStatus := obj.(*metav1.Status); isStatus { - handler = statusHandlerEntry - } else { - handler = defaultHandlerEntry - } - - includeHeaders := h.lastType != handler && !h.options.NoHeaders - - if h.lastType != nil && h.lastType != handler && !h.options.NoHeaders { - fmt.Fprintln(output) - } - - if err := printRowsForHandlerEntry(output, handler, eventType, obj, h.options, includeHeaders); err != nil { - return err - } - h.lastType = handler - - return nil -} - -// printTable prints a table to the provided output respecting the filtering rules for options -// for wide columns and filtered rows. It filters out rows that are Completed. You should call -// decorateTable if you receive a table from a remote server before calling printTable. -func printTable(table *metav1.Table, output io.Writer, options PrintOptions) error { - if !options.NoHeaders { - // avoid printing headers if we have no rows to display - if len(table.Rows) == 0 { - return nil - } - - first := true - for _, column := range table.ColumnDefinitions { - if !options.Wide && column.Priority != 0 { - continue - } - if first { - first = false - } else { - fmt.Fprint(output, "\t") - } - fmt.Fprint(output, strings.ToUpper(column.Name)) - } - fmt.Fprintln(output) - } - for _, row := range table.Rows { - first := true - for i, cell := range row.Cells { - if i >= len(table.ColumnDefinitions) { - // https://issue.k8s.io/66379 - // don't panic in case of bad output from the server, with more cells than column definitions - break - } - column := table.ColumnDefinitions[i] - if !options.Wide && column.Priority != 0 { - continue - } - if first { - first = false - } else { - fmt.Fprint(output, "\t") - } - if cell != nil { - switch val := cell.(type) { - case string: - print := val - truncated := false - // truncate at newlines - newline := strings.Index(print, "\n") - if newline >= 0 { - truncated = true - print = print[:newline] - } - fmt.Fprint(output, print) - if truncated { - fmt.Fprint(output, "...") - } - default: - fmt.Fprint(output, val) - } - } - } - fmt.Fprintln(output) - } - return nil -} - -type cellValueFunc func(metav1.TableRow) (interface{}, error) - -type columnAddPosition int - -const ( - beginning columnAddPosition = 1 - end columnAddPosition = 2 -) - -func addColumns(pos columnAddPosition, table *metav1.Table, columns []metav1.TableColumnDefinition, valueFuncs []cellValueFunc) error { - if len(columns) != len(valueFuncs) { - return fmt.Errorf("cannot prepend columns, unmatched value functions") - } - if len(columns) == 0 { - return nil - } - - // Compute the new rows - newRows := 
make([][]interface{}, len(table.Rows)) - for i := range table.Rows { - newCells := make([]interface{}, 0, len(columns)+len(table.Rows[i].Cells)) - - if pos == end { - // If we're appending, start with the existing cells, - // then add nil cells to match the number of columns - newCells = append(newCells, table.Rows[i].Cells...) - for len(newCells) < len(table.ColumnDefinitions) { - newCells = append(newCells, nil) - } - } - - // Compute cells for new columns - for _, f := range valueFuncs { - newCell, err := f(table.Rows[i]) - if err != nil { - return err - } - newCells = append(newCells, newCell) - } - - if pos == beginning { - // If we're prepending, add existing cells - newCells = append(newCells, table.Rows[i].Cells...) - } - - // Remember the new cells for this row - newRows[i] = newCells - } - - // All cells successfully computed, now replace columns and rows - newColumns := make([]metav1.TableColumnDefinition, 0, len(columns)+len(table.ColumnDefinitions)) - switch pos { - case beginning: - newColumns = append(newColumns, columns...) - newColumns = append(newColumns, table.ColumnDefinitions...) - case end: - newColumns = append(newColumns, table.ColumnDefinitions...) - newColumns = append(newColumns, columns...) - default: - return fmt.Errorf("invalid column add position: %v", pos) - } - table.ColumnDefinitions = newColumns - for i := range table.Rows { - table.Rows[i].Cells = newRows[i] - } - - return nil -} - -// decorateTable takes a table and attempts to add label columns and the -// namespace column. It will fill empty columns with nil (if the object -// does not expose metadata). It returns an error if the table cannot -// be decorated. -func decorateTable(table *metav1.Table, options PrintOptions) error { - width := len(table.ColumnDefinitions) + len(options.ColumnLabels) - if options.WithNamespace { - width++ - } - if options.ShowLabels { - width++ - } - - columns := table.ColumnDefinitions - - nameColumn := -1 - if options.WithKind && !options.Kind.Empty() { - for i := range columns { - if columns[i].Format == "name" && columns[i].Type == "string" { - nameColumn = i - break - } - } - } - - if width != len(table.ColumnDefinitions) { - columns = make([]metav1.TableColumnDefinition, 0, width) - if options.WithNamespace { - columns = append(columns, metav1.TableColumnDefinition{ - Name: "Namespace", - Type: "string", - }) - } - columns = append(columns, table.ColumnDefinitions...) - for _, label := range formatLabelHeaders(options.ColumnLabels) { - columns = append(columns, metav1.TableColumnDefinition{ - Name: label, - Type: "string", - }) - } - if options.ShowLabels { - columns = append(columns, metav1.TableColumnDefinition{ - Name: "Labels", - Type: "string", - }) - } - } - - rows := table.Rows - - includeLabels := len(options.ColumnLabels) > 0 || options.ShowLabels - if includeLabels || options.WithNamespace || nameColumn != -1 { - for i := range rows { - row := rows[i] - - if nameColumn != -1 { - row.Cells[nameColumn] = fmt.Sprintf("%s/%s", strings.ToLower(options.Kind.String()), row.Cells[nameColumn]) - } - - var m metav1.Object - if obj := row.Object.Object; obj != nil { - if acc, err := meta.Accessor(obj); err == nil { - m = acc - } - } - // if we can't get an accessor, fill out the appropriate columns with empty spaces - if m == nil { - if options.WithNamespace { - r := make([]interface{}, 1, width) - row.Cells = append(r, row.Cells...) 
- } - for j := 0; j < width-len(row.Cells); j++ { - row.Cells = append(row.Cells, nil) - } - rows[i] = row - continue - } - - if options.WithNamespace { - r := make([]interface{}, 1, width) - r[0] = m.GetNamespace() - row.Cells = append(r, row.Cells...) - } - if includeLabels { - row.Cells = appendLabelCells(row.Cells, m.GetLabels(), options) - } - rows[i] = row - } - } - - table.ColumnDefinitions = columns - table.Rows = rows - return nil -} - -// printRowsForHandlerEntry prints the incremental table output (headers if the current type is -// different from lastType) including all the rows in the object. It returns the current type -// or an error, if any. -func printRowsForHandlerEntry(output io.Writer, handler *printHandler, eventType string, obj runtime.Object, options PrintOptions, includeHeaders bool) error { - var results []reflect.Value - - args := []reflect.Value{reflect.ValueOf(obj), reflect.ValueOf(options)} - results = handler.printFunc.Call(args) - if !results[1].IsNil() { - return results[1].Interface().(error) - } - - if includeHeaders { - var headers []string - for _, column := range handler.columnDefinitions { - if column.Priority != 0 && !options.Wide { - continue - } - headers = append(headers, strings.ToUpper(column.Name)) - } - headers = append(headers, formatLabelHeaders(options.ColumnLabels)...) - // LABELS is always the last column. - headers = append(headers, formatShowLabelsHeader(options.ShowLabels)...) - // prepend namespace header - if options.WithNamespace { - headers = append(withNamespacePrefixColumns, headers...) - } - // prepend event type header - if len(eventType) > 0 { - headers = append(withEventTypePrefixColumns, headers...) - } - printHeader(headers, output) - } - - if results[1].IsNil() { - rows := results[0].Interface().([]metav1.TableRow) - printRows(output, eventType, rows, options) - return nil - } - return results[1].Interface().(error) -} - -var formattedEventType = map[string]string{ - string(watch.Added): "ADDED ", - string(watch.Modified): "MODIFIED", - string(watch.Deleted): "DELETED ", - string(watch.Error): "ERROR ", -} - -func formatEventType(eventType string) string { - if formatted, ok := formattedEventType[eventType]; ok { - return formatted - } - return eventType -} - -// printRows writes the provided rows to output. 
-func printRows(output io.Writer, eventType string, rows []metav1.TableRow, options PrintOptions) { - for _, row := range rows { - if len(eventType) > 0 { - fmt.Fprint(output, formatEventType(eventType)) - fmt.Fprint(output, "\t") - } - if options.WithNamespace { - if obj := row.Object.Object; obj != nil { - if m, err := meta.Accessor(obj); err == nil { - fmt.Fprint(output, m.GetNamespace()) - } - } - fmt.Fprint(output, "\t") - } - - for i, cell := range row.Cells { - if i != 0 { - fmt.Fprint(output, "\t") - } else { - // TODO: remove this once we drop the legacy printers - if options.WithKind && !options.Kind.Empty() { - fmt.Fprintf(output, "%s/%s", strings.ToLower(options.Kind.String()), cell) - continue - } - } - fmt.Fprint(output, cell) - } - - hasLabels := len(options.ColumnLabels) > 0 - if obj := row.Object.Object; obj != nil && (hasLabels || options.ShowLabels) { - if m, err := meta.Accessor(obj); err == nil { - for _, value := range labelValues(m.GetLabels(), options) { - output.Write([]byte("\t")) - output.Write([]byte(value)) - } - } - } - - output.Write([]byte("\n")) - } -} - -func formatLabelHeaders(columnLabels []string) []string { - formHead := make([]string, len(columnLabels)) - for i, l := range columnLabels { - p := strings.Split(l, "/") - formHead[i] = strings.ToUpper(p[len(p)-1]) - } - return formHead -} - -// headers for --show-labels=true -func formatShowLabelsHeader(showLabels bool) []string { - if showLabels { - return []string{"LABELS"} - } - return nil -} - -// labelValues returns a slice of value columns matching the requested print options. -func labelValues(itemLabels map[string]string, opts PrintOptions) []string { - var values []string - for _, key := range opts.ColumnLabels { - values = append(values, itemLabels[key]) - } - if opts.ShowLabels { - values = append(values, labels.FormatLabels(itemLabels)) - } - return values -} - -// appendLabelCells returns a slice of value columns matching the requested print options. -// Intended for use with tables. -func appendLabelCells(values []interface{}, itemLabels map[string]string, opts PrintOptions) []interface{} { - for _, key := range opts.ColumnLabels { - values = append(values, itemLabels[key]) - } - if opts.ShowLabels { - values = append(values, labels.FormatLabels(itemLabels)) - } - return values -} - -func printStatus(obj runtime.Object, options PrintOptions) ([]metav1.TableRow, error) { - status, ok := obj.(*metav1.Status) - if !ok { - return nil, fmt.Errorf("expected *v1.Status, got %T", obj) - } - return []metav1.TableRow{{ - Object: runtime.RawExtension{Object: obj}, - Cells: []interface{}{status.Status, status.Reason, status.Message}, - }}, nil -} - -func printObjectMeta(obj runtime.Object, options PrintOptions) ([]metav1.TableRow, error) { - if meta.IsListType(obj) { - rows := make([]metav1.TableRow, 0, 16) - err := meta.EachListItem(obj, func(obj runtime.Object) error { - nestedRows, err := printObjectMeta(obj, options) - if err != nil { - return err - } - rows = append(rows, nestedRows...) - return nil - }) - if err != nil { - return nil, err - } - return rows, nil - } - - rows := make([]metav1.TableRow, 0, 1) - m, err := meta.Accessor(obj) - if err != nil { - return nil, err - } - row := metav1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - row.Cells = append(row.Cells, m.GetName(), translateTimestampSince(m.GetCreationTimestamp())) - rows = append(rows, row) - return rows, nil -} - -// translateTimestampSince returns the elapsed time since timestamp in -// human-readable approximation. 
-func translateTimestampSince(timestamp metav1.Time) string { - if timestamp.IsZero() { - return "" - } - - return duration.HumanDuration(time.Since(timestamp.Time)) -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/printers/tabwriter.go b/src/vendor/k8s.io/cli-runtime/pkg/printers/tabwriter.go deleted file mode 100644 index 21d60e1c4..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/printers/tabwriter.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package printers - -import ( - "io" - - "github.com/liggitt/tabwriter" -) - -const ( - tabwriterMinWidth = 6 - tabwriterWidth = 4 - tabwriterPadding = 3 - tabwriterPadChar = ' ' - tabwriterFlags = tabwriter.RememberWidths -) - -// GetNewTabWriter returns a tabwriter that translates tabbed columns in input into properly aligned text. -func GetNewTabWriter(output io.Writer) *tabwriter.Writer { - return tabwriter.NewWriter(output, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags) -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/printers/template.go b/src/vendor/k8s.io/cli-runtime/pkg/printers/template.go deleted file mode 100644 index ccff54226..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/printers/template.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package printers - -import ( - "encoding/base64" - "fmt" - "io" - "reflect" - "text/template" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/json" -) - -// GoTemplatePrinter is an implementation of ResourcePrinter which formats data with a Go Template. -type GoTemplatePrinter struct { - rawTemplate string - template *template.Template -} - -func NewGoTemplatePrinter(tmpl []byte) (*GoTemplatePrinter, error) { - t, err := template.New("output"). - Funcs(template.FuncMap{ - "exists": exists, - "base64decode": base64decode, - }). - Parse(string(tmpl)) - if err != nil { - return nil, err - } - return &GoTemplatePrinter{ - rawTemplate: string(tmpl), - template: t, - }, nil -} - -// AllowMissingKeys tells the template engine if missing keys are allowed. -func (p *GoTemplatePrinter) AllowMissingKeys(allow bool) { - if allow { - p.template.Option("missingkey=default") - } else { - p.template.Option("missingkey=error") - } -} - -// PrintObj formats the obj with the Go Template. 
-func (p *GoTemplatePrinter) PrintObj(obj runtime.Object, w io.Writer) error { - if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { - return fmt.Errorf(InternalObjectPrinterErr) - } - - var data []byte - var err error - data, err = json.Marshal(obj) - if err != nil { - return err - } - - out := map[string]interface{}{} - if err := json.Unmarshal(data, &out); err != nil { - return err - } - if err = p.safeExecute(w, out); err != nil { - // It is way easier to debug this stuff when it shows up in - // stdout instead of just stdin. So in addition to returning - // a nice error, also print useful stuff with the writer. - fmt.Fprintf(w, "Error executing template: %v. Printing more information for debugging the template:\n", err) - fmt.Fprintf(w, "\ttemplate was:\n\t\t%v\n", p.rawTemplate) - fmt.Fprintf(w, "\traw data was:\n\t\t%v\n", string(data)) - fmt.Fprintf(w, "\tobject given to template engine was:\n\t\t%+v\n\n", out) - return fmt.Errorf("error executing template %q: %v", p.rawTemplate, err) - } - return nil -} - -// safeExecute tries to execute the template, but catches panics and returns an error -// should the template engine panic. -func (p *GoTemplatePrinter) safeExecute(w io.Writer, obj interface{}) error { - var panicErr error - // Sorry for the double anonymous function. There's probably a clever way - // to do this that has the defer'd func setting the value to be returned, but - // that would be even less obvious. - retErr := func() error { - defer func() { - if x := recover(); x != nil { - panicErr = fmt.Errorf("caught panic: %+v", x) - } - }() - return p.template.Execute(w, obj) - }() - if panicErr != nil { - return panicErr - } - return retErr -} - -func base64decode(v string) (string, error) { - data, err := base64.StdEncoding.DecodeString(v) - if err != nil { - return "", fmt.Errorf("base64 decode failed: %v", err) - } - return string(data), nil -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/printers/typesetter.go b/src/vendor/k8s.io/cli-runtime/pkg/printers/typesetter.go deleted file mode 100644 index 8d2d9b56e..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/printers/typesetter.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package printers - -import ( - "fmt" - "io" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// TypeSetterPrinter is an implementation of ResourcePrinter wraps another printer with types set on the objects -type TypeSetterPrinter struct { - Delegate ResourcePrinter - - Typer runtime.ObjectTyper -} - -// NewTypeSetter constructs a wrapping printer with required params -func NewTypeSetter(typer runtime.ObjectTyper) *TypeSetterPrinter { - return &TypeSetterPrinter{Typer: typer} -} - -// PrintObj is an implementation of ResourcePrinter.PrintObj which sets type information on the obj for the duration -// of printing. It is NOT threadsafe. 
-func (p *TypeSetterPrinter) PrintObj(obj runtime.Object, w io.Writer) error { - if obj == nil { - return p.Delegate.PrintObj(obj, w) - } - if !obj.GetObjectKind().GroupVersionKind().Empty() { - return p.Delegate.PrintObj(obj, w) - } - - // we were empty coming in, make sure we're empty going out. This makes the call thread-unsafe - defer func() { - obj.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{}) - }() - - gvks, _, err := p.Typer.ObjectKinds(obj) - if err != nil { - // printers wrapped by us expect to find the type information present - return fmt.Errorf("missing apiVersion or kind and cannot assign it; %v", err) - } - - for _, gvk := range gvks { - if len(gvk.Kind) == 0 { - continue - } - if len(gvk.Version) == 0 || gvk.Version == runtime.APIVersionInternal { - continue - } - obj.GetObjectKind().SetGroupVersionKind(gvk) - break - } - - return p.Delegate.PrintObj(obj, w) -} - -// ToPrinter returns a printer (not threadsafe!) that has been wrapped -func (p *TypeSetterPrinter) ToPrinter(delegate ResourcePrinter) ResourcePrinter { - if p == nil { - return delegate - } - - p.Delegate = delegate - return p -} - -// WrapToPrinter wraps the common ToPrinter method -func (p *TypeSetterPrinter) WrapToPrinter(delegate ResourcePrinter, err error) (ResourcePrinter, error) { - if err != nil { - return delegate, err - } - if p == nil { - return delegate, nil - } - - p.Delegate = delegate - return p, nil -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/printers/warningprinter.go b/src/vendor/k8s.io/cli-runtime/pkg/printers/warningprinter.go deleted file mode 100644 index b3a8264f7..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/printers/warningprinter.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package printers - -import ( - "fmt" - "io" -) - -const ( - yellowColor = "\u001b[33;1m" - resetColor = "\u001b[0m" -) - -type WarningPrinter struct { - // out is the writer to output warnings to - out io.Writer - // opts contains options controlling warning output - opts WarningPrinterOptions -} - -// WarningPrinterOptions controls the behavior of a WarningPrinter constructed using NewWarningPrinter() -type WarningPrinterOptions struct { - // Color indicates that warning output can include ANSI color codes - Color bool -} - -// NewWarningPrinter returns an implementation of warningPrinter that outputs warnings to the specified writer. -func NewWarningPrinter(out io.Writer, opts WarningPrinterOptions) *WarningPrinter { - h := &WarningPrinter{out: out, opts: opts} - return h -} - -// Print prints warnings to the configured writer. 
-func (w *WarningPrinter) Print(message string) { - if w.opts.Color { - fmt.Fprintf(w.out, "%sWarning:%s %s\n", yellowColor, resetColor, message) - } else { - fmt.Fprintf(w.out, "Warning: %s\n", message) - } -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/printers/yaml.go b/src/vendor/k8s.io/cli-runtime/pkg/printers/yaml.go deleted file mode 100644 index 9c444bdc2..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/printers/yaml.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package printers - -import ( - "fmt" - "io" - "reflect" - "sync/atomic" - - "sigs.k8s.io/yaml" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// YAMLPrinter is an implementation of ResourcePrinter which outputs an object as YAML. -// The input object is assumed to be in the internal version of an API and is converted -// to the given version first. -// If PrintObj() is called multiple times, objects are separated with a '---' separator. -type YAMLPrinter struct { - printCount int64 -} - -// PrintObj prints the data as YAML. -func (p *YAMLPrinter) PrintObj(obj runtime.Object, w io.Writer) error { - // we use reflect.Indirect here in order to obtain the actual value from a pointer. - // we need an actual value in order to retrieve the package path for an object. - // using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers. - if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { - return fmt.Errorf(InternalObjectPrinterErr) - } - - count := atomic.AddInt64(&p.printCount, 1) - if count > 1 { - if _, err := w.Write([]byte("---\n")); err != nil { - return err - } - } - - switch obj := obj.(type) { - case *metav1.WatchEvent: - if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj.Object.Object)).Type().PkgPath()) { - return fmt.Errorf(InternalObjectPrinterErr) - } - data, err := yaml.Marshal(obj) - if err != nil { - return err - } - _, err = w.Write(data) - return err - case *runtime.Unknown: - data, err := yaml.JSONToYAML(obj.Raw) - if err != nil { - return err - } - _, err = w.Write(data) - return err - } - - if obj.GetObjectKind().GroupVersionKind().Empty() { - return fmt.Errorf("missing apiVersion or kind; try GetObjectKind().SetGroupVersionKind() if you know the type") - } - - output, err := yaml.Marshal(obj) - if err != nil { - return err - } - _, err = fmt.Fprint(w, string(output)) - return err -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/resource/builder.go b/src/vendor/k8s.io/cli-runtime/pkg/resource/builder.go deleted file mode 100644 index fac3d0239..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/resource/builder.go +++ /dev/null @@ -1,1247 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "errors" - "fmt" - "io" - "net/url" - "os" - "path/filepath" - "strings" - "sync" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/discovery" - "k8s.io/client-go/rest" - "k8s.io/client-go/restmapper" - "sigs.k8s.io/kustomize/kyaml/filesys" -) - -var FileExtensions = []string{".json", ".yaml", ".yml"} -var InputExtensions = append(FileExtensions, "stdin") - -const defaultHttpGetAttempts = 3 -const pathNotExistError = "the path %q does not exist" - -// Builder provides convenience functions for taking arguments and parameters -// from the command line and converting them to a list of resources to iterate -// over using the Visitor interface. -type Builder struct { - categoryExpanderFn CategoryExpanderFunc - - // mapper is set explicitly by resource builders - mapper *mapper - - // clientConfigFn is a function to produce a client, *if* you need one - clientConfigFn ClientConfigFunc - - restMapperFn RESTMapperFunc - - // objectTyper is statically determinant per-command invocation based on your internal or unstructured choice - // it does not ever need to rely upon discovery. - objectTyper runtime.ObjectTyper - - // codecFactory describes which codecs you want to use - negotiatedSerializer runtime.NegotiatedSerializer - - // local indicates that we cannot make server calls - local bool - - errs []error - - paths []Visitor - stream bool - stdinInUse bool - dir bool - - labelSelector *string - fieldSelector *string - selectAll bool - limitChunks int64 - requestTransforms []RequestTransform - - resources []string - subresource string - - namespace string - allNamespace bool - names []string - - resourceTuples []resourceTuple - - defaultNamespace bool - requireNamespace bool - - flatten bool - latest bool - - requireObject bool - - singleResourceType bool - continueOnError bool - - singleItemImplied bool - - schema ContentValidator - - // fakeClientFn is used for testing - fakeClientFn FakeClientFunc -} - -var missingResourceError = fmt.Errorf(`You must provide one or more resources by argument or filename. -Example resource specifications include: - '-f rsrc.yaml' - '--filename=rsrc.json' - ' ' - ''`) - -var LocalResourceError = errors.New(`error: you must specify resources by --filename when --local is set. -Example resource specifications include: - '-f rsrc.yaml' - '--filename=rsrc.json'`) - -var StdinMultiUseError = errors.New("standard input cannot be used for multiple arguments") - -// TODO: expand this to include other errors. 
-func IsUsageError(err error) bool { - if err == nil { - return false - } - return err == missingResourceError -} - -type FilenameOptions struct { - Filenames []string - Kustomize string - Recursive bool -} - -func (o *FilenameOptions) validate() []error { - var errs []error - if len(o.Filenames) > 0 && len(o.Kustomize) > 0 { - errs = append(errs, fmt.Errorf("only one of -f or -k can be specified")) - } - if len(o.Kustomize) > 0 && o.Recursive { - errs = append(errs, fmt.Errorf("the -k flag can't be used with -f or -R")) - } - return errs -} - -func (o *FilenameOptions) RequireFilenameOrKustomize() error { - if len(o.Filenames) == 0 && len(o.Kustomize) == 0 { - return fmt.Errorf("must specify one of -f and -k") - } - return nil -} - -type resourceTuple struct { - Resource string - Name string -} - -type FakeClientFunc func(version schema.GroupVersion) (RESTClient, error) - -func NewFakeBuilder(fakeClientFn FakeClientFunc, restMapper RESTMapperFunc, categoryExpander CategoryExpanderFunc) *Builder { - ret := newBuilder(nil, restMapper, categoryExpander) - ret.fakeClientFn = fakeClientFn - return ret -} - -// NewBuilder creates a builder that operates on generic objects. At least one of -// internal or unstructured must be specified. -// TODO: Add versioned client (although versioned is still lossy) -// TODO remove internal and unstructured mapper and instead have them set the negotiated serializer for use in the client -func newBuilder(clientConfigFn ClientConfigFunc, restMapper RESTMapperFunc, categoryExpander CategoryExpanderFunc) *Builder { - return &Builder{ - clientConfigFn: clientConfigFn, - restMapperFn: restMapper, - categoryExpanderFn: categoryExpander, - requireObject: true, - } -} - -// noopClientGetter implements RESTClientGetter returning only errors. -// used as a dummy getter in a local-only builder. -type noopClientGetter struct{} - -func (noopClientGetter) ToRESTConfig() (*rest.Config, error) { - return nil, fmt.Errorf("local operation only") -} -func (noopClientGetter) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { - return nil, fmt.Errorf("local operation only") -} -func (noopClientGetter) ToRESTMapper() (meta.RESTMapper, error) { - return nil, fmt.Errorf("local operation only") -} - -// NewLocalBuilder returns a builder that is configured not to create REST clients and avoids asking the server for results. -func NewLocalBuilder() *Builder { - return NewBuilder(noopClientGetter{}).Local() -} - -func NewBuilder(restClientGetter RESTClientGetter) *Builder { - categoryExpanderFn := func() (restmapper.CategoryExpander, error) { - discoveryClient, err := restClientGetter.ToDiscoveryClient() - if err != nil { - return nil, err - } - return restmapper.NewDiscoveryCategoryExpander(discoveryClient), err - } - - return newBuilder( - restClientGetter.ToRESTConfig, - restClientGetter.ToRESTMapper, - (&cachingCategoryExpanderFunc{delegate: categoryExpanderFn}).ToCategoryExpander, - ) -} - -func (b *Builder) Schema(schema ContentValidator) *Builder { - b.schema = schema - return b -} - -func (b *Builder) AddError(err error) *Builder { - if err == nil { - return b - } - b.errs = append(b.errs, err) - return b -} - -// FilenameParam groups input in two categories: URLs and files (files, directories, STDIN) -// If enforceNamespace is false, namespaces in the specs will be allowed to -// override the default namespace. If it is true, namespaces that don't match -// will cause an error. 
-// If ContinueOnError() is set prior to this method, objects on the path that are not -// recognized will be ignored (but logged at V(2)). -func (b *Builder) FilenameParam(enforceNamespace bool, filenameOptions *FilenameOptions) *Builder { - if errs := filenameOptions.validate(); len(errs) > 0 { - b.errs = append(b.errs, errs...) - return b - } - recursive := filenameOptions.Recursive - paths := filenameOptions.Filenames - for _, s := range paths { - switch { - case s == "-": - b.Stdin() - case strings.Index(s, "http://") == 0 || strings.Index(s, "https://") == 0: - url, err := url.Parse(s) - if err != nil { - b.errs = append(b.errs, fmt.Errorf("the URL passed to filename %q is not valid: %v", s, err)) - continue - } - b.URL(defaultHttpGetAttempts, url) - default: - matches, err := expandIfFilePattern(s) - if err != nil { - b.errs = append(b.errs, err) - continue - } - if !recursive && len(matches) == 1 { - b.singleItemImplied = true - } - b.Path(recursive, matches...) - } - } - if filenameOptions.Kustomize != "" { - b.paths = append( - b.paths, - &KustomizeVisitor{ - mapper: b.mapper, - dirPath: filenameOptions.Kustomize, - schema: b.schema, - fSys: filesys.MakeFsOnDisk(), - }) - } - - if enforceNamespace { - b.RequireNamespace() - } - - return b -} - -// Unstructured updates the builder so that it will request and send unstructured -// objects. Unstructured objects preserve all fields sent by the server in a map format -// based on the object's JSON structure which means no data is lost when the client -// reads and then writes an object. Use this mode in preference to Internal unless you -// are working with Go types directly. -func (b *Builder) Unstructured() *Builder { - if b.mapper != nil { - b.errs = append(b.errs, fmt.Errorf("another mapper was already selected, cannot use unstructured types")) - return b - } - b.objectTyper = unstructuredscheme.NewUnstructuredObjectTyper() - b.mapper = &mapper{ - localFn: b.isLocal, - restMapperFn: b.restMapperFn, - clientFn: b.getClient, - decoder: &metadataValidatingDecoder{unstructured.UnstructuredJSONScheme}, - } - - return b -} - -// WithScheme uses the scheme to manage typing, conversion (optional), and decoding. If decodingVersions -// is empty, then you can end up with internal types. You have been warned. -func (b *Builder) WithScheme(scheme *runtime.Scheme, decodingVersions ...schema.GroupVersion) *Builder { - if b.mapper != nil { - b.errs = append(b.errs, fmt.Errorf("another mapper was already selected, cannot use internal types")) - return b - } - b.objectTyper = scheme - codecFactory := serializer.NewCodecFactory(scheme) - negotiatedSerializer := runtime.NegotiatedSerializer(codecFactory) - // if you specified versions, you're specifying a desire for external types, which you don't want to round-trip through - // internal types - if len(decodingVersions) > 0 { - negotiatedSerializer = codecFactory.WithoutConversion() - } - b.negotiatedSerializer = negotiatedSerializer - - b.mapper = &mapper{ - localFn: b.isLocal, - restMapperFn: b.restMapperFn, - clientFn: b.getClient, - decoder: codecFactory.UniversalDecoder(decodingVersions...), - } - - return b -} - -// LocalParam calls Local() if local is true. -func (b *Builder) LocalParam(local bool) *Builder { - if local { - b.Local() - } - return b -} - -// Local will avoid asking the server for results. -func (b *Builder) Local() *Builder { - b.local = true - return b -} - -func (b *Builder) isLocal() bool { - return b.local -} - -// Mapper returns a copy of the current mapper. 
-func (b *Builder) Mapper() *mapper { - mapper := *b.mapper - return &mapper -} - -// URL accepts a number of URLs directly. -func (b *Builder) URL(httpAttemptCount int, urls ...*url.URL) *Builder { - for _, u := range urls { - b.paths = append(b.paths, &URLVisitor{ - URL: u, - StreamVisitor: NewStreamVisitor(nil, b.mapper, u.String(), b.schema), - HttpAttemptCount: httpAttemptCount, - }) - } - return b -} - -// Stdin will read objects from the standard input. If ContinueOnError() is set -// prior to this method being called, objects in the stream that are unrecognized -// will be ignored (but logged at V(2)). If StdinInUse() is set prior to this method -// being called, an error will be recorded as there are multiple entities trying to use -// the single standard input stream. -func (b *Builder) Stdin() *Builder { - b.stream = true - if b.stdinInUse { - b.errs = append(b.errs, StdinMultiUseError) - } - b.stdinInUse = true - b.paths = append(b.paths, FileVisitorForSTDIN(b.mapper, b.schema)) - return b -} - -// StdinInUse will mark standard input as in use by this Builder, and therefore standard -// input should not be used by another entity. If Stdin() is set prior to this method -// being called, an error will be recorded as there are multiple entities trying to use -// the single standard input stream. -func (b *Builder) StdinInUse() *Builder { - if b.stdinInUse { - b.errs = append(b.errs, StdinMultiUseError) - } - b.stdinInUse = true - return b -} - -// Stream will read objects from the provided reader, and if an error occurs will -// include the name string in the error message. If ContinueOnError() is set -// prior to this method being called, objects in the stream that are unrecognized -// will be ignored (but logged at V(2)). -func (b *Builder) Stream(r io.Reader, name string) *Builder { - b.stream = true - b.paths = append(b.paths, NewStreamVisitor(r, b.mapper, name, b.schema)) - return b -} - -// Path accepts a set of paths that may be files, directories (all can containing -// one or more resources). Creates a FileVisitor for each file and then each -// FileVisitor is streaming the content to a StreamVisitor. If ContinueOnError() is set -// prior to this method being called, objects on the path that are unrecognized will be -// ignored (but logged at V(2)). -func (b *Builder) Path(recursive bool, paths ...string) *Builder { - for _, p := range paths { - _, err := os.Stat(p) - if os.IsNotExist(err) { - b.errs = append(b.errs, fmt.Errorf(pathNotExistError, p)) - continue - } - if err != nil { - b.errs = append(b.errs, fmt.Errorf("the path %q cannot be accessed: %v", p, err)) - continue - } - - visitors, err := ExpandPathsToFileVisitors(b.mapper, p, recursive, FileExtensions, b.schema) - if err != nil { - b.errs = append(b.errs, fmt.Errorf("error reading %q: %v", p, err)) - } - if len(visitors) > 1 { - b.dir = true - } - - b.paths = append(b.paths, visitors...) - } - if len(b.paths) == 0 && len(b.errs) == 0 { - b.errs = append(b.errs, fmt.Errorf("error reading %v: recognized file extensions are %v", paths, FileExtensions)) - } - return b -} - -// ResourceTypes is a list of types of resources to operate on, when listing objects on -// the server or retrieving objects that match a selector. -func (b *Builder) ResourceTypes(types ...string) *Builder { - b.resources = append(b.resources, types...) 
- return b -} - -// ResourceNames accepts a default type and one or more names, and creates tuples of -// resources -func (b *Builder) ResourceNames(resource string, names ...string) *Builder { - for _, name := range names { - // See if this input string is of type/name format - tuple, ok, err := splitResourceTypeName(name) - if err != nil { - b.errs = append(b.errs, err) - return b - } - - if ok { - b.resourceTuples = append(b.resourceTuples, tuple) - continue - } - if len(resource) == 0 { - b.errs = append(b.errs, fmt.Errorf("the argument %q must be RESOURCE/NAME", name)) - continue - } - - // Use the given default type to create a resource tuple - b.resourceTuples = append(b.resourceTuples, resourceTuple{Resource: resource, Name: name}) - } - return b -} - -// LabelSelectorParam defines a selector that should be applied to the object types to load. -// This will not affect files loaded from disk or URL. If the parameter is empty it is -// a no-op - to select all resources invoke `b.LabelSelector(labels.Everything.String)`. -func (b *Builder) LabelSelectorParam(s string) *Builder { - selector := strings.TrimSpace(s) - if len(selector) == 0 { - return b - } - if b.selectAll { - b.errs = append(b.errs, fmt.Errorf("found non-empty label selector %q with previously set 'all' parameter. ", s)) - return b - } - return b.LabelSelector(selector) -} - -// LabelSelector accepts a selector directly and will filter the resulting list by that object. -// Use LabelSelectorParam instead for user input. -func (b *Builder) LabelSelector(selector string) *Builder { - if len(selector) == 0 { - return b - } - - b.labelSelector = &selector - return b -} - -// FieldSelectorParam defines a selector that should be applied to the object types to load. -// This will not affect files loaded from disk or URL. If the parameter is empty it is -// a no-op - to select all resources. -func (b *Builder) FieldSelectorParam(s string) *Builder { - s = strings.TrimSpace(s) - if len(s) == 0 { - return b - } - if b.selectAll { - b.errs = append(b.errs, fmt.Errorf("found non-empty field selector %q with previously set 'all' parameter. ", s)) - return b - } - b.fieldSelector = &s - return b -} - -// NamespaceParam accepts the namespace that these resources should be -// considered under from - used by DefaultNamespace() and RequireNamespace() -func (b *Builder) NamespaceParam(namespace string) *Builder { - b.namespace = namespace - return b -} - -// DefaultNamespace instructs the builder to set the namespace value for any object found -// to NamespaceParam() if empty. -func (b *Builder) DefaultNamespace() *Builder { - b.defaultNamespace = true - return b -} - -// AllNamespaces instructs the builder to metav1.NamespaceAll as a namespace to request resources -// across all of the namespace. This overrides the namespace set by NamespaceParam(). -func (b *Builder) AllNamespaces(allNamespace bool) *Builder { - if allNamespace { - b.namespace = metav1.NamespaceAll - } - b.allNamespace = allNamespace - return b -} - -// RequireNamespace instructs the builder to set the namespace value for any object found -// to NamespaceParam() if empty, and if the value on the resource does not match -// NamespaceParam() an error will be returned. -func (b *Builder) RequireNamespace() *Builder { - b.requireNamespace = true - return b -} - -// RequestChunksOf attempts to load responses from the server in batches of size limit -// to avoid long delays loading and transferring very large lists. If unset defaults to -// no chunking. 
-func (b *Builder) RequestChunksOf(chunkSize int64) *Builder { - b.limitChunks = chunkSize - return b -} - -// TransformRequests alters API calls made by clients requested from this builder. Pass -// an empty list to clear modifiers. -func (b *Builder) TransformRequests(opts ...RequestTransform) *Builder { - b.requestTransforms = opts - return b -} - -// Subresource instructs the builder to retrieve the object at the -// subresource path instead of the main resource path. -func (b *Builder) Subresource(subresource string) *Builder { - b.subresource = subresource - return b -} - -// SelectEverythingParam -func (b *Builder) SelectAllParam(selectAll bool) *Builder { - if selectAll && (b.labelSelector != nil || b.fieldSelector != nil) { - b.errs = append(b.errs, fmt.Errorf("setting 'all' parameter but found a non empty selector. ")) - return b - } - b.selectAll = selectAll - return b -} - -// ResourceTypeOrNameArgs indicates that the builder should accept arguments -// of the form `([,,...]| [,,...])`. When one argument is -// received, the types provided will be retrieved from the server (and be comma delimited). -// When two or more arguments are received, they must be a single type and resource name(s). -// The allowEmptySelector permits to select all the resources (via Everything func). -func (b *Builder) ResourceTypeOrNameArgs(allowEmptySelector bool, args ...string) *Builder { - args = normalizeMultipleResourcesArgs(args) - if ok, err := hasCombinedTypeArgs(args); ok { - if err != nil { - b.errs = append(b.errs, err) - return b - } - for _, s := range args { - tuple, ok, err := splitResourceTypeName(s) - if err != nil { - b.errs = append(b.errs, err) - return b - } - if ok { - b.resourceTuples = append(b.resourceTuples, tuple) - } - } - return b - } - if len(args) > 0 { - // Try replacing aliases only in types - args[0] = b.ReplaceAliases(args[0]) - } - switch { - case len(args) > 2: - b.names = append(b.names, args[1:]...) - b.ResourceTypes(SplitResourceArgument(args[0])...) - case len(args) == 2: - b.names = append(b.names, args[1]) - b.ResourceTypes(SplitResourceArgument(args[0])...) - case len(args) == 1: - b.ResourceTypes(SplitResourceArgument(args[0])...) 
- if b.labelSelector == nil && allowEmptySelector { - selector := labels.Everything().String() - b.labelSelector = &selector - } - case len(args) == 0: - default: - b.errs = append(b.errs, fmt.Errorf("arguments must consist of a resource or a resource and name")) - } - return b -} - -// ReplaceAliases accepts an argument and tries to expand any existing -// aliases found in it -func (b *Builder) ReplaceAliases(input string) string { - replaced := []string{} - for _, arg := range strings.Split(input, ",") { - if b.categoryExpanderFn == nil { - continue - } - categoryExpander, err := b.categoryExpanderFn() - if err != nil { - b.AddError(err) - continue - } - - if resources, ok := categoryExpander.Expand(arg); ok { - asStrings := []string{} - for _, resource := range resources { - if len(resource.Group) == 0 { - asStrings = append(asStrings, resource.Resource) - continue - } - asStrings = append(asStrings, resource.Resource+"."+resource.Group) - } - arg = strings.Join(asStrings, ",") - } - replaced = append(replaced, arg) - } - return strings.Join(replaced, ",") -} - -func hasCombinedTypeArgs(args []string) (bool, error) { - hasSlash := 0 - for _, s := range args { - if strings.Contains(s, "/") { - hasSlash++ - } - } - switch { - case hasSlash > 0 && hasSlash == len(args): - return true, nil - case hasSlash > 0 && hasSlash != len(args): - baseCmd := "cmd" - if len(os.Args) > 0 { - baseCmdSlice := strings.Split(os.Args[0], "/") - baseCmd = baseCmdSlice[len(baseCmdSlice)-1] - } - return true, fmt.Errorf("there is no need to specify a resource type as a separate argument when passing arguments in resource/name form (e.g. '%s get resource/' instead of '%s get resource resource/'", baseCmd, baseCmd) - default: - return false, nil - } -} - -// Normalize args convert multiple resources to resource tuples, a,b,c d -// as a transform to a/d b/d c/d -func normalizeMultipleResourcesArgs(args []string) []string { - if len(args) >= 2 { - resources := []string{} - resources = append(resources, SplitResourceArgument(args[0])...) - if len(resources) > 1 { - names := []string{} - names = append(names, args[1:]...) - newArgs := []string{} - for _, resource := range resources { - for _, name := range names { - newArgs = append(newArgs, strings.Join([]string{resource, name}, "/")) - } - } - return newArgs - } - } - return args -} - -// splitResourceTypeName handles type/name resource formats and returns a resource tuple -// (empty or not), whether it successfully found one, and an error -func splitResourceTypeName(s string) (resourceTuple, bool, error) { - if !strings.Contains(s, "/") { - return resourceTuple{}, false, nil - } - seg := strings.Split(s, "/") - if len(seg) != 2 { - return resourceTuple{}, false, fmt.Errorf("arguments in resource/name form may not have more than one slash") - } - resource, name := seg[0], seg[1] - if len(resource) == 0 || len(name) == 0 || len(SplitResourceArgument(resource)) != 1 { - return resourceTuple{}, false, fmt.Errorf("arguments in resource/name form must have a single resource and name") - } - return resourceTuple{Resource: resource, Name: name}, true, nil -} - -// Flatten will convert any objects with a field named "Items" that is an array of runtime.Object -// compatible types into individual entries and give them their own items. The original object -// is not passed to any visitors. -func (b *Builder) Flatten() *Builder { - b.flatten = true - return b -} - -// Latest will fetch the latest copy of any objects loaded from URLs or files from the server. 
-func (b *Builder) Latest() *Builder { - b.latest = true - return b -} - -// RequireObject ensures that resulting infos have an object set. If false, resulting info may not have an object set. -func (b *Builder) RequireObject(require bool) *Builder { - b.requireObject = require - return b -} - -// ContinueOnError will attempt to load and visit as many objects as possible, even if some visits -// return errors or some objects cannot be loaded. The default behavior is to terminate after -// the first error is returned from a VisitorFunc. -func (b *Builder) ContinueOnError() *Builder { - b.continueOnError = true - return b -} - -// SingleResourceType will cause the builder to error if the user specifies more than a single type -// of resource. -func (b *Builder) SingleResourceType() *Builder { - b.singleResourceType = true - return b -} - -// mappingFor returns the RESTMapping for the Kind given, or the Kind referenced by the resource. -// Prefers a fully specified GroupVersionResource match. If one is not found, we match on a fully -// specified GroupVersionKind, or fallback to a match on GroupKind. -func (b *Builder) mappingFor(resourceOrKindArg string) (*meta.RESTMapping, error) { - fullySpecifiedGVR, groupResource := schema.ParseResourceArg(resourceOrKindArg) - gvk := schema.GroupVersionKind{} - restMapper, err := b.restMapperFn() - if err != nil { - return nil, err - } - - if fullySpecifiedGVR != nil { - gvk, _ = restMapper.KindFor(*fullySpecifiedGVR) - } - if gvk.Empty() { - gvk, _ = restMapper.KindFor(groupResource.WithVersion("")) - } - if !gvk.Empty() { - return restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) - } - - fullySpecifiedGVK, groupKind := schema.ParseKindArg(resourceOrKindArg) - if fullySpecifiedGVK == nil { - gvk := groupKind.WithVersion("") - fullySpecifiedGVK = &gvk - } - - if !fullySpecifiedGVK.Empty() { - if mapping, err := restMapper.RESTMapping(fullySpecifiedGVK.GroupKind(), fullySpecifiedGVK.Version); err == nil { - return mapping, nil - } - } - - mapping, err := restMapper.RESTMapping(groupKind, gvk.Version) - if err != nil { - // if we error out here, it is because we could not match a resource or a kind - // for the given argument. To maintain consistency with previous behavior, - // announce that a resource type could not be found. 
- // if the error is _not_ a *meta.NoKindMatchError, then we had trouble doing discovery, - // so we should return the original error since it may help a user diagnose what is actually wrong - if meta.IsNoMatchError(err) { - return nil, fmt.Errorf("the server doesn't have a resource type %q", groupResource.Resource) - } - return nil, err - } - - return mapping, nil -} - -func (b *Builder) resourceMappings() ([]*meta.RESTMapping, error) { - if len(b.resources) > 1 && b.singleResourceType { - return nil, fmt.Errorf("you may only specify a single resource type") - } - mappings := []*meta.RESTMapping{} - seen := map[schema.GroupVersionKind]bool{} - for _, r := range b.resources { - mapping, err := b.mappingFor(r) - if err != nil { - return nil, err - } - // This ensures the mappings for resources(shortcuts, plural) unique - if seen[mapping.GroupVersionKind] { - continue - } - seen[mapping.GroupVersionKind] = true - - mappings = append(mappings, mapping) - } - return mappings, nil -} - -func (b *Builder) resourceTupleMappings() (map[string]*meta.RESTMapping, error) { - mappings := make(map[string]*meta.RESTMapping) - canonical := make(map[schema.GroupVersionResource]struct{}) - for _, r := range b.resourceTuples { - if _, ok := mappings[r.Resource]; ok { - continue - } - mapping, err := b.mappingFor(r.Resource) - if err != nil { - return nil, err - } - - mappings[r.Resource] = mapping - canonical[mapping.Resource] = struct{}{} - } - if len(canonical) > 1 && b.singleResourceType { - return nil, fmt.Errorf("you may only specify a single resource type") - } - return mappings, nil -} - -func (b *Builder) visitorResult() *Result { - if len(b.errs) > 0 { - return &Result{err: utilerrors.NewAggregate(b.errs)} - } - - if b.selectAll { - selector := labels.Everything().String() - b.labelSelector = &selector - } - - // visit items specified by paths - if len(b.paths) != 0 { - return b.visitByPaths() - } - - // visit selectors - if b.labelSelector != nil || b.fieldSelector != nil { - return b.visitBySelector() - } - - // visit items specified by resource and name - if len(b.resourceTuples) != 0 { - return b.visitByResource() - } - - // visit items specified by name - if len(b.names) != 0 { - return b.visitByName() - } - - if len(b.resources) != 0 { - for _, r := range b.resources { - _, err := b.mappingFor(r) - if err != nil { - return &Result{err: err} - } - } - return &Result{err: fmt.Errorf("resource(s) were provided, but no name was specified")} - } - return &Result{err: missingResourceError} -} - -func (b *Builder) visitBySelector() *Result { - result := &Result{ - targetsSingleItems: false, - } - - if len(b.names) != 0 { - return result.withError(fmt.Errorf("name cannot be provided when a selector is specified")) - } - if len(b.resourceTuples) != 0 { - return result.withError(fmt.Errorf("selectors and the all flag cannot be used when passing resource/name arguments")) - } - if len(b.resources) == 0 { - return result.withError(fmt.Errorf("at least one resource must be specified to use a selector")) - } - if len(b.subresource) != 0 { - return result.withError(fmt.Errorf("subresource cannot be used when bulk resources are specified")) - } - - mappings, err := b.resourceMappings() - if err != nil { - result.err = err - return result - } - - var labelSelector, fieldSelector string - if b.labelSelector != nil { - labelSelector = *b.labelSelector - } - if b.fieldSelector != nil { - fieldSelector = *b.fieldSelector - } - - visitors := []Visitor{} - for _, mapping := range mappings { - client, err := 
b.getClient(mapping.GroupVersionKind.GroupVersion()) - if err != nil { - result.err = err - return result - } - selectorNamespace := b.namespace - if mapping.Scope.Name() != meta.RESTScopeNameNamespace { - selectorNamespace = "" - } - visitors = append(visitors, NewSelector(client, mapping, selectorNamespace, labelSelector, fieldSelector, b.limitChunks)) - } - if b.continueOnError { - result.visitor = EagerVisitorList(visitors) - } else { - result.visitor = VisitorList(visitors) - } - result.sources = visitors - return result -} - -func (b *Builder) getClient(gv schema.GroupVersion) (RESTClient, error) { - var ( - client RESTClient - err error - ) - - switch { - case b.fakeClientFn != nil: - client, err = b.fakeClientFn(gv) - case b.negotiatedSerializer != nil: - client, err = b.clientConfigFn.withStdinUnavailable(b.stdinInUse).clientForGroupVersion(gv, b.negotiatedSerializer) - default: - client, err = b.clientConfigFn.withStdinUnavailable(b.stdinInUse).unstructuredClientForGroupVersion(gv) - } - - if err != nil { - return nil, err - } - - return NewClientWithOptions(client, b.requestTransforms...), nil -} - -func (b *Builder) visitByResource() *Result { - // if b.singleItemImplied is false, this could be by default, so double-check length - // of resourceTuples to determine if in fact it is singleItemImplied or not - isSingleItemImplied := b.singleItemImplied - if !isSingleItemImplied { - isSingleItemImplied = len(b.resourceTuples) == 1 - } - - result := &Result{ - singleItemImplied: isSingleItemImplied, - targetsSingleItems: true, - } - - if len(b.resources) != 0 { - return result.withError(fmt.Errorf("you may not specify individual resources and bulk resources in the same call")) - } - - // retrieve one client for each resource - mappings, err := b.resourceTupleMappings() - if err != nil { - result.err = err - return result - } - clients := make(map[string]RESTClient) - for _, mapping := range mappings { - s := fmt.Sprintf("%s/%s", mapping.GroupVersionKind.GroupVersion().String(), mapping.Resource.Resource) - if _, ok := clients[s]; ok { - continue - } - client, err := b.getClient(mapping.GroupVersionKind.GroupVersion()) - if err != nil { - result.err = err - return result - } - clients[s] = client - } - - items := []Visitor{} - for _, tuple := range b.resourceTuples { - mapping, ok := mappings[tuple.Resource] - if !ok { - return result.withError(fmt.Errorf("resource %q is not recognized: %v", tuple.Resource, mappings)) - } - s := fmt.Sprintf("%s/%s", mapping.GroupVersionKind.GroupVersion().String(), mapping.Resource.Resource) - client, ok := clients[s] - if !ok { - return result.withError(fmt.Errorf("could not find a client for resource %q", tuple.Resource)) - } - - selectorNamespace := b.namespace - if mapping.Scope.Name() != meta.RESTScopeNameNamespace { - selectorNamespace = "" - } else { - if len(b.namespace) == 0 { - errMsg := "namespace may not be empty when retrieving a resource by name" - if b.allNamespace { - errMsg = "a resource cannot be retrieved by name across all namespaces" - } - return result.withError(fmt.Errorf(errMsg)) - } - } - - info := &Info{ - Client: client, - Mapping: mapping, - Namespace: selectorNamespace, - Name: tuple.Name, - Subresource: b.subresource, - } - items = append(items, info) - } - - var visitors Visitor - if b.continueOnError { - visitors = EagerVisitorList(items) - } else { - visitors = VisitorList(items) - } - result.visitor = visitors - result.sources = items - return result -} - -func (b *Builder) visitByName() *Result { - result := 
&Result{ - singleItemImplied: len(b.names) == 1, - targetsSingleItems: true, - } - - if len(b.paths) != 0 { - return result.withError(fmt.Errorf("when paths, URLs, or stdin is provided as input, you may not specify a resource by arguments as well")) - } - if len(b.resources) == 0 { - return result.withError(fmt.Errorf("you must provide a resource and a resource name together")) - } - if len(b.resources) > 1 { - return result.withError(fmt.Errorf("you must specify only one resource")) - } - - mappings, err := b.resourceMappings() - if err != nil { - result.err = err - return result - } - mapping := mappings[0] - - client, err := b.getClient(mapping.GroupVersionKind.GroupVersion()) - if err != nil { - result.err = err - return result - } - - selectorNamespace := b.namespace - if mapping.Scope.Name() != meta.RESTScopeNameNamespace { - selectorNamespace = "" - } else { - if len(b.namespace) == 0 { - errMsg := "namespace may not be empty when retrieving a resource by name" - if b.allNamespace { - errMsg = "a resource cannot be retrieved by name across all namespaces" - } - return result.withError(fmt.Errorf(errMsg)) - } - } - - visitors := []Visitor{} - for _, name := range b.names { - info := &Info{ - Client: client, - Mapping: mapping, - Namespace: selectorNamespace, - Name: name, - Subresource: b.subresource, - } - visitors = append(visitors, info) - } - result.visitor = VisitorList(visitors) - result.sources = visitors - return result -} - -func (b *Builder) visitByPaths() *Result { - result := &Result{ - singleItemImplied: !b.dir && !b.stream && len(b.paths) == 1, - targetsSingleItems: true, - } - - if len(b.resources) != 0 { - return result.withError(fmt.Errorf("when paths, URLs, or stdin is provided as input, you may not specify resource arguments as well")) - } - if len(b.names) != 0 { - return result.withError(fmt.Errorf("name cannot be provided when a path is specified")) - } - if len(b.resourceTuples) != 0 { - return result.withError(fmt.Errorf("resource/name arguments cannot be provided when a path is specified")) - } - - var visitors Visitor - if b.continueOnError { - visitors = EagerVisitorList(b.paths) - } else { - visitors = VisitorList(b.paths) - } - - if b.flatten { - visitors = NewFlattenListVisitor(visitors, b.objectTyper, b.mapper) - } - - // only items from disk can be refetched - if b.latest { - // must set namespace prior to fetching - if b.defaultNamespace { - visitors = NewDecoratedVisitor(visitors, SetNamespace(b.namespace)) - } - visitors = NewDecoratedVisitor(visitors, RetrieveLatest) - } - if b.labelSelector != nil { - selector, err := labels.Parse(*b.labelSelector) - if err != nil { - return result.withError(fmt.Errorf("the provided selector %q is not valid: %v", *b.labelSelector, err)) - } - visitors = NewFilteredVisitor(visitors, FilterByLabelSelector(selector)) - } - result.visitor = visitors - result.sources = b.paths - return result -} - -// Do returns a Result object with a Visitor for the resources identified by the Builder. -// The visitor will respect the error behavior specified by ContinueOnError. Note that stream -// inputs are consumed by the first execution - use Infos() or Object() on the Result to capture a list -// for further iteration. 
-func (b *Builder) Do() *Result { - r := b.visitorResult() - r.mapper = b.Mapper() - if r.err != nil { - return r - } - if b.flatten { - r.visitor = NewFlattenListVisitor(r.visitor, b.objectTyper, b.mapper) - } - helpers := []VisitorFunc{} - if b.defaultNamespace { - helpers = append(helpers, SetNamespace(b.namespace)) - } - if b.requireNamespace { - helpers = append(helpers, RequireNamespace(b.namespace)) - } - helpers = append(helpers, FilterNamespace) - if b.requireObject { - helpers = append(helpers, RetrieveLazy) - } - if b.continueOnError { - r.visitor = ContinueOnErrorVisitor{Visitor: r.visitor} - } - r.visitor = NewDecoratedVisitor(r.visitor, helpers...) - return r -} - -// SplitResourceArgument splits the argument with commas and returns unique -// strings in the original order. -func SplitResourceArgument(arg string) []string { - out := []string{} - set := sets.NewString() - for _, s := range strings.Split(arg, ",") { - if set.Has(s) { - continue - } - set.Insert(s) - out = append(out, s) - } - return out -} - -// HasNames returns true if the provided args contain resource names -func HasNames(args []string) (bool, error) { - args = normalizeMultipleResourcesArgs(args) - hasCombinedTypes, err := hasCombinedTypeArgs(args) - if err != nil { - return false, err - } - return hasCombinedTypes || len(args) > 1, nil -} - -// expandIfFilePattern returns all the filenames that match the input pattern -// or the filename if it is a specific filename and not a pattern. -// If the input is a pattern and it yields no result it will result in an error. -func expandIfFilePattern(pattern string) ([]string, error) { - if _, err := os.Stat(pattern); os.IsNotExist(err) { - matches, err := filepath.Glob(pattern) - if err == nil && len(matches) == 0 { - return nil, fmt.Errorf(pathNotExistError, pattern) - } - if err == filepath.ErrBadPattern { - return nil, fmt.Errorf("pattern %q is not valid: %v", pattern, err) - } - return matches, err - } - return []string{pattern}, nil -} - -type cachingCategoryExpanderFunc struct { - delegate CategoryExpanderFunc - - lock sync.Mutex - cached restmapper.CategoryExpander -} - -func (c *cachingCategoryExpanderFunc) ToCategoryExpander() (restmapper.CategoryExpander, error) { - c.lock.Lock() - defer c.lock.Unlock() - if c.cached != nil { - return c.cached, nil - } - - ret, err := c.delegate() - if err != nil { - return nil, err - } - c.cached = ret - return c.cached, nil -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/resource/client.go b/src/vendor/k8s.io/cli-runtime/pkg/resource/client.go deleted file mode 100644 index cd52c3043..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/resource/client.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/rest" -) - -// TODO require negotiatedSerializer. 
leaving it optional lets us plumb current behavior and deal with the difference after major plumbing is complete -func (clientConfigFn ClientConfigFunc) clientForGroupVersion(gv schema.GroupVersion, negotiatedSerializer runtime.NegotiatedSerializer) (RESTClient, error) { - cfg, err := clientConfigFn() - if err != nil { - return nil, err - } - if negotiatedSerializer != nil { - cfg.ContentConfig.NegotiatedSerializer = negotiatedSerializer - } - cfg.GroupVersion = &gv - if len(gv.Group) == 0 { - cfg.APIPath = "/api" - } else { - cfg.APIPath = "/apis" - } - - return rest.RESTClientFor(cfg) -} - -func (clientConfigFn ClientConfigFunc) unstructuredClientForGroupVersion(gv schema.GroupVersion) (RESTClient, error) { - cfg, err := clientConfigFn() - if err != nil { - return nil, err - } - cfg.ContentConfig = UnstructuredPlusDefaultContentConfig() - cfg.GroupVersion = &gv - if len(gv.Group) == 0 { - cfg.APIPath = "/api" - } else { - cfg.APIPath = "/apis" - } - - return rest.RESTClientFor(cfg) -} - -func (clientConfigFn ClientConfigFunc) withStdinUnavailable(stdinUnavailable bool) ClientConfigFunc { - return func() (*rest.Config, error) { - cfg, err := clientConfigFn() - if stdinUnavailable && cfg != nil && cfg.ExecProvider != nil { - cfg.ExecProvider.StdinUnavailable = stdinUnavailable - cfg.ExecProvider.StdinUnavailableMessage = "used by stdin resource manifest reader" - } - return cfg, err - } -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/resource/crd_finder.go b/src/vendor/k8s.io/cli-runtime/pkg/resource/crd_finder.go deleted file mode 100644 index 4694f7791..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/resource/crd_finder.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "context" - "fmt" - "reflect" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/dynamic" -) - -// CRDGetter is a function that can download the list of GVK for all -// CRDs. -type CRDGetter func() ([]schema.GroupKind, error) - -func CRDFromDynamic(client dynamic.Interface) CRDGetter { - return func() ([]schema.GroupKind, error) { - list, err := client.Resource(schema.GroupVersionResource{ - Group: "apiextensions.k8s.io", - Version: "v1", - Resource: "customresourcedefinitions", - }).List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return nil, fmt.Errorf("failed to list CRDs: %v", err) - } - if list == nil { - return nil, nil - } - - gks := []schema.GroupKind{} - - // We need to parse the list to get the gvk, I guess that's fine. 
- for _, crd := range (*list).Items { - // Look for group, version, and kind - group, _, _ := unstructured.NestedString(crd.Object, "spec", "group") - kind, _, _ := unstructured.NestedString(crd.Object, "spec", "names", "kind") - - gks = append(gks, schema.GroupKind{ - Group: group, - Kind: kind, - }) - } - - return gks, nil - } -} - -// CRDFinder keeps a cache of known CRDs and finds a given GVK in the -// list. -type CRDFinder interface { - HasCRD(gvk schema.GroupKind) (bool, error) -} - -func NewCRDFinder(getter CRDGetter) CRDFinder { - return &crdFinder{ - getter: getter, - } -} - -type crdFinder struct { - getter CRDGetter - cache *[]schema.GroupKind -} - -func (f *crdFinder) cacheCRDs() error { - if f.cache != nil { - return nil - } - - list, err := f.getter() - if err != nil { - return err - } - f.cache = &list - return nil -} - -func (f *crdFinder) findCRD(gvk schema.GroupKind) bool { - for _, crd := range *f.cache { - if reflect.DeepEqual(gvk, crd) { - return true - } - } - return false -} - -func (f *crdFinder) HasCRD(gvk schema.GroupKind) (bool, error) { - if err := f.cacheCRDs(); err != nil { - return false, err - } - return f.findCRD(gvk), nil -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/resource/doc.go b/src/vendor/k8s.io/cli-runtime/pkg/resource/doc.go deleted file mode 100644 index f83fdcbf8..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/resource/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package resource assists clients in dealing with RESTful objects that match the -// Kubernetes API conventions. The Helper object provides simple CRUD operations -// on resources. The Visitor interface makes it easy to deal with multiple resources -// in bulk for retrieval and operation. The Builder object simplifies converting -// standard command line arguments and parameters into a Visitor that can iterate -// over all of the identified resources, whether on the server or on the local -// filesystem. -package resource // import "k8s.io/cli-runtime/pkg/resource" diff --git a/src/vendor/k8s.io/cli-runtime/pkg/resource/fake.go b/src/vendor/k8s.io/cli-runtime/pkg/resource/fake.go deleted file mode 100644 index 276c343e2..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/resource/fake.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package resource - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/restmapper" -) - -// FakeCategoryExpander is for testing only -var FakeCategoryExpander restmapper.CategoryExpander = restmapper.SimpleCategoryExpander{ - Expansions: map[string][]schema.GroupResource{ - "all": { - {Group: "", Resource: "pods"}, - {Group: "", Resource: "replicationcontrollers"}, - {Group: "", Resource: "services"}, - {Group: "apps", Resource: "statefulsets"}, - {Group: "autoscaling", Resource: "horizontalpodautoscalers"}, - {Group: "batch", Resource: "jobs"}, - {Group: "batch", Resource: "cronjobs"}, - {Group: "extensions", Resource: "daemonsets"}, - {Group: "extensions", Resource: "deployments"}, - {Group: "extensions", Resource: "replicasets"}, - }, - }, -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/resource/helper.go b/src/vendor/k8s.io/cli-runtime/pkg/resource/helper.go deleted file mode 100644 index aa400ae0e..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/resource/helper.go +++ /dev/null @@ -1,321 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "context" - "fmt" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/watch" -) - -var metadataAccessor = meta.NewAccessor() - -// Helper provides methods for retrieving or mutating a RESTful -// resource. -type Helper struct { - // The name of this resource as the server would recognize it - Resource string - // The name of the subresource as the server would recognize it - Subresource string - // A RESTClient capable of mutating this resource. - RESTClient RESTClient - // True if the resource type is scoped to namespaces - NamespaceScoped bool - // If true, then use server-side dry-run to not persist changes to storage - // for verbs and resources that support server-side dry-run. - // - // Note this should only be used against an apiserver with dry-run enabled, - // and on resources that support dry-run. If the apiserver or the resource - // does not support dry-run, then the change will be persisted to storage. - ServerDryRun bool - - // FieldManager is the name associated with the actor or entity that is making - // changes. - FieldManager string - - // FieldValidation is the directive used to indicate how the server should perform - // field validation (Ignore, Warn, or Strict) - FieldValidation string -} - -// NewHelper creates a Helper from a ResourceMapping -func NewHelper(client RESTClient, mapping *meta.RESTMapping) *Helper { - return &Helper{ - Resource: mapping.Resource.Resource, - RESTClient: client, - NamespaceScoped: mapping.Scope.Name() == meta.RESTScopeNameNamespace, - } -} - -// DryRun, if true, will use server-side dry-run to not persist changes to storage. -// Otherwise, changes will be persisted to storage. 
-func (m *Helper) DryRun(dryRun bool) *Helper { - m.ServerDryRun = dryRun - return m -} - -// WithFieldManager sets the field manager option to indicate the actor or entity -// that is making changes in a create or update operation. -func (m *Helper) WithFieldManager(fieldManager string) *Helper { - m.FieldManager = fieldManager - return m -} - -// WithFieldValidation sets the field validation option to indicate -// how the server should perform field validation (Ignore, Warn, or Strict). -func (m *Helper) WithFieldValidation(validationDirective string) *Helper { - m.FieldValidation = validationDirective - return m -} - -// Subresource sets the helper to access (/[ns//]/) -func (m *Helper) WithSubresource(subresource string) *Helper { - m.Subresource = subresource - return m -} - -func (m *Helper) Get(namespace, name string) (runtime.Object, error) { - req := m.RESTClient.Get(). - NamespaceIfScoped(namespace, m.NamespaceScoped). - Resource(m.Resource). - Name(name). - SubResource(m.Subresource) - return req.Do(context.TODO()).Get() -} - -func (m *Helper) List(namespace, apiVersion string, options *metav1.ListOptions) (runtime.Object, error) { - req := m.RESTClient.Get(). - NamespaceIfScoped(namespace, m.NamespaceScoped). - Resource(m.Resource). - VersionedParams(options, metav1.ParameterCodec) - return req.Do(context.TODO()).Get() -} - -// FollowContinue handles the continue parameter returned by the API server when using list -// chunking. To take advantage of this, the initial ListOptions provided by the consumer -// should include a non-zero Limit parameter. -func FollowContinue(initialOpts *metav1.ListOptions, - listFunc func(metav1.ListOptions) (runtime.Object, error)) error { - opts := initialOpts - for { - list, err := listFunc(*opts) - if err != nil { - return err - } - nextContinueToken, _ := metadataAccessor.Continue(list) - if len(nextContinueToken) == 0 { - return nil - } - opts.Continue = nextContinueToken - } -} - -// EnhanceListError augments errors typically returned by List operations with additional context, -// making sure to retain the StatusError type when applicable. -func EnhanceListError(err error, opts metav1.ListOptions, subj string) error { - if apierrors.IsResourceExpired(err) { - return err - } - if apierrors.IsBadRequest(err) || apierrors.IsNotFound(err) { - if se, ok := err.(*apierrors.StatusError); ok { - // modify the message without hiding this is an API error - if len(opts.LabelSelector) == 0 && len(opts.FieldSelector) == 0 { - se.ErrStatus.Message = fmt.Sprintf("Unable to list %q: %v", subj, - se.ErrStatus.Message) - } else { - se.ErrStatus.Message = fmt.Sprintf( - "Unable to find %q that match label selector %q, field selector %q: %v", subj, - opts.LabelSelector, - opts.FieldSelector, se.ErrStatus.Message) - } - return se - } - if len(opts.LabelSelector) == 0 && len(opts.FieldSelector) == 0 { - return fmt.Errorf("Unable to list %q: %v", subj, err) - } - return fmt.Errorf("Unable to find %q that match label selector %q, field selector %q: %v", - subj, opts.LabelSelector, opts.FieldSelector, err) - } - return err -} - -func (m *Helper) Watch(namespace, apiVersion string, options *metav1.ListOptions) (watch.Interface, error) { - options.Watch = true - return m.RESTClient.Get(). - NamespaceIfScoped(namespace, m.NamespaceScoped). - Resource(m.Resource). - VersionedParams(options, metav1.ParameterCodec). - Watch(context.TODO()) -} - -func (m *Helper) WatchSingle(namespace, name, resourceVersion string) (watch.Interface, error) { - return m.RESTClient.Get(). 
- NamespaceIfScoped(namespace, m.NamespaceScoped). - Resource(m.Resource). - VersionedParams(&metav1.ListOptions{ - ResourceVersion: resourceVersion, - Watch: true, - FieldSelector: fields.OneTermEqualSelector("metadata.name", name).String(), - }, metav1.ParameterCodec). - Watch(context.TODO()) -} - -func (m *Helper) Delete(namespace, name string) (runtime.Object, error) { - return m.DeleteWithOptions(namespace, name, nil) -} - -func (m *Helper) DeleteWithOptions(namespace, name string, options *metav1.DeleteOptions) (runtime.Object, error) { - if options == nil { - options = &metav1.DeleteOptions{} - } - if m.ServerDryRun { - options.DryRun = []string{metav1.DryRunAll} - } - - return m.RESTClient.Delete(). - NamespaceIfScoped(namespace, m.NamespaceScoped). - Resource(m.Resource). - Name(name). - Body(options). - Do(context.TODO()). - Get() -} - -func (m *Helper) Create(namespace string, modify bool, obj runtime.Object) (runtime.Object, error) { - return m.CreateWithOptions(namespace, modify, obj, nil) -} - -func (m *Helper) CreateWithOptions(namespace string, modify bool, obj runtime.Object, options *metav1.CreateOptions) (runtime.Object, error) { - if options == nil { - options = &metav1.CreateOptions{} - } - if m.ServerDryRun { - options.DryRun = []string{metav1.DryRunAll} - } - if m.FieldManager != "" { - options.FieldManager = m.FieldManager - } - if m.FieldValidation != "" { - options.FieldValidation = m.FieldValidation - } - if modify { - // Attempt to version the object based on client logic. - version, err := metadataAccessor.ResourceVersion(obj) - if err != nil { - // We don't know how to clear the version on this object, so send it to the server as is - return m.createResource(m.RESTClient, m.Resource, namespace, obj, options) - } - if version != "" { - if err := metadataAccessor.SetResourceVersion(obj, ""); err != nil { - return nil, err - } - } - } - - return m.createResource(m.RESTClient, m.Resource, namespace, obj, options) -} - -func (m *Helper) createResource(c RESTClient, resource, namespace string, obj runtime.Object, options *metav1.CreateOptions) (runtime.Object, error) { - return c.Post(). - NamespaceIfScoped(namespace, m.NamespaceScoped). - Resource(resource). - VersionedParams(options, metav1.ParameterCodec). - Body(obj). - Do(context.TODO()). - Get() -} -func (m *Helper) Patch(namespace, name string, pt types.PatchType, data []byte, options *metav1.PatchOptions) (runtime.Object, error) { - if options == nil { - options = &metav1.PatchOptions{} - } - if m.ServerDryRun { - options.DryRun = []string{metav1.DryRunAll} - } - if m.FieldManager != "" { - options.FieldManager = m.FieldManager - } - if m.FieldValidation != "" { - options.FieldValidation = m.FieldValidation - } - return m.RESTClient.Patch(pt). - NamespaceIfScoped(namespace, m.NamespaceScoped). - Resource(m.Resource). - Name(name). - SubResource(m.Subresource). - VersionedParams(options, metav1.ParameterCodec). - Body(data). - Do(context.TODO()). - Get() -} - -func (m *Helper) Replace(namespace, name string, overwrite bool, obj runtime.Object) (runtime.Object, error) { - c := m.RESTClient - var options = &metav1.UpdateOptions{} - if m.ServerDryRun { - options.DryRun = []string{metav1.DryRunAll} - } - if m.FieldManager != "" { - options.FieldManager = m.FieldManager - } - if m.FieldValidation != "" { - options.FieldValidation = m.FieldValidation - } - - // Attempt to version the object based on client logic. 
- version, err := metadataAccessor.ResourceVersion(obj) - if err != nil { - // We don't know how to version this object, so send it to the server as is - return m.replaceResource(c, m.Resource, namespace, name, obj, options) - } - if version == "" && overwrite { - // Retrieve the current version of the object to overwrite the server object - serverObj, err := c.Get().NamespaceIfScoped(namespace, m.NamespaceScoped).Resource(m.Resource).Name(name).SubResource(m.Subresource).Do(context.TODO()).Get() - if err != nil { - // The object does not exist, but we want it to be created - return m.replaceResource(c, m.Resource, namespace, name, obj, options) - } - serverVersion, err := metadataAccessor.ResourceVersion(serverObj) - if err != nil { - return nil, err - } - if err := metadataAccessor.SetResourceVersion(obj, serverVersion); err != nil { - return nil, err - } - } - - return m.replaceResource(c, m.Resource, namespace, name, obj, options) -} - -func (m *Helper) replaceResource(c RESTClient, resource, namespace, name string, obj runtime.Object, options *metav1.UpdateOptions) (runtime.Object, error) { - return c.Put(). - NamespaceIfScoped(namespace, m.NamespaceScoped). - Resource(resource). - Name(name). - SubResource(m.Subresource). - VersionedParams(options, metav1.ParameterCodec). - Body(obj). - Do(context.TODO()). - Get() -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/resource/interfaces.go b/src/vendor/k8s.io/cli-runtime/pkg/resource/interfaces.go deleted file mode 100644 index 29d7b34ab..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/resource/interfaces.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/discovery" - "k8s.io/client-go/rest" - "k8s.io/client-go/restmapper" -) - -type RESTClientGetter interface { - ToRESTConfig() (*rest.Config, error) - ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) - ToRESTMapper() (meta.RESTMapper, error) -} - -type ClientConfigFunc func() (*rest.Config, error) -type RESTMapperFunc func() (meta.RESTMapper, error) -type CategoryExpanderFunc func() (restmapper.CategoryExpander, error) - -// RESTClient is a client helper for dealing with RESTful resources -// in a generic way. -type RESTClient interface { - Get() *rest.Request - Post() *rest.Request - Patch(types.PatchType) *rest.Request - Delete() *rest.Request - Put() *rest.Request -} - -// RequestTransform is a function that is given a chance to modify the outgoing request. -type RequestTransform func(*rest.Request) - -// NewClientWithOptions wraps the provided RESTClient and invokes each transform on each -// newly created request. 
-func NewClientWithOptions(c RESTClient, transforms ...RequestTransform) RESTClient { - if len(transforms) == 0 { - return c - } - return &clientOptions{c: c, transforms: transforms} -} - -type clientOptions struct { - c RESTClient - transforms []RequestTransform -} - -func (c *clientOptions) modify(req *rest.Request) *rest.Request { - for _, transform := range c.transforms { - transform(req) - } - return req -} - -func (c *clientOptions) Get() *rest.Request { - return c.modify(c.c.Get()) -} - -func (c *clientOptions) Post() *rest.Request { - return c.modify(c.c.Post()) -} -func (c *clientOptions) Patch(t types.PatchType) *rest.Request { - return c.modify(c.c.Patch(t)) -} -func (c *clientOptions) Delete() *rest.Request { - return c.modify(c.c.Delete()) -} -func (c *clientOptions) Put() *rest.Request { - return c.modify(c.c.Put()) -} - -// ContentValidator is an interface that knows how to validate an API object serialized to a byte array. -type ContentValidator interface { - ValidateBytes(data []byte) error -} - -// Visitor lets clients walk a list of resources. -type Visitor interface { - Visit(VisitorFunc) error -} - -// VisitorFunc implements the Visitor interface for a matching function. -// If there was a problem walking a list of resources, the incoming error -// will describe the problem and the function can decide how to handle that error. -// A nil returned indicates to accept an error to continue loops even when errors happen. -// This is useful for ignoring certain kinds of errors or aggregating errors in some way. -type VisitorFunc func(*Info, error) error diff --git a/src/vendor/k8s.io/cli-runtime/pkg/resource/kustomizevisitor.go b/src/vendor/k8s.io/cli-runtime/pkg/resource/kustomizevisitor.go deleted file mode 100644 index 164891b88..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/resource/kustomizevisitor.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "bytes" - - "sigs.k8s.io/kustomize/api/krusty" - "sigs.k8s.io/kustomize/kyaml/filesys" -) - -// KustomizeVisitor handles kustomization.yaml files. -type KustomizeVisitor struct { - mapper *mapper - schema ContentValidator - // Directory expected to contain a kustomization file. - dirPath string - // File system containing dirPath. - fSys filesys.FileSystem - // Holds result of kustomize build, retained for tests. - yml []byte -} - -// Visit passes the result of a kustomize build to a StreamVisitor. 
-func (v *KustomizeVisitor) Visit(fn VisitorFunc) error { - kOpts := krusty.MakeDefaultOptions() - kOpts.DoLegacyResourceSort = true - k := krusty.MakeKustomizer(kOpts) - m, err := k.Run(v.fSys, v.dirPath) - if err != nil { - return err - } - v.yml, err = m.AsYaml() - if err != nil { - return err - } - sv := NewStreamVisitor( - bytes.NewReader(v.yml), v.mapper, v.dirPath, v.schema) - return sv.Visit(fn) -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/resource/mapper.go b/src/vendor/k8s.io/cli-runtime/pkg/resource/mapper.go deleted file mode 100644 index 5180610e2..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/resource/mapper.go +++ /dev/null @@ -1,166 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "fmt" - "reflect" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// Mapper is a convenience struct for holding references to the interfaces -// needed to create Info for arbitrary objects. -type mapper struct { - // localFn indicates the call can't make server requests - localFn func() bool - - restMapperFn RESTMapperFunc - clientFn func(version schema.GroupVersion) (RESTClient, error) - decoder runtime.Decoder -} - -// InfoForData creates an Info object for the given data. An error is returned -// if any of the decoding or client lookup steps fail. Name and namespace will be -// set into Info if the mapping's MetadataAccessor can retrieve them. -func (m *mapper) infoForData(data []byte, source string) (*Info, error) { - obj, gvk, err := m.decoder.Decode(data, nil, nil) - if err != nil { - return nil, fmt.Errorf("unable to decode %q: %v", source, err) - } - - name, _ := metadataAccessor.Name(obj) - namespace, _ := metadataAccessor.Namespace(obj) - resourceVersion, _ := metadataAccessor.ResourceVersion(obj) - - ret := &Info{ - Source: source, - Namespace: namespace, - Name: name, - ResourceVersion: resourceVersion, - - Object: obj, - } - - if m.localFn == nil || !m.localFn() { - restMapper, err := m.restMapperFn() - if err != nil { - return nil, err - } - mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - if _, ok := err.(*meta.NoKindMatchError); ok { - return nil, fmt.Errorf("resource mapping not found for name: %q namespace: %q from %q: %v\nensure CRDs are installed first", - name, namespace, source, err) - } - return nil, fmt.Errorf("unable to recognize %q: %v", source, err) - } - ret.Mapping = mapping - - client, err := m.clientFn(gvk.GroupVersion()) - if err != nil { - return nil, fmt.Errorf("unable to connect to a server to handle %q: %v", mapping.Resource, err) - } - ret.Client = client - } - - return ret, nil -} - -// InfoForObject creates an Info object for the given Object. An error is returned -// if the object cannot be introspected. Name and namespace will be set into Info -// if the mapping's MetadataAccessor can retrieve them. 
-func (m *mapper) infoForObject(obj runtime.Object, typer runtime.ObjectTyper, preferredGVKs []schema.GroupVersionKind) (*Info, error) { - groupVersionKinds, _, err := typer.ObjectKinds(obj) - if err != nil { - return nil, fmt.Errorf("unable to get type info from the object %q: %v", reflect.TypeOf(obj), err) - } - - gvk := groupVersionKinds[0] - if len(groupVersionKinds) > 1 && len(preferredGVKs) > 0 { - gvk = preferredObjectKind(groupVersionKinds, preferredGVKs) - } - - name, _ := metadataAccessor.Name(obj) - namespace, _ := metadataAccessor.Namespace(obj) - resourceVersion, _ := metadataAccessor.ResourceVersion(obj) - ret := &Info{ - Namespace: namespace, - Name: name, - ResourceVersion: resourceVersion, - - Object: obj, - } - - if m.localFn == nil || !m.localFn() { - restMapper, err := m.restMapperFn() - if err != nil { - return nil, err - } - mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return nil, fmt.Errorf("unable to recognize %v", err) - } - ret.Mapping = mapping - - client, err := m.clientFn(gvk.GroupVersion()) - if err != nil { - return nil, fmt.Errorf("unable to connect to a server to handle %q: %v", mapping.Resource, err) - } - ret.Client = client - } - - return ret, nil -} - -// preferredObjectKind picks the possibility that most closely matches the priority list in this order: -// GroupVersionKind matches (exact match) -// GroupKind matches -// Group matches -func preferredObjectKind(possibilities []schema.GroupVersionKind, preferences []schema.GroupVersionKind) schema.GroupVersionKind { - // Exact match - for _, priority := range preferences { - for _, possibility := range possibilities { - if possibility == priority { - return possibility - } - } - } - - // GroupKind match - for _, priority := range preferences { - for _, possibility := range possibilities { - if possibility.GroupKind() == priority.GroupKind() { - return possibility - } - } - } - - // Group match - for _, priority := range preferences { - for _, possibility := range possibilities { - if possibility.Group == priority.Group { - return possibility - } - } - } - - // Just pick the first - return possibilities[0] -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/resource/metadata_decoder.go b/src/vendor/k8s.io/cli-runtime/pkg/resource/metadata_decoder.go deleted file mode 100644 index d688c3a08..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/resource/metadata_decoder.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package resource - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - utiljson "k8s.io/apimachinery/pkg/util/json" -) - -// metadataValidatingDecoder wraps a decoder and additionally ensures metadata schema fields decode before returning an unstructured object -type metadataValidatingDecoder struct { - decoder runtime.Decoder -} - -func (m *metadataValidatingDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { - obj, gvk, err := m.decoder.Decode(data, defaults, into) - - // if we already errored, return - if err != nil { - return obj, gvk, err - } - - // if we're not unstructured, return - if _, isUnstructured := obj.(runtime.Unstructured); !isUnstructured { - return obj, gvk, err - } - - // make sure the data can decode into ObjectMeta before we return, - // so we don't silently truncate schema errors in metadata later with accesser get/set calls - v := &metadataOnlyObject{} - if typedErr := utiljson.Unmarshal(data, v); typedErr != nil { - return obj, gvk, typedErr - } - return obj, gvk, err -} - -type metadataOnlyObject struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/resource/query_param_verifier.go b/src/vendor/k8s.io/cli-runtime/pkg/resource/query_param_verifier.go deleted file mode 100644 index ab2e8d30e..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/resource/query_param_verifier.go +++ /dev/null @@ -1,166 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "errors" - "fmt" - - openapi_v2 "github.com/google/gnostic/openapiv2" - yaml "gopkg.in/yaml.v2" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/discovery" - "k8s.io/client-go/dynamic" -) - -func NewQueryParamVerifier(dynamicClient dynamic.Interface, openAPIGetter discovery.OpenAPISchemaInterface, queryParam VerifiableQueryParam) *QueryParamVerifier { - return &QueryParamVerifier{ - finder: NewCRDFinder(CRDFromDynamic(dynamicClient)), - openAPIGetter: openAPIGetter, - queryParam: queryParam, - } -} - -// QueryParamVerifier verifies if a given group-version-kind supports a -// given VerifiableQueryParam against the current server. -// -// Currently supported query params are: -// 1. dryRun -// 2. fieldValidation -// -// Support for each of these query params needs to be verified because: -// -// 1. Sending dryRun requests to apiserver that -// don't support it will result in objects being unwillingly persisted. -// -// 2. We determine whether or not to perform server-side or client-side -// schema validation based on whether the fieldValidation query param is -// supported or not. -// -// It reads the OpenAPI to see if the given GVK supports the given query param. 
-// If the GVK can not be found, we assume that CRDs will have the same level of -// support as "namespaces", and non-CRDs will not be supported. We -// delay the check for CRDs as much as possible though, since it -// requires an extra round-trip to the server. -type QueryParamVerifier struct { - finder CRDFinder - openAPIGetter discovery.OpenAPISchemaInterface - queryParam VerifiableQueryParam -} - -// Verifier is the generic verifier interface used for testing QueryParamVerifier -type Verifier interface { - HasSupport(gvk schema.GroupVersionKind) error -} - -// VerifiableQueryParam is a query parameter who's enablement on the -// apiserver can be determined by evaluating the OpenAPI for a specific -// GVK. -type VerifiableQueryParam string - -const ( - QueryParamDryRun VerifiableQueryParam = "dryRun" - QueryParamFieldValidation VerifiableQueryParam = "fieldValidation" -) - -// HasSupport checks if the given gvk supports the query param configured on v -func (v *QueryParamVerifier) HasSupport(gvk schema.GroupVersionKind) error { - oapi, err := v.openAPIGetter.OpenAPISchema() - if err != nil { - return fmt.Errorf("failed to download openapi: %v", err) - } - supports, err := supportsQueryParam(oapi, gvk, v.queryParam) - if err != nil { - // We assume that we couldn't find the type, then check for namespace: - supports, _ = supportsQueryParam(oapi, schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"}, v.queryParam) - // If namespace supports the query param, then we will support the query param for CRDs only. - if supports { - supports, err = v.finder.HasCRD(gvk.GroupKind()) - if err != nil { - return fmt.Errorf("failed to check CRD: %v", err) - } - } - } - if !supports { - return NewParamUnsupportedError(gvk, v.queryParam) - } - return nil -} - -type paramUnsupportedError struct { - gvk schema.GroupVersionKind - param VerifiableQueryParam -} - -func NewParamUnsupportedError(gvk schema.GroupVersionKind, param VerifiableQueryParam) error { - return ¶mUnsupportedError{ - gvk: gvk, - param: param, - } -} - -func (e *paramUnsupportedError) Error() string { - return fmt.Sprintf("%v doesn't support %s", e.gvk, e.param) -} - -func IsParamUnsupportedError(err error) bool { - if err == nil { - return false - } - _, ok := err.(*paramUnsupportedError) - return ok -} - -func hasGVKExtension(extensions []*openapi_v2.NamedAny, gvk schema.GroupVersionKind) bool { - for _, extension := range extensions { - if extension.GetValue().GetYaml() == "" || - extension.GetName() != "x-kubernetes-group-version-kind" { - continue - } - var value map[string]string - err := yaml.Unmarshal([]byte(extension.GetValue().GetYaml()), &value) - if err != nil { - continue - } - - if value["group"] == gvk.Group && value["kind"] == gvk.Kind && value["version"] == gvk.Version { - return true - } - return false - } - return false -} - -// supportsQueryParam is a method that let's us look in the OpenAPI if the -// specific group-version-kind supports the specific query parameter for -// the PATCH end-point. -func supportsQueryParam(doc *openapi_v2.Document, gvk schema.GroupVersionKind, queryParam VerifiableQueryParam) (bool, error) { - for _, path := range doc.GetPaths().GetPath() { - // Is this describing the gvk we're looking for? 
- if !hasGVKExtension(path.GetValue().GetPatch().GetVendorExtension(), gvk) { - continue - } - for _, param := range path.GetValue().GetPatch().GetParameters() { - if param.GetParameter().GetNonBodyParameter().GetQueryParameterSubSchema().GetName() == string(queryParam) { - return true, nil - } - } - return false, nil - } - - return false, errors.New("couldn't find GVK in openapi") -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/resource/result.go b/src/vendor/k8s.io/cli-runtime/pkg/resource/result.go deleted file mode 100644 index b8722afe6..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/resource/result.go +++ /dev/null @@ -1,242 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "fmt" - "reflect" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/watch" -) - -// ErrMatchFunc can be used to filter errors that may not be true failures. -type ErrMatchFunc func(error) bool - -// Result contains helper methods for dealing with the outcome of a Builder. -type Result struct { - err error - visitor Visitor - - sources []Visitor - singleItemImplied bool - targetsSingleItems bool - - mapper *mapper - ignoreErrors []utilerrors.Matcher - - // populated by a call to Infos - info []*Info -} - -// withError allows a fluent style for internal result code. -func (r *Result) withError(err error) *Result { - r.err = err - return r -} - -// TargetsSingleItems returns true if any of the builder arguments pointed -// to non-list calls (if the user explicitly asked for any object by name). -// This includes directories, streams, URLs, and resource name tuples. -func (r *Result) TargetsSingleItems() bool { - return r.targetsSingleItems -} - -// IgnoreErrors will filter errors that occur when by visiting the result -// (but not errors that occur by creating the result in the first place), -// eliminating any that match fns. This is best used in combination with -// Builder.ContinueOnError(), where the visitors accumulate errors and return -// them after visiting as a slice of errors. If no errors remain after -// filtering, the various visitor methods on Result will return nil for -// err. -func (r *Result) IgnoreErrors(fns ...ErrMatchFunc) *Result { - for _, fn := range fns { - r.ignoreErrors = append(r.ignoreErrors, utilerrors.Matcher(fn)) - } - return r -} - -// Mapper returns a copy of the builder's mapper. -func (r *Result) Mapper() *mapper { - return r.mapper -} - -// Err returns one or more errors (via a util.ErrorList) that occurred prior -// to visiting the elements in the visitor. To see all errors including those -// that occur during visitation, invoke Infos(). 
-func (r *Result) Err() error { - return r.err -} - -// Visit implements the Visitor interface on the items described in the Builder. -// Note that some visitor sources are not traversable more than once, or may -// return different results. If you wish to operate on the same set of resources -// multiple times, use the Infos() method. -func (r *Result) Visit(fn VisitorFunc) error { - if r.err != nil { - return r.err - } - err := r.visitor.Visit(fn) - return utilerrors.FilterOut(err, r.ignoreErrors...) -} - -// IntoSingleItemImplied sets the provided boolean pointer to true if the Builder input -// implies a single item, or multiple. -func (r *Result) IntoSingleItemImplied(b *bool) *Result { - *b = r.singleItemImplied - return r -} - -// Infos returns an array of all of the resource infos retrieved via traversal. -// Will attempt to traverse the entire set of visitors only once, and will return -// a cached list on subsequent calls. -func (r *Result) Infos() ([]*Info, error) { - if r.err != nil { - return nil, r.err - } - if r.info != nil { - return r.info, nil - } - - infos := []*Info{} - err := r.visitor.Visit(func(info *Info, err error) error { - if err != nil { - return err - } - infos = append(infos, info) - return nil - }) - err = utilerrors.FilterOut(err, r.ignoreErrors...) - - r.info, r.err = infos, err - return infos, err -} - -// Object returns a single object representing the output of a single visit to all -// found resources. If the Builder was a singular context (expected to return a -// single resource by user input) and only a single resource was found, the resource -// will be returned as is. Otherwise, the returned resources will be part of an -// v1.List. The ResourceVersion of the v1.List will be set only if it is identical -// across all infos returned. -func (r *Result) Object() (runtime.Object, error) { - infos, err := r.Infos() - if err != nil { - return nil, err - } - - versions := sets.String{} - objects := []runtime.Object{} - for _, info := range infos { - if info.Object != nil { - objects = append(objects, info.Object) - versions.Insert(info.ResourceVersion) - } - } - - if len(objects) == 1 { - if r.singleItemImplied { - return objects[0], nil - } - // if the item is a list already, don't create another list - if meta.IsListType(objects[0]) { - return objects[0], nil - } - } - - version := "" - if len(versions) == 1 { - version = versions.List()[0] - } - - return toV1List(objects, version), err -} - -// Compile time check to enforce that list implements the necessary interface -var _ metav1.ListInterface = &v1.List{} -var _ metav1.ListMetaAccessor = &v1.List{} - -// toV1List takes a slice of Objects + their version, and returns -// a v1.List Object containing the objects in the Items field -func toV1List(objects []runtime.Object, version string) runtime.Object { - raw := []runtime.RawExtension{} - for _, o := range objects { - raw = append(raw, runtime.RawExtension{Object: o}) - } - return &v1.List{ - ListMeta: metav1.ListMeta{ - ResourceVersion: version, - }, - Items: raw, - } -} - -// ResourceMapping returns a single meta.RESTMapping representing the -// resources located by the builder, or an error if more than one -// mapping was found. 
-func (r *Result) ResourceMapping() (*meta.RESTMapping, error) { - if r.err != nil { - return nil, r.err - } - mappings := map[schema.GroupVersionResource]*meta.RESTMapping{} - for i := range r.sources { - m, ok := r.sources[i].(ResourceMapping) - if !ok { - return nil, fmt.Errorf("a resource mapping could not be loaded from %v", reflect.TypeOf(r.sources[i])) - } - mapping := m.ResourceMapping() - mappings[mapping.Resource] = mapping - } - if len(mappings) != 1 { - return nil, fmt.Errorf("expected only a single resource type") - } - for _, mapping := range mappings { - return mapping, nil - } - return nil, nil -} - -// Watch retrieves changes that occur on the server to the specified resource. -// It currently supports watching a single source - if the resource source -// (selectors or pure types) can be watched, they will be, otherwise the list -// will be visited (equivalent to the Infos() call) and if there is a single -// resource present, it will be watched, otherwise an error will be returned. -func (r *Result) Watch(resourceVersion string) (watch.Interface, error) { - if r.err != nil { - return nil, r.err - } - if len(r.sources) != 1 { - return nil, fmt.Errorf("you may only watch a single resource or type of resource at a time") - } - w, ok := r.sources[0].(Watchable) - if !ok { - info, err := r.Infos() - if err != nil { - return nil, err - } - if len(info) != 1 { - return nil, fmt.Errorf("watch is only supported on individual resources and resource collections - %d resources were found", len(info)) - } - return info[0].Watch(resourceVersion) - } - return w.Watch(resourceVersion) -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/resource/scheme.go b/src/vendor/k8s.io/cli-runtime/pkg/resource/scheme.go deleted file mode 100644 index 0a47d1596..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/resource/scheme.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "encoding/json" - "io" - "strings" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" -) - -// dynamicCodec is a codec that wraps the standard unstructured codec -// with special handling for Status objects. 
-// Deprecated only used by test code and its wrong -type dynamicCodec struct{} - -func (dynamicCodec) Decode(data []byte, gvk *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { - obj, gvk, err := unstructured.UnstructuredJSONScheme.Decode(data, gvk, obj) - if err != nil { - return nil, nil, err - } - - if strings.ToLower(gvk.Kind) == "status" && gvk.Version == "v1" && (gvk.Group == "" || gvk.Group == "meta.k8s.io") { - if _, ok := obj.(*metav1.Status); !ok { - obj = &metav1.Status{} - err := json.Unmarshal(data, obj) - if err != nil { - return nil, nil, err - } - } - } - - return obj, gvk, nil -} - -func (dynamicCodec) Encode(obj runtime.Object, w io.Writer) error { - // There is no need to handle runtime.CacheableObject, as we only - // fallback to other encoders here. - return unstructured.UnstructuredJSONScheme.Encode(obj, w) -} - -// Identifier implements runtime.Encoder interface. -func (dynamicCodec) Identifier() runtime.Identifier { - return unstructured.UnstructuredJSONScheme.Identifier() -} - -// UnstructuredPlusDefaultContentConfig returns a rest.ContentConfig for dynamic types. It includes enough codecs to act as a "normal" -// serializer for the rest.client with options, status and the like. -func UnstructuredPlusDefaultContentConfig() rest.ContentConfig { - // TODO: scheme.Codecs here should become "pkg/apis/server/scheme" which is the minimal core you need - // to talk to a kubernetes server - jsonInfo, _ := runtime.SerializerInfoForMediaType(scheme.Codecs.SupportedMediaTypes(), runtime.ContentTypeJSON) - - jsonInfo.Serializer = dynamicCodec{} - jsonInfo.PrettySerializer = nil - return rest.ContentConfig{ - AcceptContentTypes: runtime.ContentTypeJSON, - ContentType: runtime.ContentTypeJSON, - NegotiatedSerializer: serializer.NegotiatedSerializerWrapper(jsonInfo), - } -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/resource/selector.go b/src/vendor/k8s.io/cli-runtime/pkg/resource/selector.go deleted file mode 100644 index 2a283d4e0..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/resource/selector.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/watch" -) - -// Selector is a Visitor for resources that match a label selector. -type Selector struct { - Client RESTClient - Mapping *meta.RESTMapping - Namespace string - LabelSelector string - FieldSelector string - LimitChunks int64 -} - -// NewSelector creates a resource selector which hides details of getting items by their label selector. 
-func NewSelector(client RESTClient, mapping *meta.RESTMapping, namespace, labelSelector, fieldSelector string, limitChunks int64) *Selector { - return &Selector{ - Client: client, - Mapping: mapping, - Namespace: namespace, - LabelSelector: labelSelector, - FieldSelector: fieldSelector, - LimitChunks: limitChunks, - } -} - -// Visit implements Visitor and uses request chunking by default. -func (r *Selector) Visit(fn VisitorFunc) error { - helper := NewHelper(r.Client, r.Mapping) - initialOpts := metav1.ListOptions{ - LabelSelector: r.LabelSelector, - FieldSelector: r.FieldSelector, - Limit: r.LimitChunks, - } - return FollowContinue(&initialOpts, func(options metav1.ListOptions) (runtime.Object, error) { - list, err := helper.List( - r.Namespace, - r.ResourceMapping().GroupVersionKind.GroupVersion().String(), - &options, - ) - if err != nil { - return nil, EnhanceListError(err, options, r.Mapping.Resource.String()) - } - resourceVersion, _ := metadataAccessor.ResourceVersion(list) - - info := &Info{ - Client: r.Client, - Mapping: r.Mapping, - - Namespace: r.Namespace, - ResourceVersion: resourceVersion, - - Object: list, - } - - if err := fn(info, nil); err != nil { - return nil, err - } - return list, nil - }) -} - -func (r *Selector) Watch(resourceVersion string) (watch.Interface, error) { - return NewHelper(r.Client, r.Mapping).Watch(r.Namespace, r.ResourceMapping().GroupVersionKind.GroupVersion().String(), - &metav1.ListOptions{ResourceVersion: resourceVersion, LabelSelector: r.LabelSelector, FieldSelector: r.FieldSelector}) -} - -// ResourceMapping returns the mapping for this resource and implements ResourceMapping -func (r *Selector) ResourceMapping() *meta.RESTMapping { - return r.Mapping -} diff --git a/src/vendor/k8s.io/cli-runtime/pkg/resource/visitor.go b/src/vendor/k8s.io/cli-runtime/pkg/resource/visitor.go deleted file mode 100644 index d5df9f3dd..000000000 --- a/src/vendor/k8s.io/cli-runtime/pkg/resource/visitor.go +++ /dev/null @@ -1,742 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "bytes" - "context" - "fmt" - "io" - "net/http" - "net/url" - "os" - "path/filepath" - "strings" - "time" - - "golang.org/x/text/encoding/unicode" - "golang.org/x/text/transform" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/apimachinery/pkg/watch" -) - -const ( - constSTDINstr = "STDIN" - stopValidateMessage = "if you choose to ignore these errors, turn validation off with --validate=false" -) - -// Watchable describes a resource that can be watched for changes that occur on the server, -// beginning after the provided resource version. 
-type Watchable interface { - Watch(resourceVersion string) (watch.Interface, error) -} - -// ResourceMapping allows an object to return the resource mapping associated with -// the resource or resources it represents. -type ResourceMapping interface { - ResourceMapping() *meta.RESTMapping -} - -// Info contains temporary info to execute a REST call, or show the results -// of an already completed REST call. -type Info struct { - // Client will only be present if this builder was not local - Client RESTClient - // Mapping will only be present if this builder was not local - Mapping *meta.RESTMapping - - // Namespace will be set if the object is namespaced and has a specified value. - Namespace string - Name string - - // Optional, Source is the filename or URL to template file (.json or .yaml), - // or stdin to use to handle the resource - Source string - // Optional, this is the most recent value returned by the server if available. It will - // typically be in unstructured or internal forms, depending on how the Builder was - // defined. If retrieved from the server, the Builder expects the mapping client to - // decide the final form. Use the AsVersioned, AsUnstructured, and AsInternal helpers - // to alter the object versions. - // If Subresource is specified, this will be the object for the subresource. - Object runtime.Object - // Optional, this is the most recent resource version the server knows about for - // this type of resource. It may not match the resource version of the object, - // but if set it should be equal to or newer than the resource version of the - // object (however the server defines resource version). - ResourceVersion string - // Optional, if specified, the object is the most recent value of the subresource - // returned by the server if available. - Subresource string -} - -// Visit implements Visitor -func (i *Info) Visit(fn VisitorFunc) error { - return fn(i, nil) -} - -// Get retrieves the object from the Namespace and Name fields -func (i *Info) Get() (err error) { - obj, err := NewHelper(i.Client, i.Mapping).WithSubresource(i.Subresource).Get(i.Namespace, i.Name) - if err != nil { - if errors.IsNotFound(err) && len(i.Namespace) > 0 && i.Namespace != metav1.NamespaceDefault && i.Namespace != metav1.NamespaceAll { - err2 := i.Client.Get().AbsPath("api", "v1", "namespaces", i.Namespace).Do(context.TODO()).Error() - if err2 != nil && errors.IsNotFound(err2) { - return err2 - } - } - return err - } - i.Object = obj - i.ResourceVersion, _ = metadataAccessor.ResourceVersion(obj) - return nil -} - -// Refresh updates the object with another object. If ignoreError is set -// the Object will be updated even if name, namespace, or resourceVersion -// attributes cannot be loaded from the object. -func (i *Info) Refresh(obj runtime.Object, ignoreError bool) error { - name, err := metadataAccessor.Name(obj) - if err != nil { - if !ignoreError { - return err - } - } else { - i.Name = name - } - namespace, err := metadataAccessor.Namespace(obj) - if err != nil { - if !ignoreError { - return err - } - } else { - i.Namespace = namespace - } - version, err := metadataAccessor.ResourceVersion(obj) - if err != nil { - if !ignoreError { - return err - } - } else { - i.ResourceVersion = version - } - i.Object = obj - return nil -} - -// ObjectName returns an approximate form of the resource's kind/name. 
-func (i *Info) ObjectName() string { - if i.Mapping != nil { - return fmt.Sprintf("%s/%s", i.Mapping.Resource.Resource, i.Name) - } - gvk := i.Object.GetObjectKind().GroupVersionKind() - if len(gvk.Group) == 0 { - return fmt.Sprintf("%s/%s", strings.ToLower(gvk.Kind), i.Name) - } - return fmt.Sprintf("%s.%s/%s\n", strings.ToLower(gvk.Kind), gvk.Group, i.Name) -} - -// String returns the general purpose string representation -func (i *Info) String() string { - basicInfo := fmt.Sprintf("Name: %q, Namespace: %q", i.Name, i.Namespace) - if i.Mapping != nil { - mappingInfo := fmt.Sprintf("Resource: %q, GroupVersionKind: %q", i.Mapping.Resource.String(), - i.Mapping.GroupVersionKind.String()) - return fmt.Sprint(mappingInfo, "\n", basicInfo) - } - return basicInfo -} - -// Namespaced returns true if the object belongs to a namespace -func (i *Info) Namespaced() bool { - if i.Mapping != nil { - // if we have RESTMapper info, use it - return i.Mapping.Scope.Name() == meta.RESTScopeNameNamespace - } - // otherwise, use the presence of a namespace in the info as an indicator - return len(i.Namespace) > 0 -} - -// Watch returns server changes to this object after it was retrieved. -func (i *Info) Watch(resourceVersion string) (watch.Interface, error) { - return NewHelper(i.Client, i.Mapping).WatchSingle(i.Namespace, i.Name, resourceVersion) -} - -// ResourceMapping returns the mapping for this resource and implements ResourceMapping -func (i *Info) ResourceMapping() *meta.RESTMapping { - return i.Mapping -} - -// VisitorList implements Visit for the sub visitors it contains. The first error -// returned from a child Visitor will terminate iteration. -type VisitorList []Visitor - -// Visit implements Visitor -func (l VisitorList) Visit(fn VisitorFunc) error { - for i := range l { - if err := l[i].Visit(fn); err != nil { - return err - } - } - return nil -} - -// EagerVisitorList implements Visit for the sub visitors it contains. All errors -// will be captured and returned at the end of iteration. -type EagerVisitorList []Visitor - -// Visit implements Visitor, and gathers errors that occur during processing until -// all sub visitors have been visited. -func (l EagerVisitorList) Visit(fn VisitorFunc) error { - var errs []error - for i := range l { - err := l[i].Visit(func(info *Info, err error) error { - if err != nil { - errs = append(errs, err) - return nil - } - if err := fn(info, nil); err != nil { - errs = append(errs, err) - } - return nil - }) - if err != nil { - errs = append(errs, err) - } - } - return utilerrors.NewAggregate(errs) -} - -func ValidateSchema(data []byte, schema ContentValidator) error { - if schema == nil { - return nil - } - if err := schema.ValidateBytes(data); err != nil { - return fmt.Errorf("error validating data: %v; %s", err, stopValidateMessage) - } - return nil -} - -// URLVisitor downloads the contents of a URL, and if successful, returns -// an info object representing the downloaded object. -type URLVisitor struct { - URL *url.URL - *StreamVisitor - HttpAttemptCount int -} - -func (v *URLVisitor) Visit(fn VisitorFunc) error { - body, err := readHttpWithRetries(httpgetImpl, time.Second, v.URL.String(), v.HttpAttemptCount) - if err != nil { - return err - } - defer body.Close() - v.StreamVisitor.Reader = body - return v.StreamVisitor.Visit(fn) -} - -// readHttpWithRetries tries to http.Get the v.URL retries times before giving up. 
-func readHttpWithRetries(get httpget, duration time.Duration, u string, attempts int) (io.ReadCloser, error) { - var err error - if attempts <= 0 { - return nil, fmt.Errorf("http attempts must be greater than 0, was %d", attempts) - } - for i := 0; i < attempts; i++ { - var ( - statusCode int - status string - body io.ReadCloser - ) - if i > 0 { - time.Sleep(duration) - } - - // Try to get the URL - statusCode, status, body, err = get(u) - - // Retry Errors - if err != nil { - continue - } - - if statusCode == http.StatusOK { - return body, nil - } - body.Close() - // Error - Set the error condition from the StatusCode - err = fmt.Errorf("unable to read URL %q, server reported %s, status code=%d", u, status, statusCode) - - if statusCode >= 500 && statusCode < 600 { - // Retry 500's - continue - } else { - // Don't retry other StatusCodes - break - } - } - return nil, err -} - -// httpget Defines function to retrieve a url and return the results. Exists for unit test stubbing. -type httpget func(url string) (int, string, io.ReadCloser, error) - -// httpgetImpl Implements a function to retrieve a url and return the results. -func httpgetImpl(url string) (int, string, io.ReadCloser, error) { - resp, err := http.Get(url) - if err != nil { - return 0, "", nil, err - } - return resp.StatusCode, resp.Status, resp.Body, nil -} - -// DecoratedVisitor will invoke the decorators in order prior to invoking the visitor function -// passed to Visit. An error will terminate the visit. -type DecoratedVisitor struct { - visitor Visitor - decorators []VisitorFunc -} - -// NewDecoratedVisitor will create a visitor that invokes the provided visitor functions before -// the user supplied visitor function is invoked, giving them the opportunity to mutate the Info -// object or terminate early with an error. -func NewDecoratedVisitor(v Visitor, fn ...VisitorFunc) Visitor { - if len(fn) == 0 { - return v - } - return DecoratedVisitor{v, fn} -} - -// Visit implements Visitor -func (v DecoratedVisitor) Visit(fn VisitorFunc) error { - return v.visitor.Visit(func(info *Info, err error) error { - if err != nil { - return err - } - for i := range v.decorators { - if err := v.decorators[i](info, nil); err != nil { - return err - } - } - return fn(info, nil) - }) -} - -// ContinueOnErrorVisitor visits each item and, if an error occurs on -// any individual item, returns an aggregate error after all items -// are visited. -type ContinueOnErrorVisitor struct { - Visitor -} - -// Visit returns nil if no error occurs during traversal, a regular -// error if one occurs, or if multiple errors occur, an aggregate -// error. If the provided visitor fails on any individual item it -// will not prevent the remaining items from being visited. An error -// returned by the visitor directly may still result in some items -// not being visited. -func (v ContinueOnErrorVisitor) Visit(fn VisitorFunc) error { - var errs []error - err := v.Visitor.Visit(func(info *Info, err error) error { - if err != nil { - errs = append(errs, err) - return nil - } - if err := fn(info, nil); err != nil { - errs = append(errs, err) - } - return nil - }) - if err != nil { - errs = append(errs, err) - } - if len(errs) == 1 { - return errs[0] - } - return utilerrors.NewAggregate(errs) -} - -// FlattenListVisitor flattens any objects that runtime.ExtractList recognizes as a list -// - has an "Items" public field that is a slice of runtime.Objects or objects satisfying -// that interface - into multiple Infos. Returns nil in the case of no errors. 
-// When an error is hit on sub items (for instance, if a List contains an object that does -// not have a registered client or resource), returns an aggregate error. -type FlattenListVisitor struct { - visitor Visitor - typer runtime.ObjectTyper - mapper *mapper -} - -// NewFlattenListVisitor creates a visitor that will expand list style runtime.Objects -// into individual items and then visit them individually. -func NewFlattenListVisitor(v Visitor, typer runtime.ObjectTyper, mapper *mapper) Visitor { - return FlattenListVisitor{v, typer, mapper} -} - -func (v FlattenListVisitor) Visit(fn VisitorFunc) error { - return v.visitor.Visit(func(info *Info, err error) error { - if err != nil { - return err - } - if info.Object == nil { - return fn(info, nil) - } - if !meta.IsListType(info.Object) { - return fn(info, nil) - } - - items := []runtime.Object{} - itemsToProcess := []runtime.Object{info.Object} - - for i := 0; i < len(itemsToProcess); i++ { - currObj := itemsToProcess[i] - if !meta.IsListType(currObj) { - items = append(items, currObj) - continue - } - - currItems, err := meta.ExtractList(currObj) - if err != nil { - return err - } - if errs := runtime.DecodeList(currItems, v.mapper.decoder); len(errs) > 0 { - return utilerrors.NewAggregate(errs) - } - itemsToProcess = append(itemsToProcess, currItems...) - } - - // If we have a GroupVersionKind on the list, prioritize that when asking for info on the objects contained in the list - var preferredGVKs []schema.GroupVersionKind - if info.Mapping != nil && !info.Mapping.GroupVersionKind.Empty() { - preferredGVKs = append(preferredGVKs, info.Mapping.GroupVersionKind) - } - var errs []error - for i := range items { - item, err := v.mapper.infoForObject(items[i], v.typer, preferredGVKs) - if err != nil { - errs = append(errs, err) - continue - } - if len(info.ResourceVersion) != 0 { - item.ResourceVersion = info.ResourceVersion - } - // propagate list source to items source - if len(info.Source) != 0 { - item.Source = info.Source - } - if err := fn(item, nil); err != nil { - errs = append(errs, err) - } - } - return utilerrors.NewAggregate(errs) - }) -} - -func ignoreFile(path string, extensions []string) bool { - if len(extensions) == 0 { - return false - } - ext := filepath.Ext(path) - for _, s := range extensions { - if s == ext { - return false - } - } - return true -} - -// FileVisitorForSTDIN return a special FileVisitor just for STDIN -func FileVisitorForSTDIN(mapper *mapper, schema ContentValidator) Visitor { - return &FileVisitor{ - Path: constSTDINstr, - StreamVisitor: NewStreamVisitor(nil, mapper, constSTDINstr, schema), - } -} - -// ExpandPathsToFileVisitors will return a slice of FileVisitors that will handle files from the provided path. -// After FileVisitors open the files, they will pass an io.Reader to a StreamVisitor to do the reading. (stdin -// is also taken care of). 
Paths argument also accepts a single file, and will return a single visitor -func ExpandPathsToFileVisitors(mapper *mapper, paths string, recursive bool, extensions []string, schema ContentValidator) ([]Visitor, error) { - var visitors []Visitor - err := filepath.Walk(paths, func(path string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - - if fi.IsDir() { - if path != paths && !recursive { - return filepath.SkipDir - } - return nil - } - // Don't check extension if the filepath was passed explicitly - if path != paths && ignoreFile(path, extensions) { - return nil - } - - visitor := &FileVisitor{ - Path: path, - StreamVisitor: NewStreamVisitor(nil, mapper, path, schema), - } - - visitors = append(visitors, visitor) - return nil - }) - - if err != nil { - return nil, err - } - return visitors, nil -} - -// FileVisitor is wrapping around a StreamVisitor, to handle open/close files -type FileVisitor struct { - Path string - *StreamVisitor -} - -// Visit in a FileVisitor is just taking care of opening/closing files -func (v *FileVisitor) Visit(fn VisitorFunc) error { - var f *os.File - if v.Path == constSTDINstr { - f = os.Stdin - } else { - var err error - f, err = os.Open(v.Path) - if err != nil { - return err - } - defer f.Close() - } - - // TODO: Consider adding a flag to force to UTF16, apparently some - // Windows tools don't write the BOM - utf16bom := unicode.BOMOverride(unicode.UTF8.NewDecoder()) - v.StreamVisitor.Reader = transform.NewReader(f, utf16bom) - - return v.StreamVisitor.Visit(fn) -} - -// StreamVisitor reads objects from an io.Reader and walks them. A stream visitor can only be -// visited once. -// TODO: depends on objects being in JSON format before being passed to decode - need to implement -// a stream decoder method on runtime.Codec to properly handle this. -type StreamVisitor struct { - io.Reader - *mapper - - Source string - Schema ContentValidator -} - -// NewStreamVisitor is a helper function that is useful when we want to change the fields of the struct but keep calls the same. -func NewStreamVisitor(r io.Reader, mapper *mapper, source string, schema ContentValidator) *StreamVisitor { - return &StreamVisitor{ - Reader: r, - mapper: mapper, - Source: source, - Schema: schema, - } -} - -// Visit implements Visitor over a stream. StreamVisitor is able to distinct multiple resources in one stream. -func (v *StreamVisitor) Visit(fn VisitorFunc) error { - d := yaml.NewYAMLOrJSONDecoder(v.Reader, 4096) - for { - ext := runtime.RawExtension{} - if err := d.Decode(&ext); err != nil { - if err == io.EOF { - return nil - } - return fmt.Errorf("error parsing %s: %v", v.Source, err) - } - // TODO: This needs to be able to handle object in other encodings and schemas. 
- ext.Raw = bytes.TrimSpace(ext.Raw) - if len(ext.Raw) == 0 || bytes.Equal(ext.Raw, []byte("null")) { - continue - } - if err := ValidateSchema(ext.Raw, v.Schema); err != nil { - return fmt.Errorf("error validating %q: %v", v.Source, err) - } - info, err := v.infoForData(ext.Raw, v.Source) - if err != nil { - if fnErr := fn(info, err); fnErr != nil { - return fnErr - } - continue - } - if err := fn(info, nil); err != nil { - return err - } - } -} - -func UpdateObjectNamespace(info *Info, err error) error { - if err != nil { - return err - } - if info.Object != nil { - return metadataAccessor.SetNamespace(info.Object, info.Namespace) - } - return nil -} - -// FilterNamespace omits the namespace if the object is not namespace scoped -func FilterNamespace(info *Info, err error) error { - if err != nil { - return err - } - if !info.Namespaced() { - info.Namespace = "" - UpdateObjectNamespace(info, nil) - } - return nil -} - -// SetNamespace ensures that every Info object visited will have a namespace -// set. If info.Object is set, it will be mutated as well. -func SetNamespace(namespace string) VisitorFunc { - return func(info *Info, err error) error { - if err != nil { - return err - } - if !info.Namespaced() { - return nil - } - if len(info.Namespace) == 0 { - info.Namespace = namespace - UpdateObjectNamespace(info, nil) - } - return nil - } -} - -// RequireNamespace will either set a namespace if none is provided on the -// Info object, or if the namespace is set and does not match the provided -// value, returns an error. This is intended to guard against administrators -// accidentally operating on resources outside their namespace. -func RequireNamespace(namespace string) VisitorFunc { - return func(info *Info, err error) error { - if err != nil { - return err - } - if !info.Namespaced() { - return nil - } - if len(info.Namespace) == 0 { - info.Namespace = namespace - UpdateObjectNamespace(info, nil) - return nil - } - if info.Namespace != namespace { - return fmt.Errorf("the namespace from the provided object %q does not match the namespace %q. You must pass '--namespace=%s' to perform this operation.", info.Namespace, namespace, info.Namespace) - } - return nil - } -} - -// RetrieveLatest updates the Object on each Info by invoking a standard client -// Get. -func RetrieveLatest(info *Info, err error) error { - if err != nil { - return err - } - if meta.IsListType(info.Object) { - return fmt.Errorf("watch is only supported on individual resources and resource collections, but a list of resources is found") - } - if len(info.Name) == 0 { - return nil - } - if info.Namespaced() && len(info.Namespace) == 0 { - return fmt.Errorf("no namespace set on resource %s %q", info.Mapping.Resource, info.Name) - } - return info.Get() -} - -// RetrieveLazy updates the object if it has not been loaded yet. 
-func RetrieveLazy(info *Info, err error) error { - if err != nil { - return err - } - if info.Object == nil { - return info.Get() - } - return nil -} - -type FilterFunc func(info *Info, err error) (bool, error) - -type FilteredVisitor struct { - visitor Visitor - filters []FilterFunc -} - -func NewFilteredVisitor(v Visitor, fn ...FilterFunc) Visitor { - if len(fn) == 0 { - return v - } - return FilteredVisitor{v, fn} -} - -func (v FilteredVisitor) Visit(fn VisitorFunc) error { - return v.visitor.Visit(func(info *Info, err error) error { - if err != nil { - return err - } - for _, filter := range v.filters { - ok, err := filter(info, nil) - if err != nil { - return err - } - if !ok { - return nil - } - } - return fn(info, nil) - }) -} - -func FilterByLabelSelector(s labels.Selector) FilterFunc { - return func(info *Info, err error) (bool, error) { - if err != nil { - return false, err - } - a, err := meta.Accessor(info.Object) - if err != nil { - return false, err - } - if !s.Matches(labels.Set(a.GetLabels())) { - return false, nil - } - return true, nil - } -} - -type InfoListVisitor []*Info - -func (infos InfoListVisitor) Visit(fn VisitorFunc) error { - var err error - for _, i := range infos { - err = fn(i, err) - } - return err -} diff --git a/src/vendor/k8s.io/client-go/discovery/cached/disk/cached_discovery.go b/src/vendor/k8s.io/client-go/discovery/cached/disk/cached_discovery.go deleted file mode 100644 index d3082d1e1..000000000 --- a/src/vendor/k8s.io/client-go/discovery/cached/disk/cached_discovery.go +++ /dev/null @@ -1,312 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package disk - -import ( - "errors" - "io/ioutil" - "net/http" - "os" - "path/filepath" - "sync" - "time" - - openapi_v2 "github.com/google/gnostic/openapiv2" - "k8s.io/klog/v2" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/version" - "k8s.io/client-go/discovery" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/openapi" - cachedopenapi "k8s.io/client-go/openapi/cached" - restclient "k8s.io/client-go/rest" -) - -// CachedDiscoveryClient implements the functions that discovery server-supported API groups, -// versions and resources. -type CachedDiscoveryClient struct { - delegate discovery.DiscoveryInterface - - // cacheDirectory is the directory where discovery docs are held. It must be unique per host:port combination to work well. - cacheDirectory string - - // ttl is how long the cache should be considered valid - ttl time.Duration - - // mutex protects the variables below - mutex sync.Mutex - - // ourFiles are all filenames of cache files created by this process - ourFiles map[string]struct{} - // invalidated is true if all cache files should be ignored that are not ours (e.g. 
after Invalidate() was called) - invalidated bool - // fresh is true if all used cache files were ours - fresh bool - - // caching openapi v3 client which wraps the delegate's client - openapiClient openapi.Client -} - -var _ discovery.CachedDiscoveryInterface = &CachedDiscoveryClient{} - -// ServerResourcesForGroupVersion returns the supported resources for a group and version. -func (d *CachedDiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { - filename := filepath.Join(d.cacheDirectory, groupVersion, "serverresources.json") - cachedBytes, err := d.getCachedFile(filename) - // don't fail on errors, we either don't have a file or won't be able to run the cached check. Either way we can fallback. - if err == nil { - cachedResources := &metav1.APIResourceList{} - if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedResources); err == nil { - klog.V(10).Infof("returning cached discovery info from %v", filename) - return cachedResources, nil - } - } - - liveResources, err := d.delegate.ServerResourcesForGroupVersion(groupVersion) - if err != nil { - klog.V(3).Infof("skipped caching discovery info due to %v", err) - return liveResources, err - } - if liveResources == nil || len(liveResources.APIResources) == 0 { - klog.V(3).Infof("skipped caching discovery info, no resources found") - return liveResources, err - } - - if err := d.writeCachedFile(filename, liveResources); err != nil { - klog.V(1).Infof("failed to write cache to %v due to %v", filename, err) - } - - return liveResources, nil -} - -// ServerGroupsAndResources returns the supported groups and resources for all groups and versions. -func (d *CachedDiscoveryClient) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { - return discovery.ServerGroupsAndResources(d) -} - -// ServerGroups returns the supported groups, with information like supported versions and the -// preferred version. -func (d *CachedDiscoveryClient) ServerGroups() (*metav1.APIGroupList, error) { - filename := filepath.Join(d.cacheDirectory, "servergroups.json") - cachedBytes, err := d.getCachedFile(filename) - // don't fail on errors, we either don't have a file or won't be able to run the cached check. Either way we can fallback. 
- if err == nil { - cachedGroups := &metav1.APIGroupList{} - if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedGroups); err == nil { - klog.V(10).Infof("returning cached discovery info from %v", filename) - return cachedGroups, nil - } - } - - liveGroups, err := d.delegate.ServerGroups() - if err != nil { - klog.V(3).Infof("skipped caching discovery info due to %v", err) - return liveGroups, err - } - if liveGroups == nil || len(liveGroups.Groups) == 0 { - klog.V(3).Infof("skipped caching discovery info, no groups found") - return liveGroups, err - } - - if err := d.writeCachedFile(filename, liveGroups); err != nil { - klog.V(1).Infof("failed to write cache to %v due to %v", filename, err) - } - - return liveGroups, nil -} - -func (d *CachedDiscoveryClient) getCachedFile(filename string) ([]byte, error) { - // after invalidation ignore cache files not created by this process - d.mutex.Lock() - _, ourFile := d.ourFiles[filename] - if d.invalidated && !ourFile { - d.mutex.Unlock() - return nil, errors.New("cache invalidated") - } - d.mutex.Unlock() - - file, err := os.Open(filename) - if err != nil { - return nil, err - } - defer file.Close() - - fileInfo, err := file.Stat() - if err != nil { - return nil, err - } - - if time.Now().After(fileInfo.ModTime().Add(d.ttl)) { - return nil, errors.New("cache expired") - } - - // the cache is present and its valid. Try to read and use it. - cachedBytes, err := ioutil.ReadAll(file) - if err != nil { - return nil, err - } - - d.mutex.Lock() - defer d.mutex.Unlock() - d.fresh = d.fresh && ourFile - - return cachedBytes, nil -} - -func (d *CachedDiscoveryClient) writeCachedFile(filename string, obj runtime.Object) error { - if err := os.MkdirAll(filepath.Dir(filename), 0750); err != nil { - return err - } - - bytes, err := runtime.Encode(scheme.Codecs.LegacyCodec(), obj) - if err != nil { - return err - } - - f, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)+".") - if err != nil { - return err - } - defer os.Remove(f.Name()) - _, err = f.Write(bytes) - if err != nil { - return err - } - - err = os.Chmod(f.Name(), 0660) - if err != nil { - return err - } - - name := f.Name() - err = f.Close() - if err != nil { - return err - } - - // atomic rename - d.mutex.Lock() - defer d.mutex.Unlock() - err = os.Rename(name, filename) - if err == nil { - d.ourFiles[filename] = struct{}{} - } - return err -} - -// RESTClient returns a RESTClient that is used to communicate with API server -// by this client implementation. -func (d *CachedDiscoveryClient) RESTClient() restclient.Interface { - return d.delegate.RESTClient() -} - -// ServerPreferredResources returns the supported resources with the version preferred by the -// server. -func (d *CachedDiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { - return discovery.ServerPreferredResources(d) -} - -// ServerPreferredNamespacedResources returns the supported namespaced resources with the -// version preferred by the server. -func (d *CachedDiscoveryClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { - return discovery.ServerPreferredNamespacedResources(d) -} - -// ServerVersion retrieves and parses the server's version (git version). -func (d *CachedDiscoveryClient) ServerVersion() (*version.Info, error) { - return d.delegate.ServerVersion() -} - -// OpenAPISchema retrieves and parses the swagger API schema the server supports. 
-func (d *CachedDiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { - return d.delegate.OpenAPISchema() -} - -// OpenAPIV3 retrieves and parses the OpenAPIV3 specs exposed by the server -func (d *CachedDiscoveryClient) OpenAPIV3() openapi.Client { - // Must take lock since Invalidate call may modify openapiClient - d.mutex.Lock() - defer d.mutex.Unlock() - - if d.openapiClient == nil { - // Delegate is discovery client created with special HTTP client which - // respects E-Tag cache responses to serve cache from disk. - d.openapiClient = cachedopenapi.NewClient(d.delegate.OpenAPIV3()) - } - - return d.openapiClient -} - -// Fresh is supposed to tell the caller whether or not to retry if the cache -// fails to find something (false = retry, true = no need to retry). -func (d *CachedDiscoveryClient) Fresh() bool { - d.mutex.Lock() - defer d.mutex.Unlock() - - return d.fresh -} - -// Invalidate enforces that no cached data is used in the future that is older than the current time. -func (d *CachedDiscoveryClient) Invalidate() { - d.mutex.Lock() - defer d.mutex.Unlock() - - d.ourFiles = map[string]struct{}{} - d.fresh = true - d.invalidated = true - d.openapiClient = nil -} - -// NewCachedDiscoveryClientForConfig creates a new DiscoveryClient for the given config, and wraps -// the created client in a CachedDiscoveryClient. The provided configuration is updated with a -// custom transport that understands cache responses. -// We receive two distinct cache directories for now, in order to preserve old behavior -// which makes use of the --cache-dir flag value for storing cache data from the CacheRoundTripper, -// and makes use of the hardcoded destination (~/.kube/cache/discovery/...) for storing -// CachedDiscoveryClient cache data. If httpCacheDir is empty, the restconfig's transport will not -// be updated with a roundtripper that understands cache responses. -// If discoveryCacheDir is empty, cached server resource data will be looked up in the current directory. -func NewCachedDiscoveryClientForConfig(config *restclient.Config, discoveryCacheDir, httpCacheDir string, ttl time.Duration) (*CachedDiscoveryClient, error) { - if len(httpCacheDir) > 0 { - // update the given restconfig with a custom roundtripper that - // understands how to handle cache responses. - config = restclient.CopyConfig(config) - config.Wrap(func(rt http.RoundTripper) http.RoundTripper { - return newCacheRoundTripper(httpCacheDir, rt) - }) - } - - discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) - if err != nil { - return nil, err - } - - return newCachedDiscoveryClient(discoveryClient, discoveryCacheDir, ttl), nil -} - -// NewCachedDiscoveryClient creates a new DiscoveryClient. cacheDirectory is the directory where discovery docs are held. It must be unique per host:port combination to work well. -func newCachedDiscoveryClient(delegate discovery.DiscoveryInterface, cacheDirectory string, ttl time.Duration) *CachedDiscoveryClient { - return &CachedDiscoveryClient{ - delegate: delegate, - cacheDirectory: cacheDirectory, - ttl: ttl, - ourFiles: map[string]struct{}{}, - fresh: true, - } -} diff --git a/src/vendor/k8s.io/client-go/discovery/cached/disk/round_tripper.go b/src/vendor/k8s.io/client-go/discovery/cached/disk/round_tripper.go deleted file mode 100644 index f3a4b2947..000000000 --- a/src/vendor/k8s.io/client-go/discovery/cached/disk/round_tripper.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package disk - -import ( - "bytes" - "crypto/sha256" - "fmt" - "net/http" - "os" - "path/filepath" - - "github.com/gregjones/httpcache" - "github.com/peterbourgon/diskv" - "k8s.io/klog/v2" -) - -type cacheRoundTripper struct { - rt *httpcache.Transport -} - -// newCacheRoundTripper creates a roundtripper that reads the ETag on -// response headers and send the If-None-Match header on subsequent -// corresponding requests. -func newCacheRoundTripper(cacheDir string, rt http.RoundTripper) http.RoundTripper { - d := diskv.New(diskv.Options{ - PathPerm: os.FileMode(0750), - FilePerm: os.FileMode(0660), - BasePath: cacheDir, - TempDir: filepath.Join(cacheDir, ".diskv-temp"), - }) - t := httpcache.NewTransport(&sumDiskCache{disk: d}) - t.Transport = rt - - return &cacheRoundTripper{rt: t} -} - -func (rt *cacheRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - return rt.rt.RoundTrip(req) -} - -func (rt *cacheRoundTripper) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := rt.rt.Transport.(canceler); ok { - cr.CancelRequest(req) - } else { - klog.Errorf("CancelRequest not implemented by %T", rt.rt.Transport) - } -} - -func (rt *cacheRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt.Transport } - -// A sumDiskCache is a cache backend for github.com/gregjones/httpcache. It is -// similar to httpcache's diskcache package, but uses SHA256 sums to ensure -// cache integrity at read time rather than fsyncing each cache entry to -// increase the likelihood they will be persisted at write time. This avoids -// significant performance degradation on MacOS. -// -// See https://github.com/kubernetes/kubernetes/issues/110753 for more. -type sumDiskCache struct { - disk *diskv.Diskv -} - -// Get the requested key from the cache on disk. If Get encounters an error, or -// the returned value is not a SHA256 sum followed by bytes with a matching -// checksum it will return false to indicate a cache miss. -func (c *sumDiskCache) Get(key string) ([]byte, bool) { - b, err := c.disk.Read(sanitize(key)) - if err != nil || len(b) < sha256.Size { - return []byte{}, false - } - - response := b[sha256.Size:] - want := b[:sha256.Size] // The first 32 bytes of the file should be the SHA256 sum. - got := sha256.Sum256(response) - if !bytes.Equal(want, got[:]) { - return []byte{}, false - } - - return response, true -} - -// Set writes the response to a file on disk. The filename will be the SHA256 -// sum of the key. The file will contain a SHA256 sum of the response bytes, -// followed by said response bytes. -func (c *sumDiskCache) Set(key string, response []byte) { - s := sha256.Sum256(response) - _ = c.disk.Write(sanitize(key), append(s[:], response...)) // Nothing we can do with this error. -} - -func (c *sumDiskCache) Delete(key string) { - _ = c.disk.Erase(sanitize(key)) // Nothing we can do with this error. 
-} - -// Sanitize an httpcache key such that it can be used as a diskv key, which must -// be a valid filename. The httpcache key will either be the requested URL (if -// the request method was GET) or " " for other methods, per the -// httpcache.cacheKey function. -func sanitize(key string) string { - // These keys are not sensitive. We use sha256 to avoid a (potentially - // malicious) collision causing the wrong cache data to be written or - // accessed. - return fmt.Sprintf("%x", sha256.Sum256([]byte(key))) -} diff --git a/src/vendor/k8s.io/client-go/discovery/discovery_client.go b/src/vendor/k8s.io/client-go/discovery/discovery_client.go deleted file mode 100644 index ec7ebf051..000000000 --- a/src/vendor/k8s.io/client-go/discovery/discovery_client.go +++ /dev/null @@ -1,529 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package discovery - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "sort" - "strings" - "sync" - "time" - - //nolint:staticcheck // SA1019 Keep using module since it's still being maintained and the api of google.golang.org/protobuf/proto differs - "github.com/golang/protobuf/proto" - openapi_v2 "github.com/google/gnostic/openapiv2" - - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/version" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/openapi" - restclient "k8s.io/client-go/rest" -) - -const ( - // defaultRetries is the number of times a resource discovery is repeated if an api group disappears on the fly (e.g. CustomResourceDefinitions). - defaultRetries = 2 - // protobuf mime type - openAPIV2mimePb = "application/com.github.proto-openapi.spec.v2@v1.0+protobuf" - - // defaultTimeout is the maximum amount of time per request when no timeout has been set on a RESTClient. - // Defaults to 32s in order to have a distinguishable length of time, relative to other timeouts that exist. - defaultTimeout = 32 * time.Second - - // defaultBurst is the default burst to be used with the discovery client's token bucket rate limiter - defaultBurst = 300 -) - -// DiscoveryInterface holds the methods that discover server-supported API groups, -// versions and resources. -type DiscoveryInterface interface { - RESTClient() restclient.Interface - ServerGroupsInterface - ServerResourcesInterface - ServerVersionInterface - OpenAPISchemaInterface - OpenAPIV3SchemaInterface -} - -// CachedDiscoveryInterface is a DiscoveryInterface with cache invalidation and freshness. -// Note that If the ServerResourcesForGroupVersion method returns a cache miss -// error, the user needs to explicitly call Invalidate to clear the cache, -// otherwise the same cache miss error will be returned next time. 
-type CachedDiscoveryInterface interface { - DiscoveryInterface - // Fresh is supposed to tell the caller whether or not to retry if the cache - // fails to find something (false = retry, true = no need to retry). - // - // TODO: this needs to be revisited, this interface can't be locked properly - // and doesn't make a lot of sense. - Fresh() bool - // Invalidate enforces that no cached data that is older than the current time - // is used. - Invalidate() -} - -// ServerGroupsInterface has methods for obtaining supported groups on the API server -type ServerGroupsInterface interface { - // ServerGroups returns the supported groups, with information like supported versions and the - // preferred version. - ServerGroups() (*metav1.APIGroupList, error) -} - -// ServerResourcesInterface has methods for obtaining supported resources on the API server -type ServerResourcesInterface interface { - // ServerResourcesForGroupVersion returns the supported resources for a group and version. - ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) - // ServerGroupsAndResources returns the supported groups and resources for all groups and versions. - // - // The returned group and resource lists might be non-nil with partial results even in the - // case of non-nil error. - ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) - // ServerPreferredResources returns the supported resources with the version preferred by the - // server. - // - // The returned group and resource lists might be non-nil with partial results even in the - // case of non-nil error. - ServerPreferredResources() ([]*metav1.APIResourceList, error) - // ServerPreferredNamespacedResources returns the supported namespaced resources with the - // version preferred by the server. - // - // The returned resource list might be non-nil with partial results even in the case of - // non-nil error. - ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) -} - -// ServerVersionInterface has a method for retrieving the server's version. -type ServerVersionInterface interface { - // ServerVersion retrieves and parses the server's version (git version). - ServerVersion() (*version.Info, error) -} - -// OpenAPISchemaInterface has a method to retrieve the open API schema. -type OpenAPISchemaInterface interface { - // OpenAPISchema retrieves and parses the swagger API schema the server supports. - OpenAPISchema() (*openapi_v2.Document, error) -} - -type OpenAPIV3SchemaInterface interface { - OpenAPIV3() openapi.Client -} - -// DiscoveryClient implements the functions that discover server-supported API groups, -// versions and resources. -type DiscoveryClient struct { - restClient restclient.Interface - - LegacyPrefix string -} - -// Convert metav1.APIVersions to metav1.APIGroup. APIVersions is used by legacy v1, so -// group would be "". -func apiVersionsToAPIGroup(apiVersions *metav1.APIVersions) (apiGroup metav1.APIGroup) { - groupVersions := []metav1.GroupVersionForDiscovery{} - for _, version := range apiVersions.Versions { - groupVersion := metav1.GroupVersionForDiscovery{ - GroupVersion: version, - Version: version, - } - groupVersions = append(groupVersions, groupVersion) - } - apiGroup.Versions = groupVersions - // There should be only one groupVersion returned at /api - apiGroup.PreferredVersion = groupVersions[0] - return -} - -// ServerGroups returns the supported groups, with information like supported versions and the -// preferred version. 
-func (d *DiscoveryClient) ServerGroups() (apiGroupList *metav1.APIGroupList, err error) { - // Get the groupVersions exposed at /api - v := &metav1.APIVersions{} - err = d.restClient.Get().AbsPath(d.LegacyPrefix).Do(context.TODO()).Into(v) - apiGroup := metav1.APIGroup{} - if err == nil && len(v.Versions) != 0 { - apiGroup = apiVersionsToAPIGroup(v) - } - if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) { - return nil, err - } - - // Get the groupVersions exposed at /apis - apiGroupList = &metav1.APIGroupList{} - err = d.restClient.Get().AbsPath("/apis").Do(context.TODO()).Into(apiGroupList) - if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) { - return nil, err - } - // to be compatible with a v1.0 server, if it's a 403 or 404, ignore and return whatever we got from /api - if err != nil && (errors.IsNotFound(err) || errors.IsForbidden(err)) { - apiGroupList = &metav1.APIGroupList{} - } - - // prepend the group retrieved from /api to the list if not empty - if len(v.Versions) != 0 { - apiGroupList.Groups = append([]metav1.APIGroup{apiGroup}, apiGroupList.Groups...) - } - return apiGroupList, nil -} - -// ServerResourcesForGroupVersion returns the supported resources for a group and version. -func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (resources *metav1.APIResourceList, err error) { - url := url.URL{} - if len(groupVersion) == 0 { - return nil, fmt.Errorf("groupVersion shouldn't be empty") - } - if len(d.LegacyPrefix) > 0 && groupVersion == "v1" { - url.Path = d.LegacyPrefix + "/" + groupVersion - } else { - url.Path = "/apis/" + groupVersion - } - resources = &metav1.APIResourceList{ - GroupVersion: groupVersion, - } - err = d.restClient.Get().AbsPath(url.String()).Do(context.TODO()).Into(resources) - if err != nil { - // ignore 403 or 404 error to be compatible with an v1.0 server. - if groupVersion == "v1" && (errors.IsNotFound(err) || errors.IsForbidden(err)) { - return resources, nil - } - return nil, err - } - return resources, nil -} - -// ServerGroupsAndResources returns the supported resources for all groups and versions. -func (d *DiscoveryClient) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { - return withRetries(defaultRetries, func() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { - return ServerGroupsAndResources(d) - }) -} - -// ErrGroupDiscoveryFailed is returned if one or more API groups fail to load. -type ErrGroupDiscoveryFailed struct { - // Groups is a list of the groups that failed to load and the error cause - Groups map[schema.GroupVersion]error -} - -// Error implements the error interface -func (e *ErrGroupDiscoveryFailed) Error() string { - var groups []string - for k, v := range e.Groups { - groups = append(groups, fmt.Sprintf("%s: %v", k, v)) - } - sort.Strings(groups) - return fmt.Sprintf("unable to retrieve the complete list of server APIs: %s", strings.Join(groups, ", ")) -} - -// IsGroupDiscoveryFailedError returns true if the provided error indicates the server was unable to discover -// a complete list of APIs for the client to use. 
-func IsGroupDiscoveryFailedError(err error) bool { - _, ok := err.(*ErrGroupDiscoveryFailed) - return err != nil && ok -} - -func ServerGroupsAndResources(d DiscoveryInterface) ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { - sgs, err := d.ServerGroups() - if sgs == nil { - return nil, nil, err - } - resultGroups := []*metav1.APIGroup{} - for i := range sgs.Groups { - resultGroups = append(resultGroups, &sgs.Groups[i]) - } - - groupVersionResources, failedGroups := fetchGroupVersionResources(d, sgs) - - // order results by group/version discovery order - result := []*metav1.APIResourceList{} - for _, apiGroup := range sgs.Groups { - for _, version := range apiGroup.Versions { - gv := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} - if resources, ok := groupVersionResources[gv]; ok { - result = append(result, resources) - } - } - } - - if len(failedGroups) == 0 { - return resultGroups, result, nil - } - - return resultGroups, result, &ErrGroupDiscoveryFailed{Groups: failedGroups} -} - -// ServerPreferredResources uses the provided discovery interface to look up preferred resources -func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) { - serverGroupList, err := d.ServerGroups() - if err != nil { - return nil, err - } - - groupVersionResources, failedGroups := fetchGroupVersionResources(d, serverGroupList) - - result := []*metav1.APIResourceList{} - grVersions := map[schema.GroupResource]string{} // selected version of a GroupResource - grAPIResources := map[schema.GroupResource]*metav1.APIResource{} // selected APIResource for a GroupResource - gvAPIResourceLists := map[schema.GroupVersion]*metav1.APIResourceList{} // blueprint for a APIResourceList for later grouping - - for _, apiGroup := range serverGroupList.Groups { - for _, version := range apiGroup.Versions { - groupVersion := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} - - apiResourceList, ok := groupVersionResources[groupVersion] - if !ok { - continue - } - - // create empty list which is filled later in another loop - emptyAPIResourceList := metav1.APIResourceList{ - GroupVersion: version.GroupVersion, - } - gvAPIResourceLists[groupVersion] = &emptyAPIResourceList - result = append(result, &emptyAPIResourceList) - - for i := range apiResourceList.APIResources { - apiResource := &apiResourceList.APIResources[i] - if strings.Contains(apiResource.Name, "/") { - continue - } - gv := schema.GroupResource{Group: apiGroup.Name, Resource: apiResource.Name} - if _, ok := grAPIResources[gv]; ok && version.Version != apiGroup.PreferredVersion.Version { - // only override with preferred version - continue - } - grVersions[gv] = version.Version - grAPIResources[gv] = apiResource - } - } - } - - // group selected APIResources according to GroupVersion into APIResourceLists - for groupResource, apiResource := range grAPIResources { - version := grVersions[groupResource] - groupVersion := schema.GroupVersion{Group: groupResource.Group, Version: version} - apiResourceList := gvAPIResourceLists[groupVersion] - apiResourceList.APIResources = append(apiResourceList.APIResources, *apiResource) - } - - if len(failedGroups) == 0 { - return result, nil - } - - return result, &ErrGroupDiscoveryFailed{Groups: failedGroups} -} - -// fetchServerResourcesForGroupVersions uses the discovery client to fetch the resources for the specified groups in parallel. 
-func fetchGroupVersionResources(d DiscoveryInterface, apiGroups *metav1.APIGroupList) (map[schema.GroupVersion]*metav1.APIResourceList, map[schema.GroupVersion]error) { - groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList) - failedGroups := make(map[schema.GroupVersion]error) - - wg := &sync.WaitGroup{} - resultLock := &sync.Mutex{} - for _, apiGroup := range apiGroups.Groups { - for _, version := range apiGroup.Versions { - groupVersion := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} - wg.Add(1) - go func() { - defer wg.Done() - defer utilruntime.HandleCrash() - - apiResourceList, err := d.ServerResourcesForGroupVersion(groupVersion.String()) - - // lock to record results - resultLock.Lock() - defer resultLock.Unlock() - - if err != nil { - // TODO: maybe restrict this to NotFound errors - failedGroups[groupVersion] = err - } - if apiResourceList != nil { - // even in case of error, some fallback might have been returned - groupVersionResources[groupVersion] = apiResourceList - } - }() - } - } - wg.Wait() - - return groupVersionResources, failedGroups -} - -// ServerPreferredResources returns the supported resources with the version preferred by the -// server. -func (d *DiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { - _, rs, err := withRetries(defaultRetries, func() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { - rs, err := ServerPreferredResources(d) - return nil, rs, err - }) - return rs, err -} - -// ServerPreferredNamespacedResources returns the supported namespaced resources with the -// version preferred by the server. -func (d *DiscoveryClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { - return ServerPreferredNamespacedResources(d) -} - -// ServerPreferredNamespacedResources uses the provided discovery interface to look up preferred namespaced resources -func ServerPreferredNamespacedResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) { - all, err := ServerPreferredResources(d) - return FilteredBy(ResourcePredicateFunc(func(groupVersion string, r *metav1.APIResource) bool { - return r.Namespaced - }), all), err -} - -// ServerVersion retrieves and parses the server's version (git version). -func (d *DiscoveryClient) ServerVersion() (*version.Info, error) { - body, err := d.restClient.Get().AbsPath("/version").Do(context.TODO()).Raw() - if err != nil { - return nil, err - } - var info version.Info - err = json.Unmarshal(body, &info) - if err != nil { - return nil, fmt.Errorf("unable to parse the server version: %v", err) - } - return &info, nil -} - -// OpenAPISchema fetches the open api v2 schema using a rest client and parses the proto. 
-func (d *DiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { - data, err := d.restClient.Get().AbsPath("/openapi/v2").SetHeader("Accept", openAPIV2mimePb).Do(context.TODO()).Raw() - if err != nil { - if errors.IsForbidden(err) || errors.IsNotFound(err) || errors.IsNotAcceptable(err) { - // single endpoint not found/registered in old server, try to fetch old endpoint - // TODO: remove this when kubectl/client-go don't work with 1.9 server - data, err = d.restClient.Get().AbsPath("/swagger-2.0.0.pb-v1").Do(context.TODO()).Raw() - if err != nil { - return nil, err - } - } else { - return nil, err - } - } - document := &openapi_v2.Document{} - err = proto.Unmarshal(data, document) - if err != nil { - return nil, err - } - return document, nil -} - -func (d *DiscoveryClient) OpenAPIV3() openapi.Client { - return openapi.NewClient(d.restClient) -} - -// withRetries retries the given recovery function in case the groups supported by the server change after ServerGroup() returns. -func withRetries(maxRetries int, f func() ([]*metav1.APIGroup, []*metav1.APIResourceList, error)) ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { - var result []*metav1.APIResourceList - var resultGroups []*metav1.APIGroup - var err error - for i := 0; i < maxRetries; i++ { - resultGroups, result, err = f() - if err == nil { - return resultGroups, result, nil - } - if _, ok := err.(*ErrGroupDiscoveryFailed); !ok { - return nil, nil, err - } - } - return resultGroups, result, err -} - -func setDiscoveryDefaults(config *restclient.Config) error { - config.APIPath = "" - config.GroupVersion = nil - if config.Timeout == 0 { - config.Timeout = defaultTimeout - } - // if a burst limit is not already configured - if config.Burst == 0 { - // discovery is expected to be bursty, increase the default burst - // to accommodate looking up resource info for many API groups. - // matches burst set by ConfigFlags#ToDiscoveryClient(). - // see https://issue.k8s.io/86149 - config.Burst = defaultBurst - } - codec := runtime.NoopEncoder{Decoder: scheme.Codecs.UniversalDecoder()} - config.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec}) - if len(config.UserAgent) == 0 { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - return nil -} - -// NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config. This client -// can be used to discover supported resources in the API server. -// NewDiscoveryClientForConfig is equivalent to NewDiscoveryClientForConfigAndClient(c, httpClient), -// where httpClient was generated with rest.HTTPClientFor(c). -func NewDiscoveryClientForConfig(c *restclient.Config) (*DiscoveryClient, error) { - config := *c - if err := setDiscoveryDefaults(&config); err != nil { - return nil, err - } - httpClient, err := restclient.HTTPClientFor(&config) - if err != nil { - return nil, err - } - return NewDiscoveryClientForConfigAndClient(&config, httpClient) -} - -// NewDiscoveryClientForConfigAndClient creates a new DiscoveryClient for the given config. This client -// can be used to discover supported resources in the API server. -// Note the http client provided takes precedence over the configured transport values. 
-func NewDiscoveryClientForConfigAndClient(c *restclient.Config, httpClient *http.Client) (*DiscoveryClient, error) { - config := *c - if err := setDiscoveryDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.UnversionedRESTClientForConfigAndClient(&config, httpClient) - return &DiscoveryClient{restClient: client, LegacyPrefix: "/api"}, err -} - -// NewDiscoveryClientForConfigOrDie creates a new DiscoveryClient for the given config. If -// there is an error, it panics. -func NewDiscoveryClientForConfigOrDie(c *restclient.Config) *DiscoveryClient { - client, err := NewDiscoveryClientForConfig(c) - if err != nil { - panic(err) - } - return client - -} - -// NewDiscoveryClient returns a new DiscoveryClient for the given RESTClient. -func NewDiscoveryClient(c restclient.Interface) *DiscoveryClient { - return &DiscoveryClient{restClient: c, LegacyPrefix: "/api"} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (d *DiscoveryClient) RESTClient() restclient.Interface { - if d == nil { - return nil - } - return d.restClient -} diff --git a/src/vendor/k8s.io/client-go/discovery/doc.go b/src/vendor/k8s.io/client-go/discovery/doc.go deleted file mode 100644 index 6baa1ef2a..000000000 --- a/src/vendor/k8s.io/client-go/discovery/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package discovery provides ways to discover server-supported -// API groups, versions and resources. -package discovery // import "k8s.io/client-go/discovery" diff --git a/src/vendor/k8s.io/client-go/discovery/helper.go b/src/vendor/k8s.io/client-go/discovery/helper.go deleted file mode 100644 index e79f073b0..000000000 --- a/src/vendor/k8s.io/client-go/discovery/helper.go +++ /dev/null @@ -1,146 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package discovery - -import ( - "fmt" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" - apimachineryversion "k8s.io/apimachinery/pkg/version" -) - -// IsResourceEnabled queries the server to determine if the resource specified is present on the server. -// This is particularly helpful when writing a controller or an e2e test that requires a particular resource to function. 
-func IsResourceEnabled(client DiscoveryInterface, resourceToCheck schema.GroupVersionResource) (bool, error) { - // this is a single request. The ServerResourcesForGroupVersion handles the core v1 group as legacy. - resourceList, err := client.ServerResourcesForGroupVersion(resourceToCheck.GroupVersion().String()) - if apierrors.IsNotFound(err) { // if the discovery endpoint isn't present, then the resource isn't present. - return false, nil - } - if err != nil { - return false, err - } - for _, actualResource := range resourceList.APIResources { - if actualResource.Name == resourceToCheck.Resource { - return true, nil - } - } - - return false, nil -} - -// MatchesServerVersion queries the server to compares the build version -// (git hash) of the client with the server's build version. It returns an error -// if it failed to contact the server or if the versions are not an exact match. -func MatchesServerVersion(clientVersion apimachineryversion.Info, client DiscoveryInterface) error { - sVer, err := client.ServerVersion() - if err != nil { - return fmt.Errorf("couldn't read version from server: %v", err) - } - // GitVersion includes GitCommit and GitTreeState, but best to be safe? - if clientVersion.GitVersion != sVer.GitVersion || clientVersion.GitCommit != sVer.GitCommit || clientVersion.GitTreeState != sVer.GitTreeState { - return fmt.Errorf("server version (%#v) differs from client version (%#v)", sVer, clientVersion) - } - - return nil -} - -// ServerSupportsVersion returns an error if the server doesn't have the required version -func ServerSupportsVersion(client DiscoveryInterface, requiredGV schema.GroupVersion) error { - groups, err := client.ServerGroups() - if err != nil { - // This is almost always a connection error, and higher level code should treat this as a generic error, - // not a negotiation specific error. - return err - } - versions := metav1.ExtractGroupVersions(groups) - serverVersions := sets.String{} - for _, v := range versions { - serverVersions.Insert(v) - } - - if serverVersions.Has(requiredGV.String()) { - return nil - } - - // If the server supports no versions, then we should pretend it has the version because of old servers. - // This can happen because discovery fails due to 403 Forbidden errors - if len(serverVersions) == 0 { - return nil - } - - return fmt.Errorf("server does not support API version %q", requiredGV) -} - -// GroupVersionResources converts APIResourceLists to the GroupVersionResources. -func GroupVersionResources(rls []*metav1.APIResourceList) (map[schema.GroupVersionResource]struct{}, error) { - gvrs := map[schema.GroupVersionResource]struct{}{} - for _, rl := range rls { - gv, err := schema.ParseGroupVersion(rl.GroupVersion) - if err != nil { - return nil, err - } - for i := range rl.APIResources { - gvrs[schema.GroupVersionResource{Group: gv.Group, Version: gv.Version, Resource: rl.APIResources[i].Name}] = struct{}{} - } - } - return gvrs, nil -} - -// FilteredBy filters by the given predicate. Empty APIResourceLists are dropped. 
-func FilteredBy(pred ResourcePredicate, rls []*metav1.APIResourceList) []*metav1.APIResourceList { - result := []*metav1.APIResourceList{} - for _, rl := range rls { - filtered := *rl - filtered.APIResources = nil - for i := range rl.APIResources { - if pred.Match(rl.GroupVersion, &rl.APIResources[i]) { - filtered.APIResources = append(filtered.APIResources, rl.APIResources[i]) - } - } - if filtered.APIResources != nil { - result = append(result, &filtered) - } - } - return result -} - -// ResourcePredicate has a method to check if a resource matches a given condition. -type ResourcePredicate interface { - Match(groupVersion string, r *metav1.APIResource) bool -} - -// ResourcePredicateFunc returns true if it matches a resource based on a custom condition. -type ResourcePredicateFunc func(groupVersion string, r *metav1.APIResource) bool - -// Match is a wrapper around ResourcePredicateFunc. -func (fn ResourcePredicateFunc) Match(groupVersion string, r *metav1.APIResource) bool { - return fn(groupVersion, r) -} - -// SupportsAllVerbs is a predicate matching a resource iff all given verbs are supported. -type SupportsAllVerbs struct { - Verbs []string -} - -// Match checks if a resource contains all the given verbs. -func (p SupportsAllVerbs) Match(groupVersion string, r *metav1.APIResource) bool { - return sets.NewString([]string(r.Verbs)...).HasAll(p.Verbs...) -} diff --git a/src/vendor/k8s.io/client-go/dynamic/interface.go b/src/vendor/k8s.io/client-go/dynamic/interface.go deleted file mode 100644 index a310b63e5..000000000 --- a/src/vendor/k8s.io/client-go/dynamic/interface.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package dynamic - -import ( - "context" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/watch" -) - -type Interface interface { - Resource(resource schema.GroupVersionResource) NamespaceableResourceInterface -} - -type ResourceInterface interface { - Create(ctx context.Context, obj *unstructured.Unstructured, options metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error) - Update(ctx context.Context, obj *unstructured.Unstructured, options metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error) - UpdateStatus(ctx context.Context, obj *unstructured.Unstructured, options metav1.UpdateOptions) (*unstructured.Unstructured, error) - Delete(ctx context.Context, name string, options metav1.DeleteOptions, subresources ...string) error - DeleteCollection(ctx context.Context, options metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(ctx context.Context, name string, options metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) - List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) - Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, options metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error) - Apply(ctx context.Context, name string, obj *unstructured.Unstructured, options metav1.ApplyOptions, subresources ...string) (*unstructured.Unstructured, error) - ApplyStatus(ctx context.Context, name string, obj *unstructured.Unstructured, options metav1.ApplyOptions) (*unstructured.Unstructured, error) -} - -type NamespaceableResourceInterface interface { - Namespace(string) ResourceInterface - ResourceInterface -} - -// APIPathResolverFunc knows how to convert a groupVersion to its API path. The Kind field is optional. -// TODO find a better place to move this for existing callers -type APIPathResolverFunc func(kind schema.GroupVersionKind) string - -// LegacyAPIPathResolverFunc can resolve paths properly with the legacy API. -// TODO find a better place to move this for existing callers -func LegacyAPIPathResolverFunc(kind schema.GroupVersionKind) string { - if len(kind.Group) == 0 { - return "/api" - } - return "/apis" -} diff --git a/src/vendor/k8s.io/client-go/dynamic/scheme.go b/src/vendor/k8s.io/client-go/dynamic/scheme.go deleted file mode 100644 index 3168c872c..000000000 --- a/src/vendor/k8s.io/client-go/dynamic/scheme.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package dynamic - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/apimachinery/pkg/runtime/serializer/json" -) - -var watchScheme = runtime.NewScheme() -var basicScheme = runtime.NewScheme() -var deleteScheme = runtime.NewScheme() -var parameterScheme = runtime.NewScheme() -var deleteOptionsCodec = serializer.NewCodecFactory(deleteScheme) -var dynamicParameterCodec = runtime.NewParameterCodec(parameterScheme) - -var versionV1 = schema.GroupVersion{Version: "v1"} - -func init() { - metav1.AddToGroupVersion(watchScheme, versionV1) - metav1.AddToGroupVersion(basicScheme, versionV1) - metav1.AddToGroupVersion(parameterScheme, versionV1) - metav1.AddToGroupVersion(deleteScheme, versionV1) -} - -// basicNegotiatedSerializer is used to handle discovery and error handling serialization -type basicNegotiatedSerializer struct{} - -func (s basicNegotiatedSerializer) SupportedMediaTypes() []runtime.SerializerInfo { - return []runtime.SerializerInfo{ - { - MediaType: "application/json", - MediaTypeType: "application", - MediaTypeSubType: "json", - EncodesAsText: true, - Serializer: json.NewSerializer(json.DefaultMetaFactory, unstructuredCreater{basicScheme}, unstructuredTyper{basicScheme}, false), - PrettySerializer: json.NewSerializer(json.DefaultMetaFactory, unstructuredCreater{basicScheme}, unstructuredTyper{basicScheme}, true), - StreamSerializer: &runtime.StreamSerializerInfo{ - EncodesAsText: true, - Serializer: json.NewSerializer(json.DefaultMetaFactory, basicScheme, basicScheme, false), - Framer: json.Framer, - }, - }, - } -} - -func (s basicNegotiatedSerializer) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder { - return runtime.WithVersionEncoder{ - Version: gv, - Encoder: encoder, - ObjectTyper: unstructuredTyper{basicScheme}, - } -} - -func (s basicNegotiatedSerializer) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder { - return decoder -} - -type unstructuredCreater struct { - nested runtime.ObjectCreater -} - -func (c unstructuredCreater) New(kind schema.GroupVersionKind) (runtime.Object, error) { - out, err := c.nested.New(kind) - if err == nil { - return out, nil - } - out = &unstructured.Unstructured{} - out.GetObjectKind().SetGroupVersionKind(kind) - return out, nil -} - -type unstructuredTyper struct { - nested runtime.ObjectTyper -} - -func (t unstructuredTyper) ObjectKinds(obj runtime.Object) ([]schema.GroupVersionKind, bool, error) { - kinds, unversioned, err := t.nested.ObjectKinds(obj) - if err == nil { - return kinds, unversioned, nil - } - if _, ok := obj.(runtime.Unstructured); ok && !obj.GetObjectKind().GroupVersionKind().Empty() { - return []schema.GroupVersionKind{obj.GetObjectKind().GroupVersionKind()}, false, nil - } - return nil, false, err -} - -func (t unstructuredTyper) Recognizes(gvk schema.GroupVersionKind) bool { - return true -} diff --git a/src/vendor/k8s.io/client-go/dynamic/simple.go b/src/vendor/k8s.io/client-go/dynamic/simple.go deleted file mode 100644 index 9dc0fb5c0..000000000 --- a/src/vendor/k8s.io/client-go/dynamic/simple.go +++ /dev/null @@ -1,388 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package dynamic - -import ( - "context" - "fmt" - "net/http" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/rest" -) - -type dynamicClient struct { - client *rest.RESTClient -} - -var _ Interface = &dynamicClient{} - -// ConfigFor returns a copy of the provided config with the -// appropriate dynamic client defaults set. -func ConfigFor(inConfig *rest.Config) *rest.Config { - config := rest.CopyConfig(inConfig) - config.AcceptContentTypes = "application/json" - config.ContentType = "application/json" - config.NegotiatedSerializer = basicNegotiatedSerializer{} // this gets used for discovery and error handling types - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - return config -} - -// NewForConfigOrDie creates a new Interface for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) Interface { - ret, err := NewForConfig(c) - if err != nil { - panic(err) - } - return ret -} - -// NewForConfig creates a new dynamic client or returns an error. -// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), -// where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(inConfig *rest.Config) (Interface, error) { - config := ConfigFor(inConfig) - - httpClient, err := rest.HTTPClientFor(config) - if err != nil { - return nil, err - } - return NewForConfigAndClient(config, httpClient) -} - -// NewForConfigAndClient creates a new dynamic client for the given config and http client. -// Note the http client provided takes precedence over the configured transport values. 
-func NewForConfigAndClient(inConfig *rest.Config, h *http.Client) (Interface, error) { - config := ConfigFor(inConfig) - // for serializing the options - config.GroupVersion = &schema.GroupVersion{} - config.APIPath = "/if-you-see-this-search-for-the-break" - - restClient, err := rest.RESTClientForConfigAndClient(config, h) - if err != nil { - return nil, err - } - return &dynamicClient{client: restClient}, nil -} - -type dynamicResourceClient struct { - client *dynamicClient - namespace string - resource schema.GroupVersionResource -} - -func (c *dynamicClient) Resource(resource schema.GroupVersionResource) NamespaceableResourceInterface { - return &dynamicResourceClient{client: c, resource: resource} -} - -func (c *dynamicResourceClient) Namespace(ns string) ResourceInterface { - ret := *c - ret.namespace = ns - return &ret -} - -func (c *dynamicResourceClient) Create(ctx context.Context, obj *unstructured.Unstructured, opts metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error) { - outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) - if err != nil { - return nil, err - } - name := "" - if len(subresources) > 0 { - accessor, err := meta.Accessor(obj) - if err != nil { - return nil, err - } - name = accessor.GetName() - if len(name) == 0 { - return nil, fmt.Errorf("name is required") - } - } - - result := c.client.client. - Post(). - AbsPath(append(c.makeURLSegments(name), subresources...)...). - SetHeader("Content-Type", runtime.ContentTypeJSON). - Body(outBytes). - SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). - Do(ctx) - if err := result.Error(); err != nil { - return nil, err - } - - retBytes, err := result.Raw() - if err != nil { - return nil, err - } - uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes) - if err != nil { - return nil, err - } - return uncastObj.(*unstructured.Unstructured), nil -} - -func (c *dynamicResourceClient) Update(ctx context.Context, obj *unstructured.Unstructured, opts metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error) { - accessor, err := meta.Accessor(obj) - if err != nil { - return nil, err - } - name := accessor.GetName() - if len(name) == 0 { - return nil, fmt.Errorf("name is required") - } - outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) - if err != nil { - return nil, err - } - - result := c.client.client. - Put(). - AbsPath(append(c.makeURLSegments(name), subresources...)...). - SetHeader("Content-Type", runtime.ContentTypeJSON). - Body(outBytes). - SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). - Do(ctx) - if err := result.Error(); err != nil { - return nil, err - } - - retBytes, err := result.Raw() - if err != nil { - return nil, err - } - uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes) - if err != nil { - return nil, err - } - return uncastObj.(*unstructured.Unstructured), nil -} - -func (c *dynamicResourceClient) UpdateStatus(ctx context.Context, obj *unstructured.Unstructured, opts metav1.UpdateOptions) (*unstructured.Unstructured, error) { - accessor, err := meta.Accessor(obj) - if err != nil { - return nil, err - } - name := accessor.GetName() - if len(name) == 0 { - return nil, fmt.Errorf("name is required") - } - - outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) - if err != nil { - return nil, err - } - - result := c.client.client. - Put(). - AbsPath(append(c.makeURLSegments(name), "status")...). 
- SetHeader("Content-Type", runtime.ContentTypeJSON). - Body(outBytes). - SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). - Do(ctx) - if err := result.Error(); err != nil { - return nil, err - } - - retBytes, err := result.Raw() - if err != nil { - return nil, err - } - uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes) - if err != nil { - return nil, err - } - return uncastObj.(*unstructured.Unstructured), nil -} - -func (c *dynamicResourceClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions, subresources ...string) error { - if len(name) == 0 { - return fmt.Errorf("name is required") - } - deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts) - if err != nil { - return err - } - - result := c.client.client. - Delete(). - AbsPath(append(c.makeURLSegments(name), subresources...)...). - SetHeader("Content-Type", runtime.ContentTypeJSON). - Body(deleteOptionsByte). - Do(ctx) - return result.Error() -} - -func (c *dynamicResourceClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOptions metav1.ListOptions) error { - deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts) - if err != nil { - return err - } - - result := c.client.client. - Delete(). - AbsPath(c.makeURLSegments("")...). - SetHeader("Content-Type", runtime.ContentTypeJSON). - Body(deleteOptionsByte). - SpecificallyVersionedParams(&listOptions, dynamicParameterCodec, versionV1). - Do(ctx) - return result.Error() -} - -func (c *dynamicResourceClient) Get(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) { - if len(name) == 0 { - return nil, fmt.Errorf("name is required") - } - result := c.client.client.Get().AbsPath(append(c.makeURLSegments(name), subresources...)...).SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).Do(ctx) - if err := result.Error(); err != nil { - return nil, err - } - retBytes, err := result.Raw() - if err != nil { - return nil, err - } - uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes) - if err != nil { - return nil, err - } - return uncastObj.(*unstructured.Unstructured), nil -} - -func (c *dynamicResourceClient) List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) { - result := c.client.client.Get().AbsPath(c.makeURLSegments("")...).SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).Do(ctx) - if err := result.Error(); err != nil { - return nil, err - } - retBytes, err := result.Raw() - if err != nil { - return nil, err - } - uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes) - if err != nil { - return nil, err - } - if list, ok := uncastObj.(*unstructured.UnstructuredList); ok { - return list, nil - } - - list, err := uncastObj.(*unstructured.Unstructured).ToList() - if err != nil { - return nil, err - } - return list, nil -} - -func (c *dynamicResourceClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.client.Get().AbsPath(c.makeURLSegments("")...). - SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). 
- Watch(ctx) -} - -func (c *dynamicResourceClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error) { - if len(name) == 0 { - return nil, fmt.Errorf("name is required") - } - result := c.client.client. - Patch(pt). - AbsPath(append(c.makeURLSegments(name), subresources...)...). - Body(data). - SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). - Do(ctx) - if err := result.Error(); err != nil { - return nil, err - } - retBytes, err := result.Raw() - if err != nil { - return nil, err - } - uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes) - if err != nil { - return nil, err - } - return uncastObj.(*unstructured.Unstructured), nil -} - -func (c *dynamicResourceClient) Apply(ctx context.Context, name string, obj *unstructured.Unstructured, opts metav1.ApplyOptions, subresources ...string) (*unstructured.Unstructured, error) { - if len(name) == 0 { - return nil, fmt.Errorf("name is required") - } - outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) - if err != nil { - return nil, err - } - accessor, err := meta.Accessor(obj) - if err != nil { - return nil, err - } - managedFields := accessor.GetManagedFields() - if len(managedFields) > 0 { - return nil, fmt.Errorf(`cannot apply an object with managed fields already set. - Use the client-go/applyconfigurations "UnstructructuredExtractor" to obtain the unstructured ApplyConfiguration for the given field manager that you can use/modify here to apply`) - } - patchOpts := opts.ToPatchOptions() - - result := c.client.client. - Patch(types.ApplyPatchType). - AbsPath(append(c.makeURLSegments(name), subresources...)...). - Body(outBytes). - SpecificallyVersionedParams(&patchOpts, dynamicParameterCodec, versionV1). - Do(ctx) - if err := result.Error(); err != nil { - return nil, err - } - retBytes, err := result.Raw() - if err != nil { - return nil, err - } - uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes) - if err != nil { - return nil, err - } - return uncastObj.(*unstructured.Unstructured), nil -} -func (c *dynamicResourceClient) ApplyStatus(ctx context.Context, name string, obj *unstructured.Unstructured, opts metav1.ApplyOptions) (*unstructured.Unstructured, error) { - return c.Apply(ctx, name, obj, opts, "status") -} - -func (c *dynamicResourceClient) makeURLSegments(name string) []string { - url := []string{} - if len(c.resource.Group) == 0 { - url = append(url, "api") - } else { - url = append(url, "apis", c.resource.Group) - } - url = append(url, c.resource.Version) - - if len(c.namespace) > 0 { - url = append(url, "namespaces", c.namespace) - } - url = append(url, c.resource.Resource) - - if len(name) > 0 { - url = append(url, name) - } - - return url -} diff --git a/src/vendor/k8s.io/client-go/openapi/cached/client.go b/src/vendor/k8s.io/client-go/openapi/cached/client.go deleted file mode 100644 index 17f63ed26..000000000 --- a/src/vendor/k8s.io/client-go/openapi/cached/client.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cached - -import ( - "sync" - - "k8s.io/client-go/openapi" -) - -type client struct { - delegate openapi.Client - - once sync.Once - result map[string]openapi.GroupVersion - err error -} - -func NewClient(other openapi.Client) openapi.Client { - return &client{ - delegate: other, - } -} - -func (c *client) Paths() (map[string]openapi.GroupVersion, error) { - c.once.Do(func() { - uncached, err := c.delegate.Paths() - if err != nil { - c.err = err - return - } - - result := make(map[string]openapi.GroupVersion, len(uncached)) - for k, v := range uncached { - result[k] = newGroupVersion(v) - } - c.result = result - }) - return c.result, c.err -} diff --git a/src/vendor/k8s.io/client-go/openapi/cached/groupversion.go b/src/vendor/k8s.io/client-go/openapi/cached/groupversion.go deleted file mode 100644 index ba78b048b..000000000 --- a/src/vendor/k8s.io/client-go/openapi/cached/groupversion.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cached - -import ( - "sync" - - openapi_v3 "github.com/google/gnostic/openapiv3" - "k8s.io/client-go/openapi" -) - -type groupversion struct { - delegate openapi.GroupVersion - once sync.Once - doc *openapi_v3.Document - err error -} - -func newGroupVersion(delegate openapi.GroupVersion) *groupversion { - return &groupversion{ - delegate: delegate, - } -} - -func (g *groupversion) Schema() (*openapi_v3.Document, error) { - g.once.Do(func() { - g.doc, g.err = g.delegate.Schema() - }) - - return g.doc, g.err -} diff --git a/src/vendor/k8s.io/client-go/openapi/client.go b/src/vendor/k8s.io/client-go/openapi/client.go deleted file mode 100644 index 7b58762ac..000000000 --- a/src/vendor/k8s.io/client-go/openapi/client.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package openapi - -import ( - "context" - "encoding/json" - - "k8s.io/client-go/rest" - "k8s.io/kube-openapi/pkg/handler3" -) - -type Client interface { - Paths() (map[string]GroupVersion, error) -} - -type client struct { - // URL includes the `hash` query param to take advantage of cache busting - restClient rest.Interface -} - -func NewClient(restClient rest.Interface) Client { - return &client{ - restClient: restClient, - } -} - -func (c *client) Paths() (map[string]GroupVersion, error) { - data, err := c.restClient.Get(). - AbsPath("/openapi/v3"). - Do(context.TODO()). - Raw() - - if err != nil { - return nil, err - } - - discoMap := &handler3.OpenAPIV3Discovery{} - err = json.Unmarshal(data, discoMap) - if err != nil { - return nil, err - } - - // Create GroupVersions for each element of the result - result := map[string]GroupVersion{} - for k, v := range discoMap.Paths { - result[k] = newGroupVersion(c, v) - } - return result, nil -} diff --git a/src/vendor/k8s.io/client-go/openapi/groupversion.go b/src/vendor/k8s.io/client-go/openapi/groupversion.go deleted file mode 100644 index 7c35833b4..000000000 --- a/src/vendor/k8s.io/client-go/openapi/groupversion.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package openapi - -import ( - "context" - - openapi_v3 "github.com/google/gnostic/openapiv3" - "google.golang.org/protobuf/proto" - "k8s.io/kube-openapi/pkg/handler3" -) - -const openAPIV3mimePb = "application/com.github.proto-openapi.spec.v3@v1.0+protobuf" - -type GroupVersion interface { - Schema() (*openapi_v3.Document, error) -} - -type groupversion struct { - client *client - item handler3.OpenAPIV3DiscoveryGroupVersion -} - -func newGroupVersion(client *client, item handler3.OpenAPIV3DiscoveryGroupVersion) *groupversion { - return &groupversion{client: client, item: item} -} - -func (g *groupversion) Schema() (*openapi_v3.Document, error) { - data, err := g.client.restClient.Get(). - RequestURI(g.item.ServerRelativeURL). - SetHeader("Accept", openAPIV3mimePb). - Do(context.TODO()). - Raw() - - if err != nil { - return nil, err - } - - document := &openapi_v3.Document{} - if err := proto.Unmarshal(data, document); err != nil { - return nil, err - } - - return document, nil -} diff --git a/src/vendor/k8s.io/client-go/restmapper/category_expansion.go b/src/vendor/k8s.io/client-go/restmapper/category_expansion.go deleted file mode 100644 index 484e4c839..000000000 --- a/src/vendor/k8s.io/client-go/restmapper/category_expansion.go +++ /dev/null @@ -1,119 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package restmapper - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/discovery" -) - -// CategoryExpander maps category strings to GroupResources. -// Categories are classification or 'tag' of a group of resources. -type CategoryExpander interface { - Expand(category string) ([]schema.GroupResource, bool) -} - -// SimpleCategoryExpander implements CategoryExpander interface -// using a static mapping of categories to GroupResource mapping. -type SimpleCategoryExpander struct { - Expansions map[string][]schema.GroupResource -} - -// Expand fulfills CategoryExpander -func (e SimpleCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { - ret, ok := e.Expansions[category] - return ret, ok -} - -// discoveryCategoryExpander struct lets a REST Client wrapper (discoveryClient) to retrieve list of APIResourceList, -// and then convert to fallbackExpander -type discoveryCategoryExpander struct { - discoveryClient discovery.DiscoveryInterface -} - -// NewDiscoveryCategoryExpander returns a category expander that makes use of the "categories" fields from -// the API, found through the discovery client. In case of any error or no category found (which likely -// means we're at a cluster prior to categories support, fallback to the expander provided. -func NewDiscoveryCategoryExpander(client discovery.DiscoveryInterface) CategoryExpander { - if client == nil { - panic("Please provide discovery client to shortcut expander") - } - return discoveryCategoryExpander{discoveryClient: client} -} - -// Expand fulfills CategoryExpander -func (e discoveryCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { - // Get all supported resources for groups and versions from server, if no resource found, fallback anyway. - _, apiResourceLists, _ := e.discoveryClient.ServerGroupsAndResources() - if len(apiResourceLists) == 0 { - return nil, false - } - - discoveredExpansions := map[string][]schema.GroupResource{} - for _, apiResourceList := range apiResourceLists { - gv, err := schema.ParseGroupVersion(apiResourceList.GroupVersion) - if err != nil { - continue - } - // Collect GroupVersions by categories - for _, apiResource := range apiResourceList.APIResources { - if categories := apiResource.Categories; len(categories) > 0 { - for _, category := range categories { - groupResource := schema.GroupResource{ - Group: gv.Group, - Resource: apiResource.Name, - } - discoveredExpansions[category] = append(discoveredExpansions[category], groupResource) - } - } - } - } - - ret, ok := discoveredExpansions[category] - return ret, ok -} - -// UnionCategoryExpander implements CategoryExpander interface. -// It maps given category string to union of expansions returned by all the CategoryExpanders in the list. -type UnionCategoryExpander []CategoryExpander - -// Expand fulfills CategoryExpander -func (u UnionCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { - ret := []schema.GroupResource{} - ok := false - - // Expand the category for each CategoryExpander in the list and merge/combine the results. 
- for _, expansion := range u { - curr, currOk := expansion.Expand(category) - - for _, currGR := range curr { - found := false - for _, existing := range ret { - if existing == currGR { - found = true - break - } - } - if !found { - ret = append(ret, currGR) - } - } - ok = ok || currOk - } - - return ret, ok -} diff --git a/src/vendor/k8s.io/client-go/restmapper/discovery.go b/src/vendor/k8s.io/client-go/restmapper/discovery.go deleted file mode 100644 index 3505178b6..000000000 --- a/src/vendor/k8s.io/client-go/restmapper/discovery.go +++ /dev/null @@ -1,338 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package restmapper - -import ( - "fmt" - "strings" - "sync" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/discovery" - - "k8s.io/klog/v2" -) - -// APIGroupResources is an API group with a mapping of versions to -// resources. -type APIGroupResources struct { - Group metav1.APIGroup - // A mapping of version string to a slice of APIResources for - // that version. - VersionedResources map[string][]metav1.APIResource -} - -// NewDiscoveryRESTMapper returns a PriorityRESTMapper based on the discovered -// groups and resources passed in. -func NewDiscoveryRESTMapper(groupResources []*APIGroupResources) meta.RESTMapper { - unionMapper := meta.MultiRESTMapper{} - - var groupPriority []string - // /v1 is special. 
It should always come first - resourcePriority := []schema.GroupVersionResource{{Group: "", Version: "v1", Resource: meta.AnyResource}} - kindPriority := []schema.GroupVersionKind{{Group: "", Version: "v1", Kind: meta.AnyKind}} - - for _, group := range groupResources { - groupPriority = append(groupPriority, group.Group.Name) - - // Make sure the preferred version comes first - if len(group.Group.PreferredVersion.Version) != 0 { - preferred := group.Group.PreferredVersion.Version - if _, ok := group.VersionedResources[preferred]; ok { - resourcePriority = append(resourcePriority, schema.GroupVersionResource{ - Group: group.Group.Name, - Version: group.Group.PreferredVersion.Version, - Resource: meta.AnyResource, - }) - - kindPriority = append(kindPriority, schema.GroupVersionKind{ - Group: group.Group.Name, - Version: group.Group.PreferredVersion.Version, - Kind: meta.AnyKind, - }) - } - } - - for _, discoveryVersion := range group.Group.Versions { - resources, ok := group.VersionedResources[discoveryVersion.Version] - if !ok { - continue - } - - // Add non-preferred versions after the preferred version, in case there are resources that only exist in those versions - if discoveryVersion.Version != group.Group.PreferredVersion.Version { - resourcePriority = append(resourcePriority, schema.GroupVersionResource{ - Group: group.Group.Name, - Version: discoveryVersion.Version, - Resource: meta.AnyResource, - }) - - kindPriority = append(kindPriority, schema.GroupVersionKind{ - Group: group.Group.Name, - Version: discoveryVersion.Version, - Kind: meta.AnyKind, - }) - } - - gv := schema.GroupVersion{Group: group.Group.Name, Version: discoveryVersion.Version} - versionMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{gv}) - - for _, resource := range resources { - scope := meta.RESTScopeNamespace - if !resource.Namespaced { - scope = meta.RESTScopeRoot - } - - // if we have a slash, then this is a subresource and we shouldn't create mappings for those. - if strings.Contains(resource.Name, "/") { - continue - } - - plural := gv.WithResource(resource.Name) - singular := gv.WithResource(resource.SingularName) - // this is for legacy resources and servers which don't list singular forms. For those we must still guess. - if len(resource.SingularName) == 0 { - _, singular = meta.UnsafeGuessKindToResource(gv.WithKind(resource.Kind)) - } - - versionMapper.AddSpecific(gv.WithKind(strings.ToLower(resource.Kind)), plural, singular, scope) - versionMapper.AddSpecific(gv.WithKind(resource.Kind), plural, singular, scope) - // TODO this is producing unsafe guesses that don't actually work, but it matches previous behavior - versionMapper.Add(gv.WithKind(resource.Kind+"List"), scope) - } - // TODO why is this type not in discovery (at least for "v1") - versionMapper.Add(gv.WithKind("List"), meta.RESTScopeRoot) - unionMapper = append(unionMapper, versionMapper) - } - } - - for _, group := range groupPriority { - resourcePriority = append(resourcePriority, schema.GroupVersionResource{ - Group: group, - Version: meta.AnyVersion, - Resource: meta.AnyResource, - }) - kindPriority = append(kindPriority, schema.GroupVersionKind{ - Group: group, - Version: meta.AnyVersion, - Kind: meta.AnyKind, - }) - } - - return meta.PriorityRESTMapper{ - Delegate: unionMapper, - ResourcePriority: resourcePriority, - KindPriority: kindPriority, - } -} - -// GetAPIGroupResources uses the provided discovery client to gather -// discovery information and populate a slice of APIGroupResources. 
-func GetAPIGroupResources(cl discovery.DiscoveryInterface) ([]*APIGroupResources, error) { - gs, rs, err := cl.ServerGroupsAndResources() - if rs == nil || gs == nil { - return nil, err - // TODO track the errors and update callers to handle partial errors. - } - rsm := map[string]*metav1.APIResourceList{} - for _, r := range rs { - rsm[r.GroupVersion] = r - } - - var result []*APIGroupResources - for _, group := range gs { - groupResources := &APIGroupResources{ - Group: *group, - VersionedResources: make(map[string][]metav1.APIResource), - } - for _, version := range group.Versions { - resources, ok := rsm[version.GroupVersion] - if !ok { - continue - } - groupResources.VersionedResources[version.Version] = resources.APIResources - } - result = append(result, groupResources) - } - return result, nil -} - -// DeferredDiscoveryRESTMapper is a RESTMapper that will defer -// initialization of the RESTMapper until the first mapping is -// requested. -type DeferredDiscoveryRESTMapper struct { - initMu sync.Mutex - delegate meta.RESTMapper - cl discovery.CachedDiscoveryInterface -} - -// NewDeferredDiscoveryRESTMapper returns a -// DeferredDiscoveryRESTMapper that will lazily query the provided -// client for discovery information to do REST mappings. -func NewDeferredDiscoveryRESTMapper(cl discovery.CachedDiscoveryInterface) *DeferredDiscoveryRESTMapper { - return &DeferredDiscoveryRESTMapper{ - cl: cl, - } -} - -func (d *DeferredDiscoveryRESTMapper) getDelegate() (meta.RESTMapper, error) { - d.initMu.Lock() - defer d.initMu.Unlock() - - if d.delegate != nil { - return d.delegate, nil - } - - groupResources, err := GetAPIGroupResources(d.cl) - if err != nil { - return nil, err - } - - d.delegate = NewDiscoveryRESTMapper(groupResources) - return d.delegate, nil -} - -// Reset resets the internally cached Discovery information and will -// cause the next mapping request to re-discover. -func (d *DeferredDiscoveryRESTMapper) Reset() { - klog.V(5).Info("Invalidating discovery information") - - d.initMu.Lock() - defer d.initMu.Unlock() - - d.cl.Invalidate() - d.delegate = nil -} - -// KindFor takes a partial resource and returns back the single match. -// It returns an error if there are multiple matches. -func (d *DeferredDiscoveryRESTMapper) KindFor(resource schema.GroupVersionResource) (gvk schema.GroupVersionKind, err error) { - del, err := d.getDelegate() - if err != nil { - return schema.GroupVersionKind{}, err - } - gvk, err = del.KindFor(resource) - if err != nil && !d.cl.Fresh() { - d.Reset() - gvk, err = d.KindFor(resource) - } - return -} - -// KindsFor takes a partial resource and returns back the list of -// potential kinds in priority order. -func (d *DeferredDiscoveryRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvks []schema.GroupVersionKind, err error) { - del, err := d.getDelegate() - if err != nil { - return nil, err - } - gvks, err = del.KindsFor(resource) - if len(gvks) == 0 && !d.cl.Fresh() { - d.Reset() - gvks, err = d.KindsFor(resource) - } - return -} - -// ResourceFor takes a partial resource and returns back the single -// match. It returns an error if there are multiple matches. 
-func (d *DeferredDiscoveryRESTMapper) ResourceFor(input schema.GroupVersionResource) (gvr schema.GroupVersionResource, err error) { - del, err := d.getDelegate() - if err != nil { - return schema.GroupVersionResource{}, err - } - gvr, err = del.ResourceFor(input) - if err != nil && !d.cl.Fresh() { - d.Reset() - gvr, err = d.ResourceFor(input) - } - return -} - -// ResourcesFor takes a partial resource and returns back the list of -// potential resource in priority order. -func (d *DeferredDiscoveryRESTMapper) ResourcesFor(input schema.GroupVersionResource) (gvrs []schema.GroupVersionResource, err error) { - del, err := d.getDelegate() - if err != nil { - return nil, err - } - gvrs, err = del.ResourcesFor(input) - if len(gvrs) == 0 && !d.cl.Fresh() { - d.Reset() - gvrs, err = d.ResourcesFor(input) - } - return -} - -// RESTMapping identifies a preferred resource mapping for the -// provided group kind. -func (d *DeferredDiscoveryRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (m *meta.RESTMapping, err error) { - del, err := d.getDelegate() - if err != nil { - return nil, err - } - m, err = del.RESTMapping(gk, versions...) - if err != nil && !d.cl.Fresh() { - d.Reset() - m, err = d.RESTMapping(gk, versions...) - } - return -} - -// RESTMappings returns the RESTMappings for the provided group kind -// in a rough internal preferred order. If no kind is found, it will -// return a NoResourceMatchError. -func (d *DeferredDiscoveryRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) (ms []*meta.RESTMapping, err error) { - del, err := d.getDelegate() - if err != nil { - return nil, err - } - ms, err = del.RESTMappings(gk, versions...) - if len(ms) == 0 && !d.cl.Fresh() { - d.Reset() - ms, err = d.RESTMappings(gk, versions...) - } - return -} - -// ResourceSingularizer converts a resource name from plural to -// singular (e.g., from pods to pod). -func (d *DeferredDiscoveryRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { - del, err := d.getDelegate() - if err != nil { - return resource, err - } - singular, err = del.ResourceSingularizer(resource) - if err != nil && !d.cl.Fresh() { - d.Reset() - singular, err = d.ResourceSingularizer(resource) - } - return -} - -func (d *DeferredDiscoveryRESTMapper) String() string { - del, err := d.getDelegate() - if err != nil { - return fmt.Sprintf("DeferredDiscoveryRESTMapper{%v}", err) - } - return fmt.Sprintf("DeferredDiscoveryRESTMapper{\n\t%v\n}", del) -} - -// Make sure it satisfies the interface -var _ meta.ResettableRESTMapper = &DeferredDiscoveryRESTMapper{} diff --git a/src/vendor/k8s.io/client-go/restmapper/shortcut.go b/src/vendor/k8s.io/client-go/restmapper/shortcut.go deleted file mode 100644 index 7ab3cd46f..000000000 --- a/src/vendor/k8s.io/client-go/restmapper/shortcut.go +++ /dev/null @@ -1,187 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package restmapper - -import ( - "strings" - - "k8s.io/klog/v2" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/discovery" -) - -// shortcutExpander is a RESTMapper that can be used for Kubernetes resources. It expands the resource first, then invokes the wrapped -type shortcutExpander struct { - RESTMapper meta.RESTMapper - - discoveryClient discovery.DiscoveryInterface -} - -var _ meta.ResettableRESTMapper = shortcutExpander{} - -// NewShortcutExpander wraps a restmapper in a layer that expands shortcuts found via discovery -func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface) meta.RESTMapper { - return shortcutExpander{RESTMapper: delegate, discoveryClient: client} -} - -// KindFor fulfills meta.RESTMapper -func (e shortcutExpander) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - // expandResourceShortcut works with current API resources as read from discovery cache. - // In case of new CRDs this means we potentially don't have current state of discovery. - // In the current wiring in k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go#toRESTMapper, - // we are using DeferredDiscoveryRESTMapper which on KindFor failure will clear the - // cache and fetch all data from a cluster (see vendor/k8s.io/client-go/restmapper/discovery.go#KindFor). - // Thus another call to expandResourceShortcut, after a NoMatchError should successfully - // read Kind to the user or an error. - gvk, err := e.RESTMapper.KindFor(e.expandResourceShortcut(resource)) - if meta.IsNoMatchError(err) { - return e.RESTMapper.KindFor(e.expandResourceShortcut(resource)) - } - return gvk, err -} - -// KindsFor fulfills meta.RESTMapper -func (e shortcutExpander) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { - return e.RESTMapper.KindsFor(e.expandResourceShortcut(resource)) -} - -// ResourcesFor fulfills meta.RESTMapper -func (e shortcutExpander) ResourcesFor(resource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { - return e.RESTMapper.ResourcesFor(e.expandResourceShortcut(resource)) -} - -// ResourceFor fulfills meta.RESTMapper -func (e shortcutExpander) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { - return e.RESTMapper.ResourceFor(e.expandResourceShortcut(resource)) -} - -// ResourceSingularizer fulfills meta.RESTMapper -func (e shortcutExpander) ResourceSingularizer(resource string) (string, error) { - return e.RESTMapper.ResourceSingularizer(e.expandResourceShortcut(schema.GroupVersionResource{Resource: resource}).Resource) -} - -// RESTMapping fulfills meta.RESTMapper -func (e shortcutExpander) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { - return e.RESTMapper.RESTMapping(gk, versions...) -} - -// RESTMappings fulfills meta.RESTMapper -func (e shortcutExpander) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { - return e.RESTMapper.RESTMappings(gk, versions...) -} - -// getShortcutMappings returns a set of tuples which holds short names for resources. -// First the list of potential resources will be taken from the API server. -// Next we will append the hardcoded list of resources - to be backward compatible with old servers. -// NOTE that the list is ordered by group priority. 
-func (e shortcutExpander) getShortcutMappings() ([]*metav1.APIResourceList, []resourceShortcuts, error) { - res := []resourceShortcuts{} - // get server resources - // This can return an error *and* the results it was able to find. We don't need to fail on the error. - _, apiResList, err := e.discoveryClient.ServerGroupsAndResources() - if err != nil { - klog.V(1).Infof("Error loading discovery information: %v", err) - } - for _, apiResources := range apiResList { - gv, err := schema.ParseGroupVersion(apiResources.GroupVersion) - if err != nil { - klog.V(1).Infof("Unable to parse groupversion = %s due to = %s", apiResources.GroupVersion, err.Error()) - continue - } - for _, apiRes := range apiResources.APIResources { - for _, shortName := range apiRes.ShortNames { - rs := resourceShortcuts{ - ShortForm: schema.GroupResource{Group: gv.Group, Resource: shortName}, - LongForm: schema.GroupResource{Group: gv.Group, Resource: apiRes.Name}, - } - res = append(res, rs) - } - } - } - - return apiResList, res, nil -} - -// expandResourceShortcut will return the expanded version of resource -// (something that a pkg/api/meta.RESTMapper can understand), if it is -// indeed a shortcut. If no match has been found, we will match on group prefixing. -// Lastly we will return resource unmodified. -func (e shortcutExpander) expandResourceShortcut(resource schema.GroupVersionResource) schema.GroupVersionResource { - // get the shortcut mappings and return on first match. - if allResources, shortcutResources, err := e.getShortcutMappings(); err == nil { - // avoid expanding if there's an exact match to a full resource name - for _, apiResources := range allResources { - gv, err := schema.ParseGroupVersion(apiResources.GroupVersion) - if err != nil { - continue - } - if len(resource.Group) != 0 && resource.Group != gv.Group { - continue - } - for _, apiRes := range apiResources.APIResources { - if resource.Resource == apiRes.Name { - return resource - } - if resource.Resource == apiRes.SingularName { - return resource - } - } - } - - for _, item := range shortcutResources { - if len(resource.Group) != 0 && resource.Group != item.ShortForm.Group { - continue - } - if resource.Resource == item.ShortForm.Resource { - resource.Resource = item.LongForm.Resource - resource.Group = item.LongForm.Group - return resource - } - } - - // we didn't find exact match so match on group prefixing. This allows autoscal to match autoscaling - if len(resource.Group) == 0 { - return resource - } - for _, item := range shortcutResources { - if !strings.HasPrefix(item.ShortForm.Group, resource.Group) { - continue - } - if resource.Resource == item.ShortForm.Resource { - resource.Resource = item.LongForm.Resource - resource.Group = item.LongForm.Group - return resource - } - } - } - - return resource -} - -func (e shortcutExpander) Reset() { - meta.MaybeResetRESTMapper(e.RESTMapper) -} - -// ResourceShortcuts represents a structure that holds the information how to -// transition from resource's shortcut to its full name. -type resourceShortcuts struct { - ShortForm schema.GroupResource - LongForm schema.GroupResource -} diff --git a/src/vendor/k8s.io/client-go/third_party/forked/golang/LICENSE b/src/vendor/k8s.io/client-go/third_party/forked/golang/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/src/vendor/k8s.io/client-go/third_party/forked/golang/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/vendor/k8s.io/client-go/third_party/forked/golang/PATENTS b/src/vendor/k8s.io/client-go/third_party/forked/golang/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/src/vendor/k8s.io/client-go/third_party/forked/golang/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/src/vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go b/src/vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go deleted file mode 100644 index 7cf29524c..000000000 --- a/src/vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go +++ /dev/null @@ -1,52 +0,0 @@ -//This package is copied from Go library text/template. -//The original private functions indirect and printableValue -//are exported as public functions. 
-package template - -import ( - "fmt" - "reflect" -) - -var ( - errorType = reflect.TypeOf((*error)(nil)).Elem() - fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() -) - -// Indirect returns the item at the end of indirection, and a bool to indicate if it's nil. -// We indirect through pointers and empty interfaces (only) because -// non-empty interfaces have methods we might need. -func Indirect(v reflect.Value) (rv reflect.Value, isNil bool) { - for ; v.Kind() == reflect.Pointer || v.Kind() == reflect.Interface; v = v.Elem() { - if v.IsNil() { - return v, true - } - if v.Kind() == reflect.Interface && v.NumMethod() > 0 { - break - } - } - return v, false -} - -// PrintableValue returns the, possibly indirected, interface value inside v that -// is best for a call to formatted printer. -func PrintableValue(v reflect.Value) (interface{}, bool) { - if v.Kind() == reflect.Pointer { - v, _ = Indirect(v) // fmt.Fprint handles nil. - } - if !v.IsValid() { - return "", true - } - - if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) { - if v.CanAddr() && (reflect.PointerTo(v.Type()).Implements(errorType) || reflect.PointerTo(v.Type()).Implements(fmtStringerType)) { - v = v.Addr() - } else { - switch v.Kind() { - case reflect.Chan, reflect.Func: - return nil, false - } - } - } - return v.Interface(), true -} diff --git a/src/vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go b/src/vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go deleted file mode 100644 index f0c8e712c..000000000 --- a/src/vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go +++ /dev/null @@ -1,177 +0,0 @@ -//This package is copied from Go library text/template. -//The original private functions eq, ge, gt, le, lt, and ne -//are exported as public functions. -package template - -import ( - "errors" - "reflect" -) - -var ( - errBadComparisonType = errors.New("invalid type for comparison") - errBadComparison = errors.New("incompatible types for comparison") - errNoComparison = errors.New("missing argument for comparison") -) - -type kind int - -const ( - invalidKind kind = iota - boolKind - complexKind - intKind - floatKind - integerKind - stringKind - uintKind -) - -func basicKind(v reflect.Value) (kind, error) { - switch v.Kind() { - case reflect.Bool: - return boolKind, nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return intKind, nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return uintKind, nil - case reflect.Float32, reflect.Float64: - return floatKind, nil - case reflect.Complex64, reflect.Complex128: - return complexKind, nil - case reflect.String: - return stringKind, nil - } - return invalidKind, errBadComparisonType -} - -// Equal evaluates the comparison a == b || a == c || ... -func Equal(arg1 interface{}, arg2 ...interface{}) (bool, error) { - v1 := reflect.ValueOf(arg1) - k1, err := basicKind(v1) - if err != nil { - return false, err - } - if len(arg2) == 0 { - return false, errNoComparison - } - for _, arg := range arg2 { - v2 := reflect.ValueOf(arg) - k2, err := basicKind(v2) - if err != nil { - return false, err - } - truth := false - if k1 != k2 { - // Special case: Can compare integer values regardless of type's sign. 
- switch { - case k1 == intKind && k2 == uintKind: - truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint() - case k1 == uintKind && k2 == intKind: - truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int()) - default: - return false, errBadComparison - } - } else { - switch k1 { - case boolKind: - truth = v1.Bool() == v2.Bool() - case complexKind: - truth = v1.Complex() == v2.Complex() - case floatKind: - truth = v1.Float() == v2.Float() - case intKind: - truth = v1.Int() == v2.Int() - case stringKind: - truth = v1.String() == v2.String() - case uintKind: - truth = v1.Uint() == v2.Uint() - default: - panic("invalid kind") - } - } - if truth { - return true, nil - } - } - return false, nil -} - -// NotEqual evaluates the comparison a != b. -func NotEqual(arg1, arg2 interface{}) (bool, error) { - // != is the inverse of ==. - equal, err := Equal(arg1, arg2) - return !equal, err -} - -// Less evaluates the comparison a < b. -func Less(arg1, arg2 interface{}) (bool, error) { - v1 := reflect.ValueOf(arg1) - k1, err := basicKind(v1) - if err != nil { - return false, err - } - v2 := reflect.ValueOf(arg2) - k2, err := basicKind(v2) - if err != nil { - return false, err - } - truth := false - if k1 != k2 { - // Special case: Can compare integer values regardless of type's sign. - switch { - case k1 == intKind && k2 == uintKind: - truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint() - case k1 == uintKind && k2 == intKind: - truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int()) - default: - return false, errBadComparison - } - } else { - switch k1 { - case boolKind, complexKind: - return false, errBadComparisonType - case floatKind: - truth = v1.Float() < v2.Float() - case intKind: - truth = v1.Int() < v2.Int() - case stringKind: - truth = v1.String() < v2.String() - case uintKind: - truth = v1.Uint() < v2.Uint() - default: - panic("invalid kind") - } - } - return truth, nil -} - -// LessEqual evaluates the comparison <= b. -func LessEqual(arg1, arg2 interface{}) (bool, error) { - // <= is < or ==. - lessThan, err := Less(arg1, arg2) - if lessThan || err != nil { - return lessThan, err - } - return Equal(arg1, arg2) -} - -// Greater evaluates the comparison a > b. -func Greater(arg1, arg2 interface{}) (bool, error) { - // > is the inverse of <=. - lessOrEqual, err := LessEqual(arg1, arg2) - if err != nil { - return false, err - } - return !lessOrEqual, nil -} - -// GreaterEqual evaluates the comparison a >= b. -func GreaterEqual(arg1, arg2 interface{}) (bool, error) { - // >= is the inverse of <. - lessThan, err := Less(arg1, arg2) - if err != nil { - return false, err - } - return !lessThan, nil -} diff --git a/src/vendor/k8s.io/client-go/tools/auth/OWNERS b/src/vendor/k8s.io/client-go/tools/auth/OWNERS deleted file mode 100644 index c4ea6463d..000000000 --- a/src/vendor/k8s.io/client-go/tools/auth/OWNERS +++ /dev/null @@ -1,8 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: - - sig-auth-authenticators-approvers -reviewers: - - sig-auth-authenticators-reviewers -labels: - - sig/auth diff --git a/src/vendor/k8s.io/client-go/tools/auth/clientauth.go b/src/vendor/k8s.io/client-go/tools/auth/clientauth.go deleted file mode 100644 index ac30271fe..000000000 --- a/src/vendor/k8s.io/client-go/tools/auth/clientauth.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package auth defines a file format for holding authentication -information needed by clients of Kubernetes. Typically, -a Kubernetes cluster will put auth info for the admin in a known -location when it is created, and will (soon) put it in a known -location within a Container's file tree for Containers that -need access to the Kubernetes API. - -Having a defined format allows: - - clients to be implemented in multiple languages - - applications which link clients to be portable across - clusters with different authentication styles (e.g. - some may use SSL Client certs, others may not, etc) - - when the format changes, applications only - need to update this code. - -The file format is json, marshalled from a struct authcfg.Info. - -Clinet libraries in other languages should use the same format. - -It is not intended to store general preferences, such as default -namespace, output options, etc. CLIs (such as kubectl) and UIs should -develop their own format and may wish to inline the authcfg.Info type. - -The authcfg.Info is just a file format. It is distinct from -client.Config which holds options for creating a client.Client. -Helper functions are provided in this package to fill in a -client.Client from an authcfg.Info. - -Example: - - import ( - "pkg/client" - "pkg/client/auth" - ) - - info, err := auth.LoadFromFile(filename) - if err != nil { - // handle error - } - clientConfig = client.Config{} - clientConfig.Host = "example.com:4901" - clientConfig = info.MergeWithConfig() - client := client.New(clientConfig) - client.Pods(ns).List() -*/ -package auth - -// TODO: need a way to rotate Tokens. Therefore, need a way for client object to be reset when the authcfg is updated. -import ( - "encoding/json" - "io/ioutil" - "os" - - restclient "k8s.io/client-go/rest" -) - -// Info holds Kubernetes API authorization config. It is intended -// to be read/written from a file as a JSON object. -type Info struct { - User string - Password string `datapolicy:"password"` - CAFile string - CertFile string - KeyFile string - BearerToken string `datapolicy:"token"` - Insecure *bool -} - -// LoadFromFile parses an Info object from a file path. -// If the file does not exist, then os.IsNotExist(err) == true -func LoadFromFile(path string) (*Info, error) { - var info Info - if _, err := os.Stat(path); os.IsNotExist(err) { - return nil, err - } - data, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - err = json.Unmarshal(data, &info) - if err != nil { - return nil, err - } - return &info, err -} - -// MergeWithConfig returns a copy of a client.Config with values from the Info. -// The fields of client.Config with a corresponding field in the Info are set -// with the value from the Info. 
-func (info Info) MergeWithConfig(c restclient.Config) (restclient.Config, error) { - var config = c - config.Username = info.User - config.Password = info.Password - config.CAFile = info.CAFile - config.CertFile = info.CertFile - config.KeyFile = info.KeyFile - config.BearerToken = info.BearerToken - if info.Insecure != nil { - config.Insecure = *info.Insecure - } - return config, nil -} - -// Complete returns true if the Kubernetes API authorization info is complete. -func (info Info) Complete() bool { - return len(info.User) > 0 || - len(info.CertFile) > 0 || - len(info.BearerToken) > 0 -} diff --git a/src/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go b/src/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go deleted file mode 100644 index 35bb5dde1..000000000 --- a/src/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package latest - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer/json" - "k8s.io/apimachinery/pkg/runtime/serializer/versioning" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/client-go/tools/clientcmd/api" - "k8s.io/client-go/tools/clientcmd/api/v1" -) - -// Version is the string that represents the current external default version. -const Version = "v1" - -var ExternalVersion = schema.GroupVersion{Group: "", Version: "v1"} - -// OldestVersion is the string that represents the oldest server version supported, -// for client code that wants to hardcode the lowest common denominator. -const OldestVersion = "v1" - -// Versions is the list of versions that are recognized in code. The order provided -// may be assumed to be least feature rich to most feature rich, and clients may -// choose to prefer the latter items in the list over the former items when presented -// with a set of versions to choose. -var Versions = []string{"v1"} - -var ( - Codec runtime.Codec - Scheme *runtime.Scheme -) - -func init() { - Scheme = runtime.NewScheme() - utilruntime.Must(api.AddToScheme(Scheme)) - utilruntime.Must(v1.AddToScheme(Scheme)) - yamlSerializer := json.NewYAMLSerializer(json.DefaultMetaFactory, Scheme, Scheme) - Codec = versioning.NewDefaultingCodecForScheme( - Scheme, - yamlSerializer, - yamlSerializer, - schema.GroupVersion{Version: Version}, - runtime.InternalGroupVersioner, - ) -} diff --git a/src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go b/src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go deleted file mode 100644 index 6eee281bc..000000000 --- a/src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "fmt" - "sort" - - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/tools/clientcmd/api" -) - -func Convert_Slice_v1_NamedCluster_To_Map_string_To_Pointer_api_Cluster(in *[]NamedCluster, out *map[string]*api.Cluster, s conversion.Scope) error { - for _, curr := range *in { - newCluster := api.NewCluster() - if err := Convert_v1_Cluster_To_api_Cluster(&curr.Cluster, newCluster, s); err != nil { - return err - } - if *out == nil { - *out = make(map[string]*api.Cluster) - } - if (*out)[curr.Name] == nil { - (*out)[curr.Name] = newCluster - } else { - return fmt.Errorf("error converting *[]NamedCluster into *map[string]*api.Cluster: duplicate name \"%v\" in list: %v", curr.Name, *in) - } - } - return nil -} - -func Convert_Map_string_To_Pointer_api_Cluster_To_Slice_v1_NamedCluster(in *map[string]*api.Cluster, out *[]NamedCluster, s conversion.Scope) error { - allKeys := make([]string, 0, len(*in)) - for key := range *in { - allKeys = append(allKeys, key) - } - sort.Strings(allKeys) - - for _, key := range allKeys { - newCluster := (*in)[key] - oldCluster := Cluster{} - if err := Convert_api_Cluster_To_v1_Cluster(newCluster, &oldCluster, s); err != nil { - return err - } - namedCluster := NamedCluster{key, oldCluster} - *out = append(*out, namedCluster) - } - return nil -} - -func Convert_Slice_v1_NamedAuthInfo_To_Map_string_To_Pointer_api_AuthInfo(in *[]NamedAuthInfo, out *map[string]*api.AuthInfo, s conversion.Scope) error { - for _, curr := range *in { - newAuthInfo := api.NewAuthInfo() - if err := Convert_v1_AuthInfo_To_api_AuthInfo(&curr.AuthInfo, newAuthInfo, s); err != nil { - return err - } - if *out == nil { - *out = make(map[string]*api.AuthInfo) - } - if (*out)[curr.Name] == nil { - (*out)[curr.Name] = newAuthInfo - } else { - return fmt.Errorf("error converting *[]NamedAuthInfo into *map[string]*api.AuthInfo: duplicate name \"%v\" in list: %v", curr.Name, *in) - } - } - return nil -} - -func Convert_Map_string_To_Pointer_api_AuthInfo_To_Slice_v1_NamedAuthInfo(in *map[string]*api.AuthInfo, out *[]NamedAuthInfo, s conversion.Scope) error { - allKeys := make([]string, 0, len(*in)) - for key := range *in { - allKeys = append(allKeys, key) - } - sort.Strings(allKeys) - - for _, key := range allKeys { - newAuthInfo := (*in)[key] - oldAuthInfo := AuthInfo{} - if err := Convert_api_AuthInfo_To_v1_AuthInfo(newAuthInfo, &oldAuthInfo, s); err != nil { - return err - } - namedAuthInfo := NamedAuthInfo{key, oldAuthInfo} - *out = append(*out, namedAuthInfo) - } - return nil -} - -func Convert_Slice_v1_NamedContext_To_Map_string_To_Pointer_api_Context(in *[]NamedContext, out *map[string]*api.Context, s conversion.Scope) error { - for _, curr := range *in { - newContext := api.NewContext() - if err := Convert_v1_Context_To_api_Context(&curr.Context, newContext, s); err != nil { - return err - } - if *out == nil { - *out = make(map[string]*api.Context) - } - if (*out)[curr.Name] == nil { - (*out)[curr.Name] = newContext - } else { - return fmt.Errorf("error converting *[]NamedContext into *map[string]*api.Context: duplicate name 
\"%v\" in list: %v", curr.Name, *in) - } - } - return nil -} - -func Convert_Map_string_To_Pointer_api_Context_To_Slice_v1_NamedContext(in *map[string]*api.Context, out *[]NamedContext, s conversion.Scope) error { - allKeys := make([]string, 0, len(*in)) - for key := range *in { - allKeys = append(allKeys, key) - } - sort.Strings(allKeys) - - for _, key := range allKeys { - newContext := (*in)[key] - oldContext := Context{} - if err := Convert_api_Context_To_v1_Context(newContext, &oldContext, s); err != nil { - return err - } - namedContext := NamedContext{key, oldContext} - *out = append(*out, namedContext) - } - return nil -} - -func Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(in *[]NamedExtension, out *map[string]runtime.Object, s conversion.Scope) error { - for _, curr := range *in { - var newExtension runtime.Object - if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&curr.Extension, &newExtension, s); err != nil { - return err - } - if *out == nil { - *out = make(map[string]runtime.Object) - } - if (*out)[curr.Name] == nil { - (*out)[curr.Name] = newExtension - } else { - return fmt.Errorf("error converting *[]NamedExtension into *map[string]runtime.Object: duplicate name \"%v\" in list: %v", curr.Name, *in) - } - } - return nil -} - -func Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(in *map[string]runtime.Object, out *[]NamedExtension, s conversion.Scope) error { - allKeys := make([]string, 0, len(*in)) - for key := range *in { - allKeys = append(allKeys, key) - } - sort.Strings(allKeys) - - for _, key := range allKeys { - newExtension := (*in)[key] - oldExtension := runtime.RawExtension{} - if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&newExtension, &oldExtension, s); err != nil { - return err - } - namedExtension := NamedExtension{key, oldExtension} - *out = append(*out, namedExtension) - } - return nil -} diff --git a/src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/defaults.go b/src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/defaults.go deleted file mode 100644 index bf513dc7c..000000000 --- a/src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/defaults.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "k8s.io/apimachinery/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return RegisterDefaults(scheme) -} - -func SetDefaults_ExecConfig(exec *ExecConfig) { - if len(exec.InteractiveMode) == 0 { - switch exec.APIVersion { - case "client.authentication.k8s.io/v1beta1", "client.authentication.k8s.io/v1alpha1": - // default to IfAvailableExecInteractiveMode for backwards compatibility - exec.InteractiveMode = IfAvailableExecInteractiveMode - default: - // require other versions to explicitly declare whether they want stdin or not - } - } -} diff --git a/src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go b/src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go deleted file mode 100644 index 3ccdebc1c..000000000 --- a/src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:conversion-gen=k8s.io/client-go/tools/clientcmd/api -// +k8s:deepcopy-gen=package -// +k8s:defaulter-gen=Kind - -package v1 diff --git a/src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go b/src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go deleted file mode 100644 index 4a4d4a55f..000000000 --- a/src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// SchemeGroupVersion is group version used to register these objects -// TODO this should be in the "kubeconfig" group -var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder runtime.SchemeBuilder - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. 
- localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs) -} - -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Config{}, - ) - return nil -} - -func (obj *Config) GetObjectKind() schema.ObjectKind { return obj } -func (obj *Config) SetGroupVersionKind(gvk schema.GroupVersionKind) { - obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() -} -func (obj *Config) GroupVersionKind() schema.GroupVersionKind { - return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) -} diff --git a/src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go b/src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go deleted file mode 100644 index 757ed817b..000000000 --- a/src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go +++ /dev/null @@ -1,266 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/apimachinery/pkg/runtime" -) - -// Where possible, json tags match the cli argument names. -// Top level config objects and all values required for proper functioning are not "omitempty". Any truly optional piece of config is allowed to be omitted. - -// Config holds the information needed to build connect to remote kubernetes clusters as a given user -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type Config struct { - // Legacy field from pkg/api/types.go TypeMeta. - // TODO(jlowdermilk): remove this after eliminating downstream dependencies. - // +k8s:conversion-gen=false - // +optional - Kind string `json:"kind,omitempty"` - // Legacy field from pkg/api/types.go TypeMeta. - // TODO(jlowdermilk): remove this after eliminating downstream dependencies. - // +k8s:conversion-gen=false - // +optional - APIVersion string `json:"apiVersion,omitempty"` - // Preferences holds general information to be use for cli interactions - Preferences Preferences `json:"preferences"` - // Clusters is a map of referencable names to cluster configs - Clusters []NamedCluster `json:"clusters"` - // AuthInfos is a map of referencable names to user configs - AuthInfos []NamedAuthInfo `json:"users"` - // Contexts is a map of referencable names to context configs - Contexts []NamedContext `json:"contexts"` - // CurrentContext is the name of the context that you would like to use by default - CurrentContext string `json:"current-context"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - // +optional - Extensions []NamedExtension `json:"extensions,omitempty"` -} - -type Preferences struct { - // +optional - Colors bool `json:"colors,omitempty"` - // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields - // +optional - Extensions []NamedExtension `json:"extensions,omitempty"` -} - -// Cluster contains information about how to communicate with a kubernetes cluster -type Cluster struct { - // Server is the address of the kubernetes cluster (https://hostname:port). - Server string `json:"server"` - // TLSServerName is used to check server certificate. If TLSServerName is empty, the hostname used to contact the server is used. - // +optional - TLSServerName string `json:"tls-server-name,omitempty"` - // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. - // +optional - InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` - // CertificateAuthority is the path to a cert file for the certificate authority. - // +optional - CertificateAuthority string `json:"certificate-authority,omitempty"` - // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority - // +optional - CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` - // ProxyURL is the URL to the proxy to be used for all requests made by this - // client. URLs with "http", "https", and "socks5" schemes are supported. If - // this configuration is not provided or the empty string, the client - // attempts to construct a proxy configuration from http_proxy and - // https_proxy environment variables. If these environment variables are not - // set, the client does not attempt to proxy requests. - // - // socks5 proxying does not currently support spdy streaming endpoints (exec, - // attach, port forward). - // +optional - ProxyURL string `json:"proxy-url,omitempty"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - // +optional - Extensions []NamedExtension `json:"extensions,omitempty"` -} - -// AuthInfo contains information that describes identity information. This is use to tell the kubernetes cluster who you are. -type AuthInfo struct { - // ClientCertificate is the path to a client cert file for TLS. - // +optional - ClientCertificate string `json:"client-certificate,omitempty"` - // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate - // +optional - ClientCertificateData []byte `json:"client-certificate-data,omitempty"` - // ClientKey is the path to a client key file for TLS. - // +optional - ClientKey string `json:"client-key,omitempty"` - // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey - // +optional - ClientKeyData []byte `json:"client-key-data,omitempty" datapolicy:"security-key"` - // Token is the bearer token for authentication to the kubernetes cluster. - // +optional - Token string `json:"token,omitempty" datapolicy:"token"` - // TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence. - // +optional - TokenFile string `json:"tokenFile,omitempty"` - // Impersonate is the username to impersonate. The name matches the flag. - // +optional - Impersonate string `json:"as,omitempty"` - // ImpersonateUID is the uid to impersonate. - // +optional - ImpersonateUID string `json:"as-uid,omitempty"` - // ImpersonateGroups is the groups to impersonate. 
- // +optional - ImpersonateGroups []string `json:"as-groups,omitempty"` - // ImpersonateUserExtra contains additional information for impersonated user. - // +optional - ImpersonateUserExtra map[string][]string `json:"as-user-extra,omitempty"` - // Username is the username for basic authentication to the kubernetes cluster. - // +optional - Username string `json:"username,omitempty"` - // Password is the password for basic authentication to the kubernetes cluster. - // +optional - Password string `json:"password,omitempty" datapolicy:"password"` - // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. - // +optional - AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` - // Exec specifies a custom exec-based authentication plugin for the kubernetes cluster. - // +optional - Exec *ExecConfig `json:"exec,omitempty"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - // +optional - Extensions []NamedExtension `json:"extensions,omitempty"` -} - -// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) -type Context struct { - // Cluster is the name of the cluster for this context - Cluster string `json:"cluster"` - // AuthInfo is the name of the authInfo for this context - AuthInfo string `json:"user"` - // Namespace is the default namespace to use on unspecified requests - // +optional - Namespace string `json:"namespace,omitempty"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - // +optional - Extensions []NamedExtension `json:"extensions,omitempty"` -} - -// NamedCluster relates nicknames to cluster information -type NamedCluster struct { - // Name is the nickname for this Cluster - Name string `json:"name"` - // Cluster holds the cluster information - Cluster Cluster `json:"cluster"` -} - -// NamedContext relates nicknames to context information -type NamedContext struct { - // Name is the nickname for this Context - Name string `json:"name"` - // Context holds the context information - Context Context `json:"context"` -} - -// NamedAuthInfo relates nicknames to auth information -type NamedAuthInfo struct { - // Name is the nickname for this AuthInfo - Name string `json:"name"` - // AuthInfo holds the auth information - AuthInfo AuthInfo `json:"user"` -} - -// NamedExtension relates nicknames to extension information -type NamedExtension struct { - // Name is the nickname for this Extension - Name string `json:"name"` - // Extension holds the extension information - Extension runtime.RawExtension `json:"extension"` -} - -// AuthProviderConfig holds the configuration for a specified auth provider. -type AuthProviderConfig struct { - Name string `json:"name"` - Config map[string]string `json:"config"` -} - -// ExecConfig specifies a command to provide client credentials. The command is exec'd -// and outputs structured stdout holding credentials. -// -// See the client.authentication.k8s.io API group for specifications of the exact input -// and output format -type ExecConfig struct { - // Command to execute. - Command string `json:"command"` - // Arguments to pass to the command when executing it. - // +optional - Args []string `json:"args"` - // Env defines additional environment variables to expose to the process. 
These - // are unioned with the host's environment, as well as variables client-go uses - // to pass argument to the plugin. - // +optional - Env []ExecEnvVar `json:"env"` - - // Preferred input version of the ExecInfo. The returned ExecCredentials MUST use - // the same encoding version as the input. - APIVersion string `json:"apiVersion,omitempty"` - - // This text is shown to the user when the executable doesn't seem to be - // present. For example, `brew install foo-cli` might be a good InstallHint for - // foo-cli on Mac OS systems. - InstallHint string `json:"installHint,omitempty"` - - // ProvideClusterInfo determines whether or not to provide cluster information, - // which could potentially contain very large CA data, to this exec plugin as a - // part of the KUBERNETES_EXEC_INFO environment variable. By default, it is set - // to false. Package k8s.io/client-go/tools/auth/exec provides helper methods for - // reading this environment variable. - ProvideClusterInfo bool `json:"provideClusterInfo"` - - // InteractiveMode determines this plugin's relationship with standard input. Valid - // values are "Never" (this exec plugin never uses standard input), "IfAvailable" (this - // exec plugin wants to use standard input if it is available), or "Always" (this exec - // plugin requires standard input to function). See ExecInteractiveMode values for more - // details. - // - // If APIVersion is client.authentication.k8s.io/v1alpha1 or - // client.authentication.k8s.io/v1beta1, then this field is optional and defaults - // to "IfAvailable" when unset. Otherwise, this field is required. - //+optional - InteractiveMode ExecInteractiveMode `json:"interactiveMode,omitempty"` -} - -// ExecEnvVar is used for setting environment variables when executing an exec-based -// credential plugin. -type ExecEnvVar struct { - Name string `json:"name"` - Value string `json:"value"` -} - -// ExecInteractiveMode is a string that describes an exec plugin's relationship with standard input. -type ExecInteractiveMode string - -const ( - // NeverExecInteractiveMode declares that this exec plugin never needs to use standard - // input, and therefore the exec plugin will be run regardless of whether standard input is - // available for user input. - NeverExecInteractiveMode ExecInteractiveMode = "Never" - // IfAvailableExecInteractiveMode declares that this exec plugin would like to use standard input - // if it is available, but can still operate if standard input is not available. Therefore, the - // exec plugin will be run regardless of whether stdin is available for user input. If standard - // input is available for user input, then it will be provided to this exec plugin. - IfAvailableExecInteractiveMode ExecInteractiveMode = "IfAvailable" - // AlwaysExecInteractiveMode declares that this exec plugin requires standard input in order to - // run, and therefore the exec plugin will only be run if standard input is available for user - // input. If standard input is not available for user input, then the exec plugin will not be run - // and an error will be returned by the exec plugin runner. 
- AlwaysExecInteractiveMode ExecInteractiveMode = "Always" -) diff --git a/src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go b/src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go deleted file mode 100644 index a13bae64d..000000000 --- a/src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go +++ /dev/null @@ -1,456 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. - -package v1 - -import ( - unsafe "unsafe" - - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/client-go/tools/clientcmd/api" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*AuthInfo)(nil), (*api.AuthInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_AuthInfo_To_api_AuthInfo(a.(*AuthInfo), b.(*api.AuthInfo), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*api.AuthInfo)(nil), (*AuthInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_api_AuthInfo_To_v1_AuthInfo(a.(*api.AuthInfo), b.(*AuthInfo), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AuthProviderConfig)(nil), (*api.AuthProviderConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_AuthProviderConfig_To_api_AuthProviderConfig(a.(*AuthProviderConfig), b.(*api.AuthProviderConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*api.AuthProviderConfig)(nil), (*AuthProviderConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_api_AuthProviderConfig_To_v1_AuthProviderConfig(a.(*api.AuthProviderConfig), b.(*AuthProviderConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*Cluster)(nil), (*api.Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Cluster_To_api_Cluster(a.(*Cluster), b.(*api.Cluster), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*api.Cluster)(nil), (*Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_api_Cluster_To_v1_Cluster(a.(*api.Cluster), b.(*Cluster), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*Config)(nil), (*api.Config)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Config_To_api_Config(a.(*Config), b.(*api.Config), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*api.Config)(nil), (*Config)(nil), func(a, b interface{}, scope 
conversion.Scope) error { - return Convert_api_Config_To_v1_Config(a.(*api.Config), b.(*Config), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*Context)(nil), (*api.Context)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Context_To_api_Context(a.(*Context), b.(*api.Context), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*api.Context)(nil), (*Context)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_api_Context_To_v1_Context(a.(*api.Context), b.(*Context), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*ExecConfig)(nil), (*api.ExecConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ExecConfig_To_api_ExecConfig(a.(*ExecConfig), b.(*api.ExecConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*api.ExecConfig)(nil), (*ExecConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_api_ExecConfig_To_v1_ExecConfig(a.(*api.ExecConfig), b.(*ExecConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*ExecEnvVar)(nil), (*api.ExecEnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ExecEnvVar_To_api_ExecEnvVar(a.(*ExecEnvVar), b.(*api.ExecEnvVar), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*api.ExecEnvVar)(nil), (*ExecEnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_api_ExecEnvVar_To_v1_ExecEnvVar(a.(*api.ExecEnvVar), b.(*ExecEnvVar), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*Preferences)(nil), (*api.Preferences)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Preferences_To_api_Preferences(a.(*Preferences), b.(*api.Preferences), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*api.Preferences)(nil), (*Preferences)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_api_Preferences_To_v1_Preferences(a.(*api.Preferences), b.(*Preferences), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*map[string]*api.AuthInfo)(nil), (*[]NamedAuthInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Map_string_To_Pointer_api_AuthInfo_To_Slice_v1_NamedAuthInfo(a.(*map[string]*api.AuthInfo), b.(*[]NamedAuthInfo), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*map[string]*api.Cluster)(nil), (*[]NamedCluster)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Map_string_To_Pointer_api_Cluster_To_Slice_v1_NamedCluster(a.(*map[string]*api.Cluster), b.(*[]NamedCluster), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*map[string]*api.Context)(nil), (*[]NamedContext)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Map_string_To_Pointer_api_Context_To_Slice_v1_NamedContext(a.(*map[string]*api.Context), b.(*[]NamedContext), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*map[string]runtime.Object)(nil), (*[]NamedExtension)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(a.(*map[string]runtime.Object), b.(*[]NamedExtension), scope) - }); err != nil { - return err - } - if err := 
s.AddConversionFunc((*[]NamedAuthInfo)(nil), (*map[string]*api.AuthInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Slice_v1_NamedAuthInfo_To_Map_string_To_Pointer_api_AuthInfo(a.(*[]NamedAuthInfo), b.(*map[string]*api.AuthInfo), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*[]NamedCluster)(nil), (*map[string]*api.Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Slice_v1_NamedCluster_To_Map_string_To_Pointer_api_Cluster(a.(*[]NamedCluster), b.(*map[string]*api.Cluster), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*[]NamedContext)(nil), (*map[string]*api.Context)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Slice_v1_NamedContext_To_Map_string_To_Pointer_api_Context(a.(*[]NamedContext), b.(*map[string]*api.Context), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*[]NamedExtension)(nil), (*map[string]runtime.Object)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(a.(*[]NamedExtension), b.(*map[string]runtime.Object), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1_AuthInfo_To_api_AuthInfo(in *AuthInfo, out *api.AuthInfo, s conversion.Scope) error { - out.ClientCertificate = in.ClientCertificate - out.ClientCertificateData = *(*[]byte)(unsafe.Pointer(&in.ClientCertificateData)) - out.ClientKey = in.ClientKey - out.ClientKeyData = *(*[]byte)(unsafe.Pointer(&in.ClientKeyData)) - out.Token = in.Token - out.TokenFile = in.TokenFile - out.Impersonate = in.Impersonate - out.ImpersonateUID = in.ImpersonateUID - out.ImpersonateGroups = *(*[]string)(unsafe.Pointer(&in.ImpersonateGroups)) - out.ImpersonateUserExtra = *(*map[string][]string)(unsafe.Pointer(&in.ImpersonateUserExtra)) - out.Username = in.Username - out.Password = in.Password - out.AuthProvider = (*api.AuthProviderConfig)(unsafe.Pointer(in.AuthProvider)) - if in.Exec != nil { - in, out := &in.Exec, &out.Exec - *out = new(api.ExecConfig) - if err := Convert_v1_ExecConfig_To_api_ExecConfig(*in, *out, s); err != nil { - return err - } - } else { - out.Exec = nil - } - if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil { - return err - } - return nil -} - -// Convert_v1_AuthInfo_To_api_AuthInfo is an autogenerated conversion function. 
-func Convert_v1_AuthInfo_To_api_AuthInfo(in *AuthInfo, out *api.AuthInfo, s conversion.Scope) error { - return autoConvert_v1_AuthInfo_To_api_AuthInfo(in, out, s) -} - -func autoConvert_api_AuthInfo_To_v1_AuthInfo(in *api.AuthInfo, out *AuthInfo, s conversion.Scope) error { - // INFO: in.LocationOfOrigin opted out of conversion generation - out.ClientCertificate = in.ClientCertificate - out.ClientCertificateData = *(*[]byte)(unsafe.Pointer(&in.ClientCertificateData)) - out.ClientKey = in.ClientKey - out.ClientKeyData = *(*[]byte)(unsafe.Pointer(&in.ClientKeyData)) - out.Token = in.Token - out.TokenFile = in.TokenFile - out.Impersonate = in.Impersonate - out.ImpersonateUID = in.ImpersonateUID - out.ImpersonateGroups = *(*[]string)(unsafe.Pointer(&in.ImpersonateGroups)) - out.ImpersonateUserExtra = *(*map[string][]string)(unsafe.Pointer(&in.ImpersonateUserExtra)) - out.Username = in.Username - out.Password = in.Password - out.AuthProvider = (*AuthProviderConfig)(unsafe.Pointer(in.AuthProvider)) - if in.Exec != nil { - in, out := &in.Exec, &out.Exec - *out = new(ExecConfig) - if err := Convert_api_ExecConfig_To_v1_ExecConfig(*in, *out, s); err != nil { - return err - } - } else { - out.Exec = nil - } - if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil { - return err - } - return nil -} - -// Convert_api_AuthInfo_To_v1_AuthInfo is an autogenerated conversion function. -func Convert_api_AuthInfo_To_v1_AuthInfo(in *api.AuthInfo, out *AuthInfo, s conversion.Scope) error { - return autoConvert_api_AuthInfo_To_v1_AuthInfo(in, out, s) -} - -func autoConvert_v1_AuthProviderConfig_To_api_AuthProviderConfig(in *AuthProviderConfig, out *api.AuthProviderConfig, s conversion.Scope) error { - out.Name = in.Name - out.Config = *(*map[string]string)(unsafe.Pointer(&in.Config)) - return nil -} - -// Convert_v1_AuthProviderConfig_To_api_AuthProviderConfig is an autogenerated conversion function. -func Convert_v1_AuthProviderConfig_To_api_AuthProviderConfig(in *AuthProviderConfig, out *api.AuthProviderConfig, s conversion.Scope) error { - return autoConvert_v1_AuthProviderConfig_To_api_AuthProviderConfig(in, out, s) -} - -func autoConvert_api_AuthProviderConfig_To_v1_AuthProviderConfig(in *api.AuthProviderConfig, out *AuthProviderConfig, s conversion.Scope) error { - out.Name = in.Name - out.Config = *(*map[string]string)(unsafe.Pointer(&in.Config)) - return nil -} - -// Convert_api_AuthProviderConfig_To_v1_AuthProviderConfig is an autogenerated conversion function. -func Convert_api_AuthProviderConfig_To_v1_AuthProviderConfig(in *api.AuthProviderConfig, out *AuthProviderConfig, s conversion.Scope) error { - return autoConvert_api_AuthProviderConfig_To_v1_AuthProviderConfig(in, out, s) -} - -func autoConvert_v1_Cluster_To_api_Cluster(in *Cluster, out *api.Cluster, s conversion.Scope) error { - out.Server = in.Server - out.TLSServerName = in.TLSServerName - out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify - out.CertificateAuthority = in.CertificateAuthority - out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) - out.ProxyURL = in.ProxyURL - if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Cluster_To_api_Cluster is an autogenerated conversion function. 
-func Convert_v1_Cluster_To_api_Cluster(in *Cluster, out *api.Cluster, s conversion.Scope) error { - return autoConvert_v1_Cluster_To_api_Cluster(in, out, s) -} - -func autoConvert_api_Cluster_To_v1_Cluster(in *api.Cluster, out *Cluster, s conversion.Scope) error { - // INFO: in.LocationOfOrigin opted out of conversion generation - out.Server = in.Server - out.TLSServerName = in.TLSServerName - out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify - out.CertificateAuthority = in.CertificateAuthority - out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) - out.ProxyURL = in.ProxyURL - if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil { - return err - } - return nil -} - -// Convert_api_Cluster_To_v1_Cluster is an autogenerated conversion function. -func Convert_api_Cluster_To_v1_Cluster(in *api.Cluster, out *Cluster, s conversion.Scope) error { - return autoConvert_api_Cluster_To_v1_Cluster(in, out, s) -} - -func autoConvert_v1_Config_To_api_Config(in *Config, out *api.Config, s conversion.Scope) error { - // INFO: in.Kind opted out of conversion generation - // INFO: in.APIVersion opted out of conversion generation - if err := Convert_v1_Preferences_To_api_Preferences(&in.Preferences, &out.Preferences, s); err != nil { - return err - } - if err := Convert_Slice_v1_NamedCluster_To_Map_string_To_Pointer_api_Cluster(&in.Clusters, &out.Clusters, s); err != nil { - return err - } - if err := Convert_Slice_v1_NamedAuthInfo_To_Map_string_To_Pointer_api_AuthInfo(&in.AuthInfos, &out.AuthInfos, s); err != nil { - return err - } - if err := Convert_Slice_v1_NamedContext_To_Map_string_To_Pointer_api_Context(&in.Contexts, &out.Contexts, s); err != nil { - return err - } - out.CurrentContext = in.CurrentContext - if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Config_To_api_Config is an autogenerated conversion function. -func Convert_v1_Config_To_api_Config(in *Config, out *api.Config, s conversion.Scope) error { - return autoConvert_v1_Config_To_api_Config(in, out, s) -} - -func autoConvert_api_Config_To_v1_Config(in *api.Config, out *Config, s conversion.Scope) error { - // INFO: in.Kind opted out of conversion generation - // INFO: in.APIVersion opted out of conversion generation - if err := Convert_api_Preferences_To_v1_Preferences(&in.Preferences, &out.Preferences, s); err != nil { - return err - } - if err := Convert_Map_string_To_Pointer_api_Cluster_To_Slice_v1_NamedCluster(&in.Clusters, &out.Clusters, s); err != nil { - return err - } - if err := Convert_Map_string_To_Pointer_api_AuthInfo_To_Slice_v1_NamedAuthInfo(&in.AuthInfos, &out.AuthInfos, s); err != nil { - return err - } - if err := Convert_Map_string_To_Pointer_api_Context_To_Slice_v1_NamedContext(&in.Contexts, &out.Contexts, s); err != nil { - return err - } - out.CurrentContext = in.CurrentContext - if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil { - return err - } - return nil -} - -// Convert_api_Config_To_v1_Config is an autogenerated conversion function. 
-func Convert_api_Config_To_v1_Config(in *api.Config, out *Config, s conversion.Scope) error { - return autoConvert_api_Config_To_v1_Config(in, out, s) -} - -func autoConvert_v1_Context_To_api_Context(in *Context, out *api.Context, s conversion.Scope) error { - out.Cluster = in.Cluster - out.AuthInfo = in.AuthInfo - out.Namespace = in.Namespace - if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Context_To_api_Context is an autogenerated conversion function. -func Convert_v1_Context_To_api_Context(in *Context, out *api.Context, s conversion.Scope) error { - return autoConvert_v1_Context_To_api_Context(in, out, s) -} - -func autoConvert_api_Context_To_v1_Context(in *api.Context, out *Context, s conversion.Scope) error { - // INFO: in.LocationOfOrigin opted out of conversion generation - out.Cluster = in.Cluster - out.AuthInfo = in.AuthInfo - out.Namespace = in.Namespace - if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil { - return err - } - return nil -} - -// Convert_api_Context_To_v1_Context is an autogenerated conversion function. -func Convert_api_Context_To_v1_Context(in *api.Context, out *Context, s conversion.Scope) error { - return autoConvert_api_Context_To_v1_Context(in, out, s) -} - -func autoConvert_v1_ExecConfig_To_api_ExecConfig(in *ExecConfig, out *api.ExecConfig, s conversion.Scope) error { - out.Command = in.Command - out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) - out.Env = *(*[]api.ExecEnvVar)(unsafe.Pointer(&in.Env)) - out.APIVersion = in.APIVersion - out.InstallHint = in.InstallHint - out.ProvideClusterInfo = in.ProvideClusterInfo - out.InteractiveMode = api.ExecInteractiveMode(in.InteractiveMode) - return nil -} - -// Convert_v1_ExecConfig_To_api_ExecConfig is an autogenerated conversion function. -func Convert_v1_ExecConfig_To_api_ExecConfig(in *ExecConfig, out *api.ExecConfig, s conversion.Scope) error { - return autoConvert_v1_ExecConfig_To_api_ExecConfig(in, out, s) -} - -func autoConvert_api_ExecConfig_To_v1_ExecConfig(in *api.ExecConfig, out *ExecConfig, s conversion.Scope) error { - out.Command = in.Command - out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) - out.Env = *(*[]ExecEnvVar)(unsafe.Pointer(&in.Env)) - out.APIVersion = in.APIVersion - out.InstallHint = in.InstallHint - out.ProvideClusterInfo = in.ProvideClusterInfo - // INFO: in.Config opted out of conversion generation - out.InteractiveMode = ExecInteractiveMode(in.InteractiveMode) - // INFO: in.StdinUnavailable opted out of conversion generation - // INFO: in.StdinUnavailableMessage opted out of conversion generation - return nil -} - -// Convert_api_ExecConfig_To_v1_ExecConfig is an autogenerated conversion function. -func Convert_api_ExecConfig_To_v1_ExecConfig(in *api.ExecConfig, out *ExecConfig, s conversion.Scope) error { - return autoConvert_api_ExecConfig_To_v1_ExecConfig(in, out, s) -} - -func autoConvert_v1_ExecEnvVar_To_api_ExecEnvVar(in *ExecEnvVar, out *api.ExecEnvVar, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - return nil -} - -// Convert_v1_ExecEnvVar_To_api_ExecEnvVar is an autogenerated conversion function. 
-func Convert_v1_ExecEnvVar_To_api_ExecEnvVar(in *ExecEnvVar, out *api.ExecEnvVar, s conversion.Scope) error { - return autoConvert_v1_ExecEnvVar_To_api_ExecEnvVar(in, out, s) -} - -func autoConvert_api_ExecEnvVar_To_v1_ExecEnvVar(in *api.ExecEnvVar, out *ExecEnvVar, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - return nil -} - -// Convert_api_ExecEnvVar_To_v1_ExecEnvVar is an autogenerated conversion function. -func Convert_api_ExecEnvVar_To_v1_ExecEnvVar(in *api.ExecEnvVar, out *ExecEnvVar, s conversion.Scope) error { - return autoConvert_api_ExecEnvVar_To_v1_ExecEnvVar(in, out, s) -} - -func autoConvert_v1_Preferences_To_api_Preferences(in *Preferences, out *api.Preferences, s conversion.Scope) error { - out.Colors = in.Colors - if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Preferences_To_api_Preferences is an autogenerated conversion function. -func Convert_v1_Preferences_To_api_Preferences(in *Preferences, out *api.Preferences, s conversion.Scope) error { - return autoConvert_v1_Preferences_To_api_Preferences(in, out, s) -} - -func autoConvert_api_Preferences_To_v1_Preferences(in *api.Preferences, out *Preferences, s conversion.Scope) error { - out.Colors = in.Colors - if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil { - return err - } - return nil -} - -// Convert_api_Preferences_To_v1_Preferences is an autogenerated conversion function. -func Convert_api_Preferences_To_v1_Preferences(in *api.Preferences, out *Preferences, s conversion.Scope) error { - return autoConvert_api_Preferences_To_v1_Preferences(in, out, s) -} diff --git a/src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go b/src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go deleted file mode 100644 index 78492598b..000000000 --- a/src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,349 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AuthInfo) DeepCopyInto(out *AuthInfo) { - *out = *in - if in.ClientCertificateData != nil { - in, out := &in.ClientCertificateData, &out.ClientCertificateData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.ClientKeyData != nil { - in, out := &in.ClientKeyData, &out.ClientKeyData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.ImpersonateGroups != nil { - in, out := &in.ImpersonateGroups, &out.ImpersonateGroups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ImpersonateUserExtra != nil { - in, out := &in.ImpersonateUserExtra, &out.ImpersonateUserExtra - *out = make(map[string][]string, len(*in)) - for key, val := range *in { - var outVal []string - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make([]string, len(*in)) - copy(*out, *in) - } - (*out)[key] = outVal - } - } - if in.AuthProvider != nil { - in, out := &in.AuthProvider, &out.AuthProvider - *out = new(AuthProviderConfig) - (*in).DeepCopyInto(*out) - } - if in.Exec != nil { - in, out := &in.Exec, &out.Exec - *out = new(ExecConfig) - (*in).DeepCopyInto(*out) - } - if in.Extensions != nil { - in, out := &in.Extensions, &out.Extensions - *out = make([]NamedExtension, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthInfo. -func (in *AuthInfo) DeepCopy() *AuthInfo { - if in == nil { - return nil - } - out := new(AuthInfo) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AuthProviderConfig) DeepCopyInto(out *AuthProviderConfig) { - *out = *in - if in.Config != nil { - in, out := &in.Config, &out.Config - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthProviderConfig. -func (in *AuthProviderConfig) DeepCopy() *AuthProviderConfig { - if in == nil { - return nil - } - out := new(AuthProviderConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Cluster) DeepCopyInto(out *Cluster) { - *out = *in - if in.CertificateAuthorityData != nil { - in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.Extensions != nil { - in, out := &in.Extensions, &out.Extensions - *out = make([]NamedExtension, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. -func (in *Cluster) DeepCopy() *Cluster { - if in == nil { - return nil - } - out := new(Cluster) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Config) DeepCopyInto(out *Config) { - *out = *in - in.Preferences.DeepCopyInto(&out.Preferences) - if in.Clusters != nil { - in, out := &in.Clusters, &out.Clusters - *out = make([]NamedCluster, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.AuthInfos != nil { - in, out := &in.AuthInfos, &out.AuthInfos - *out = make([]NamedAuthInfo, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Contexts != nil { - in, out := &in.Contexts, &out.Contexts - *out = make([]NamedContext, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Extensions != nil { - in, out := &in.Extensions, &out.Extensions - *out = make([]NamedExtension, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. -func (in *Config) DeepCopy() *Config { - if in == nil { - return nil - } - out := new(Config) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Config) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Context) DeepCopyInto(out *Context) { - *out = *in - if in.Extensions != nil { - in, out := &in.Extensions, &out.Extensions - *out = make([]NamedExtension, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Context. -func (in *Context) DeepCopy() *Context { - if in == nil { - return nil - } - out := new(Context) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExecConfig) DeepCopyInto(out *ExecConfig) { - *out = *in - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]ExecEnvVar, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecConfig. -func (in *ExecConfig) DeepCopy() *ExecConfig { - if in == nil { - return nil - } - out := new(ExecConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExecEnvVar) DeepCopyInto(out *ExecEnvVar) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecEnvVar. -func (in *ExecEnvVar) DeepCopy() *ExecEnvVar { - if in == nil { - return nil - } - out := new(ExecEnvVar) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamedAuthInfo) DeepCopyInto(out *NamedAuthInfo) { - *out = *in - in.AuthInfo.DeepCopyInto(&out.AuthInfo) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedAuthInfo. 
-func (in *NamedAuthInfo) DeepCopy() *NamedAuthInfo { - if in == nil { - return nil - } - out := new(NamedAuthInfo) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamedCluster) DeepCopyInto(out *NamedCluster) { - *out = *in - in.Cluster.DeepCopyInto(&out.Cluster) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedCluster. -func (in *NamedCluster) DeepCopy() *NamedCluster { - if in == nil { - return nil - } - out := new(NamedCluster) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamedContext) DeepCopyInto(out *NamedContext) { - *out = *in - in.Context.DeepCopyInto(&out.Context) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedContext. -func (in *NamedContext) DeepCopy() *NamedContext { - if in == nil { - return nil - } - out := new(NamedContext) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamedExtension) DeepCopyInto(out *NamedExtension) { - *out = *in - in.Extension.DeepCopyInto(&out.Extension) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedExtension. -func (in *NamedExtension) DeepCopy() *NamedExtension { - if in == nil { - return nil - } - out := new(NamedExtension) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Preferences) DeepCopyInto(out *Preferences) { - *out = *in - if in.Extensions != nil { - in, out := &in.Extensions, &out.Extensions - *out = make([]NamedExtension, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preferences. -func (in *Preferences) DeepCopy() *Preferences { - if in == nil { - return nil - } - out := new(Preferences) - in.DeepCopyInto(out) - return out -} diff --git a/src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.defaults.go b/src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.defaults.go deleted file mode 100644 index 6a57decf6..000000000 --- a/src/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.defaults.go +++ /dev/null @@ -1,43 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by defaulter-gen. DO NOT EDIT. - -package v1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. 
-// All generated defaulters are covering - they call all nested defaulters. -func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&Config{}, func(obj interface{}) { SetObjectDefaults_Config(obj.(*Config)) }) - return nil -} - -func SetObjectDefaults_Config(in *Config) { - for i := range in.AuthInfos { - a := &in.AuthInfos[i] - if a.AuthInfo.Exec != nil { - SetDefaults_ExecConfig(a.AuthInfo.Exec) - } - } -} diff --git a/src/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go b/src/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go deleted file mode 100644 index 5153a95a2..000000000 --- a/src/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go +++ /dev/null @@ -1,111 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clientcmd - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - - "golang.org/x/term" - - clientauth "k8s.io/client-go/tools/auth" -) - -// AuthLoaders are used to build clientauth.Info objects. -type AuthLoader interface { - // LoadAuth takes a path to a config file and can then do anything it needs in order to return a valid clientauth.Info - LoadAuth(path string) (*clientauth.Info, error) -} - -// default implementation of an AuthLoader -type defaultAuthLoader struct{} - -// LoadAuth for defaultAuthLoader simply delegates to clientauth.LoadFromFile -func (*defaultAuthLoader) LoadAuth(path string) (*clientauth.Info, error) { - return clientauth.LoadFromFile(path) -} - -type PromptingAuthLoader struct { - reader io.Reader -} - -// LoadAuth parses an AuthInfo object from a file path. It prompts user and creates file if it doesn't exist. -func (a *PromptingAuthLoader) LoadAuth(path string) (*clientauth.Info, error) { - // Prompt for user/pass and write a file if none exists. 
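For context on the defaulting helpers removed above (SetDefaults_ExecConfig and the generated SetObjectDefaults_Config): when a v1 Config is defaulted, an exec plugin entry that targets the v1alpha1 or v1beta1 client authentication API and leaves InteractiveMode empty gets IfAvailable filled in. A minimal, illustrative sketch, again assuming the vendored package is importable and using an invented plugin command:

package main

import (
	"fmt"

	v1 "k8s.io/client-go/tools/clientcmd/api/v1"
)

func main() {
	cfg := &v1.Config{
		AuthInfos: []v1.NamedAuthInfo{{
			Name: "demo-user",
			AuthInfo: v1.AuthInfo{
				Exec: &v1.ExecConfig{
					Command:    "example-credential-plugin", // hypothetical binary
					APIVersion: "client.authentication.k8s.io/v1beta1",
					// InteractiveMode deliberately left empty.
				},
			},
		}},
	}
	v1.SetObjectDefaults_Config(cfg)
	fmt.Println(cfg.AuthInfos[0].AuthInfo.Exec.InteractiveMode) // IfAvailable
}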
- if _, err := os.Stat(path); os.IsNotExist(err) { - authPtr, err := a.Prompt() - if err != nil { - return nil, err - } - auth := *authPtr - data, err := json.Marshal(auth) - if err != nil { - return &auth, err - } - err = ioutil.WriteFile(path, data, 0600) - return &auth, err - } - authPtr, err := clientauth.LoadFromFile(path) - if err != nil { - return nil, err - } - return authPtr, nil -} - -// Prompt pulls the user and password from a reader -func (a *PromptingAuthLoader) Prompt() (*clientauth.Info, error) { - var err error - auth := &clientauth.Info{} - auth.User, err = promptForString("Username", a.reader, true) - if err != nil { - return nil, err - } - auth.Password, err = promptForString("Password", nil, false) - if err != nil { - return nil, err - } - return auth, nil -} - -func promptForString(field string, r io.Reader, show bool) (result string, err error) { - fmt.Printf("Please enter %s: ", field) - if show { - _, err = fmt.Fscan(r, &result) - } else { - var data []byte - if term.IsTerminal(int(os.Stdin.Fd())) { - data, err = term.ReadPassword(int(os.Stdin.Fd())) - result = string(data) - } else { - return "", fmt.Errorf("error reading input for %s", field) - } - } - return result, err -} - -// NewPromptingAuthLoader is an AuthLoader that parses an AuthInfo object from a file path. It prompts user and creates file if it doesn't exist. -func NewPromptingAuthLoader(reader io.Reader) *PromptingAuthLoader { - return &PromptingAuthLoader{reader} -} - -// NewDefaultAuthLoader returns a default implementation of an AuthLoader that only reads from a config file -func NewDefaultAuthLoader() AuthLoader { - return &defaultAuthLoader{} -} diff --git a/src/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/src/vendor/k8s.io/client-go/tools/clientcmd/client_config.go deleted file mode 100644 index cc37c9fbf..000000000 --- a/src/vendor/k8s.io/client-go/tools/clientcmd/client_config.go +++ /dev/null @@ -1,637 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clientcmd - -import ( - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "strings" - "unicode" - - restclient "k8s.io/client-go/rest" - clientauth "k8s.io/client-go/tools/auth" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - "k8s.io/klog/v2" - - "github.com/imdario/mergo" -) - -const ( - // clusterExtensionKey is reserved in the cluster extensions list for exec plugin config. 
- clusterExtensionKey = "client.authentication.k8s.io/exec" -) - -var ( - // ClusterDefaults has the same behavior as the old EnvVar and DefaultCluster fields - // DEPRECATED will be replaced - ClusterDefaults = clientcmdapi.Cluster{Server: getDefaultServer()} - // DefaultClientConfig represents the legacy behavior of this package for defaulting - // DEPRECATED will be replace - DefaultClientConfig = DirectClientConfig{*clientcmdapi.NewConfig(), "", &ConfigOverrides{ - ClusterDefaults: ClusterDefaults, - }, nil, NewDefaultClientConfigLoadingRules(), promptedCredentials{}} -) - -// getDefaultServer returns a default setting for DefaultClientConfig -// DEPRECATED -func getDefaultServer() string { - if server := os.Getenv("KUBERNETES_MASTER"); len(server) > 0 { - return server - } - return "http://localhost:8080" -} - -// ClientConfig is used to make it easy to get an api server client -type ClientConfig interface { - // RawConfig returns the merged result of all overrides - RawConfig() (clientcmdapi.Config, error) - // ClientConfig returns a complete client config - ClientConfig() (*restclient.Config, error) - // Namespace returns the namespace resulting from the merged - // result of all overrides and a boolean indicating if it was - // overridden - Namespace() (string, bool, error) - // ConfigAccess returns the rules for loading/persisting the config. - ConfigAccess() ConfigAccess -} - -type PersistAuthProviderConfigForUser func(user string) restclient.AuthProviderConfigPersister - -type promptedCredentials struct { - username string - password string `datapolicy:"password"` -} - -// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information -type DirectClientConfig struct { - config clientcmdapi.Config - contextName string - overrides *ConfigOverrides - fallbackReader io.Reader - configAccess ConfigAccess - // promptedCredentials store the credentials input by the user - promptedCredentials promptedCredentials -} - -// NewDefaultClientConfig creates a DirectClientConfig using the config.CurrentContext as the context name -func NewDefaultClientConfig(config clientcmdapi.Config, overrides *ConfigOverrides) ClientConfig { - return &DirectClientConfig{config, config.CurrentContext, overrides, nil, NewDefaultClientConfigLoadingRules(), promptedCredentials{}} -} - -// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information -func NewNonInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, configAccess ConfigAccess) ClientConfig { - return &DirectClientConfig{config, contextName, overrides, nil, configAccess, promptedCredentials{}} -} - -// NewInteractiveClientConfig creates a DirectClientConfig using the passed context name and a reader in case auth information is not provided via files or flags -func NewInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, fallbackReader io.Reader, configAccess ConfigAccess) ClientConfig { - return &DirectClientConfig{config, contextName, overrides, fallbackReader, configAccess, promptedCredentials{}} -} - -// NewClientConfigFromBytes takes your kubeconfig and gives you back a ClientConfig -func NewClientConfigFromBytes(configBytes []byte) (ClientConfig, error) { - config, err := Load(configBytes) - if err != nil { - return nil, err - } - - return &DirectClientConfig{*config, "", 
&ConfigOverrides{}, nil, nil, promptedCredentials{}}, nil -} - -// RESTConfigFromKubeConfig is a convenience method to give back a restconfig from your kubeconfig bytes. -// For programmatic access, this is what you want 80% of the time -func RESTConfigFromKubeConfig(configBytes []byte) (*restclient.Config, error) { - clientConfig, err := NewClientConfigFromBytes(configBytes) - if err != nil { - return nil, err - } - return clientConfig.ClientConfig() -} - -func (config *DirectClientConfig) RawConfig() (clientcmdapi.Config, error) { - return config.config, nil -} - -// ClientConfig implements ClientConfig -func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) { - // check that getAuthInfo, getContext, and getCluster do not return an error. - // Do this before checking if the current config is usable in the event that an - // AuthInfo, Context, or Cluster config with user-defined names are not found. - // This provides a user with the immediate cause for error if one is found - configAuthInfo, err := config.getAuthInfo() - if err != nil { - return nil, err - } - - _, err = config.getContext() - if err != nil { - return nil, err - } - - configClusterInfo, err := config.getCluster() - if err != nil { - return nil, err - } - - if err := config.ConfirmUsable(); err != nil { - return nil, err - } - - clientConfig := &restclient.Config{} - clientConfig.Host = configClusterInfo.Server - if configClusterInfo.ProxyURL != "" { - u, err := parseProxyURL(configClusterInfo.ProxyURL) - if err != nil { - return nil, err - } - clientConfig.Proxy = http.ProxyURL(u) - } - - if config.overrides != nil && len(config.overrides.Timeout) > 0 { - timeout, err := ParseTimeout(config.overrides.Timeout) - if err != nil { - return nil, err - } - clientConfig.Timeout = timeout - } - - if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 { - u.RawQuery = "" - u.Fragment = "" - clientConfig.Host = u.String() - } - if len(configAuthInfo.Impersonate) > 0 { - clientConfig.Impersonate = restclient.ImpersonationConfig{ - UserName: configAuthInfo.Impersonate, - UID: configAuthInfo.ImpersonateUID, - Groups: configAuthInfo.ImpersonateGroups, - Extra: configAuthInfo.ImpersonateUserExtra, - } - } - - // only try to read the auth information if we are secure - if restclient.IsConfigTransportTLS(*clientConfig) { - var err error - var persister restclient.AuthProviderConfigPersister - if config.configAccess != nil { - authInfoName, _ := config.getAuthInfoName() - persister = PersisterForUser(config.configAccess, authInfoName) - } - userAuthPartialConfig, err := config.getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister, configClusterInfo) - if err != nil { - return nil, err - } - mergo.Merge(clientConfig, userAuthPartialConfig, mergo.WithOverride) - - serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo) - if err != nil { - return nil, err - } - mergo.Merge(clientConfig, serverAuthPartialConfig, mergo.WithOverride) - } - - return clientConfig, nil -} - -// clientauth.Info object contain both user identification and server identification. We want different precedence orders for -// both, so we have to split the objects and merge them separately -// we want this order of precedence for the server identification -// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files) -// 2. 
configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority) -// 3. load the ~/.kubernetes_auth file as a default -func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) { - mergedConfig := &restclient.Config{} - - // configClusterInfo holds the information identify the server provided by .kubeconfig - configClientConfig := &restclient.Config{} - configClientConfig.CAFile = configClusterInfo.CertificateAuthority - configClientConfig.CAData = configClusterInfo.CertificateAuthorityData - configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify - configClientConfig.ServerName = configClusterInfo.TLSServerName - mergo.Merge(mergedConfig, configClientConfig, mergo.WithOverride) - - return mergedConfig, nil -} - -// clientauth.Info object contain both user identification and server identification. We want different precedence orders for -// both, so we have to split the objects and merge them separately -// we want this order of precedence for user identification -// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files) -// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority) -// 3. if there is not enough information to identify the user, load try the ~/.kubernetes_auth file -// 4. if there is not enough information to identify the user, prompt if possible -func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) { - mergedConfig := &restclient.Config{} - - // blindly overwrite existing values based on precedence - if len(configAuthInfo.Token) > 0 { - mergedConfig.BearerToken = configAuthInfo.Token - mergedConfig.BearerTokenFile = configAuthInfo.TokenFile - } else if len(configAuthInfo.TokenFile) > 0 { - tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile) - if err != nil { - return nil, err - } - mergedConfig.BearerToken = string(tokenBytes) - mergedConfig.BearerTokenFile = configAuthInfo.TokenFile - } - if len(configAuthInfo.Impersonate) > 0 { - mergedConfig.Impersonate = restclient.ImpersonationConfig{ - UserName: configAuthInfo.Impersonate, - UID: configAuthInfo.ImpersonateUID, - Groups: configAuthInfo.ImpersonateGroups, - Extra: configAuthInfo.ImpersonateUserExtra, - } - } - if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 { - mergedConfig.CertFile = configAuthInfo.ClientCertificate - mergedConfig.CertData = configAuthInfo.ClientCertificateData - mergedConfig.KeyFile = configAuthInfo.ClientKey - mergedConfig.KeyData = configAuthInfo.ClientKeyData - } - if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 { - mergedConfig.Username = configAuthInfo.Username - mergedConfig.Password = configAuthInfo.Password - } - if configAuthInfo.AuthProvider != nil { - mergedConfig.AuthProvider = configAuthInfo.AuthProvider - mergedConfig.AuthConfigPersister = persistAuthConfig - } - if configAuthInfo.Exec != nil { - mergedConfig.ExecProvider = configAuthInfo.Exec - mergedConfig.ExecProvider.InstallHint = cleanANSIEscapeCodes(mergedConfig.ExecProvider.InstallHint) - mergedConfig.ExecProvider.Config = configClusterInfo.Extensions[clusterExtensionKey] - } - - // 
if there still isn't enough information to authenticate the user, try prompting - if !canIdentifyUser(*mergedConfig) && (fallbackReader != nil) { - if len(config.promptedCredentials.username) > 0 && len(config.promptedCredentials.password) > 0 { - mergedConfig.Username = config.promptedCredentials.username - mergedConfig.Password = config.promptedCredentials.password - return mergedConfig, nil - } - prompter := NewPromptingAuthLoader(fallbackReader) - promptedAuthInfo, err := prompter.Prompt() - if err != nil { - return nil, err - } - promptedConfig := makeUserIdentificationConfig(*promptedAuthInfo) - previouslyMergedConfig := mergedConfig - mergedConfig = &restclient.Config{} - mergo.Merge(mergedConfig, promptedConfig, mergo.WithOverride) - mergo.Merge(mergedConfig, previouslyMergedConfig, mergo.WithOverride) - config.promptedCredentials.username = mergedConfig.Username - config.promptedCredentials.password = mergedConfig.Password - } - - return mergedConfig, nil -} - -// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged using mergo for only user identification information -func makeUserIdentificationConfig(info clientauth.Info) *restclient.Config { - config := &restclient.Config{} - config.Username = info.User - config.Password = info.Password - config.CertFile = info.CertFile - config.KeyFile = info.KeyFile - config.BearerToken = info.BearerToken - return config -} - -func canIdentifyUser(config restclient.Config) bool { - return len(config.Username) > 0 || - (len(config.CertFile) > 0 || len(config.CertData) > 0) || - len(config.BearerToken) > 0 || - config.AuthProvider != nil || - config.ExecProvider != nil -} - -// cleanANSIEscapeCodes takes an arbitrary string and ensures that there are no -// ANSI escape sequences that could put the terminal in a weird state (e.g., -// "\e[1m" bolds text) -func cleanANSIEscapeCodes(s string) string { - // spaceControlCharacters includes tab, new line, vertical tab, new page, and - // carriage return. These are in the unicode.Cc category, but that category also - // contains ESC (U+001B) which we don't want. - spaceControlCharacters := unicode.RangeTable{ - R16: []unicode.Range16{ - {Lo: 0x0009, Hi: 0x000D, Stride: 1}, - }, - } - - // Why not make this deny-only (instead of allow-only)? Because unicode.C - // contains newline and tab characters that we want. - allowedRanges := []*unicode.RangeTable{ - unicode.L, - unicode.M, - unicode.N, - unicode.P, - unicode.S, - unicode.Z, - &spaceControlCharacters, - } - builder := strings.Builder{} - for _, roon := range s { - if unicode.IsOneOf(allowedRanges, roon) { - builder.WriteRune(roon) // returns nil error, per go doc - } else { - fmt.Fprintf(&builder, "%U", roon) - } - } - return builder.String() -} - -// Namespace implements ClientConfig -func (config *DirectClientConfig) Namespace() (string, bool, error) { - if config.overrides != nil && config.overrides.Context.Namespace != "" { - // In the event we have an empty config but we do have a namespace override, we should return - // the namespace override instead of having config.ConfirmUsable() return an error. This allows - // things like in-cluster clients to execute `kubectl get pods --namespace=foo` and have the - // --namespace flag honored instead of being ignored. 
- return config.overrides.Context.Namespace, true, nil - } - - if err := config.ConfirmUsable(); err != nil { - return "", false, err - } - - configContext, err := config.getContext() - if err != nil { - return "", false, err - } - - if len(configContext.Namespace) == 0 { - return "default", false, nil - } - - return configContext.Namespace, false, nil -} - -// ConfigAccess implements ClientConfig -func (config *DirectClientConfig) ConfigAccess() ConfigAccess { - return config.configAccess -} - -// ConfirmUsable looks a particular context and determines if that particular part of the config is useable. There might still be errors in the config, -// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible. -func (config *DirectClientConfig) ConfirmUsable() error { - validationErrors := make([]error, 0) - - var contextName string - if len(config.contextName) != 0 { - contextName = config.contextName - } else { - contextName = config.config.CurrentContext - } - - if len(contextName) > 0 { - _, exists := config.config.Contexts[contextName] - if !exists { - validationErrors = append(validationErrors, &errContextNotFound{contextName}) - } - } - - authInfoName, _ := config.getAuthInfoName() - authInfo, _ := config.getAuthInfo() - validationErrors = append(validationErrors, validateAuthInfo(authInfoName, authInfo)...) - clusterName, _ := config.getClusterName() - cluster, _ := config.getCluster() - validationErrors = append(validationErrors, validateClusterInfo(clusterName, cluster)...) - // when direct client config is specified, and our only error is that no server is defined, we should - // return a standard "no config" error - if len(validationErrors) == 1 && validationErrors[0] == ErrEmptyCluster { - return newErrConfigurationInvalid([]error{ErrEmptyConfig}) - } - return newErrConfigurationInvalid(validationErrors) -} - -// getContextName returns the default, or user-set context name, and a boolean that indicates -// whether the default context name has been overwritten by a user-set flag, or left as its default value -func (config *DirectClientConfig) getContextName() (string, bool) { - if config.overrides != nil && len(config.overrides.CurrentContext) != 0 { - return config.overrides.CurrentContext, true - } - if len(config.contextName) != 0 { - return config.contextName, false - } - - return config.config.CurrentContext, false -} - -// getAuthInfoName returns a string containing the current authinfo name for the current context, -// and a boolean indicating whether the default authInfo name is overwritten by a user-set flag, or -// left as its default value -func (config *DirectClientConfig) getAuthInfoName() (string, bool) { - if config.overrides != nil && len(config.overrides.Context.AuthInfo) != 0 { - return config.overrides.Context.AuthInfo, true - } - context, _ := config.getContext() - return context.AuthInfo, false -} - -// getClusterName returns a string containing the default, or user-set cluster name, and a boolean -// indicating whether the default clusterName has been overwritten by a user-set flag, or left as -// its default value -func (config *DirectClientConfig) getClusterName() (string, bool) { - if config.overrides != nil && len(config.overrides.Context.Cluster) != 0 { - return config.overrides.Context.Cluster, true - } - context, _ := config.getContext() - return context.Cluster, false -} - -// getContext returns the clientcmdapi.Context, or an error if a required context is not found. 
-func (config *DirectClientConfig) getContext() (clientcmdapi.Context, error) { - contexts := config.config.Contexts - contextName, required := config.getContextName() - - mergedContext := clientcmdapi.NewContext() - if configContext, exists := contexts[contextName]; exists { - mergo.Merge(mergedContext, configContext, mergo.WithOverride) - } else if required { - return clientcmdapi.Context{}, fmt.Errorf("context %q does not exist", contextName) - } - if config.overrides != nil { - mergo.Merge(mergedContext, config.overrides.Context, mergo.WithOverride) - } - - return *mergedContext, nil -} - -// getAuthInfo returns the clientcmdapi.AuthInfo, or an error if a required auth info is not found. -func (config *DirectClientConfig) getAuthInfo() (clientcmdapi.AuthInfo, error) { - authInfos := config.config.AuthInfos - authInfoName, required := config.getAuthInfoName() - - mergedAuthInfo := clientcmdapi.NewAuthInfo() - if configAuthInfo, exists := authInfos[authInfoName]; exists { - mergo.Merge(mergedAuthInfo, configAuthInfo, mergo.WithOverride) - } else if required { - return clientcmdapi.AuthInfo{}, fmt.Errorf("auth info %q does not exist", authInfoName) - } - if config.overrides != nil { - mergo.Merge(mergedAuthInfo, config.overrides.AuthInfo, mergo.WithOverride) - } - - return *mergedAuthInfo, nil -} - -// getCluster returns the clientcmdapi.Cluster, or an error if a required cluster is not found. -func (config *DirectClientConfig) getCluster() (clientcmdapi.Cluster, error) { - clusterInfos := config.config.Clusters - clusterInfoName, required := config.getClusterName() - - mergedClusterInfo := clientcmdapi.NewCluster() - if config.overrides != nil { - mergo.Merge(mergedClusterInfo, config.overrides.ClusterDefaults, mergo.WithOverride) - } - if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists { - mergo.Merge(mergedClusterInfo, configClusterInfo, mergo.WithOverride) - } else if required { - return clientcmdapi.Cluster{}, fmt.Errorf("cluster %q does not exist", clusterInfoName) - } - if config.overrides != nil { - mergo.Merge(mergedClusterInfo, config.overrides.ClusterInfo, mergo.WithOverride) - } - - // * An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data - // otherwise, a kubeconfig containing a CA reference would return an error that "CA and insecure-skip-tls-verify couldn't both be set". - // * An override of --certificate-authority should also override TLS skip settings and CA data, otherwise existing CA data will take precedence. - if config.overrides != nil { - caLen := len(config.overrides.ClusterInfo.CertificateAuthority) - caDataLen := len(config.overrides.ClusterInfo.CertificateAuthorityData) - if config.overrides.ClusterInfo.InsecureSkipTLSVerify || caLen > 0 || caDataLen > 0 { - mergedClusterInfo.InsecureSkipTLSVerify = config.overrides.ClusterInfo.InsecureSkipTLSVerify - mergedClusterInfo.CertificateAuthority = config.overrides.ClusterInfo.CertificateAuthority - mergedClusterInfo.CertificateAuthorityData = config.overrides.ClusterInfo.CertificateAuthorityData - } - - // if the --tls-server-name has been set in overrides, use that value. - // if the --server has been set in overrides, then use the value of --tls-server-name specified on the CLI too. This gives the property - // that setting a --server will effectively clear the KUBECONFIG value of tls-server-name if it is specified on the command line which is - // usually correct. 
- if config.overrides.ClusterInfo.TLSServerName != "" || config.overrides.ClusterInfo.Server != "" { - mergedClusterInfo.TLSServerName = config.overrides.ClusterInfo.TLSServerName - } - } - - return *mergedClusterInfo, nil -} - -// inClusterClientConfig makes a config that will work from within a kubernetes cluster container environment. -// Can take options overrides for flags explicitly provided to the command inside the cluster container. -type inClusterClientConfig struct { - overrides *ConfigOverrides - inClusterConfigProvider func() (*restclient.Config, error) -} - -var _ ClientConfig = &inClusterClientConfig{} - -func (config *inClusterClientConfig) RawConfig() (clientcmdapi.Config, error) { - return clientcmdapi.Config{}, fmt.Errorf("inCluster environment config doesn't support multiple clusters") -} - -func (config *inClusterClientConfig) ClientConfig() (*restclient.Config, error) { - inClusterConfigProvider := config.inClusterConfigProvider - if inClusterConfigProvider == nil { - inClusterConfigProvider = restclient.InClusterConfig - } - - icc, err := inClusterConfigProvider() - if err != nil { - return nil, err - } - - // in-cluster configs only takes a host, token, or CA file - // if any of them were individually provided, overwrite anything else - if config.overrides != nil { - if server := config.overrides.ClusterInfo.Server; len(server) > 0 { - icc.Host = server - } - if len(config.overrides.AuthInfo.Token) > 0 || len(config.overrides.AuthInfo.TokenFile) > 0 { - icc.BearerToken = config.overrides.AuthInfo.Token - icc.BearerTokenFile = config.overrides.AuthInfo.TokenFile - } - if certificateAuthorityFile := config.overrides.ClusterInfo.CertificateAuthority; len(certificateAuthorityFile) > 0 { - icc.TLSClientConfig.CAFile = certificateAuthorityFile - } - } - - return icc, nil -} - -func (config *inClusterClientConfig) Namespace() (string, bool, error) { - // This way assumes you've set the POD_NAMESPACE environment variable using the downward API. - // This check has to be done first for backwards compatibility with the way InClusterConfig was originally set up - if ns := os.Getenv("POD_NAMESPACE"); ns != "" { - return ns, false, nil - } - - // Fall back to the namespace associated with the service account token, if available - if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { - if ns := strings.TrimSpace(string(data)); len(ns) > 0 { - return ns, false, nil - } - } - - return "default", false, nil -} - -func (config *inClusterClientConfig) ConfigAccess() ConfigAccess { - return NewDefaultClientConfigLoadingRules() -} - -// Possible returns true if loading an inside-kubernetes-cluster is possible. -func (config *inClusterClientConfig) Possible() bool { - fi, err := os.Stat("/var/run/secrets/kubernetes.io/serviceaccount/token") - return os.Getenv("KUBERNETES_SERVICE_HOST") != "" && - os.Getenv("KUBERNETES_SERVICE_PORT") != "" && - err == nil && !fi.IsDir() -} - -// BuildConfigFromFlags is a helper function that builds configs from a master -// url or a kubeconfig filepath. These are passed in as command line flags for cluster -// components. Warnings should reflect this usage. If neither masterUrl or kubeconfigPath -// are passed in we fallback to inClusterConfig. If inClusterConfig fails, we fallback -// to the default config. 
-func BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*restclient.Config, error) { - if kubeconfigPath == "" && masterUrl == "" { - klog.Warning("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.") - kubeconfig, err := restclient.InClusterConfig() - if err == nil { - return kubeconfig, nil - } - klog.Warning("error creating inClusterConfig, falling back to default config: ", err) - } - return NewNonInteractiveDeferredLoadingClientConfig( - &ClientConfigLoadingRules{ExplicitPath: kubeconfigPath}, - &ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}}).ClientConfig() -} - -// BuildConfigFromKubeconfigGetter is a helper function that builds configs from a master -// url and a kubeconfigGetter. -func BuildConfigFromKubeconfigGetter(masterUrl string, kubeconfigGetter KubeconfigGetter) (*restclient.Config, error) { - // TODO: We do not need a DeferredLoader here. Refactor code and see if we can use DirectClientConfig here. - cc := NewNonInteractiveDeferredLoadingClientConfig( - &ClientConfigGetter{kubeconfigGetter: kubeconfigGetter}, - &ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}}) - return cc.ClientConfig() -} diff --git a/src/vendor/k8s.io/client-go/tools/clientcmd/config.go b/src/vendor/k8s.io/client-go/tools/clientcmd/config.go deleted file mode 100644 index 31f896316..000000000 --- a/src/vendor/k8s.io/client-go/tools/clientcmd/config.go +++ /dev/null @@ -1,500 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clientcmd - -import ( - "errors" - "os" - "path" - "path/filepath" - "reflect" - "sort" - - "k8s.io/klog/v2" - - restclient "k8s.io/client-go/rest" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" -) - -// ConfigAccess is used by subcommands and methods in this package to load and modify the appropriate config files -type ConfigAccess interface { - // GetLoadingPrecedence returns the slice of files that should be used for loading and inspecting the config - GetLoadingPrecedence() []string - // GetStartingConfig returns the config that subcommands should being operating against. It may or may not be merged depending on loading rules - GetStartingConfig() (*clientcmdapi.Config, error) - // GetDefaultFilename returns the name of the file you should write into (create if necessary), if you're trying to create a new stanza as opposed to updating an existing one. - GetDefaultFilename() string - // IsExplicitFile indicates whether or not this command is interested in exactly one file. This implementation only ever does that via a flag, but implementations that handle local, global, and flags may have more - IsExplicitFile() bool - // GetExplicitFile returns the particular file this command is operating against. 
This implementation only ever has one, but implementations that handle local, global, and flags may have more - GetExplicitFile() string -} - -type PathOptions struct { - // GlobalFile is the full path to the file to load as the global (final) option - GlobalFile string - // EnvVar is the env var name that points to the list of kubeconfig files to load - EnvVar string - // ExplicitFileFlag is the name of the flag to use for prompting for the kubeconfig file - ExplicitFileFlag string - - // GlobalFileSubpath is an optional value used for displaying help - GlobalFileSubpath string - - LoadingRules *ClientConfigLoadingRules -} - -var ( - // UseModifyConfigLock ensures that access to kubeconfig file using ModifyConfig method - // is being guarded by a lock file. - // This variable is intentionaly made public so other consumers of this library - // can modify its default behavior, but be caution when disabling it since - // this will make your code not threadsafe. - UseModifyConfigLock = true -) - -func (o *PathOptions) GetEnvVarFiles() []string { - if len(o.EnvVar) == 0 { - return []string{} - } - - envVarValue := os.Getenv(o.EnvVar) - if len(envVarValue) == 0 { - return []string{} - } - - fileList := filepath.SplitList(envVarValue) - // prevent the same path load multiple times - return deduplicate(fileList) -} - -func (o *PathOptions) GetLoadingPrecedence() []string { - if o.IsExplicitFile() { - return []string{o.GetExplicitFile()} - } - - if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 { - return envVarFiles - } - return []string{o.GlobalFile} -} - -func (o *PathOptions) GetStartingConfig() (*clientcmdapi.Config, error) { - // don't mutate the original - loadingRules := *o.LoadingRules - loadingRules.Precedence = o.GetLoadingPrecedence() - - clientConfig := NewNonInteractiveDeferredLoadingClientConfig(&loadingRules, &ConfigOverrides{}) - rawConfig, err := clientConfig.RawConfig() - if os.IsNotExist(err) { - return clientcmdapi.NewConfig(), nil - } - if err != nil { - return nil, err - } - - return &rawConfig, nil -} - -func (o *PathOptions) GetDefaultFilename() string { - if o.IsExplicitFile() { - return o.GetExplicitFile() - } - - if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 { - if len(envVarFiles) == 1 { - return envVarFiles[0] - } - - // if any of the envvar files already exists, return it - for _, envVarFile := range envVarFiles { - if _, err := os.Stat(envVarFile); err == nil { - return envVarFile - } - } - - // otherwise, return the last one in the list - return envVarFiles[len(envVarFiles)-1] - } - - return o.GlobalFile -} - -func (o *PathOptions) IsExplicitFile() bool { - return len(o.LoadingRules.ExplicitPath) > 0 -} - -func (o *PathOptions) GetExplicitFile() string { - return o.LoadingRules.ExplicitPath -} - -func NewDefaultPathOptions() *PathOptions { - ret := &PathOptions{ - GlobalFile: RecommendedHomeFile, - EnvVar: RecommendedConfigPathEnvVar, - ExplicitFileFlag: RecommendedConfigPathFlag, - - GlobalFileSubpath: path.Join(RecommendedHomeDir, RecommendedFileName), - - LoadingRules: NewDefaultClientConfigLoadingRules(), - } - ret.LoadingRules.DoNotResolvePaths = true - - return ret -} - -// ModifyConfig takes a Config object, iterates through Clusters, AuthInfos, and Contexts, uses the LocationOfOrigin if specified or -// uses the default destination file to write the results into. This results in multiple file reads, but it's very easy to follow. -// Preferences and CurrentContext should always be set in the default destination file. 
Since we can't distinguish between empty and missing values -// (no nil strings), we're forced have separate handling for them. In the kubeconfig cases, newConfig should have at most one difference, -// that means that this code will only write into a single file. If you want to relativizePaths, you must provide a fully qualified path in any -// modified element. -func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config, relativizePaths bool) error { - if UseModifyConfigLock { - possibleSources := configAccess.GetLoadingPrecedence() - // sort the possible kubeconfig files so we always "lock" in the same order - // to avoid deadlock (note: this can fail w/ symlinks, but... come on). - sort.Strings(possibleSources) - for _, filename := range possibleSources { - if err := lockFile(filename); err != nil { - return err - } - defer unlockFile(filename) - } - } - - startingConfig, err := configAccess.GetStartingConfig() - if err != nil { - return err - } - - // We need to find all differences, locate their original files, read a partial config to modify only that stanza and write out the file. - // Special case the test for current context and preferences since those always write to the default file. - if reflect.DeepEqual(*startingConfig, newConfig) { - // nothing to do - return nil - } - - if startingConfig.CurrentContext != newConfig.CurrentContext { - if err := writeCurrentContext(configAccess, newConfig.CurrentContext); err != nil { - return err - } - } - - if !reflect.DeepEqual(startingConfig.Preferences, newConfig.Preferences) { - if err := writePreferences(configAccess, newConfig.Preferences); err != nil { - return err - } - } - - // Search every cluster, authInfo, and context. First from new to old for differences, then from old to new for deletions - for key, cluster := range newConfig.Clusters { - startingCluster, exists := startingConfig.Clusters[key] - if !reflect.DeepEqual(cluster, startingCluster) || !exists { - destinationFile := cluster.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - t := *cluster - - configToWrite.Clusters[key] = &t - configToWrite.Clusters[key].LocationOfOrigin = destinationFile - if relativizePaths { - if err := RelativizeClusterLocalPaths(configToWrite.Clusters[key]); err != nil { - return err - } - } - - if err := WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - // seenConfigs stores a map of config source filenames to computed config objects - seenConfigs := map[string]*clientcmdapi.Config{} - - for key, context := range newConfig.Contexts { - startingContext, exists := startingConfig.Contexts[key] - if !reflect.DeepEqual(context, startingContext) || !exists { - destinationFile := context.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - // we only obtain a fresh config object from its source file - // if we have not seen it already - this prevents us from - // reading and writing to the same number of files repeatedly - // when multiple / all contexts share the same destination file. 
- configToWrite, seen := seenConfigs[destinationFile] - if !seen { - var err error - configToWrite, err = getConfigFromFile(destinationFile) - if err != nil { - return err - } - seenConfigs[destinationFile] = configToWrite - } - - configToWrite.Contexts[key] = context - } - } - - // actually persist config object changes - for destinationFile, configToWrite := range seenConfigs { - if err := WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - - for key, authInfo := range newConfig.AuthInfos { - startingAuthInfo, exists := startingConfig.AuthInfos[key] - if !reflect.DeepEqual(authInfo, startingAuthInfo) || !exists { - destinationFile := authInfo.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - t := *authInfo - configToWrite.AuthInfos[key] = &t - configToWrite.AuthInfos[key].LocationOfOrigin = destinationFile - if relativizePaths { - if err := RelativizeAuthInfoLocalPaths(configToWrite.AuthInfos[key]); err != nil { - return err - } - } - - if err := WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - for key, cluster := range startingConfig.Clusters { - if _, exists := newConfig.Clusters[key]; !exists { - destinationFile := cluster.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - delete(configToWrite.Clusters, key) - - if err := WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - for key, context := range startingConfig.Contexts { - if _, exists := newConfig.Contexts[key]; !exists { - destinationFile := context.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - delete(configToWrite.Contexts, key) - - if err := WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - for key, authInfo := range startingConfig.AuthInfos { - if _, exists := newConfig.AuthInfos[key]; !exists { - destinationFile := authInfo.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - delete(configToWrite.AuthInfos, key) - - if err := WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - return nil -} - -func PersisterForUser(configAccess ConfigAccess, user string) restclient.AuthProviderConfigPersister { - return &persister{configAccess, user} -} - -type persister struct { - configAccess ConfigAccess - user string -} - -func (p *persister) Persist(config map[string]string) error { - newConfig, err := p.configAccess.GetStartingConfig() - if err != nil { - return err - } - authInfo, ok := newConfig.AuthInfos[p.user] - if ok && authInfo.AuthProvider != nil { - authInfo.AuthProvider.Config = config - return ModifyConfig(p.configAccess, *newConfig, false) - } - return nil -} - -// writeCurrentContext takes three possible paths. -// If newCurrentContext is the same as the startingConfig's current context, then we exit. -// If newCurrentContext has a value, then that value is written into the default destination file. 
-// If newCurrentContext is empty, then we find the config file that is setting the CurrentContext and clear the value from that file -func writeCurrentContext(configAccess ConfigAccess, newCurrentContext string) error { - if startingConfig, err := configAccess.GetStartingConfig(); err != nil { - return err - } else if startingConfig.CurrentContext == newCurrentContext { - return nil - } - - if configAccess.IsExplicitFile() { - file := configAccess.GetExplicitFile() - currConfig, err := getConfigFromFile(file) - if err != nil { - return err - } - currConfig.CurrentContext = newCurrentContext - if err := WriteToFile(*currConfig, file); err != nil { - return err - } - - return nil - } - - if len(newCurrentContext) > 0 { - destinationFile := configAccess.GetDefaultFilename() - config, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - config.CurrentContext = newCurrentContext - - if err := WriteToFile(*config, destinationFile); err != nil { - return err - } - - return nil - } - - // we're supposed to be clearing the current context. We need to find the first spot in the chain that is setting it and clear it - for _, file := range configAccess.GetLoadingPrecedence() { - if _, err := os.Stat(file); err == nil { - currConfig, err := getConfigFromFile(file) - if err != nil { - return err - } - - if len(currConfig.CurrentContext) > 0 { - currConfig.CurrentContext = newCurrentContext - if err := WriteToFile(*currConfig, file); err != nil { - return err - } - - return nil - } - } - } - - return errors.New("no config found to write context") -} - -func writePreferences(configAccess ConfigAccess, newPrefs clientcmdapi.Preferences) error { - if startingConfig, err := configAccess.GetStartingConfig(); err != nil { - return err - } else if reflect.DeepEqual(startingConfig.Preferences, newPrefs) { - return nil - } - - if configAccess.IsExplicitFile() { - file := configAccess.GetExplicitFile() - currConfig, err := getConfigFromFile(file) - if err != nil { - return err - } - currConfig.Preferences = newPrefs - if err := WriteToFile(*currConfig, file); err != nil { - return err - } - - return nil - } - - for _, file := range configAccess.GetLoadingPrecedence() { - currConfig, err := getConfigFromFile(file) - if err != nil { - return err - } - - if !reflect.DeepEqual(currConfig.Preferences, newPrefs) { - currConfig.Preferences = newPrefs - if err := WriteToFile(*currConfig, file); err != nil { - return err - } - - return nil - } - } - - return errors.New("no config found to write preferences") -} - -// getConfigFromFile tries to read a kubeconfig file and if it can't, returns an error. One exception, missing files result in empty configs, not an error. -func getConfigFromFile(filename string) (*clientcmdapi.Config, error) { - config, err := LoadFromFile(filename) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - if config == nil { - config = clientcmdapi.NewConfig() - } - return config, nil -} - -// GetConfigFromFileOrDie tries to read a kubeconfig file and if it can't, it calls exit. 
One exception, missing files result in empty configs, not an exit -func GetConfigFromFileOrDie(filename string) *clientcmdapi.Config { - config, err := getConfigFromFile(filename) - if err != nil { - klog.FatalDepth(1, err) - } - - return config -} diff --git a/src/vendor/k8s.io/client-go/tools/clientcmd/doc.go b/src/vendor/k8s.io/client-go/tools/clientcmd/doc.go deleted file mode 100644 index 424311ee1..000000000 --- a/src/vendor/k8s.io/client-go/tools/clientcmd/doc.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package clientcmd provides one stop shopping for building a working client from a fixed config, -from a .kubeconfig file, from command line flags, or from any merged combination. - -Sample usage from merged .kubeconfig files (local directory, home directory) - - loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() - // if you want to change the loading rules (which files in which order), you can do so here - - configOverrides := &clientcmd.ConfigOverrides{} - // if you want to change override values or bind them to flags, there are methods to help you - - kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides) - config, err := kubeConfig.ClientConfig() - if err != nil { - // Do something - } - client, err := metav1.New(config) - // ... -*/ -package clientcmd // import "k8s.io/client-go/tools/clientcmd" diff --git a/src/vendor/k8s.io/client-go/tools/clientcmd/flag.go b/src/vendor/k8s.io/client-go/tools/clientcmd/flag.go deleted file mode 100644 index 8d60d201c..000000000 --- a/src/vendor/k8s.io/client-go/tools/clientcmd/flag.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package clientcmd - -// transformingStringValue implements pflag.Value to store string values, -// allowing transforming them while being set -type transformingStringValue struct { - target *string - transformer func(string) (string, error) -} - -func newTransformingStringValue(val string, target *string, transformer func(string) (string, error)) *transformingStringValue { - *target = val - return &transformingStringValue{ - target: target, - transformer: transformer, - } -} - -func (t *transformingStringValue) Set(val string) error { - val, err := t.transformer(val) - if err != nil { - return err - } - *t.target = val - return nil -} - -func (t *transformingStringValue) Type() string { - return "string" -} - -func (t *transformingStringValue) String() string { - return string(*t.target) -} diff --git a/src/vendor/k8s.io/client-go/tools/clientcmd/helpers.go b/src/vendor/k8s.io/client-go/tools/clientcmd/helpers.go deleted file mode 100644 index d7572232a..000000000 --- a/src/vendor/k8s.io/client-go/tools/clientcmd/helpers.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clientcmd - -import ( - "fmt" - "net/url" - "strconv" - "time" -) - -// ParseTimeout returns a parsed duration from a string -// A duration string value must be a positive integer, optionally followed by a corresponding time unit (s|m|h). -func ParseTimeout(duration string) (time.Duration, error) { - if i, err := strconv.ParseInt(duration, 10, 64); err == nil && i >= 0 { - return (time.Duration(i) * time.Second), nil - } - if requestTimeout, err := time.ParseDuration(duration); err == nil { - return requestTimeout, nil - } - return 0, fmt.Errorf("Invalid timeout value. Timeout must be a single integer in seconds, or an integer followed by a corresponding time unit (e.g. 1s | 2m | 3h)") -} - -func parseProxyURL(proxyURL string) (*url.URL, error) { - u, err := url.Parse(proxyURL) - if err != nil { - return nil, fmt.Errorf("could not parse: %v", proxyURL) - } - - switch u.Scheme { - case "http", "https", "socks5": - default: - return nil, fmt.Errorf("unsupported scheme %q, must be http, https, or socks5", u.Scheme) - } - return u, nil -} diff --git a/src/vendor/k8s.io/client-go/tools/clientcmd/loader.go b/src/vendor/k8s.io/client-go/tools/clientcmd/loader.go deleted file mode 100644 index 4e301332d..000000000 --- a/src/vendor/k8s.io/client-go/tools/clientcmd/loader.go +++ /dev/null @@ -1,648 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clientcmd - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "reflect" - goruntime "runtime" - "strings" - - "github.com/imdario/mergo" - "k8s.io/klog/v2" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - restclient "k8s.io/client-go/rest" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest" - "k8s.io/client-go/util/homedir" -) - -const ( - RecommendedConfigPathFlag = "kubeconfig" - RecommendedConfigPathEnvVar = "KUBECONFIG" - RecommendedHomeDir = ".kube" - RecommendedFileName = "config" - RecommendedSchemaName = "schema" -) - -var ( - RecommendedConfigDir = filepath.Join(homedir.HomeDir(), RecommendedHomeDir) - RecommendedHomeFile = filepath.Join(RecommendedConfigDir, RecommendedFileName) - RecommendedSchemaFile = filepath.Join(RecommendedConfigDir, RecommendedSchemaName) -) - -// currentMigrationRules returns a map that holds the history of recommended home directories used in previous versions. -// Any future changes to RecommendedHomeFile and related are expected to add a migration rule here, in order to make -// sure existing config files are migrated to their new locations properly. -func currentMigrationRules() map[string]string { - var oldRecommendedHomeFileName string - if goruntime.GOOS == "windows" { - oldRecommendedHomeFileName = RecommendedFileName - } else { - oldRecommendedHomeFileName = ".kubeconfig" - } - return map[string]string{ - RecommendedHomeFile: filepath.Join(os.Getenv("HOME"), RecommendedHomeDir, oldRecommendedHomeFileName), - } -} - -type ClientConfigLoader interface { - ConfigAccess - // IsDefaultConfig returns true if the returned config matches the defaults. - IsDefaultConfig(*restclient.Config) bool - // Load returns the latest config - Load() (*clientcmdapi.Config, error) -} - -type KubeconfigGetter func() (*clientcmdapi.Config, error) - -type ClientConfigGetter struct { - kubeconfigGetter KubeconfigGetter -} - -// ClientConfigGetter implements the ClientConfigLoader interface. -var _ ClientConfigLoader = &ClientConfigGetter{} - -func (g *ClientConfigGetter) Load() (*clientcmdapi.Config, error) { - return g.kubeconfigGetter() -} - -func (g *ClientConfigGetter) GetLoadingPrecedence() []string { - return nil -} -func (g *ClientConfigGetter) GetStartingConfig() (*clientcmdapi.Config, error) { - return g.kubeconfigGetter() -} -func (g *ClientConfigGetter) GetDefaultFilename() string { - return "" -} -func (g *ClientConfigGetter) IsExplicitFile() bool { - return false -} -func (g *ClientConfigGetter) GetExplicitFile() string { - return "" -} -func (g *ClientConfigGetter) IsDefaultConfig(config *restclient.Config) bool { - return false -} - -// ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config -// Callers can put the chain together however they want, but we'd recommend: -// EnvVarPathFiles if set (a list of files if set) OR the HomeDirectoryPath -// ExplicitPath is special, because if a user specifically requests a certain file be used and error is reported if this file is not present -type ClientConfigLoadingRules struct { - ExplicitPath string - Precedence []string - - // MigrationRules is a map of destination files to source files. If a destination file is not present, then the source file is checked. 
- // If the source file is present, then it is copied to the destination file BEFORE any further loading happens. - MigrationRules map[string]string - - // DoNotResolvePaths indicates whether or not to resolve paths with respect to the originating files. This is phrased as a negative so - // that a default object that doesn't set this will usually get the behavior it wants. - DoNotResolvePaths bool - - // DefaultClientConfig is an optional field indicating what rules to use to calculate a default configuration. - // This should match the overrides passed in to ClientConfig loader. - DefaultClientConfig ClientConfig - - // WarnIfAllMissing indicates whether the configuration files pointed by KUBECONFIG environment variable are present or not. - // In case of missing files, it warns the user about the missing files. - WarnIfAllMissing bool -} - -// ClientConfigLoadingRules implements the ClientConfigLoader interface. -var _ ClientConfigLoader = &ClientConfigLoadingRules{} - -// NewDefaultClientConfigLoadingRules returns a ClientConfigLoadingRules object with default fields filled in. You are not required to -// use this constructor -func NewDefaultClientConfigLoadingRules() *ClientConfigLoadingRules { - chain := []string{} - warnIfAllMissing := false - - envVarFiles := os.Getenv(RecommendedConfigPathEnvVar) - if len(envVarFiles) != 0 { - fileList := filepath.SplitList(envVarFiles) - // prevent the same path load multiple times - chain = append(chain, deduplicate(fileList)...) - warnIfAllMissing = true - - } else { - chain = append(chain, RecommendedHomeFile) - } - - return &ClientConfigLoadingRules{ - Precedence: chain, - MigrationRules: currentMigrationRules(), - WarnIfAllMissing: warnIfAllMissing, - } -} - -// Load starts by running the MigrationRules and then -// takes the loading rules and returns a Config object based on following rules. -// -// if the ExplicitPath, return the unmerged explicit file -// Otherwise, return a merged config based on the Precedence slice -// -// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored. -// Read errors or files with non-deserializable content produce errors. -// The first file to set a particular map key wins and map key's value is never changed. -// BUT, if you set a struct value that is NOT contained inside of map, the value WILL be changed. -// This results in some odd looking logic to merge in one direction, merge in the other, and then merge the two. -// It also means that if two files specify a "red-user", only values from the first file's red-user are used. Even -// non-conflicting entries from the second file's "red-user" are discarded. -// Relative paths inside of the .kubeconfig files are resolved against the .kubeconfig file's parent folder -// and only absolute file paths are returned. -func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { - if err := rules.Migrate(); err != nil { - return nil, err - } - - errlist := []error{} - missingList := []string{} - - kubeConfigFiles := []string{} - - // Make sure a file we were explicitly told to use exists - if len(rules.ExplicitPath) > 0 { - if _, err := os.Stat(rules.ExplicitPath); os.IsNotExist(err) { - return nil, err - } - kubeConfigFiles = append(kubeConfigFiles, rules.ExplicitPath) - - } else { - kubeConfigFiles = append(kubeConfigFiles, rules.Precedence...) 
- } - - kubeconfigs := []*clientcmdapi.Config{} - // read and cache the config files so that we only look at them once - for _, filename := range kubeConfigFiles { - if len(filename) == 0 { - // no work to do - continue - } - - config, err := LoadFromFile(filename) - - if os.IsNotExist(err) { - // skip missing files - // Add to the missing list to produce a warning - missingList = append(missingList, filename) - continue - } - - if err != nil { - errlist = append(errlist, fmt.Errorf("error loading config file \"%s\": %v", filename, err)) - continue - } - - kubeconfigs = append(kubeconfigs, config) - } - - if rules.WarnIfAllMissing && len(missingList) > 0 && len(kubeconfigs) == 0 { - klog.Warningf("Config not found: %s", strings.Join(missingList, ", ")) - } - - // first merge all of our maps - mapConfig := clientcmdapi.NewConfig() - - for _, kubeconfig := range kubeconfigs { - mergo.Merge(mapConfig, kubeconfig, mergo.WithOverride) - } - - // merge all of the struct values in the reverse order so that priority is given correctly - // errors are not added to the list the second time - nonMapConfig := clientcmdapi.NewConfig() - for i := len(kubeconfigs) - 1; i >= 0; i-- { - kubeconfig := kubeconfigs[i] - mergo.Merge(nonMapConfig, kubeconfig, mergo.WithOverride) - } - - // since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and - // get the values we expect. - config := clientcmdapi.NewConfig() - mergo.Merge(config, mapConfig, mergo.WithOverride) - mergo.Merge(config, nonMapConfig, mergo.WithOverride) - - if rules.ResolvePaths() { - if err := ResolveLocalPaths(config); err != nil { - errlist = append(errlist, err) - } - } - return config, utilerrors.NewAggregate(errlist) -} - -// Migrate uses the MigrationRules map. If a destination file is not present, then the source file is checked. -// If the source file is present, then it is copied to the destination file BEFORE any further loading happens. -func (rules *ClientConfigLoadingRules) Migrate() error { - if rules.MigrationRules == nil { - return nil - } - - for destination, source := range rules.MigrationRules { - if _, err := os.Stat(destination); err == nil { - // if the destination already exists, do nothing - continue - } else if os.IsPermission(err) { - // if we can't access the file, skip it - continue - } else if !os.IsNotExist(err) { - // if we had an error other than non-existence, fail - return err - } - - if sourceInfo, err := os.Stat(source); err != nil { - if os.IsNotExist(err) || os.IsPermission(err) { - // if the source file doesn't exist or we can't access it, there's no work to do. 
- continue - } - - // if we had an error other than non-existence, fail - return err - } else if sourceInfo.IsDir() { - return fmt.Errorf("cannot migrate %v to %v because it is a directory", source, destination) - } - - data, err := ioutil.ReadFile(source) - if err != nil { - return err - } - // destination is created with mode 0666 before umask - err = ioutil.WriteFile(destination, data, 0666) - if err != nil { - return err - } - } - - return nil -} - -// GetLoadingPrecedence implements ConfigAccess -func (rules *ClientConfigLoadingRules) GetLoadingPrecedence() []string { - if len(rules.ExplicitPath) > 0 { - return []string{rules.ExplicitPath} - } - - return rules.Precedence -} - -// GetStartingConfig implements ConfigAccess -func (rules *ClientConfigLoadingRules) GetStartingConfig() (*clientcmdapi.Config, error) { - clientConfig := NewNonInteractiveDeferredLoadingClientConfig(rules, &ConfigOverrides{}) - rawConfig, err := clientConfig.RawConfig() - if os.IsNotExist(err) { - return clientcmdapi.NewConfig(), nil - } - if err != nil { - return nil, err - } - - return &rawConfig, nil -} - -// GetDefaultFilename implements ConfigAccess -func (rules *ClientConfigLoadingRules) GetDefaultFilename() string { - // Explicit file if we have one. - if rules.IsExplicitFile() { - return rules.GetExplicitFile() - } - // Otherwise, first existing file from precedence. - for _, filename := range rules.GetLoadingPrecedence() { - if _, err := os.Stat(filename); err == nil { - return filename - } - } - // If none exists, use the first from precedence. - if len(rules.Precedence) > 0 { - return rules.Precedence[0] - } - return "" -} - -// IsExplicitFile implements ConfigAccess -func (rules *ClientConfigLoadingRules) IsExplicitFile() bool { - return len(rules.ExplicitPath) > 0 -} - -// GetExplicitFile implements ConfigAccess -func (rules *ClientConfigLoadingRules) GetExplicitFile() string { - return rules.ExplicitPath -} - -// IsDefaultConfig returns true if the provided configuration matches the default -func (rules *ClientConfigLoadingRules) IsDefaultConfig(config *restclient.Config) bool { - if rules.DefaultClientConfig == nil { - return false - } - defaultConfig, err := rules.DefaultClientConfig.ClientConfig() - if err != nil { - return false - } - return reflect.DeepEqual(config, defaultConfig) -} - -// LoadFromFile takes a filename and deserializes the contents into Config object -func LoadFromFile(filename string) (*clientcmdapi.Config, error) { - kubeconfigBytes, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - config, err := Load(kubeconfigBytes) - if err != nil { - return nil, err - } - klog.V(6).Infoln("Config loaded from file: ", filename) - - // set LocationOfOrigin on every Cluster, User, and Context - for key, obj := range config.AuthInfos { - obj.LocationOfOrigin = filename - config.AuthInfos[key] = obj - } - for key, obj := range config.Clusters { - obj.LocationOfOrigin = filename - config.Clusters[key] = obj - } - for key, obj := range config.Contexts { - obj.LocationOfOrigin = filename - config.Contexts[key] = obj - } - - if config.AuthInfos == nil { - config.AuthInfos = map[string]*clientcmdapi.AuthInfo{} - } - if config.Clusters == nil { - config.Clusters = map[string]*clientcmdapi.Cluster{} - } - if config.Contexts == nil { - config.Contexts = map[string]*clientcmdapi.Context{} - } - - return config, nil -} - -// Load takes a byte slice and deserializes the contents into Config object. -// Encapsulates deserialization without assuming the source is a file. 
-func Load(data []byte) (*clientcmdapi.Config, error) { - config := clientcmdapi.NewConfig() - // if there's no data in a file, return the default object instead of failing (DecodeInto reject empty input) - if len(data) == 0 { - return config, nil - } - decoded, _, err := clientcmdlatest.Codec.Decode(data, &schema.GroupVersionKind{Version: clientcmdlatest.Version, Kind: "Config"}, config) - if err != nil { - return nil, err - } - return decoded.(*clientcmdapi.Config), nil -} - -// WriteToFile serializes the config to yaml and writes it out to a file. If not present, it creates the file with the mode 0600. If it is present -// it stomps the contents -func WriteToFile(config clientcmdapi.Config, filename string) error { - content, err := Write(config) - if err != nil { - return err - } - dir := filepath.Dir(filename) - if _, err := os.Stat(dir); os.IsNotExist(err) { - if err = os.MkdirAll(dir, 0755); err != nil { - return err - } - } - - if err := ioutil.WriteFile(filename, content, 0600); err != nil { - return err - } - return nil -} - -func lockFile(filename string) error { - // TODO: find a way to do this with actual file locks. Will - // probably need separate solution for windows and Linux. - - // Make sure the dir exists before we try to create a lock file. - dir := filepath.Dir(filename) - if _, err := os.Stat(dir); os.IsNotExist(err) { - if err = os.MkdirAll(dir, 0755); err != nil { - return err - } - } - f, err := os.OpenFile(lockName(filename), os.O_CREATE|os.O_EXCL, 0) - if err != nil { - return err - } - f.Close() - return nil -} - -func unlockFile(filename string) error { - return os.Remove(lockName(filename)) -} - -func lockName(filename string) string { - return filename + ".lock" -} - -// Write serializes the config to yaml. -// Encapsulates serialization without assuming the destination is a file. -func Write(config clientcmdapi.Config) ([]byte, error) { - return runtime.Encode(clientcmdlatest.Codec, &config) -} - -func (rules ClientConfigLoadingRules) ResolvePaths() bool { - return !rules.DoNotResolvePaths -} - -// ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin -// this cannot be done directly inside of LoadFromFile because doing so there would make it impossible to load a file without -// modification of its contents. -func ResolveLocalPaths(config *clientcmdapi.Config) error { - for _, cluster := range config.Clusters { - if len(cluster.LocationOfOrigin) == 0 { - continue - } - base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) - if err != nil { - return fmt.Errorf("could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err) - } - - if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil { - return err - } - } - for _, authInfo := range config.AuthInfos { - if len(authInfo.LocationOfOrigin) == 0 { - continue - } - base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin)) - if err != nil { - return fmt.Errorf("could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err) - } - - if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil { - return err - } - } - - return nil -} - -// RelativizeClusterLocalPaths first absolutizes the paths by calling ResolveLocalPaths. 
This assumes that any NEW path is already -// absolute, but any existing path will be resolved relative to LocationOfOrigin -func RelativizeClusterLocalPaths(cluster *clientcmdapi.Cluster) error { - if len(cluster.LocationOfOrigin) == 0 { - return fmt.Errorf("no location of origin for %s", cluster.Server) - } - base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) - if err != nil { - return fmt.Errorf("could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err) - } - - if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil { - return err - } - if err := RelativizePathWithNoBacksteps(GetClusterFileReferences(cluster), base); err != nil { - return err - } - - return nil -} - -// RelativizeAuthInfoLocalPaths first absolutizes the paths by calling ResolveLocalPaths. This assumes that any NEW path is already -// absolute, but any existing path will be resolved relative to LocationOfOrigin -func RelativizeAuthInfoLocalPaths(authInfo *clientcmdapi.AuthInfo) error { - if len(authInfo.LocationOfOrigin) == 0 { - return fmt.Errorf("no location of origin for %v", authInfo) - } - base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin)) - if err != nil { - return fmt.Errorf("could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err) - } - - if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil { - return err - } - if err := RelativizePathWithNoBacksteps(GetAuthInfoFileReferences(authInfo), base); err != nil { - return err - } - - return nil -} - -func RelativizeConfigPaths(config *clientcmdapi.Config, base string) error { - return RelativizePathWithNoBacksteps(GetConfigFileReferences(config), base) -} - -func ResolveConfigPaths(config *clientcmdapi.Config, base string) error { - return ResolvePaths(GetConfigFileReferences(config), base) -} - -func GetConfigFileReferences(config *clientcmdapi.Config) []*string { - refs := []*string{} - - for _, cluster := range config.Clusters { - refs = append(refs, GetClusterFileReferences(cluster)...) - } - for _, authInfo := range config.AuthInfos { - refs = append(refs, GetAuthInfoFileReferences(authInfo)...) - } - - return refs -} - -func GetClusterFileReferences(cluster *clientcmdapi.Cluster) []*string { - return []*string{&cluster.CertificateAuthority} -} - -func GetAuthInfoFileReferences(authInfo *clientcmdapi.AuthInfo) []*string { - s := []*string{&authInfo.ClientCertificate, &authInfo.ClientKey, &authInfo.TokenFile} - // Only resolve exec command if it isn't PATH based. - if authInfo.Exec != nil && strings.ContainsRune(authInfo.Exec.Command, filepath.Separator) { - s = append(s, &authInfo.Exec.Command) - } - return s -} - -// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory -func ResolvePaths(refs []*string, base string) error { - for _, ref := range refs { - // Don't resolve empty paths - if len(*ref) > 0 { - // Don't resolve absolute paths - if !filepath.IsAbs(*ref) { - *ref = filepath.Join(base, *ref) - } - } - } - return nil -} - -// RelativizePathWithNoBacksteps updates the given refs to be relative paths, relative to the given base directory as long as they do not require backsteps. -// Any path requiring a backstep is left as-is as long it is absolute. 
Any non-absolute path that can't be relativized produces an error -func RelativizePathWithNoBacksteps(refs []*string, base string) error { - for _, ref := range refs { - // Don't relativize empty paths - if len(*ref) > 0 { - rel, err := MakeRelative(*ref, base) - if err != nil { - return err - } - - // if we have a backstep, don't mess with the path - if strings.HasPrefix(rel, "../") { - if filepath.IsAbs(*ref) { - continue - } - - return fmt.Errorf("%v requires backsteps and is not absolute", *ref) - } - - *ref = rel - } - } - return nil -} - -func MakeRelative(path, base string) (string, error) { - if len(path) > 0 { - rel, err := filepath.Rel(base, path) - if err != nil { - return path, err - } - return rel, nil - } - return path, nil -} - -// deduplicate removes any duplicated values and returns a new slice, keeping the order unchanged -func deduplicate(s []string) []string { - encountered := map[string]bool{} - ret := make([]string, 0) - for i := range s { - if encountered[s[i]] { - continue - } - encountered[s[i]] = true - ret = append(ret, s[i]) - } - return ret -} diff --git a/src/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go b/src/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go deleted file mode 100644 index 10744156b..000000000 --- a/src/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go +++ /dev/null @@ -1,172 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clientcmd - -import ( - "io" - "sync" - - "k8s.io/klog/v2" - - restclient "k8s.io/client-go/rest" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" -) - -// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a client config loader. -// It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that -// the most recent rules are used. This is useful in cases where you bind flags to loading rule parameters before -// the parse happens and you want your calling code to be ignorant of how the values are being mutated to avoid -// passing extraneous information down a call stack -type DeferredLoadingClientConfig struct { - loader ClientConfigLoader - overrides *ConfigOverrides - fallbackReader io.Reader - - clientConfig ClientConfig - loadingLock sync.Mutex - - // provided for testing - icc InClusterConfig -} - -// InClusterConfig abstracts details of whether the client is running in a cluster for testing. 
-type InClusterConfig interface { - ClientConfig - Possible() bool -} - -// NewNonInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name -func NewNonInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides) ClientConfig { - return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}} -} - -// NewInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name and the fallback auth reader -func NewInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides, fallbackReader io.Reader) ClientConfig { - return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}, fallbackReader: fallbackReader} -} - -func (config *DeferredLoadingClientConfig) createClientConfig() (ClientConfig, error) { - config.loadingLock.Lock() - defer config.loadingLock.Unlock() - - if config.clientConfig != nil { - return config.clientConfig, nil - } - mergedConfig, err := config.loader.Load() - if err != nil { - return nil, err - } - - var currentContext string - if config.overrides != nil { - currentContext = config.overrides.CurrentContext - } - if config.fallbackReader != nil { - config.clientConfig = NewInteractiveClientConfig(*mergedConfig, currentContext, config.overrides, config.fallbackReader, config.loader) - } else { - config.clientConfig = NewNonInteractiveClientConfig(*mergedConfig, currentContext, config.overrides, config.loader) - } - return config.clientConfig, nil -} - -func (config *DeferredLoadingClientConfig) RawConfig() (clientcmdapi.Config, error) { - mergedConfig, err := config.createClientConfig() - if err != nil { - return clientcmdapi.Config{}, err - } - - return mergedConfig.RawConfig() -} - -// ClientConfig implements ClientConfig -func (config *DeferredLoadingClientConfig) ClientConfig() (*restclient.Config, error) { - mergedClientConfig, err := config.createClientConfig() - if err != nil { - return nil, err - } - - // load the configuration and return on non-empty errors and if the - // content differs from the default config - mergedConfig, err := mergedClientConfig.ClientConfig() - switch { - case err != nil: - if !IsEmptyConfig(err) { - // return on any error except empty config - return nil, err - } - case mergedConfig != nil: - // the configuration is valid, but if this is equal to the defaults we should try - // in-cluster configuration - if !config.loader.IsDefaultConfig(mergedConfig) { - return mergedConfig, nil - } - } - - // check for in-cluster configuration and use it - if config.icc.Possible() { - klog.V(4).Infof("Using in-cluster configuration") - return config.icc.ClientConfig() - } - - // return the result of the merged client config - return mergedConfig, err -} - -// Namespace implements KubeConfig -func (config *DeferredLoadingClientConfig) Namespace() (string, bool, error) { - mergedKubeConfig, err := config.createClientConfig() - if err != nil { - return "", false, err - } - - ns, overridden, err := mergedKubeConfig.Namespace() - // if we get an error and it is not empty config, or if the merged config defined an explicit namespace, or - // if in-cluster config is not possible, return immediately - if (err != nil && !IsEmptyConfig(err)) || overridden || !config.icc.Possible() { - // return on any error except empty config - return ns, overridden, err - } - - if len(ns) > 0 { - // if we got a 
non-default namespace from the kubeconfig, use it - if ns != "default" { - return ns, false, nil - } - - // if we got a default namespace, determine whether it was explicit or implicit - if raw, err := mergedKubeConfig.RawConfig(); err == nil { - // determine the current context - currentContext := raw.CurrentContext - if config.overrides != nil && len(config.overrides.CurrentContext) > 0 { - currentContext = config.overrides.CurrentContext - } - if context := raw.Contexts[currentContext]; context != nil && len(context.Namespace) > 0 { - return ns, false, nil - } - } - } - - klog.V(4).Infof("Using in-cluster namespace") - - // allow the namespace from the service account token directory to be used. - return config.icc.Namespace() -} - -// ConfigAccess implements ClientConfig -func (config *DeferredLoadingClientConfig) ConfigAccess() ConfigAccess { - return config.loader -} diff --git a/src/vendor/k8s.io/client-go/tools/clientcmd/overrides.go b/src/vendor/k8s.io/client-go/tools/clientcmd/overrides.go deleted file mode 100644 index 4c290db55..000000000 --- a/src/vendor/k8s.io/client-go/tools/clientcmd/overrides.go +++ /dev/null @@ -1,259 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clientcmd - -import ( - "strconv" - "strings" - - "github.com/spf13/pflag" - - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" -) - -// ConfigOverrides holds values that should override whatever information is pulled from the actual Config object. You can't -// simply use an actual Config object, because Configs hold maps, but overrides are restricted to "at most one" -type ConfigOverrides struct { - AuthInfo clientcmdapi.AuthInfo - // ClusterDefaults are applied before the configured cluster info is loaded. - ClusterDefaults clientcmdapi.Cluster - ClusterInfo clientcmdapi.Cluster - Context clientcmdapi.Context - CurrentContext string - Timeout string -} - -// ConfigOverrideFlags holds the flag names to be used for binding command line flags. 
Notice that this structure tightly -// corresponds to ConfigOverrides -type ConfigOverrideFlags struct { - AuthOverrideFlags AuthOverrideFlags - ClusterOverrideFlags ClusterOverrideFlags - ContextOverrideFlags ContextOverrideFlags - CurrentContext FlagInfo - Timeout FlagInfo -} - -// AuthOverrideFlags holds the flag names to be used for binding command line flags for AuthInfo objects -type AuthOverrideFlags struct { - ClientCertificate FlagInfo - ClientKey FlagInfo - Token FlagInfo - Impersonate FlagInfo - ImpersonateUID FlagInfo - ImpersonateGroups FlagInfo - Username FlagInfo - Password FlagInfo -} - -// ContextOverrideFlags holds the flag names to be used for binding command line flags for Cluster objects -type ContextOverrideFlags struct { - ClusterName FlagInfo - AuthInfoName FlagInfo - Namespace FlagInfo -} - -// ClusterOverride holds the flag names to be used for binding command line flags for Cluster objects -type ClusterOverrideFlags struct { - APIServer FlagInfo - APIVersion FlagInfo - CertificateAuthority FlagInfo - InsecureSkipTLSVerify FlagInfo - TLSServerName FlagInfo - ProxyURL FlagInfo -} - -// FlagInfo contains information about how to register a flag. This struct is useful if you want to provide a way for an extender to -// get back a set of recommended flag names, descriptions, and defaults, but allow for customization by an extender. This makes for -// coherent extension, without full prescription -type FlagInfo struct { - // LongName is the long string for a flag. If this is empty, then the flag will not be bound - LongName string - // ShortName is the single character for a flag. If this is empty, then there will be no short flag - ShortName string - // Default is the default value for the flag - Default string - // Description is the description for the flag - Description string -} - -// AddSecretAnnotation add secret flag to Annotation. -func (f FlagInfo) AddSecretAnnotation(flags *pflag.FlagSet) FlagInfo { - flags.SetAnnotation(f.LongName, "classified", []string{"true"}) - return f -} - -// BindStringFlag binds the flag based on the provided info. If LongName == "", nothing is registered -func (f FlagInfo) BindStringFlag(flags *pflag.FlagSet, target *string) FlagInfo { - // you can't register a flag without a long name - if len(f.LongName) > 0 { - flags.StringVarP(target, f.LongName, f.ShortName, f.Default, f.Description) - } - return f -} - -// BindTransformingStringFlag binds the flag based on the provided info. If LongName == "", nothing is registered -func (f FlagInfo) BindTransformingStringFlag(flags *pflag.FlagSet, target *string, transformer func(string) (string, error)) FlagInfo { - // you can't register a flag without a long name - if len(f.LongName) > 0 { - flags.VarP(newTransformingStringValue(f.Default, target, transformer), f.LongName, f.ShortName, f.Description) - } - return f -} - -// BindStringSliceFlag binds the flag based on the provided info. If LongName == "", nothing is registered -func (f FlagInfo) BindStringArrayFlag(flags *pflag.FlagSet, target *[]string) FlagInfo { - // you can't register a flag without a long name - if len(f.LongName) > 0 { - sliceVal := []string{} - if len(f.Default) > 0 { - sliceVal = []string{f.Default} - } - flags.StringArrayVarP(target, f.LongName, f.ShortName, sliceVal, f.Description) - } - return f -} - -// BindBoolFlag binds the flag based on the provided info. 
If LongName == "", nothing is registered -func (f FlagInfo) BindBoolFlag(flags *pflag.FlagSet, target *bool) FlagInfo { - // you can't register a flag without a long name - if len(f.LongName) > 0 { - // try to parse Default as a bool. If it fails, assume false - boolVal, err := strconv.ParseBool(f.Default) - if err != nil { - boolVal = false - } - - flags.BoolVarP(target, f.LongName, f.ShortName, boolVal, f.Description) - } - return f -} - -const ( - FlagClusterName = "cluster" - FlagAuthInfoName = "user" - FlagContext = "context" - FlagNamespace = "namespace" - FlagAPIServer = "server" - FlagTLSServerName = "tls-server-name" - FlagInsecure = "insecure-skip-tls-verify" - FlagCertFile = "client-certificate" - FlagKeyFile = "client-key" - FlagCAFile = "certificate-authority" - FlagEmbedCerts = "embed-certs" - FlagBearerToken = "token" - FlagImpersonate = "as" - FlagImpersonateUID = "as-uid" - FlagImpersonateGroup = "as-group" - FlagUsername = "username" - FlagPassword = "password" - FlagTimeout = "request-timeout" - FlagProxyURL = "proxy-url" -) - -// RecommendedConfigOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing -func RecommendedConfigOverrideFlags(prefix string) ConfigOverrideFlags { - return ConfigOverrideFlags{ - AuthOverrideFlags: RecommendedAuthOverrideFlags(prefix), - ClusterOverrideFlags: RecommendedClusterOverrideFlags(prefix), - ContextOverrideFlags: RecommendedContextOverrideFlags(prefix), - - CurrentContext: FlagInfo{prefix + FlagContext, "", "", "The name of the kubeconfig context to use"}, - Timeout: FlagInfo{prefix + FlagTimeout, "", "0", "The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). 
A value of zero means don't timeout requests."}, - } -} - -// RecommendedAuthOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing -func RecommendedAuthOverrideFlags(prefix string) AuthOverrideFlags { - return AuthOverrideFlags{ - ClientCertificate: FlagInfo{prefix + FlagCertFile, "", "", "Path to a client certificate file for TLS"}, - ClientKey: FlagInfo{prefix + FlagKeyFile, "", "", "Path to a client key file for TLS"}, - Token: FlagInfo{prefix + FlagBearerToken, "", "", "Bearer token for authentication to the API server"}, - Impersonate: FlagInfo{prefix + FlagImpersonate, "", "", "Username to impersonate for the operation"}, - ImpersonateUID: FlagInfo{prefix + FlagImpersonateUID, "", "", "UID to impersonate for the operation"}, - ImpersonateGroups: FlagInfo{prefix + FlagImpersonateGroup, "", "", "Group to impersonate for the operation, this flag can be repeated to specify multiple groups."}, - Username: FlagInfo{prefix + FlagUsername, "", "", "Username for basic authentication to the API server"}, - Password: FlagInfo{prefix + FlagPassword, "", "", "Password for basic authentication to the API server"}, - } -} - -// RecommendedClusterOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing -func RecommendedClusterOverrideFlags(prefix string) ClusterOverrideFlags { - return ClusterOverrideFlags{ - APIServer: FlagInfo{prefix + FlagAPIServer, "", "", "The address and port of the Kubernetes API server"}, - CertificateAuthority: FlagInfo{prefix + FlagCAFile, "", "", "Path to a cert file for the certificate authority"}, - InsecureSkipTLSVerify: FlagInfo{prefix + FlagInsecure, "", "false", "If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure"}, - TLSServerName: FlagInfo{prefix + FlagTLSServerName, "", "", "If provided, this name will be used to validate server certificate. 
If this is not provided, hostname used to contact the server is used."}, - ProxyURL: FlagInfo{prefix + FlagProxyURL, "", "", "If provided, this URL will be used to connect via proxy"}, - } -} - -// RecommendedContextOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing -func RecommendedContextOverrideFlags(prefix string) ContextOverrideFlags { - return ContextOverrideFlags{ - ClusterName: FlagInfo{prefix + FlagClusterName, "", "", "The name of the kubeconfig cluster to use"}, - AuthInfoName: FlagInfo{prefix + FlagAuthInfoName, "", "", "The name of the kubeconfig user to use"}, - Namespace: FlagInfo{prefix + FlagNamespace, "n", "", "If present, the namespace scope for this CLI request"}, - } -} - -// BindOverrideFlags is a convenience method to bind the specified flags to their associated variables -func BindOverrideFlags(overrides *ConfigOverrides, flags *pflag.FlagSet, flagNames ConfigOverrideFlags) { - BindAuthInfoFlags(&overrides.AuthInfo, flags, flagNames.AuthOverrideFlags) - BindClusterFlags(&overrides.ClusterInfo, flags, flagNames.ClusterOverrideFlags) - BindContextFlags(&overrides.Context, flags, flagNames.ContextOverrideFlags) - flagNames.CurrentContext.BindStringFlag(flags, &overrides.CurrentContext) - flagNames.Timeout.BindStringFlag(flags, &overrides.Timeout) -} - -// BindAuthInfoFlags is a convenience method to bind the specified flags to their associated variables -func BindAuthInfoFlags(authInfo *clientcmdapi.AuthInfo, flags *pflag.FlagSet, flagNames AuthOverrideFlags) { - flagNames.ClientCertificate.BindStringFlag(flags, &authInfo.ClientCertificate).AddSecretAnnotation(flags) - flagNames.ClientKey.BindStringFlag(flags, &authInfo.ClientKey).AddSecretAnnotation(flags) - flagNames.Token.BindStringFlag(flags, &authInfo.Token).AddSecretAnnotation(flags) - flagNames.Impersonate.BindStringFlag(flags, &authInfo.Impersonate).AddSecretAnnotation(flags) - flagNames.ImpersonateUID.BindStringFlag(flags, &authInfo.ImpersonateUID).AddSecretAnnotation(flags) - flagNames.ImpersonateGroups.BindStringArrayFlag(flags, &authInfo.ImpersonateGroups).AddSecretAnnotation(flags) - flagNames.Username.BindStringFlag(flags, &authInfo.Username).AddSecretAnnotation(flags) - flagNames.Password.BindStringFlag(flags, &authInfo.Password).AddSecretAnnotation(flags) -} - -// BindClusterFlags is a convenience method to bind the specified flags to their associated variables -func BindClusterFlags(clusterInfo *clientcmdapi.Cluster, flags *pflag.FlagSet, flagNames ClusterOverrideFlags) { - flagNames.APIServer.BindStringFlag(flags, &clusterInfo.Server) - flagNames.CertificateAuthority.BindStringFlag(flags, &clusterInfo.CertificateAuthority) - flagNames.InsecureSkipTLSVerify.BindBoolFlag(flags, &clusterInfo.InsecureSkipTLSVerify) - flagNames.TLSServerName.BindStringFlag(flags, &clusterInfo.TLSServerName) - flagNames.ProxyURL.BindStringFlag(flags, &clusterInfo.ProxyURL) -} - -// BindFlags is a convenience method to bind the specified flags to their associated variables -func BindContextFlags(contextInfo *clientcmdapi.Context, flags *pflag.FlagSet, flagNames ContextOverrideFlags) { - flagNames.ClusterName.BindStringFlag(flags, &contextInfo.Cluster) - flagNames.AuthInfoName.BindStringFlag(flags, &contextInfo.AuthInfo) - flagNames.Namespace.BindTransformingStringFlag(flags, &contextInfo.Namespace, RemoveNamespacesPrefix) -} - -// RemoveNamespacesPrefix is a transformer that strips "ns/", "namespace/" and "namespaces/" prefixes case-insensitively -func 
RemoveNamespacesPrefix(value string) (string, error) { - for _, prefix := range []string{"namespaces/", "namespace/", "ns/"} { - if len(value) > len(prefix) && strings.EqualFold(value[0:len(prefix)], prefix) { - value = value[len(prefix):] - break - } - } - return value, nil -} diff --git a/src/vendor/k8s.io/client-go/tools/clientcmd/validation.go b/src/vendor/k8s.io/client-go/tools/clientcmd/validation.go deleted file mode 100644 index 088972ef6..000000000 --- a/src/vendor/k8s.io/client-go/tools/clientcmd/validation.go +++ /dev/null @@ -1,371 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clientcmd - -import ( - "errors" - "fmt" - "os" - "reflect" - "strings" - - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/validation" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" -) - -var ( - ErrNoContext = errors.New("no context chosen") - ErrEmptyConfig = NewEmptyConfigError("no configuration has been provided, try setting KUBERNETES_MASTER environment variable") - // message is for consistency with old behavior - ErrEmptyCluster = errors.New("cluster has no server defined") -) - -// NewEmptyConfigError returns an error wrapping the given message which IsEmptyConfig() will recognize as an empty config error -func NewEmptyConfigError(message string) error { - return &errEmptyConfig{message} -} - -type errEmptyConfig struct { - message string -} - -func (e *errEmptyConfig) Error() string { - return e.message -} - -type errContextNotFound struct { - ContextName string -} - -func (e *errContextNotFound) Error() string { - return fmt.Sprintf("context was not found for specified context: %v", e.ContextName) -} - -// IsContextNotFound returns a boolean indicating whether the error is known to -// report that a context was not found -func IsContextNotFound(err error) bool { - if err == nil { - return false - } - if _, ok := err.(*errContextNotFound); ok || err == ErrNoContext { - return true - } - return strings.Contains(err.Error(), "context was not found for specified context") -} - -// IsEmptyConfig returns true if the provided error indicates the provided configuration -// is empty. -func IsEmptyConfig(err error) bool { - switch t := err.(type) { - case errConfigurationInvalid: - if len(t) != 1 { - return false - } - _, ok := t[0].(*errEmptyConfig) - return ok - } - _, ok := err.(*errEmptyConfig) - return ok -} - -// errConfigurationInvalid is a set of errors indicating the configuration is invalid. 
-type errConfigurationInvalid []error - -// errConfigurationInvalid implements error and Aggregate -var _ error = errConfigurationInvalid{} -var _ utilerrors.Aggregate = errConfigurationInvalid{} - -func newErrConfigurationInvalid(errs []error) error { - switch len(errs) { - case 0: - return nil - default: - return errConfigurationInvalid(errs) - } -} - -// Error implements the error interface -func (e errConfigurationInvalid) Error() string { - return fmt.Sprintf("invalid configuration: %v", utilerrors.NewAggregate(e).Error()) -} - -// Errors implements the utilerrors.Aggregate interface -func (e errConfigurationInvalid) Errors() []error { - return e -} - -// Is implements the utilerrors.Aggregate interface -func (e errConfigurationInvalid) Is(target error) bool { - return e.visit(func(err error) bool { - return errors.Is(err, target) - }) -} - -func (e errConfigurationInvalid) visit(f func(err error) bool) bool { - for _, err := range e { - switch err := err.(type) { - case errConfigurationInvalid: - if match := err.visit(f); match { - return match - } - case utilerrors.Aggregate: - for _, nestedErr := range err.Errors() { - if match := f(nestedErr); match { - return match - } - } - default: - if match := f(err); match { - return match - } - } - } - - return false -} - -// IsConfigurationInvalid returns true if the provided error indicates the configuration is invalid. -func IsConfigurationInvalid(err error) bool { - switch err.(type) { - case *errContextNotFound, errConfigurationInvalid: - return true - } - return IsContextNotFound(err) -} - -// Validate checks for errors in the Config. It does not return early so that it can find as many errors as possible. -func Validate(config clientcmdapi.Config) error { - validationErrors := make([]error, 0) - - if clientcmdapi.IsConfigEmpty(&config) { - return newErrConfigurationInvalid([]error{ErrEmptyConfig}) - } - - if len(config.CurrentContext) != 0 { - if _, exists := config.Contexts[config.CurrentContext]; !exists { - validationErrors = append(validationErrors, &errContextNotFound{config.CurrentContext}) - } - } - - for contextName, context := range config.Contexts { - validationErrors = append(validationErrors, validateContext(contextName, *context, config)...) - } - - for authInfoName, authInfo := range config.AuthInfos { - validationErrors = append(validationErrors, validateAuthInfo(authInfoName, *authInfo)...) - } - - for clusterName, clusterInfo := range config.Clusters { - validationErrors = append(validationErrors, validateClusterInfo(clusterName, *clusterInfo)...) - } - - return newErrConfigurationInvalid(validationErrors) -} - -// ConfirmUsable looks a particular context and determines if that particular part of the config is useable. There might still be errors in the config, -// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible. 
-func ConfirmUsable(config clientcmdapi.Config, passedContextName string) error { - validationErrors := make([]error, 0) - - if clientcmdapi.IsConfigEmpty(&config) { - return newErrConfigurationInvalid([]error{ErrEmptyConfig}) - } - - var contextName string - if len(passedContextName) != 0 { - contextName = passedContextName - } else { - contextName = config.CurrentContext - } - - if len(contextName) == 0 { - return ErrNoContext - } - - context, exists := config.Contexts[contextName] - if !exists { - validationErrors = append(validationErrors, &errContextNotFound{contextName}) - } - - if exists { - validationErrors = append(validationErrors, validateContext(contextName, *context, config)...) - - // Default to empty users and clusters and let the validation function report an error. - authInfo := config.AuthInfos[context.AuthInfo] - if authInfo == nil { - authInfo = &clientcmdapi.AuthInfo{} - } - validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *authInfo)...) - - cluster := config.Clusters[context.Cluster] - if cluster == nil { - cluster = &clientcmdapi.Cluster{} - } - validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *cluster)...) - } - - return newErrConfigurationInvalid(validationErrors) -} - -// validateClusterInfo looks for conflicts and errors in the cluster info -func validateClusterInfo(clusterName string, clusterInfo clientcmdapi.Cluster) []error { - validationErrors := make([]error, 0) - - emptyCluster := clientcmdapi.NewCluster() - if reflect.DeepEqual(*emptyCluster, clusterInfo) { - return []error{ErrEmptyCluster} - } - - if len(clusterInfo.Server) == 0 { - if len(clusterName) == 0 { - validationErrors = append(validationErrors, fmt.Errorf("default cluster has no server defined")) - } else { - validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName)) - } - } - if proxyURL := clusterInfo.ProxyURL; proxyURL != "" { - if _, err := parseProxyURL(proxyURL); err != nil { - validationErrors = append(validationErrors, fmt.Errorf("invalid 'proxy-url' %q for cluster %q: %w", proxyURL, clusterName, err)) - } - } - // Make sure CA data and CA file aren't both specified - if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 { - validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v. 
certificate-authority-data will override.", clusterName)) - } - if len(clusterInfo.CertificateAuthority) != 0 { - clientCertCA, err := os.Open(clusterInfo.CertificateAuthority) - if err != nil { - validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %w", clusterInfo.CertificateAuthority, clusterName, err)) - } else { - defer clientCertCA.Close() - } - } - - return validationErrors -} - -// validateAuthInfo looks for conflicts and errors in the auth info -func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []error { - validationErrors := make([]error, 0) - - usingAuthPath := false - methods := make([]string, 0, 3) - if len(authInfo.Token) != 0 { - methods = append(methods, "token") - } - if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 { - methods = append(methods, "basicAuth") - } - - if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 { - // Make sure cert data and file aren't both specified - if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 { - validationErrors = append(validationErrors, fmt.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override.", authInfoName)) - } - // Make sure key data and file aren't both specified - if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 { - validationErrors = append(validationErrors, fmt.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName)) - } - // Make sure a key is specified - if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 { - validationErrors = append(validationErrors, fmt.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method.", authInfoName)) - } - - if len(authInfo.ClientCertificate) != 0 { - clientCertFile, err := os.Open(authInfo.ClientCertificate) - if err != nil { - validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %w", authInfo.ClientCertificate, authInfoName, err)) - } else { - defer clientCertFile.Close() - } - } - if len(authInfo.ClientKey) != 0 { - clientKeyFile, err := os.Open(authInfo.ClientKey) - if err != nil { - validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %w", authInfo.ClientKey, authInfoName, err)) - } else { - defer clientKeyFile.Close() - } - } - } - - if authInfo.Exec != nil { - if authInfo.AuthProvider != nil { - validationErrors = append(validationErrors, fmt.Errorf("authProvider cannot be provided in combination with an exec plugin for %s", authInfoName)) - } - if len(authInfo.Exec.Command) == 0 { - validationErrors = append(validationErrors, fmt.Errorf("command must be specified for %v to use exec authentication plugin", authInfoName)) - } - if len(authInfo.Exec.APIVersion) == 0 { - validationErrors = append(validationErrors, fmt.Errorf("apiVersion must be specified for %v to use exec authentication plugin", authInfoName)) - } - for _, v := range authInfo.Exec.Env { - if len(v.Name) == 0 { - validationErrors = append(validationErrors, fmt.Errorf("env variable name must be specified for %v to use exec authentication plugin", authInfoName)) - } - } - switch authInfo.Exec.InteractiveMode { - case "": - validationErrors = append(validationErrors, fmt.Errorf("interactiveMode must be specified for %v to use exec authentication plugin", authInfoName)) - case 
clientcmdapi.NeverExecInteractiveMode, clientcmdapi.IfAvailableExecInteractiveMode, clientcmdapi.AlwaysExecInteractiveMode: - // These are valid - default: - validationErrors = append(validationErrors, fmt.Errorf("invalid interactiveMode for %v: %q", authInfoName, authInfo.Exec.InteractiveMode)) - } - } - - // authPath also provides information for the client to identify the server, so allow multiple auth methods in that case - if (len(methods) > 1) && (!usingAuthPath) { - validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods)) - } - - // ImpersonateUID, ImpersonateGroups or ImpersonateUserExtra should be requested with a user - if (len(authInfo.ImpersonateUID) > 0 || len(authInfo.ImpersonateGroups) > 0 || len(authInfo.ImpersonateUserExtra) > 0) && (len(authInfo.Impersonate) == 0) { - validationErrors = append(validationErrors, fmt.Errorf("requesting uid, groups or user-extra for %v without impersonating a user", authInfoName)) - } - return validationErrors -} - -// validateContext looks for errors in the context. It is not transitive, so errors in the reference authInfo or cluster configs are not included in this return -func validateContext(contextName string, context clientcmdapi.Context, config clientcmdapi.Config) []error { - validationErrors := make([]error, 0) - - if len(contextName) == 0 { - validationErrors = append(validationErrors, fmt.Errorf("empty context name for %#v is not allowed", context)) - } - - if len(context.AuthInfo) == 0 { - validationErrors = append(validationErrors, fmt.Errorf("user was not specified for context %q", contextName)) - } else if _, exists := config.AuthInfos[context.AuthInfo]; !exists { - validationErrors = append(validationErrors, fmt.Errorf("user %q was not found for context %q", context.AuthInfo, contextName)) - } - - if len(context.Cluster) == 0 { - validationErrors = append(validationErrors, fmt.Errorf("cluster was not specified for context %q", contextName)) - } else if _, exists := config.Clusters[context.Cluster]; !exists { - validationErrors = append(validationErrors, fmt.Errorf("cluster %q was not found for context %q", context.Cluster, contextName)) - } - - if len(context.Namespace) != 0 { - if len(validation.IsDNS1123Label(context.Namespace)) != 0 { - validationErrors = append(validationErrors, fmt.Errorf("namespace %q for context %q does not conform to the kubernetes DNS_LABEL rules", context.Namespace, contextName)) - } - } - - return validationErrors -} diff --git a/src/vendor/k8s.io/client-go/util/homedir/homedir.go b/src/vendor/k8s.io/client-go/util/homedir/homedir.go deleted file mode 100644 index 3fdbeb8cf..000000000 --- a/src/vendor/k8s.io/client-go/util/homedir/homedir.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package homedir - -import ( - "os" - "path/filepath" - "runtime" -) - -// HomeDir returns the home directory for the current user. 
-// On Windows: -// 1. the first of %HOME%, %HOMEDRIVE%%HOMEPATH%, %USERPROFILE% containing a `.kube\config` file is returned. -// 2. if none of those locations contain a `.kube\config` file, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists and is writeable is returned. -// 3. if none of those locations are writeable, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists is returned. -// 4. if none of those locations exists, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that is set is returned. -func HomeDir() string { - if runtime.GOOS == "windows" { - home := os.Getenv("HOME") - homeDriveHomePath := "" - if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 { - homeDriveHomePath = homeDrive + homePath - } - userProfile := os.Getenv("USERPROFILE") - - // Return first of %HOME%, %HOMEDRIVE%/%HOMEPATH%, %USERPROFILE% that contains a `.kube\config` file. - // %HOMEDRIVE%/%HOMEPATH% is preferred over %USERPROFILE% for backwards-compatibility. - for _, p := range []string{home, homeDriveHomePath, userProfile} { - if len(p) == 0 { - continue - } - if _, err := os.Stat(filepath.Join(p, ".kube", "config")); err != nil { - continue - } - return p - } - - firstSetPath := "" - firstExistingPath := "" - - // Prefer %USERPROFILE% over %HOMEDRIVE%/%HOMEPATH% for compatibility with other auth-writing tools - for _, p := range []string{home, userProfile, homeDriveHomePath} { - if len(p) == 0 { - continue - } - if len(firstSetPath) == 0 { - // remember the first path that is set - firstSetPath = p - } - info, err := os.Stat(p) - if err != nil { - continue - } - if len(firstExistingPath) == 0 { - // remember the first path that exists - firstExistingPath = p - } - if info.IsDir() && info.Mode().Perm()&(1<<(uint(7))) != 0 { - // return first path that is writeable - return p - } - } - - // If none are writeable, return first location that exists - if len(firstExistingPath) > 0 { - return firstExistingPath - } - - // If none exist, return first location that is set - if len(firstSetPath) > 0 { - return firstSetPath - } - - // We've got nothing - return "" - } - return os.Getenv("HOME") -} diff --git a/src/vendor/k8s.io/client-go/util/jsonpath/doc.go b/src/vendor/k8s.io/client-go/util/jsonpath/doc.go deleted file mode 100644 index 0effb15c4..000000000 --- a/src/vendor/k8s.io/client-go/util/jsonpath/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// package jsonpath is a template engine using jsonpath syntax, -// which can be seen at http://goessner.net/articles/JsonPath/. -// In addition, it has {range} {end} function to iterate list and slice. 
-package jsonpath // import "k8s.io/client-go/util/jsonpath" diff --git a/src/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go b/src/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go deleted file mode 100644 index 49ecd1465..000000000 --- a/src/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go +++ /dev/null @@ -1,579 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package jsonpath - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "reflect" - "strings" - - "k8s.io/client-go/third_party/forked/golang/template" -) - -type JSONPath struct { - name string - parser *Parser - beginRange int - inRange int - endRange int - - lastEndNode *Node - - allowMissingKeys bool - outputJSON bool -} - -// New creates a new JSONPath with the given name. -func New(name string) *JSONPath { - return &JSONPath{ - name: name, - beginRange: 0, - inRange: 0, - endRange: 0, - } -} - -// AllowMissingKeys allows a caller to specify whether they want an error if a field or map key -// cannot be located, or simply an empty result. The receiver is returned for chaining. -func (j *JSONPath) AllowMissingKeys(allow bool) *JSONPath { - j.allowMissingKeys = allow - return j -} - -// Parse parses the given template and returns an error. -func (j *JSONPath) Parse(text string) error { - var err error - j.parser, err = Parse(j.name, text) - return err -} - -// Execute bounds data into template and writes the result. -func (j *JSONPath) Execute(wr io.Writer, data interface{}) error { - fullResults, err := j.FindResults(data) - if err != nil { - return err - } - for ix := range fullResults { - if err := j.PrintResults(wr, fullResults[ix]); err != nil { - return err - } - } - return nil -} - -func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) { - if j.parser == nil { - return nil, fmt.Errorf("%s is an incomplete jsonpath template", j.name) - } - - cur := []reflect.Value{reflect.ValueOf(data)} - nodes := j.parser.Root.Nodes - fullResult := [][]reflect.Value{} - for i := 0; i < len(nodes); i++ { - node := nodes[i] - results, err := j.walk(cur, node) - if err != nil { - return nil, err - } - - // encounter an end node, break the current block - if j.endRange > 0 && j.endRange <= j.inRange { - j.endRange-- - j.lastEndNode = &nodes[i] - break - } - // encounter a range node, start a range loop - if j.beginRange > 0 { - j.beginRange-- - j.inRange++ - if len(results) > 0 { - for _, value := range results { - j.parser.Root.Nodes = nodes[i+1:] - nextResults, err := j.FindResults(value.Interface()) - if err != nil { - return nil, err - } - fullResult = append(fullResult, nextResults...) 
- } - } else { - // If the range has no results, we still need to process the nodes within the range - // so the position will advance to the end node - j.parser.Root.Nodes = nodes[i+1:] - _, err := j.FindResults(nil) - if err != nil { - return nil, err - } - } - j.inRange-- - - // Fast forward to resume processing after the most recent end node that was encountered - for k := i + 1; k < len(nodes); k++ { - if &nodes[k] == j.lastEndNode { - i = k - break - } - } - continue - } - fullResult = append(fullResult, results) - } - return fullResult, nil -} - -// EnableJSONOutput changes the PrintResults behavior to return a JSON array of results -func (j *JSONPath) EnableJSONOutput(v bool) { - j.outputJSON = v -} - -// PrintResults writes the results into writer -func (j *JSONPath) PrintResults(wr io.Writer, results []reflect.Value) error { - if j.outputJSON { - // convert the []reflect.Value to something that json - // will be able to marshal - r := make([]interface{}, 0, len(results)) - for i := range results { - r = append(r, results[i].Interface()) - } - results = []reflect.Value{reflect.ValueOf(r)} - } - for i, r := range results { - var text []byte - var err error - outputJSON := true - kind := r.Kind() - if kind == reflect.Interface { - kind = r.Elem().Kind() - } - switch kind { - case reflect.Map: - case reflect.Array: - case reflect.Slice: - case reflect.Struct: - default: - outputJSON = false - } - switch { - case outputJSON || j.outputJSON: - if j.outputJSON { - text, err = json.MarshalIndent(r.Interface(), "", " ") - text = append(text, '\n') - } else { - text, err = json.Marshal(r.Interface()) - } - default: - text, err = j.evalToText(r) - } - if err != nil { - return err - } - if i != len(results)-1 { - text = append(text, ' ') - } - if _, err = wr.Write(text); err != nil { - return err - } - } - - return nil - -} - -// walk visits tree rooted at the given node in DFS order -func (j *JSONPath) walk(value []reflect.Value, node Node) ([]reflect.Value, error) { - switch node := node.(type) { - case *ListNode: - return j.evalList(value, node) - case *TextNode: - return []reflect.Value{reflect.ValueOf(node.Text)}, nil - case *FieldNode: - return j.evalField(value, node) - case *ArrayNode: - return j.evalArray(value, node) - case *FilterNode: - return j.evalFilter(value, node) - case *IntNode: - return j.evalInt(value, node) - case *BoolNode: - return j.evalBool(value, node) - case *FloatNode: - return j.evalFloat(value, node) - case *WildcardNode: - return j.evalWildcard(value, node) - case *RecursiveNode: - return j.evalRecursive(value, node) - case *UnionNode: - return j.evalUnion(value, node) - case *IdentifierNode: - return j.evalIdentifier(value, node) - default: - return value, fmt.Errorf("unexpected Node %v", node) - } -} - -// evalInt evaluates IntNode -func (j *JSONPath) evalInt(input []reflect.Value, node *IntNode) ([]reflect.Value, error) { - result := make([]reflect.Value, len(input)) - for i := range input { - result[i] = reflect.ValueOf(node.Value) - } - return result, nil -} - -// evalFloat evaluates FloatNode -func (j *JSONPath) evalFloat(input []reflect.Value, node *FloatNode) ([]reflect.Value, error) { - result := make([]reflect.Value, len(input)) - for i := range input { - result[i] = reflect.ValueOf(node.Value) - } - return result, nil -} - -// evalBool evaluates BoolNode -func (j *JSONPath) evalBool(input []reflect.Value, node *BoolNode) ([]reflect.Value, error) { - result := make([]reflect.Value, len(input)) - for i := range input { - result[i] = 
reflect.ValueOf(node.Value) - } - return result, nil -} - -// evalList evaluates ListNode -func (j *JSONPath) evalList(value []reflect.Value, node *ListNode) ([]reflect.Value, error) { - var err error - curValue := value - for _, node := range node.Nodes { - curValue, err = j.walk(curValue, node) - if err != nil { - return curValue, err - } - } - return curValue, nil -} - -// evalIdentifier evaluates IdentifierNode -func (j *JSONPath) evalIdentifier(input []reflect.Value, node *IdentifierNode) ([]reflect.Value, error) { - results := []reflect.Value{} - switch node.Name { - case "range": - j.beginRange++ - results = input - case "end": - if j.inRange > 0 { - j.endRange++ - } else { - return results, fmt.Errorf("not in range, nothing to end") - } - default: - return input, fmt.Errorf("unrecognized identifier %v", node.Name) - } - return results, nil -} - -// evalArray evaluates ArrayNode -func (j *JSONPath) evalArray(input []reflect.Value, node *ArrayNode) ([]reflect.Value, error) { - result := []reflect.Value{} - for _, value := range input { - - value, isNil := template.Indirect(value) - if isNil { - continue - } - if value.Kind() != reflect.Array && value.Kind() != reflect.Slice { - return input, fmt.Errorf("%v is not array or slice", value.Type()) - } - params := node.Params - if !params[0].Known { - params[0].Value = 0 - } - if params[0].Value < 0 { - params[0].Value += value.Len() - } - if !params[1].Known { - params[1].Value = value.Len() - } - - if params[1].Value < 0 || (params[1].Value == 0 && params[1].Derived) { - params[1].Value += value.Len() - } - sliceLength := value.Len() - if params[1].Value != params[0].Value { // if you're requesting zero elements, allow it through. - if params[0].Value >= sliceLength || params[0].Value < 0 { - return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[0].Value, sliceLength) - } - if params[1].Value > sliceLength || params[1].Value < 0 { - return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[1].Value-1, sliceLength) - } - if params[0].Value > params[1].Value { - return input, fmt.Errorf("starting index %d is greater than ending index %d", params[0].Value, params[1].Value) - } - } else { - return result, nil - } - - value = value.Slice(params[0].Value, params[1].Value) - - step := 1 - if params[2].Known { - if params[2].Value <= 0 { - return input, fmt.Errorf("step must be > 0") - } - step = params[2].Value - } - for i := 0; i < value.Len(); i += step { - result = append(result, value.Index(i)) - } - } - return result, nil -} - -// evalUnion evaluates UnionNode -func (j *JSONPath) evalUnion(input []reflect.Value, node *UnionNode) ([]reflect.Value, error) { - result := []reflect.Value{} - for _, listNode := range node.Nodes { - temp, err := j.evalList(input, listNode) - if err != nil { - return input, err - } - result = append(result, temp...) 
- } - return result, nil -} - -func (j *JSONPath) findFieldInValue(value *reflect.Value, node *FieldNode) (reflect.Value, error) { - t := value.Type() - var inlineValue *reflect.Value - for ix := 0; ix < t.NumField(); ix++ { - f := t.Field(ix) - jsonTag := f.Tag.Get("json") - parts := strings.Split(jsonTag, ",") - if len(parts) == 0 { - continue - } - if parts[0] == node.Value { - return value.Field(ix), nil - } - if len(parts[0]) == 0 { - val := value.Field(ix) - inlineValue = &val - } - } - if inlineValue != nil { - if inlineValue.Kind() == reflect.Struct { - // handle 'inline' - match, err := j.findFieldInValue(inlineValue, node) - if err != nil { - return reflect.Value{}, err - } - if match.IsValid() { - return match, nil - } - } - } - return value.FieldByName(node.Value), nil -} - -// evalField evaluates field of struct or key of map. -func (j *JSONPath) evalField(input []reflect.Value, node *FieldNode) ([]reflect.Value, error) { - results := []reflect.Value{} - // If there's no input, there's no output - if len(input) == 0 { - return results, nil - } - for _, value := range input { - var result reflect.Value - value, isNil := template.Indirect(value) - if isNil { - continue - } - - if value.Kind() == reflect.Struct { - var err error - if result, err = j.findFieldInValue(&value, node); err != nil { - return nil, err - } - } else if value.Kind() == reflect.Map { - mapKeyType := value.Type().Key() - nodeValue := reflect.ValueOf(node.Value) - // node value type must be convertible to map key type - if !nodeValue.Type().ConvertibleTo(mapKeyType) { - return results, fmt.Errorf("%s is not convertible to %s", nodeValue, mapKeyType) - } - result = value.MapIndex(nodeValue.Convert(mapKeyType)) - } - if result.IsValid() { - results = append(results, result) - } - } - if len(results) == 0 { - if j.allowMissingKeys { - return results, nil - } - return results, fmt.Errorf("%s is not found", node.Value) - } - return results, nil -} - -// evalWildcard extracts all contents of the given value -func (j *JSONPath) evalWildcard(input []reflect.Value, node *WildcardNode) ([]reflect.Value, error) { - results := []reflect.Value{} - for _, value := range input { - value, isNil := template.Indirect(value) - if isNil { - continue - } - - kind := value.Kind() - if kind == reflect.Struct { - for i := 0; i < value.NumField(); i++ { - results = append(results, value.Field(i)) - } - } else if kind == reflect.Map { - for _, key := range value.MapKeys() { - results = append(results, value.MapIndex(key)) - } - } else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String { - for i := 0; i < value.Len(); i++ { - results = append(results, value.Index(i)) - } - } - } - return results, nil -} - -// evalRecursive visits the given value recursively and pushes all of them to result -func (j *JSONPath) evalRecursive(input []reflect.Value, node *RecursiveNode) ([]reflect.Value, error) { - result := []reflect.Value{} - for _, value := range input { - results := []reflect.Value{} - value, isNil := template.Indirect(value) - if isNil { - continue - } - - kind := value.Kind() - if kind == reflect.Struct { - for i := 0; i < value.NumField(); i++ { - results = append(results, value.Field(i)) - } - } else if kind == reflect.Map { - for _, key := range value.MapKeys() { - results = append(results, value.MapIndex(key)) - } - } else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String { - for i := 0; i < value.Len(); i++ { - results = append(results, value.Index(i)) - } - } - if len(results) 
!= 0 { - result = append(result, value) - output, err := j.evalRecursive(results, node) - if err != nil { - return result, err - } - result = append(result, output...) - } - } - return result, nil -} - -// evalFilter filters array according to FilterNode -func (j *JSONPath) evalFilter(input []reflect.Value, node *FilterNode) ([]reflect.Value, error) { - results := []reflect.Value{} - for _, value := range input { - value, _ = template.Indirect(value) - - if value.Kind() != reflect.Array && value.Kind() != reflect.Slice { - return input, fmt.Errorf("%v is not array or slice and cannot be filtered", value) - } - for i := 0; i < value.Len(); i++ { - temp := []reflect.Value{value.Index(i)} - lefts, err := j.evalList(temp, node.Left) - - //case exists - if node.Operator == "exists" { - if len(lefts) > 0 { - results = append(results, value.Index(i)) - } - continue - } - - if err != nil { - return input, err - } - - var left, right interface{} - switch { - case len(lefts) == 0: - continue - case len(lefts) > 1: - return input, fmt.Errorf("can only compare one element at a time") - } - left = lefts[0].Interface() - - rights, err := j.evalList(temp, node.Right) - if err != nil { - return input, err - } - switch { - case len(rights) == 0: - continue - case len(rights) > 1: - return input, fmt.Errorf("can only compare one element at a time") - } - right = rights[0].Interface() - - pass := false - switch node.Operator { - case "<": - pass, err = template.Less(left, right) - case ">": - pass, err = template.Greater(left, right) - case "==": - pass, err = template.Equal(left, right) - case "!=": - pass, err = template.NotEqual(left, right) - case "<=": - pass, err = template.LessEqual(left, right) - case ">=": - pass, err = template.GreaterEqual(left, right) - default: - return results, fmt.Errorf("unrecognized filter operator %s", node.Operator) - } - if err != nil { - return results, err - } - if pass { - results = append(results, value.Index(i)) - } - } - } - return results, nil -} - -// evalToText translates reflect value to corresponding text -func (j *JSONPath) evalToText(v reflect.Value) ([]byte, error) { - iface, ok := template.PrintableValue(v) - if !ok { - return nil, fmt.Errorf("can't print type %s", v.Type()) - } - var buffer bytes.Buffer - fmt.Fprint(&buffer, iface) - return buffer.Bytes(), nil -} diff --git a/src/vendor/k8s.io/client-go/util/jsonpath/node.go b/src/vendor/k8s.io/client-go/util/jsonpath/node.go deleted file mode 100644 index 83abe8b03..000000000 --- a/src/vendor/k8s.io/client-go/util/jsonpath/node.go +++ /dev/null @@ -1,256 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package jsonpath - -import "fmt" - -// NodeType identifies the type of a parse tree node. 
-type NodeType int - -// Type returns itself and provides an easy default implementation -func (t NodeType) Type() NodeType { - return t -} - -func (t NodeType) String() string { - return NodeTypeName[t] -} - -const ( - NodeText NodeType = iota - NodeArray - NodeList - NodeField - NodeIdentifier - NodeFilter - NodeInt - NodeFloat - NodeWildcard - NodeRecursive - NodeUnion - NodeBool -) - -var NodeTypeName = map[NodeType]string{ - NodeText: "NodeText", - NodeArray: "NodeArray", - NodeList: "NodeList", - NodeField: "NodeField", - NodeIdentifier: "NodeIdentifier", - NodeFilter: "NodeFilter", - NodeInt: "NodeInt", - NodeFloat: "NodeFloat", - NodeWildcard: "NodeWildcard", - NodeRecursive: "NodeRecursive", - NodeUnion: "NodeUnion", - NodeBool: "NodeBool", -} - -type Node interface { - Type() NodeType - String() string -} - -// ListNode holds a sequence of nodes. -type ListNode struct { - NodeType - Nodes []Node // The element nodes in lexical order. -} - -func newList() *ListNode { - return &ListNode{NodeType: NodeList} -} - -func (l *ListNode) append(n Node) { - l.Nodes = append(l.Nodes, n) -} - -func (l *ListNode) String() string { - return l.Type().String() -} - -// TextNode holds plain text. -type TextNode struct { - NodeType - Text string // The text; may span newlines. -} - -func newText(text string) *TextNode { - return &TextNode{NodeType: NodeText, Text: text} -} - -func (t *TextNode) String() string { - return fmt.Sprintf("%s: %s", t.Type(), t.Text) -} - -// FieldNode holds field of struct -type FieldNode struct { - NodeType - Value string -} - -func newField(value string) *FieldNode { - return &FieldNode{NodeType: NodeField, Value: value} -} - -func (f *FieldNode) String() string { - return fmt.Sprintf("%s: %s", f.Type(), f.Value) -} - -// IdentifierNode holds an identifier -type IdentifierNode struct { - NodeType - Name string -} - -func newIdentifier(value string) *IdentifierNode { - return &IdentifierNode{ - NodeType: NodeIdentifier, - Name: value, - } -} - -func (f *IdentifierNode) String() string { - return fmt.Sprintf("%s: %s", f.Type(), f.Name) -} - -// ParamsEntry holds param information for ArrayNode -type ParamsEntry struct { - Value int - Known bool // whether the value is known when parse it - Derived bool -} - -// ArrayNode holds start, end, step information for array index selection -type ArrayNode struct { - NodeType - Params [3]ParamsEntry // start, end, step -} - -func newArray(params [3]ParamsEntry) *ArrayNode { - return &ArrayNode{ - NodeType: NodeArray, - Params: params, - } -} - -func (a *ArrayNode) String() string { - return fmt.Sprintf("%s: %v", a.Type(), a.Params) -} - -// FilterNode holds operand and operator information for filter -type FilterNode struct { - NodeType - Left *ListNode - Right *ListNode - Operator string -} - -func newFilter(left, right *ListNode, operator string) *FilterNode { - return &FilterNode{ - NodeType: NodeFilter, - Left: left, - Right: right, - Operator: operator, - } -} - -func (f *FilterNode) String() string { - return fmt.Sprintf("%s: %s %s %s", f.Type(), f.Left, f.Operator, f.Right) -} - -// IntNode holds integer value -type IntNode struct { - NodeType - Value int -} - -func newInt(num int) *IntNode { - return &IntNode{NodeType: NodeInt, Value: num} -} - -func (i *IntNode) String() string { - return fmt.Sprintf("%s: %d", i.Type(), i.Value) -} - -// FloatNode holds float value -type FloatNode struct { - NodeType - Value float64 -} - -func newFloat(num float64) *FloatNode { - return &FloatNode{NodeType: NodeFloat, Value: num} -} - 
-func (i *FloatNode) String() string { - return fmt.Sprintf("%s: %f", i.Type(), i.Value) -} - -// WildcardNode means a wildcard -type WildcardNode struct { - NodeType -} - -func newWildcard() *WildcardNode { - return &WildcardNode{NodeType: NodeWildcard} -} - -func (i *WildcardNode) String() string { - return i.Type().String() -} - -// RecursiveNode means a recursive descent operator -type RecursiveNode struct { - NodeType -} - -func newRecursive() *RecursiveNode { - return &RecursiveNode{NodeType: NodeRecursive} -} - -func (r *RecursiveNode) String() string { - return r.Type().String() -} - -// UnionNode is union of ListNode -type UnionNode struct { - NodeType - Nodes []*ListNode -} - -func newUnion(nodes []*ListNode) *UnionNode { - return &UnionNode{NodeType: NodeUnion, Nodes: nodes} -} - -func (u *UnionNode) String() string { - return u.Type().String() -} - -// BoolNode holds bool value -type BoolNode struct { - NodeType - Value bool -} - -func newBool(value bool) *BoolNode { - return &BoolNode{NodeType: NodeBool, Value: value} -} - -func (b *BoolNode) String() string { - return fmt.Sprintf("%s: %t", b.Type(), b.Value) -} diff --git a/src/vendor/k8s.io/client-go/util/jsonpath/parser.go b/src/vendor/k8s.io/client-go/util/jsonpath/parser.go deleted file mode 100644 index 40bab188d..000000000 --- a/src/vendor/k8s.io/client-go/util/jsonpath/parser.go +++ /dev/null @@ -1,527 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package jsonpath - -import ( - "errors" - "fmt" - "regexp" - "strconv" - "strings" - "unicode" - "unicode/utf8" -) - -const eof = -1 - -const ( - leftDelim = "{" - rightDelim = "}" -) - -type Parser struct { - Name string - Root *ListNode - input string - pos int - start int - width int -} - -var ( - ErrSyntax = errors.New("invalid syntax") - dictKeyRex = regexp.MustCompile(`^'([^']*)'$`) - sliceOperatorRex = regexp.MustCompile(`^(-?[\d]*)(:-?[\d]*)?(:-?[\d]*)?$`) -) - -// Parse parsed the given text and return a node Parser. -// If an error is encountered, parsing stops and an empty -// Parser is returned with the error -func Parse(name, text string) (*Parser, error) { - p := NewParser(name) - err := p.Parse(text) - if err != nil { - p = nil - } - return p, err -} - -func NewParser(name string) *Parser { - return &Parser{ - Name: name, - } -} - -// parseAction parsed the expression inside delimiter -func parseAction(name, text string) (*Parser, error) { - p, err := Parse(name, fmt.Sprintf("%s%s%s", leftDelim, text, rightDelim)) - // when error happens, p will be nil, so we need to return here - if err != nil { - return p, err - } - p.Root = p.Root.Nodes[0].(*ListNode) - return p, nil -} - -func (p *Parser) Parse(text string) error { - p.input = text - p.Root = newList() - p.pos = 0 - return p.parseText(p.Root) -} - -// consumeText return the parsed text since last cosumeText -func (p *Parser) consumeText() string { - value := p.input[p.start:p.pos] - p.start = p.pos - return value -} - -// next returns the next rune in the input. 
-func (p *Parser) next() rune { - if p.pos >= len(p.input) { - p.width = 0 - return eof - } - r, w := utf8.DecodeRuneInString(p.input[p.pos:]) - p.width = w - p.pos += p.width - return r -} - -// peek returns but does not consume the next rune in the input. -func (p *Parser) peek() rune { - r := p.next() - p.backup() - return r -} - -// backup steps back one rune. Can only be called once per call of next. -func (p *Parser) backup() { - p.pos -= p.width -} - -func (p *Parser) parseText(cur *ListNode) error { - for { - if strings.HasPrefix(p.input[p.pos:], leftDelim) { - if p.pos > p.start { - cur.append(newText(p.consumeText())) - } - return p.parseLeftDelim(cur) - } - if p.next() == eof { - break - } - } - // Correctly reached EOF. - if p.pos > p.start { - cur.append(newText(p.consumeText())) - } - return nil -} - -// parseLeftDelim scans the left delimiter, which is known to be present. -func (p *Parser) parseLeftDelim(cur *ListNode) error { - p.pos += len(leftDelim) - p.consumeText() - newNode := newList() - cur.append(newNode) - cur = newNode - return p.parseInsideAction(cur) -} - -func (p *Parser) parseInsideAction(cur *ListNode) error { - prefixMap := map[string]func(*ListNode) error{ - rightDelim: p.parseRightDelim, - "[?(": p.parseFilter, - "..": p.parseRecursive, - } - for prefix, parseFunc := range prefixMap { - if strings.HasPrefix(p.input[p.pos:], prefix) { - return parseFunc(cur) - } - } - - switch r := p.next(); { - case r == eof || isEndOfLine(r): - return fmt.Errorf("unclosed action") - case r == ' ': - p.consumeText() - case r == '@' || r == '$': //the current object, just pass it - p.consumeText() - case r == '[': - return p.parseArray(cur) - case r == '"' || r == '\'': - return p.parseQuote(cur, r) - case r == '.': - return p.parseField(cur) - case r == '+' || r == '-' || unicode.IsDigit(r): - p.backup() - return p.parseNumber(cur) - case isAlphaNumeric(r): - p.backup() - return p.parseIdentifier(cur) - default: - return fmt.Errorf("unrecognized character in action: %#U", r) - } - return p.parseInsideAction(cur) -} - -// parseRightDelim scans the right delimiter, which is known to be present. -func (p *Parser) parseRightDelim(cur *ListNode) error { - p.pos += len(rightDelim) - p.consumeText() - return p.parseText(p.Root) -} - -// parseIdentifier scans build-in keywords, like "range" "end" -func (p *Parser) parseIdentifier(cur *ListNode) error { - var r rune - for { - r = p.next() - if isTerminator(r) { - p.backup() - break - } - } - value := p.consumeText() - - if isBool(value) { - v, err := strconv.ParseBool(value) - if err != nil { - return fmt.Errorf("can not parse bool '%s': %s", value, err.Error()) - } - - cur.append(newBool(v)) - } else { - cur.append(newIdentifier(value)) - } - - return p.parseInsideAction(cur) -} - -// parseRecursive scans the recursive descent operator .. -func (p *Parser) parseRecursive(cur *ListNode) error { - if lastIndex := len(cur.Nodes) - 1; lastIndex >= 0 && cur.Nodes[lastIndex].Type() == NodeRecursive { - return fmt.Errorf("invalid multiple recursive descent") - } - p.pos += len("..") - p.consumeText() - cur.append(newRecursive()) - if r := p.peek(); isAlphaNumeric(r) { - return p.parseField(cur) - } - return p.parseInsideAction(cur) -} - -// parseNumber scans number -func (p *Parser) parseNumber(cur *ListNode) error { - r := p.peek() - if r == '+' || r == '-' { - p.next() - } - for { - r = p.next() - if r != '.' 
&& !unicode.IsDigit(r) { - p.backup() - break - } - } - value := p.consumeText() - i, err := strconv.Atoi(value) - if err == nil { - cur.append(newInt(i)) - return p.parseInsideAction(cur) - } - d, err := strconv.ParseFloat(value, 64) - if err == nil { - cur.append(newFloat(d)) - return p.parseInsideAction(cur) - } - return fmt.Errorf("cannot parse number %s", value) -} - -// parseArray scans array index selection -func (p *Parser) parseArray(cur *ListNode) error { -Loop: - for { - switch p.next() { - case eof, '\n': - return fmt.Errorf("unterminated array") - case ']': - break Loop - } - } - text := p.consumeText() - text = text[1 : len(text)-1] - if text == "*" { - text = ":" - } - - //union operator - strs := strings.Split(text, ",") - if len(strs) > 1 { - union := []*ListNode{} - for _, str := range strs { - parser, err := parseAction("union", fmt.Sprintf("[%s]", strings.Trim(str, " "))) - if err != nil { - return err - } - union = append(union, parser.Root) - } - cur.append(newUnion(union)) - return p.parseInsideAction(cur) - } - - // dict key - value := dictKeyRex.FindStringSubmatch(text) - if value != nil { - parser, err := parseAction("arraydict", fmt.Sprintf(".%s", value[1])) - if err != nil { - return err - } - for _, node := range parser.Root.Nodes { - cur.append(node) - } - return p.parseInsideAction(cur) - } - - //slice operator - value = sliceOperatorRex.FindStringSubmatch(text) - if value == nil { - return fmt.Errorf("invalid array index %s", text) - } - value = value[1:] - params := [3]ParamsEntry{} - for i := 0; i < 3; i++ { - if value[i] != "" { - if i > 0 { - value[i] = value[i][1:] - } - if i > 0 && value[i] == "" { - params[i].Known = false - } else { - var err error - params[i].Known = true - params[i].Value, err = strconv.Atoi(value[i]) - if err != nil { - return fmt.Errorf("array index %s is not a number", value[i]) - } - } - } else { - if i == 1 { - params[i].Known = true - params[i].Value = params[0].Value + 1 - params[i].Derived = true - } else { - params[i].Known = false - params[i].Value = 0 - } - } - } - cur.append(newArray(params)) - return p.parseInsideAction(cur) -} - -// parseFilter scans filter inside array selection -func (p *Parser) parseFilter(cur *ListNode) error { - p.pos += len("[?(") - p.consumeText() - begin := false - end := false - var pair rune - -Loop: - for { - r := p.next() - switch r { - case eof, '\n': - return fmt.Errorf("unterminated filter") - case '"', '\'': - if begin == false { - //save the paired rune - begin = true - pair = r - continue - } - //only add when met paired rune - if p.input[p.pos-2] != '\\' && r == pair { - end = true - } - case ')': - //in rightParser below quotes only appear zero or once - //and must be paired at the beginning and end - if begin == end { - break Loop - } - } - } - if p.next() != ']' { - return fmt.Errorf("unclosed array expect ]") - } - reg := regexp.MustCompile(`^([^!<>=]+)([!<>=]+)(.+?)$`) - text := p.consumeText() - text = text[:len(text)-2] - value := reg.FindStringSubmatch(text) - if value == nil { - parser, err := parseAction("text", text) - if err != nil { - return err - } - cur.append(newFilter(parser.Root, newList(), "exists")) - } else { - leftParser, err := parseAction("left", value[1]) - if err != nil { - return err - } - rightParser, err := parseAction("right", value[3]) - if err != nil { - return err - } - cur.append(newFilter(leftParser.Root, rightParser.Root, value[2])) - } - return p.parseInsideAction(cur) -} - -// parseQuote unquotes string inside double or single quote -func (p 
*Parser) parseQuote(cur *ListNode, end rune) error { -Loop: - for { - switch p.next() { - case eof, '\n': - return fmt.Errorf("unterminated quoted string") - case end: - //if it's not escape break the Loop - if p.input[p.pos-2] != '\\' { - break Loop - } - } - } - value := p.consumeText() - s, err := UnquoteExtend(value) - if err != nil { - return fmt.Errorf("unquote string %s error %v", value, err) - } - cur.append(newText(s)) - return p.parseInsideAction(cur) -} - -// parseField scans a field until a terminator -func (p *Parser) parseField(cur *ListNode) error { - p.consumeText() - for p.advance() { - } - value := p.consumeText() - if value == "*" { - cur.append(newWildcard()) - } else { - cur.append(newField(strings.Replace(value, "\\", "", -1))) - } - return p.parseInsideAction(cur) -} - -// advance scans until next non-escaped terminator -func (p *Parser) advance() bool { - r := p.next() - if r == '\\' { - p.next() - } else if isTerminator(r) { - p.backup() - return false - } - return true -} - -// isTerminator reports whether the input is at valid termination character to appear after an identifier. -func isTerminator(r rune) bool { - if isSpace(r) || isEndOfLine(r) { - return true - } - switch r { - case eof, '.', ',', '[', ']', '$', '@', '{', '}': - return true - } - return false -} - -// isSpace reports whether r is a space character. -func isSpace(r rune) bool { - return r == ' ' || r == '\t' -} - -// isEndOfLine reports whether r is an end-of-line character. -func isEndOfLine(r rune) bool { - return r == '\r' || r == '\n' -} - -// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore. -func isAlphaNumeric(r rune) bool { - return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) -} - -// isBool reports whether s is a boolean value. -func isBool(s string) bool { - return s == "true" || s == "false" -} - -// UnquoteExtend is almost same as strconv.Unquote(), but it support parse single quotes as a string -func UnquoteExtend(s string) (string, error) { - n := len(s) - if n < 2 { - return "", ErrSyntax - } - quote := s[0] - if quote != s[n-1] { - return "", ErrSyntax - } - s = s[1 : n-1] - - if quote != '"' && quote != '\'' { - return "", ErrSyntax - } - - // Is it trivial? Avoid allocation. - if !contains(s, '\\') && !contains(s, quote) { - return s, nil - } - - var runeTmp [utf8.UTFMax]byte - buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. - for len(s) > 0 { - c, multibyte, ss, err := strconv.UnquoteChar(s, quote) - if err != nil { - return "", err - } - s = ss - if c < utf8.RuneSelf || !multibyte { - buf = append(buf, byte(c)) - } else { - n := utf8.EncodeRune(runeTmp[:], c) - buf = append(buf, runeTmp[:n]...) - } - } - return string(buf), nil -} - -func contains(s string, c byte) bool { - for i := 0; i < len(s); i++ { - if s[i] == c { - return true - } - } - return false -} diff --git a/src/vendor/k8s.io/kube-openapi/LICENSE b/src/vendor/k8s.io/kube-openapi/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/src/vendor/k8s.io/kube-openapi/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
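
The k8s.io/client-go/util/jsonpath files deleted above (node.go, parser.go and the evaluator) are the upstream Kubernetes JSONPath implementation, removed from Harbor's vendor tree by this patch. For reviewers unfamiliar with that package, the sketch below shows how a consumer typically drives it; it assumes the upstream package's public API (jsonpath.New, AllowMissingKeys, Parse, Execute) and is illustrative only — it is not code added or changed by this patch.

package main

import (
	"fmt"
	"os"

	"k8s.io/client-go/util/jsonpath"
)

func main() {
	// Arbitrary nested data; the evaluator walks maps, structs and slices.
	data := map[string]interface{}{
		"kind": "Pod",
		"metadata": map[string]interface{}{
			"name":   "demo",
			"labels": map[string]interface{}{"app": "web"},
		},
	}

	// Build a parser, tolerate missing keys, and compile a kubectl-style template.
	jp := jsonpath.New("example")
	jp.AllowMissingKeys(true)
	if err := jp.Parse("{.metadata.labels.app}"); err != nil {
		fmt.Fprintln(os.Stderr, "parse error:", err)
		os.Exit(1)
	}

	// Evaluate the template against the data and write the result ("web") to stdout.
	if err := jp.Execute(os.Stdout, data); err != nil {
		fmt.Fprintln(os.Stderr, "eval error:", err)
		os.Exit(1)
	}
	fmt.Println()
}
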
diff --git a/src/vendor/k8s.io/kube-openapi/pkg/builder3/util/util.go b/src/vendor/k8s.io/kube-openapi/pkg/builder3/util/util.go deleted file mode 100644 index e01566925..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/builder3/util/util.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "reflect" - - "k8s.io/kube-openapi/pkg/schemamutation" - "k8s.io/kube-openapi/pkg/validation/spec" -) - -// wrapRefs wraps OpenAPI V3 Schema refs that contain sibling elements. -// AllOf is used to wrap the Ref to prevent references from having sibling elements -// Please see https://github.com/kubernetes/kubernetes/issues/106387#issuecomment-967640388 -func WrapRefs(schema *spec.Schema) *spec.Schema { - walker := schemamutation.Walker{ - SchemaCallback: func(schema *spec.Schema) *spec.Schema { - orig := schema - clone := func() { - if orig == schema { - schema = new(spec.Schema) - *schema = *orig - } - } - if schema.Ref.String() != "" && !reflect.DeepEqual(*schema, spec.Schema{SchemaProps: spec.SchemaProps{Ref: schema.Ref}}) { - clone() - refSchema := new(spec.Schema) - refSchema.Ref = schema.Ref - schema.Ref = spec.Ref{} - schema.AllOf = []spec.Schema{*refSchema} - } - return schema - }, - RefCallback: schemamutation.RefCallbackNoop, - } - return walker.WalkSchema(schema) -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/common/common.go b/src/vendor/k8s.io/kube-openapi/pkg/common/common.go deleted file mode 100644 index 24f2b0e88..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/common/common.go +++ /dev/null @@ -1,320 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package common - -import ( - "net/http" - "strings" - - "github.com/emicklei/go-restful/v3" - - "k8s.io/kube-openapi/pkg/openapiconv" - "k8s.io/kube-openapi/pkg/spec3" - "k8s.io/kube-openapi/pkg/validation/spec" -) - -const ( - // TODO: Make this configurable. - ExtensionPrefix = "x-kubernetes-" - ExtensionV2Schema = ExtensionPrefix + "v2-schema" -) - -// OpenAPIDefinition describes single type. Normally these definitions are auto-generated using gen-openapi. -type OpenAPIDefinition struct { - Schema spec.Schema - Dependencies []string -} - -type ReferenceCallback func(path string) spec.Ref - -// GetOpenAPIDefinitions is collection of all definitions. -type GetOpenAPIDefinitions func(ReferenceCallback) map[string]OpenAPIDefinition - -// OpenAPIDefinitionGetter gets openAPI definitions for a given type. 
If a type implements this interface, -// the definition returned by it will be used, otherwise the auto-generated definitions will be used. See -// GetOpenAPITypeFormat for more information about trade-offs of using this interface or GetOpenAPITypeFormat method when -// possible. -type OpenAPIDefinitionGetter interface { - OpenAPIDefinition() *OpenAPIDefinition -} - -type OpenAPIV3DefinitionGetter interface { - OpenAPIV3Definition() *OpenAPIDefinition -} - -type PathHandler interface { - Handle(path string, handler http.Handler) -} - -type PathHandlerByGroupVersion interface { - Handle(path string, handler http.Handler) - HandlePrefix(path string, handler http.Handler) -} - -// Config is set of configuration for openAPI spec generation. -type Config struct { - // List of supported protocols such as https, http, etc. - ProtocolList []string - - // Info is general information about the API. - Info *spec.Info - - // DefaultResponse will be used if an operation does not have any responses listed. It - // will show up as ... "responses" : {"default" : $DefaultResponse} in the spec. - DefaultResponse *spec.Response - - // ResponseDefinitions will be added to "responses" under the top-level swagger object. This is an object - // that holds responses definitions that can be used across operations. This property does not define - // global responses for all operations. For more info please refer: - // https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#fixed-fields - ResponseDefinitions map[string]spec.Response - - // CommonResponses will be added as a response to all operation specs. This is a good place to add common - // responses such as authorization failed. - CommonResponses map[int]spec.Response - - // List of webservice's path prefixes to ignore - IgnorePrefixes []string - - // OpenAPIDefinitions should provide definition for all models used by routes. Failure to provide this map - // or any of the models will result in spec generation failure. - GetDefinitions GetOpenAPIDefinitions - - // Provides the definition for all models used by routes. One of GetDefinitions or Definitions must be defined to generate a spec. - // This takes precedent over the GetDefinitions function - Definitions map[string]OpenAPIDefinition - - // GetOperationIDAndTags returns operation id and tags for a restful route. It is an optional function to customize operation IDs. - // - // Deprecated: GetOperationIDAndTagsFromRoute should be used instead. This cannot be specified if using the new Route - // interface set of funcs. - GetOperationIDAndTags func(r *restful.Route) (string, []string, error) - - // GetOperationIDAndTagsFromRoute returns operation id and tags for a Route. It is an optional function to customize operation IDs. - GetOperationIDAndTagsFromRoute func(r Route) (string, []string, error) - - // GetDefinitionName returns a friendly name for a definition base on the serving path. parameter `name` is the full name of the definition. - // It is an optional function to customize model names. - GetDefinitionName func(name string) (string, spec.Extensions) - - // PostProcessSpec runs after the spec is ready to serve. It allows a final modification to the spec before serving. - PostProcessSpec func(*spec.Swagger) (*spec.Swagger, error) - - // SecurityDefinitions is list of all security definitions for OpenAPI service. If this is not nil, the user of config - // is responsible to provide DefaultSecurity and (maybe) add unauthorized response to CommonResponses. 
- SecurityDefinitions *spec.SecurityDefinitions - - // DefaultSecurity for all operations. This will pass as spec.SwaggerProps.Security to OpenAPI. - // For most cases, this will be list of acceptable definitions in SecurityDefinitions. - DefaultSecurity []map[string][]string -} - -// OpenAPIV3Config is set of configuration for OpenAPI V3 spec generation. -type OpenAPIV3Config struct { - // Info is general information about the API. - Info *spec.Info - - // DefaultResponse will be used if an operation does not have any responses listed. It - // will show up as ... "responses" : {"default" : $DefaultResponse} in the spec. - DefaultResponse *spec3.Response - - // ResponseDefinitions will be added to responses component. This is an object - // that holds responses that can be used across operations. - ResponseDefinitions map[string]*spec3.Response - - // CommonResponses will be added as a response to all operation specs. This is a good place to add common - // responses such as authorization failed. - CommonResponses map[int]*spec3.Response - - // List of webservice's path prefixes to ignore - IgnorePrefixes []string - - // OpenAPIDefinitions should provide definition for all models used by routes. Failure to provide this map - // or any of the models will result in spec generation failure. - // One of GetDefinitions or Definitions must be defined to generate a spec. - GetDefinitions GetOpenAPIDefinitions - - // Provides the definition for all models used by routes. One of GetDefinitions or Definitions must be defined to generate a spec. - // This takes precedent over the GetDefinitions function - Definitions map[string]OpenAPIDefinition - - // GetOperationIDAndTags returns operation id and tags for a restful route. It is an optional function to customize operation IDs. - // - // Deprecated: GetOperationIDAndTagsFromRoute should be used instead. This cannot be specified if using the new Route - // interface set of funcs. - GetOperationIDAndTags func(r *restful.Route) (string, []string, error) - - // GetOperationIDAndTagsFromRoute returns operation id and tags for a Route. It is an optional function to customize operation IDs. - GetOperationIDAndTagsFromRoute func(r Route) (string, []string, error) - - // GetDefinitionName returns a friendly name for a definition base on the serving path. parameter `name` is the full name of the definition. - // It is an optional function to customize model names. - GetDefinitionName func(name string) (string, spec.Extensions) - - // SecuritySchemes is list of all security schemes for OpenAPI service. - SecuritySchemes spec3.SecuritySchemes - - // DefaultSecurity for all operations. 
- DefaultSecurity []map[string][]string -} - -// ConvertConfigToV3 converts a Config object to an OpenAPIV3Config object -func ConvertConfigToV3(config *Config) *OpenAPIV3Config { - if config == nil { - return nil - } - - v3Config := &OpenAPIV3Config{ - Info: config.Info, - IgnorePrefixes: config.IgnorePrefixes, - GetDefinitions: config.GetDefinitions, - GetOperationIDAndTags: config.GetOperationIDAndTags, - GetOperationIDAndTagsFromRoute: config.GetOperationIDAndTagsFromRoute, - GetDefinitionName: config.GetDefinitionName, - Definitions: config.Definitions, - SecuritySchemes: make(spec3.SecuritySchemes), - DefaultSecurity: config.DefaultSecurity, - DefaultResponse: openapiconv.ConvertResponse(config.DefaultResponse, []string{"application/json"}), - - CommonResponses: make(map[int]*spec3.Response), - ResponseDefinitions: make(map[string]*spec3.Response), - } - - if config.SecurityDefinitions != nil { - for s, securityScheme := range *config.SecurityDefinitions { - v3Config.SecuritySchemes[s] = openapiconv.ConvertSecurityScheme(securityScheme) - } - } - for k, commonResponse := range config.CommonResponses { - v3Config.CommonResponses[k] = openapiconv.ConvertResponse(&commonResponse, []string{"application/json"}) - } - - for k, responseDefinition := range config.ResponseDefinitions { - v3Config.ResponseDefinitions[k] = openapiconv.ConvertResponse(&responseDefinition, []string{"application/json"}) - } - return v3Config -} - -type typeInfo struct { - name string - format string - zero interface{} -} - -var schemaTypeFormatMap = map[string]typeInfo{ - "uint": {"integer", "int32", 0.}, - "uint8": {"integer", "byte", 0.}, - "uint16": {"integer", "int32", 0.}, - "uint32": {"integer", "int64", 0.}, - "uint64": {"integer", "int64", 0.}, - "int": {"integer", "int32", 0.}, - "int8": {"integer", "byte", 0.}, - "int16": {"integer", "int32", 0.}, - "int32": {"integer", "int32", 0.}, - "int64": {"integer", "int64", 0.}, - "byte": {"integer", "byte", 0}, - "float64": {"number", "double", 0.}, - "float32": {"number", "float", 0.}, - "bool": {"boolean", "", false}, - "time.Time": {"string", "date-time", ""}, - "string": {"string", "", ""}, - "integer": {"integer", "", 0.}, - "number": {"number", "", 0.}, - "boolean": {"boolean", "", false}, - "[]byte": {"string", "byte", ""}, // base64 encoded characters - "interface{}": {"object", "", interface{}(nil)}, -} - -// This function is a reference for converting go (or any custom type) to a simple open API type,format pair. There are -// two ways to customize spec for a type. If you add it here, a type will be converted to a simple type and the type -// comment (the comment that is added before type definition) will be lost. The spec will still have the property -// comment. The second way is to implement OpenAPIDefinitionGetter interface. That function can customize the spec (so -// the spec does not need to be simple type,format) or can even return a simple type,format (e.g. IntOrString). For simple -// type formats, the benefit of adding OpenAPIDefinitionGetter interface is to keep both type and property documentation. -// Example: -// type Sample struct { -// ... -// // port of the server -// port IntOrString -// ... -// } -// // IntOrString documentation... -// type IntOrString { ... 
} -// -// Adding IntOrString to this function: -// "port" : { -// format: "string", -// type: "int-or-string", -// Description: "port of the server" -// } -// -// Implement OpenAPIDefinitionGetter for IntOrString: -// -// "port" : { -// $Ref: "#/definitions/IntOrString" -// Description: "port of the server" -// } -// ... -// definitions: -// { -// "IntOrString": { -// format: "string", -// type: "int-or-string", -// Description: "IntOrString documentation..." // new -// } -// } -// -func OpenAPITypeFormat(typeName string) (string, string) { - mapped, ok := schemaTypeFormatMap[typeName] - if !ok { - return "", "" - } - return mapped.name, mapped.format -} - -// Returns the zero-value for the given type along with true if the type -// could be found. -func OpenAPIZeroValue(typeName string) (interface{}, bool) { - mapped, ok := schemaTypeFormatMap[typeName] - if !ok { - return nil, false - } - return mapped.zero, true -} - -func EscapeJsonPointer(p string) string { - // Escaping reference name using rfc6901 - p = strings.Replace(p, "~", "~0", -1) - p = strings.Replace(p, "/", "~1", -1) - return p -} - -func EmbedOpenAPIDefinitionIntoV2Extension(main OpenAPIDefinition, embedded OpenAPIDefinition) OpenAPIDefinition { - if main.Schema.Extensions == nil { - main.Schema.Extensions = make(map[string]interface{}) - } - main.Schema.Extensions[ExtensionV2Schema] = embedded.Schema - return main -} - -// GenerateOpenAPIV3OneOfSchema generate the set of schemas that MUST be assigned to SchemaProps.OneOf -func GenerateOpenAPIV3OneOfSchema(types []string) (oneOf []spec.Schema) { - for _, t := range types { - oneOf = append(oneOf, spec.Schema{SchemaProps: spec.SchemaProps{Type: []string{t}}}) - } - return -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/common/doc.go b/src/vendor/k8s.io/kube-openapi/pkg/common/doc.go deleted file mode 100644 index 2ba6d247b..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/common/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// package common holds shared code and types between open API code -// generator and spec generator. -package common diff --git a/src/vendor/k8s.io/kube-openapi/pkg/common/interfaces.go b/src/vendor/k8s.io/kube-openapi/pkg/common/interfaces.go deleted file mode 100644 index 059fc551b..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/common/interfaces.go +++ /dev/null @@ -1,88 +0,0 @@ -package common - -// RouteContainer is the entrypoint for a service, which may contain multiple -// routes under a common path with a common set of path parameters. -type RouteContainer interface { - // RootPath is the path that all contained routes are nested under. - RootPath() string - // PathParameters are common parameters defined in the root path. - PathParameters() []Parameter - // Routes are all routes exposed under the root path. - Routes() []Route -} - -// Route is a logical endpoint of a service. -type Route interface { - // Method defines the HTTP Method. 
- Method() string - // Path defines the route's endpoint. - Path() string - // OperationName defines a machine-readable ID for the route. - OperationName() string - // Parameters defines the list of accepted parameters. - Parameters() []Parameter - // Description is a human-readable route description. - Description() string - // Consumes defines the consumed content-types. - Consumes() []string - // Produces defines the produced content-types. - Produces() []string - // Metadata allows adding extensions to the generated spec. - Metadata() map[string]interface{} - // RequestPayloadSample defines an example request payload. Can return nil. - RequestPayloadSample() interface{} - // ResponsePayloadSample defines an example response payload. Can return nil. - ResponsePayloadSample() interface{} - // StatusCodeResponses defines a mapping of HTTP Status Codes to the specific response(s). - // Multiple responses with the same HTTP Status Code are acceptable. - StatusCodeResponses() []StatusCodeResponse -} - -// StatusCodeResponse is an explicit response type with an HTTP Status Code. -type StatusCodeResponse interface { - // Code defines the HTTP Status Code. - Code() int - // Message returns the human-readable message. - Message() string - // Model defines an example payload for this response. - Model() interface{} -} - -// Parameter is a Route parameter. -type Parameter interface { - // Name defines the unique-per-route identifier. - Name() string - // Description is the human-readable description of the param. - Description() string - // Required defines if this parameter must be provided. - Required() bool - // Kind defines the type of the parameter itself. - Kind() ParameterKind - // DataType defines the type of data the parameter carries. - DataType() string - // AllowMultiple defines if more than one value can be supplied for the parameter. - AllowMultiple() bool -} - -// ParameterKind is an enum of route parameter types. -type ParameterKind int - -const ( - // PathParameterKind indicates the request parameter type is "path". - PathParameterKind = ParameterKind(iota) - - // QueryParameterKind indicates the request parameter type is "query". - QueryParameterKind - - // BodyParameterKind indicates the request parameter type is "body". - BodyParameterKind - - // HeaderParameterKind indicates the request parameter type is "header". - HeaderParameterKind - - // FormParameterKind indicates the request parameter type is "form". - FormParameterKind - - // UnknownParameterKind indicates the request parameter type has not been specified. - UnknownParameterKind -) diff --git a/src/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go b/src/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go deleted file mode 100644 index ec4adcdec..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go +++ /dev/null @@ -1,291 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package handler3 - -import ( - "bytes" - "crypto/sha512" - "encoding/json" - "fmt" - "mime" - "net/http" - "net/url" - "path" - "sort" - "strconv" - "strings" - "sync" - "time" - - "github.com/golang/protobuf/proto" - openapi_v3 "github.com/google/gnostic/openapiv3" - "github.com/munnerz/goautoneg" - "k8s.io/kube-openapi/pkg/common" - "k8s.io/kube-openapi/pkg/internal/handler" - "k8s.io/kube-openapi/pkg/spec3" - "k8s.io/kube-openapi/pkg/validation/spec" -) - -const ( - jsonExt = ".json" - - mimeJson = "application/json" - // TODO(mehdy): change @68f4ded to a version tag when gnostic add version tags. - mimePb = "application/com.github.googleapis.gnostic.OpenAPIv3@68f4ded+protobuf" - mimePbGz = "application/x-gzip" - - subTypeProtobuf = "com.github.proto-openapi.spec.v3@v1.0+protobuf" - subTypeJSON = "json" -) - -// OpenAPIV3Discovery is the format of the Discovery document for OpenAPI V3 -// It maps Discovery paths to their corresponding URLs with a hash parameter included -type OpenAPIV3Discovery struct { - Paths map[string]OpenAPIV3DiscoveryGroupVersion `json:"paths"` -} - -// OpenAPIV3DiscoveryGroupVersion includes information about a group version and URL -// for accessing the OpenAPI. The URL includes a hash parameter to support client side caching -type OpenAPIV3DiscoveryGroupVersion struct { - // Path is an absolute path of an OpenAPI V3 document in the form of /openapi/v3/apis/apps/v1?hash=014fbff9a07c - ServerRelativeURL string `json:"serverRelativeURL"` -} - -// OpenAPIService is the service responsible for serving OpenAPI spec. It has -// the ability to safely change the spec while serving it. -type OpenAPIService struct { - // rwMutex protects All members of this service. - rwMutex sync.RWMutex - lastModified time.Time - v3Schema map[string]*OpenAPIV3Group -} - -type OpenAPIV3Group struct { - rwMutex sync.RWMutex - - lastModified time.Time - - pbCache handler.HandlerCache - jsonCache handler.HandlerCache - etagCache handler.HandlerCache -} - -func init() { - mime.AddExtensionType(".json", mimeJson) - mime.AddExtensionType(".pb-v1", mimePb) - mime.AddExtensionType(".gz", mimePbGz) -} - -func computeETag(data []byte) string { - if data == nil { - return "" - } - return fmt.Sprintf("%X", sha512.Sum512(data)) -} - -func constructServerRelativeURL(gvString, etag string) string { - u := url.URL{Path: path.Join("/openapi/v3", gvString)} - query := url.Values{} - query.Set("hash", etag) - u.RawQuery = query.Encode() - return u.String() -} - -// NewOpenAPIService builds an OpenAPIService starting with the given spec. 
-func NewOpenAPIService(spec *spec.Swagger) (*OpenAPIService, error) { - o := &OpenAPIService{} - o.v3Schema = make(map[string]*OpenAPIV3Group) - return o, nil -} - -func (o *OpenAPIService) getGroupBytes() ([]byte, error) { - o.rwMutex.RLock() - defer o.rwMutex.RUnlock() - keys := make([]string, len(o.v3Schema)) - i := 0 - for k := range o.v3Schema { - keys[i] = k - i++ - } - - sort.Strings(keys) - discovery := &OpenAPIV3Discovery{Paths: make(map[string]OpenAPIV3DiscoveryGroupVersion)} - for gvString, groupVersion := range o.v3Schema { - etagBytes, err := groupVersion.etagCache.Get() - if err != nil { - return nil, err - } - discovery.Paths[gvString] = OpenAPIV3DiscoveryGroupVersion{ - ServerRelativeURL: constructServerRelativeURL(gvString, string(etagBytes)), - } - } - j, err := json.Marshal(discovery) - if err != nil { - return nil, err - } - return j, nil -} - -func (o *OpenAPIService) getSingleGroupBytes(getType string, group string) ([]byte, string, time.Time, error) { - o.rwMutex.RLock() - defer o.rwMutex.RUnlock() - v, ok := o.v3Schema[group] - if !ok { - return nil, "", time.Now(), fmt.Errorf("Cannot find CRD group %s", group) - } - if getType == subTypeJSON { - specBytes, err := v.jsonCache.Get() - if err != nil { - return nil, "", v.lastModified, err - } - etagBytes, err := v.etagCache.Get() - return specBytes, string(etagBytes), v.lastModified, err - } else if getType == subTypeProtobuf { - specPb, err := v.pbCache.Get() - if err != nil { - return nil, "", v.lastModified, err - } - etagBytes, err := v.etagCache.Get() - return specPb, string(etagBytes), v.lastModified, err - } - return nil, "", time.Now(), fmt.Errorf("Invalid accept clause %s", getType) -} - -func (o *OpenAPIService) UpdateGroupVersion(group string, openapi *spec3.OpenAPI) (err error) { - o.rwMutex.Lock() - defer o.rwMutex.Unlock() - - if _, ok := o.v3Schema[group]; !ok { - o.v3Schema[group] = &OpenAPIV3Group{} - } - return o.v3Schema[group].UpdateSpec(openapi) -} - -func (o *OpenAPIService) DeleteGroupVersion(group string) { - o.rwMutex.Lock() - defer o.rwMutex.Unlock() - delete(o.v3Schema, group) -} - -func ToV3ProtoBinary(json []byte) ([]byte, error) { - document, err := openapi_v3.ParseDocument(json) - if err != nil { - return nil, err - } - return proto.Marshal(document) -} - -func (o *OpenAPIService) HandleDiscovery(w http.ResponseWriter, r *http.Request) { - data, _ := o.getGroupBytes() - http.ServeContent(w, r, "/openapi/v3", time.Now(), bytes.NewReader(data)) -} - -func (o *OpenAPIService) HandleGroupVersion(w http.ResponseWriter, r *http.Request) { - url := strings.SplitAfterN(r.URL.Path, "/", 4) - group := url[3] - - decipherableFormats := r.Header.Get("Accept") - if decipherableFormats == "" { - decipherableFormats = "*/*" - } - clauses := goautoneg.ParseAccept(decipherableFormats) - w.Header().Add("Vary", "Accept") - - if len(clauses) == 0 { - return - } - - accepted := []struct { - Type string - SubType string - }{ - {"application", subTypeJSON}, - {"application", subTypeProtobuf}, - } - - for _, clause := range clauses { - for _, accepts := range accepted { - if clause.Type != accepts.Type && clause.Type != "*" { - continue - } - if clause.SubType != accepts.SubType && clause.SubType != "*" { - continue - } - data, etag, lastModified, err := o.getSingleGroupBytes(accepts.SubType, group) - if err != nil { - return - } - // ETag must be enclosed in double quotes: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag - w.Header().Set("Etag", strconv.Quote(etag)) - - if hash := 
r.URL.Query().Get("hash"); hash != "" { - if hash != etag { - u := constructServerRelativeURL(group, etag) - http.Redirect(w, r, u, 301) - return - } - // The Vary header is required because the Accept header can - // change the contents returned. This prevents clients from caching - // protobuf as JSON and vice versa. - w.Header().Set("Vary", "Accept") - - // Only set these headers when a hash is given. - w.Header().Set("Cache-Control", "public, immutable") - // Set the Expires directive to the maximum value of one year from the request, - // effectively indicating that the cache never expires. - w.Header().Set("Expires", time.Now().AddDate(1, 0, 0).Format(time.RFC1123)) - } - http.ServeContent(w, r, "", lastModified, bytes.NewReader(data)) - return - } - } - w.WriteHeader(406) - return -} - -func (o *OpenAPIService) RegisterOpenAPIV3VersionedService(servePath string, handler common.PathHandlerByGroupVersion) error { - handler.Handle(servePath, http.HandlerFunc(o.HandleDiscovery)) - handler.HandlePrefix(servePath+"/", http.HandlerFunc(o.HandleGroupVersion)) - return nil -} - -func (o *OpenAPIV3Group) UpdateSpec(openapi *spec3.OpenAPI) (err error) { - o.rwMutex.Lock() - defer o.rwMutex.Unlock() - - o.jsonCache = o.jsonCache.New(func() ([]byte, error) { - return json.Marshal(openapi) - }) - o.pbCache = o.pbCache.New(func() ([]byte, error) { - json, err := o.jsonCache.Get() - if err != nil { - return nil, err - } - return ToV3ProtoBinary(json) - }) - // TODO: This forces a json marshal of corresponding group-versions. - // We should look to replace this with a faster hashing mechanism. - o.etagCache = o.etagCache.New(func() ([]byte, error) { - json, err := o.jsonCache.Get() - if err != nil { - return nil, err - } - return []byte(computeETag(json)), nil - }) - o.lastModified = time.Now() - return nil -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/internal/handler/handler_cache.go b/src/vendor/k8s.io/kube-openapi/pkg/internal/handler/handler_cache.go deleted file mode 100644 index e128c26eb..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/internal/handler/handler_cache.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package handler - -import ( - "sync" -) - -// HandlerCache represents a lazy cache for generating a byte array -// It is used to lazily marshal OpenAPI v2/v3 and lazily generate the ETag -type HandlerCache struct { - BuildCache func() ([]byte, error) - once sync.Once - bytes []byte - err error -} - -// Get either returns the cached value or calls BuildCache() once before caching and returning -// its results. If BuildCache returns an error, the last valid value for the cache (from prior -// calls to New()) is used instead if possible. 
-func (c *HandlerCache) Get() ([]byte, error) { - c.once.Do(func() { - bytes, err := c.BuildCache() - // if there is an error updating the cache, there can be situations where - // c.bytes contains a valid value (carried over from the previous update) - // but c.err is also not nil; the cache user is expected to check for this - c.err = err - if c.err == nil { - // don't override previous spec if we had an error - c.bytes = bytes - } - }) - return c.bytes, c.err -} - -// New creates a new HandlerCache for situations where a cache refresh is needed. -// This function is not thread-safe and should not be called at the same time as Get(). -func (c *HandlerCache) New(cacheBuilder func() ([]byte, error)) HandlerCache { - return HandlerCache{ - bytes: c.bytes, - BuildCache: cacheBuilder, - } -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/openapiconv/convert.go b/src/vendor/k8s.io/kube-openapi/pkg/openapiconv/convert.go deleted file mode 100644 index e993fe23d..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/openapiconv/convert.go +++ /dev/null @@ -1,322 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package openapiconv - -import ( - "strings" - - klog "k8s.io/klog/v2" - builderutil "k8s.io/kube-openapi/pkg/builder3/util" - "k8s.io/kube-openapi/pkg/spec3" - "k8s.io/kube-openapi/pkg/validation/spec" -) - -var OpenAPIV2DefPrefix = "#/definitions/" -var OpenAPIV3DefPrefix = "#/components/schemas/" - -// ConvertV2ToV3 converts an OpenAPI V2 object into V3. -// Certain references may be shared between the V2 and V3 objects in the conversion. 
-func ConvertV2ToV3(v2Spec *spec.Swagger) *spec3.OpenAPI { - v3Spec := &spec3.OpenAPI{ - Version: "3.0.0", - Info: v2Spec.Info, - ExternalDocs: ConvertExternalDocumentation(v2Spec.ExternalDocs), - Paths: ConvertPaths(v2Spec.Paths), - Components: ConvertComponents(v2Spec.SecurityDefinitions, v2Spec.Definitions, v2Spec.Responses, v2Spec.Produces), - } - - return v3Spec -} - -func ConvertExternalDocumentation(v2ED *spec.ExternalDocumentation) *spec3.ExternalDocumentation { - if v2ED == nil { - return nil - } - return &spec3.ExternalDocumentation{ - ExternalDocumentationProps: spec3.ExternalDocumentationProps{ - Description: v2ED.Description, - URL: v2ED.URL, - }, - } -} - -func ConvertComponents(v2SecurityDefinitions spec.SecurityDefinitions, v2Definitions spec.Definitions, v2Responses map[string]spec.Response, produces []string) *spec3.Components { - components := &spec3.Components{} - - if v2Definitions != nil { - components.Schemas = make(map[string]*spec.Schema) - } - for s, schema := range v2Definitions { - components.Schemas[s] = ConvertSchema(&schema) - } - if v2SecurityDefinitions != nil { - components.SecuritySchemes = make(spec3.SecuritySchemes) - } - for s, securityScheme := range v2SecurityDefinitions { - components.SecuritySchemes[s] = ConvertSecurityScheme(securityScheme) - } - if v2Responses != nil { - components.Responses = make(map[string]*spec3.Response) - } - for r, response := range v2Responses { - components.Responses[r] = ConvertResponse(&response, produces) - } - - return components -} - -func ConvertSchema(v2Schema *spec.Schema) *spec.Schema { - if v2Schema == nil { - return nil - } - v3Schema := spec.Schema{ - VendorExtensible: v2Schema.VendorExtensible, - SchemaProps: v2Schema.SchemaProps, - SwaggerSchemaProps: v2Schema.SwaggerSchemaProps, - ExtraProps: v2Schema.ExtraProps, - } - - if refString := v2Schema.Ref.String(); refString != "" { - if idx := strings.Index(refString, OpenAPIV2DefPrefix); idx != -1 { - v3Schema.Ref = spec.MustCreateRef(OpenAPIV3DefPrefix + refString[idx+len(OpenAPIV2DefPrefix):]) - } else { - klog.Errorf("Error: Swagger V2 Ref %s does not contain #/definitions\n", refString) - } - } - - if v2Schema.Properties != nil { - v3Schema.Properties = make(map[string]spec.Schema) - for key, property := range v2Schema.Properties { - v3Schema.Properties[key] = *ConvertSchema(&property) - } - } - if v2Schema.Items != nil { - v3Schema.Items = &spec.SchemaOrArray{ - Schema: ConvertSchema(v2Schema.Items.Schema), - Schemas: ConvertSchemaList(v2Schema.Items.Schemas), - } - } - - if v2Schema.AdditionalProperties != nil { - v3Schema.AdditionalProperties = &spec.SchemaOrBool{ - Schema: ConvertSchema(v2Schema.AdditionalProperties.Schema), - Allows: v2Schema.AdditionalProperties.Allows, - } - } - if v2Schema.AdditionalItems != nil { - v3Schema.AdditionalItems = &spec.SchemaOrBool{ - Schema: ConvertSchema(v2Schema.AdditionalItems.Schema), - Allows: v2Schema.AdditionalItems.Allows, - } - } - - return builderutil.WrapRefs(&v3Schema) -} - -func ConvertSchemaList(v2SchemaList []spec.Schema) []spec.Schema { - if v2SchemaList == nil { - return nil - } - v3SchemaList := []spec.Schema{} - for _, s := range v2SchemaList { - v3SchemaList = append(v3SchemaList, *ConvertSchema(&s)) - } - return v3SchemaList -} - -func ConvertSecurityScheme(v2securityScheme *spec.SecurityScheme) *spec3.SecurityScheme { - if v2securityScheme == nil { - return nil - } - securityScheme := &spec3.SecurityScheme{ - VendorExtensible: v2securityScheme.VendorExtensible, - SecuritySchemeProps: 
spec3.SecuritySchemeProps{ - Description: v2securityScheme.Description, - Type: v2securityScheme.Type, - Name: v2securityScheme.Name, - In: v2securityScheme.In, - }, - } - - if v2securityScheme.Flow != "" { - securityScheme.Flows = make(map[string]*spec3.OAuthFlow) - securityScheme.Flows[v2securityScheme.Flow] = &spec3.OAuthFlow{ - OAuthFlowProps: spec3.OAuthFlowProps{ - AuthorizationUrl: v2securityScheme.AuthorizationURL, - TokenUrl: v2securityScheme.TokenURL, - Scopes: v2securityScheme.Scopes, - }, - } - } - return securityScheme -} - -func ConvertPaths(v2Paths *spec.Paths) *spec3.Paths { - if v2Paths == nil { - return nil - } - paths := &spec3.Paths{ - VendorExtensible: v2Paths.VendorExtensible, - } - - if v2Paths.Paths != nil { - paths.Paths = make(map[string]*spec3.Path) - } - for k, v := range v2Paths.Paths { - paths.Paths[k] = ConvertPathItem(v) - } - return paths -} - -func ConvertPathItem(v2pathItem spec.PathItem) *spec3.Path { - path := &spec3.Path{ - Refable: v2pathItem.Refable, - PathProps: spec3.PathProps{ - Get: ConvertOperation(v2pathItem.Get), - Put: ConvertOperation(v2pathItem.Put), - Post: ConvertOperation(v2pathItem.Post), - Delete: ConvertOperation(v2pathItem.Delete), - Options: ConvertOperation(v2pathItem.Options), - Head: ConvertOperation(v2pathItem.Head), - Patch: ConvertOperation(v2pathItem.Patch), - }, - VendorExtensible: v2pathItem.VendorExtensible, - } - for _, param := range v2pathItem.Parameters { - path.Parameters = append(path.Parameters, ConvertParameter(param)) - } - return path -} - -func ConvertOperation(v2Operation *spec.Operation) *spec3.Operation { - if v2Operation == nil { - return nil - } - operation := &spec3.Operation{ - VendorExtensible: v2Operation.VendorExtensible, - OperationProps: spec3.OperationProps{ - Description: v2Operation.Description, - ExternalDocs: ConvertExternalDocumentation(v2Operation.OperationProps.ExternalDocs), - Tags: v2Operation.Tags, - Summary: v2Operation.Summary, - Deprecated: v2Operation.Deprecated, - OperationId: v2Operation.ID, - }, - } - - for _, param := range v2Operation.Parameters { - if param.ParamProps.Name == "body" && param.ParamProps.Schema != nil { - operation.OperationProps.RequestBody = &spec3.RequestBody{ - RequestBodyProps: spec3.RequestBodyProps{}, - } - if v2Operation.Consumes != nil { - operation.RequestBody.Content = make(map[string]*spec3.MediaType) - } - for _, consumer := range v2Operation.Consumes { - operation.RequestBody.Content[consumer] = &spec3.MediaType{ - MediaTypeProps: spec3.MediaTypeProps{ - Schema: ConvertSchema(param.ParamProps.Schema), - }, - } - } - } else { - operation.Parameters = append(operation.Parameters, ConvertParameter(param)) - } - } - - operation.Responses = &spec3.Responses{ResponsesProps: spec3.ResponsesProps{ - Default: ConvertResponse(v2Operation.Responses.Default, v2Operation.Produces), - }, - VendorExtensible: v2Operation.Responses.VendorExtensible, - } - - if v2Operation.Responses.StatusCodeResponses != nil { - operation.Responses.StatusCodeResponses = make(map[int]*spec3.Response) - } - for k, v := range v2Operation.Responses.StatusCodeResponses { - operation.Responses.StatusCodeResponses[k] = ConvertResponse(&v, v2Operation.Produces) - } - return operation -} - -func ConvertResponse(v2Response *spec.Response, produces []string) *spec3.Response { - if v2Response == nil { - return nil - } - response := &spec3.Response{ - Refable: ConvertRefableResponse(v2Response.Refable), - VendorExtensible: v2Response.VendorExtensible, - ResponseProps: spec3.ResponseProps{ - 
Description: v2Response.Description, - }, - } - - if v2Response.Schema != nil { - if produces != nil { - response.Content = make(map[string]*spec3.MediaType) - } - for _, producer := range produces { - response.ResponseProps.Content[producer] = &spec3.MediaType{ - MediaTypeProps: spec3.MediaTypeProps{ - Schema: ConvertSchema(v2Response.Schema), - }, - } - } - } - return response -} - -func ConvertParameter(v2Param spec.Parameter) *spec3.Parameter { - param := &spec3.Parameter{ - Refable: ConvertRefableParameter(v2Param.Refable), - VendorExtensible: v2Param.VendorExtensible, - ParameterProps: spec3.ParameterProps{ - Name: v2Param.Name, - Description: v2Param.Description, - In: v2Param.In, - Required: v2Param.Required, - Schema: ConvertSchema(v2Param.Schema), - AllowEmptyValue: v2Param.AllowEmptyValue, - }, - } - // Convert SimpleSchema into Schema - if param.Schema == nil { - param.Schema = &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{v2Param.Type}, - Format: v2Param.Format, - UniqueItems: v2Param.UniqueItems, - }, - } - } - - return param -} - -func ConvertRefableParameter(refable spec.Refable) spec.Refable { - if refable.Ref.String() != "" { - return spec.Refable{Ref: spec.MustCreateRef(strings.Replace(refable.Ref.String(), "#/parameters/", "#/components/parameters/", 1))} - } - return refable -} - -func ConvertRefableResponse(refable spec.Refable) spec.Refable { - if refable.Ref.String() != "" { - return spec.Refable{Ref: spec.MustCreateRef(strings.Replace(refable.Ref.String(), "#/responses/", "#/components/responses/", 1))} - } - return refable -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/schemamutation/walker.go b/src/vendor/k8s.io/kube-openapi/pkg/schemamutation/walker.go deleted file mode 100644 index 3fac658e3..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/schemamutation/walker.go +++ /dev/null @@ -1,519 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package schemamutation - -import ( - "k8s.io/kube-openapi/pkg/validation/spec" -) - -// Walker runs callback functions on all references of an OpenAPI spec, -// replacing the values when visiting corresponding types. -type Walker struct { - // SchemaCallback will be called on each schema, taking the original schema, - // and before any other callbacks of the Walker. - // If the schema needs to be mutated, DO NOT mutate it in-place, - // always create a copy, mutate, and return it. - SchemaCallback func(schema *spec.Schema) *spec.Schema - - // RefCallback will be called on each ref. - // If the ref needs to be mutated, DO NOT mutate it in-place, - // always create a copy, mutate, and return it. 
- RefCallback func(ref *spec.Ref) *spec.Ref -} - -type SchemaCallbackFunc func(schema *spec.Schema) *spec.Schema -type RefCallbackFunc func(ref *spec.Ref) *spec.Ref - -var SchemaCallBackNoop SchemaCallbackFunc = func(schema *spec.Schema) *spec.Schema { - return schema -} -var RefCallbackNoop RefCallbackFunc = func(ref *spec.Ref) *spec.Ref { - return ref -} - -// ReplaceReferences rewrites the references without mutating the input. -// The output might share data with the input. -func ReplaceReferences(walkRef func(ref *spec.Ref) *spec.Ref, sp *spec.Swagger) *spec.Swagger { - walker := &Walker{RefCallback: walkRef, SchemaCallback: SchemaCallBackNoop} - return walker.WalkRoot(sp) -} - -func (w *Walker) WalkSchema(schema *spec.Schema) *spec.Schema { - if schema == nil { - return nil - } - - orig := schema - clone := func() { - if orig == schema { - schema = &spec.Schema{} - *schema = *orig - } - } - - // Always run callback on the whole schema first - // so that SchemaCallback can take the original schema as input. - schema = w.SchemaCallback(schema) - - if r := w.RefCallback(&schema.Ref); r != &schema.Ref { - clone() - schema.Ref = *r - } - - definitionsCloned := false - for k, v := range schema.Definitions { - if s := w.WalkSchema(&v); s != &v { - if !definitionsCloned { - definitionsCloned = true - clone() - schema.Definitions = make(spec.Definitions, len(orig.Definitions)) - for k2, v2 := range orig.Definitions { - schema.Definitions[k2] = v2 - } - } - schema.Definitions[k] = *s - } - } - - propertiesCloned := false - for k, v := range schema.Properties { - if s := w.WalkSchema(&v); s != &v { - if !propertiesCloned { - propertiesCloned = true - clone() - schema.Properties = make(map[string]spec.Schema, len(orig.Properties)) - for k2, v2 := range orig.Properties { - schema.Properties[k2] = v2 - } - } - schema.Properties[k] = *s - } - } - - patternPropertiesCloned := false - for k, v := range schema.PatternProperties { - if s := w.WalkSchema(&v); s != &v { - if !patternPropertiesCloned { - patternPropertiesCloned = true - clone() - schema.PatternProperties = make(map[string]spec.Schema, len(orig.PatternProperties)) - for k2, v2 := range orig.PatternProperties { - schema.PatternProperties[k2] = v2 - } - } - schema.PatternProperties[k] = *s - } - } - - allOfCloned := false - for i := range schema.AllOf { - if s := w.WalkSchema(&schema.AllOf[i]); s != &schema.AllOf[i] { - if !allOfCloned { - allOfCloned = true - clone() - schema.AllOf = make([]spec.Schema, len(orig.AllOf)) - copy(schema.AllOf, orig.AllOf) - } - schema.AllOf[i] = *s - } - } - - anyOfCloned := false - for i := range schema.AnyOf { - if s := w.WalkSchema(&schema.AnyOf[i]); s != &schema.AnyOf[i] { - if !anyOfCloned { - anyOfCloned = true - clone() - schema.AnyOf = make([]spec.Schema, len(orig.AnyOf)) - copy(schema.AnyOf, orig.AnyOf) - } - schema.AnyOf[i] = *s - } - } - - oneOfCloned := false - for i := range schema.OneOf { - if s := w.WalkSchema(&schema.OneOf[i]); s != &schema.OneOf[i] { - if !oneOfCloned { - oneOfCloned = true - clone() - schema.OneOf = make([]spec.Schema, len(orig.OneOf)) - copy(schema.OneOf, orig.OneOf) - } - schema.OneOf[i] = *s - } - } - - if schema.Not != nil { - if s := w.WalkSchema(schema.Not); s != schema.Not { - clone() - schema.Not = s - } - } - - if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { - if s := w.WalkSchema(schema.AdditionalProperties.Schema); s != schema.AdditionalProperties.Schema { - clone() - schema.AdditionalProperties = &spec.SchemaOrBool{Schema: s, 
Allows: schema.AdditionalProperties.Allows} - } - } - - if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { - if s := w.WalkSchema(schema.AdditionalItems.Schema); s != schema.AdditionalItems.Schema { - clone() - schema.AdditionalItems = &spec.SchemaOrBool{Schema: s, Allows: schema.AdditionalItems.Allows} - } - } - - if schema.Items != nil { - if schema.Items.Schema != nil { - if s := w.WalkSchema(schema.Items.Schema); s != schema.Items.Schema { - clone() - schema.Items = &spec.SchemaOrArray{Schema: s} - } - } else { - itemsCloned := false - for i := range schema.Items.Schemas { - if s := w.WalkSchema(&schema.Items.Schemas[i]); s != &schema.Items.Schemas[i] { - if !itemsCloned { - clone() - schema.Items = &spec.SchemaOrArray{ - Schemas: make([]spec.Schema, len(orig.Items.Schemas)), - } - itemsCloned = true - copy(schema.Items.Schemas, orig.Items.Schemas) - } - schema.Items.Schemas[i] = *s - } - } - } - } - - return schema -} - -func (w *Walker) walkParameter(param *spec.Parameter) *spec.Parameter { - if param == nil { - return nil - } - - orig := param - cloned := false - clone := func() { - if !cloned { - cloned = true - param = &spec.Parameter{} - *param = *orig - } - } - - if r := w.RefCallback(¶m.Ref); r != ¶m.Ref { - clone() - param.Ref = *r - } - if s := w.WalkSchema(param.Schema); s != param.Schema { - clone() - param.Schema = s - } - if param.Items != nil { - if r := w.RefCallback(¶m.Items.Ref); r != ¶m.Items.Ref { - param.Items.Ref = *r - } - } - - return param -} - -func (w *Walker) walkParameters(params []spec.Parameter) ([]spec.Parameter, bool) { - if params == nil { - return nil, false - } - - orig := params - cloned := false - clone := func() { - if !cloned { - cloned = true - params = make([]spec.Parameter, len(params)) - copy(params, orig) - } - } - - for i := range params { - if s := w.walkParameter(¶ms[i]); s != ¶ms[i] { - clone() - params[i] = *s - } - } - - return params, cloned -} - -func (w *Walker) walkResponse(resp *spec.Response) *spec.Response { - if resp == nil { - return nil - } - - orig := resp - cloned := false - clone := func() { - if !cloned { - cloned = true - resp = &spec.Response{} - *resp = *orig - } - } - - if r := w.RefCallback(&resp.Ref); r != &resp.Ref { - clone() - resp.Ref = *r - } - if s := w.WalkSchema(resp.Schema); s != resp.Schema { - clone() - resp.Schema = s - } - - return resp -} - -func (w *Walker) walkResponses(resps *spec.Responses) *spec.Responses { - if resps == nil { - return nil - } - - orig := resps - cloned := false - clone := func() { - if !cloned { - cloned = true - resps = &spec.Responses{} - *resps = *orig - } - } - - if r := w.walkResponse(resps.ResponsesProps.Default); r != resps.ResponsesProps.Default { - clone() - resps.Default = r - } - - responsesCloned := false - for k, v := range resps.ResponsesProps.StatusCodeResponses { - if r := w.walkResponse(&v); r != &v { - if !responsesCloned { - responsesCloned = true - clone() - resps.ResponsesProps.StatusCodeResponses = make(map[int]spec.Response, len(orig.StatusCodeResponses)) - for k2, v2 := range orig.StatusCodeResponses { - resps.ResponsesProps.StatusCodeResponses[k2] = v2 - } - } - resps.ResponsesProps.StatusCodeResponses[k] = *r - } - } - - return resps -} - -func (w *Walker) walkOperation(op *spec.Operation) *spec.Operation { - if op == nil { - return nil - } - - orig := op - cloned := false - clone := func() { - if !cloned { - cloned = true - op = &spec.Operation{} - *op = *orig - } - } - - parametersCloned := false - for i := range op.Parameters { 
- if s := w.walkParameter(&op.Parameters[i]); s != &op.Parameters[i] { - if !parametersCloned { - parametersCloned = true - clone() - op.Parameters = make([]spec.Parameter, len(orig.Parameters)) - copy(op.Parameters, orig.Parameters) - } - op.Parameters[i] = *s - } - } - - if r := w.walkResponses(op.Responses); r != op.Responses { - clone() - op.Responses = r - } - - return op -} - -func (w *Walker) walkPathItem(pathItem *spec.PathItem) *spec.PathItem { - if pathItem == nil { - return nil - } - - orig := pathItem - cloned := false - clone := func() { - if !cloned { - cloned = true - pathItem = &spec.PathItem{} - *pathItem = *orig - } - } - - if p, changed := w.walkParameters(pathItem.Parameters); changed { - clone() - pathItem.Parameters = p - } - if op := w.walkOperation(pathItem.Get); op != pathItem.Get { - clone() - pathItem.Get = op - } - if op := w.walkOperation(pathItem.Head); op != pathItem.Head { - clone() - pathItem.Head = op - } - if op := w.walkOperation(pathItem.Delete); op != pathItem.Delete { - clone() - pathItem.Delete = op - } - if op := w.walkOperation(pathItem.Options); op != pathItem.Options { - clone() - pathItem.Options = op - } - if op := w.walkOperation(pathItem.Patch); op != pathItem.Patch { - clone() - pathItem.Patch = op - } - if op := w.walkOperation(pathItem.Post); op != pathItem.Post { - clone() - pathItem.Post = op - } - if op := w.walkOperation(pathItem.Put); op != pathItem.Put { - clone() - pathItem.Put = op - } - - return pathItem -} - -func (w *Walker) walkPaths(paths *spec.Paths) *spec.Paths { - if paths == nil { - return nil - } - - orig := paths - cloned := false - clone := func() { - if !cloned { - cloned = true - paths = &spec.Paths{} - *paths = *orig - } - } - - pathsCloned := false - for k, v := range paths.Paths { - if p := w.walkPathItem(&v); p != &v { - if !pathsCloned { - pathsCloned = true - clone() - paths.Paths = make(map[string]spec.PathItem, len(orig.Paths)) - for k2, v2 := range orig.Paths { - paths.Paths[k2] = v2 - } - } - paths.Paths[k] = *p - } - } - - return paths -} - -func (w *Walker) WalkRoot(swagger *spec.Swagger) *spec.Swagger { - if swagger == nil { - return nil - } - - orig := swagger - cloned := false - clone := func() { - if !cloned { - cloned = true - swagger = &spec.Swagger{} - *swagger = *orig - } - } - - parametersCloned := false - for k, v := range swagger.Parameters { - if p := w.walkParameter(&v); p != &v { - if !parametersCloned { - parametersCloned = true - clone() - swagger.Parameters = make(map[string]spec.Parameter, len(orig.Parameters)) - for k2, v2 := range orig.Parameters { - swagger.Parameters[k2] = v2 - } - } - swagger.Parameters[k] = *p - } - } - - responsesCloned := false - for k, v := range swagger.Responses { - if r := w.walkResponse(&v); r != &v { - if !responsesCloned { - responsesCloned = true - clone() - swagger.Responses = make(map[string]spec.Response, len(orig.Responses)) - for k2, v2 := range orig.Responses { - swagger.Responses[k2] = v2 - } - } - swagger.Responses[k] = *r - } - } - - definitionsCloned := false - for k, v := range swagger.Definitions { - if s := w.WalkSchema(&v); s != &v { - if !definitionsCloned { - definitionsCloned = true - clone() - swagger.Definitions = make(spec.Definitions, len(orig.Definitions)) - for k2, v2 := range orig.Definitions { - swagger.Definitions[k2] = v2 - } - } - swagger.Definitions[k] = *s - } - } - - if swagger.Paths != nil { - if p := w.walkPaths(swagger.Paths); p != swagger.Paths { - clone() - swagger.Paths = p - } - } - - return swagger -} diff --git 
a/src/vendor/k8s.io/kube-openapi/pkg/spec3/component.go b/src/vendor/k8s.io/kube-openapi/pkg/spec3/component.go deleted file mode 100644 index c1bb8bc7b..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/spec3/component.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package spec3 - -import "k8s.io/kube-openapi/pkg/validation/spec" - -// Components holds a set of reusable objects for different aspects of the OAS. -// All objects defined within the components object will have no effect on the API -// unless they are explicitly referenced from properties outside the components object. -// -// more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#componentsObject -type Components struct { - // Schemas holds reusable Schema Objects - Schemas map[string]*spec.Schema `json:"schemas,omitempty"` - // SecuritySchemes holds reusable Security Scheme Objects, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#securitySchemeObject - SecuritySchemes SecuritySchemes `json:"securitySchemes,omitempty"` - // Responses holds reusable Responses Objects - Responses map[string]*Response `json:"responses,omitempty"` - // Parameters holds reusable Parameters Objects - Parameters map[string]*Parameter `json:"parameters,omitempty"` - // Example holds reusable Example objects - Examples map[string]*Example `json:"examples,omitempty"` - // RequestBodies holds reusable Request Body objects - RequestBodies map[string]*RequestBody `json:"requestBodies,omitempty"` - // Links is a map of operations links that can be followed from the response - Links map[string]*Link `json:"links,omitempty"` - // Headers holds a maps of a headers name to its definition - Headers map[string]*Header `json:"headers,omitempty"` - // all fields are defined at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#componentsObject -} - -// SecuritySchemes holds reusable Security Scheme Objects, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#securitySchemeObject -type SecuritySchemes map[string]*SecurityScheme diff --git a/src/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go b/src/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go deleted file mode 100644 index 51dac4bdf..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package spec3 - -import ( - "encoding/json" - "github.com/go-openapi/swag" - "k8s.io/kube-openapi/pkg/validation/spec" -) - -type Encoding struct { - EncodingProps - spec.VendorExtensible -} - -// MarshalJSON is a custom marshal function that knows how to encode Encoding as JSON -func (e *Encoding) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(e.EncodingProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(e.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -func (e *Encoding) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &e.EncodingProps); err != nil { - return err - } - if err := json.Unmarshal(data, &e.VendorExtensible); err != nil { - return err - } - return nil -} - -type EncodingProps struct { - // Content Type for encoding a specific property - ContentType string `json:"contentType,omitempty"` - // A map allowing additional information to be provided as headers - Headers map[string]*Header `json:"headers,omitempty"` - // Describes how a specific property value will be serialized depending on its type - Style string `json:"style,omitempty"` - // When this is true, property values of type array or object generate separate parameters for each value of the array, or key-value-pair of the map. For other types of properties this property has no effect - Explode string `json:"explode,omitempty"` - // AllowReserved determines whether the parameter value SHOULD allow reserved characters, as defined by RFC3986 - AllowReserved bool `json:"allowReserved,omitempty"` -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/spec3/example.go b/src/vendor/k8s.io/kube-openapi/pkg/spec3/example.go deleted file mode 100644 index 0f5ab983c..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/spec3/example.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package spec3 - -import ( - "encoding/json" - - "k8s.io/kube-openapi/pkg/validation/spec" - "github.com/go-openapi/swag" -) - -// Example https://swagger.io/specification/#example-object - -type Example struct { - spec.Refable - ExampleProps - spec.VendorExtensible -} - -// MarshalJSON is a custom marshal function that knows how to encode RequestBody as JSON -func (e *Example) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(e.Refable) - if err != nil { - return nil, err - } - b2, err := json.Marshal(e.ExampleProps) - if err != nil { - return nil, err - } - b3, err := json.Marshal(e.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2, b3), nil -} - -func (e *Example) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &e.Refable); err != nil { - return err - } - if err := json.Unmarshal(data, &e.ExampleProps); err != nil { - return err - } - if err := json.Unmarshal(data, &e.VendorExtensible); err != nil { - return err - } - return nil -} - -type ExampleProps struct { - // Summary holds a short description of the example - Summary string `json:"summary,omitempty"` - // Description holds a long description of the example - Description string `json:"description,omitempty"` - // Embedded literal example. - Value interface{} `json:"value,omitempty"` - // A URL that points to the literal example. This provides the capability to reference examples that cannot easily be included in JSON or YAML documents. - ExternalValue string `json:"externalValue,omitempty"` -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go b/src/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go deleted file mode 100644 index 117113e7a..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package spec3 - -import ( - "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" - "github.com/go-openapi/swag" -) - -type ExternalDocumentation struct { - ExternalDocumentationProps - spec.VendorExtensible -} - -type ExternalDocumentationProps struct { - // Description is a short description of the target documentation. CommonMark syntax MAY be used for rich text representation. - Description string `json:"description,omitempty"` - // URL is the URL for the target documentation. 
- URL string `json:"url"` -} - -// MarshalJSON is a custom marshal function that knows how to encode Responses as JSON -func (e *ExternalDocumentation) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(e.ExternalDocumentationProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(e.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -func (e *ExternalDocumentation) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &e.ExternalDocumentationProps); err != nil { - return err - } - if err := json.Unmarshal(data, &e.VendorExtensible); err != nil { - return err - } - return nil -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/spec3/header.go b/src/vendor/k8s.io/kube-openapi/pkg/spec3/header.go deleted file mode 100644 index cead4b15d..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/spec3/header.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package spec3 - -import ( - "encoding/json" - - "github.com/go-openapi/swag" - "k8s.io/kube-openapi/pkg/validation/spec" -) - -// Header a struct that describes a single operation parameter, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#parameterObject -// -// Note that this struct is actually a thin wrapper around HeaderProps to make it referable and extensible -type Header struct { - spec.Refable - HeaderProps - spec.VendorExtensible -} - -// MarshalJSON is a custom marshal function that knows how to encode Header as JSON -func (h *Header) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(h.Refable) - if err != nil { - return nil, err - } - b2, err := json.Marshal(h.HeaderProps) - if err != nil { - return nil, err - } - b3, err := json.Marshal(h.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2, b3), nil -} - -func (h *Header) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &h.Refable); err != nil { - return err - } - if err := json.Unmarshal(data, &h.HeaderProps); err != nil { - return err - } - if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { - return err - } - - return nil -} - -// HeaderProps a struct that describes a header object -type HeaderProps struct { - // Description holds a brief description of the parameter - Description string `json:"description,omitempty"` - // Required determines whether this parameter is mandatory - Required bool `json:"required,omitempty"` - // Deprecated declares this operation to be deprecated - Deprecated bool `json:"deprecated,omitempty"` - // AllowEmptyValue sets the ability to pass empty-valued parameters - AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` - // Style describes how the parameter value will be serialized depending on the type of the parameter value - Style string `json:"style,omitempty"` - // Explode when true, parameter values of type array or object generate separate parameters for each value of the array or key-value 
pair of the map - Explode bool `json:"explode,omitempty"` - // AllowReserved determines whether the parameter value SHOULD allow reserved characters, as defined by RFC3986 - AllowReserved bool `json:"allowReserved,omitempty"` - // Schema holds the schema defining the type used for the parameter - Schema *spec.Schema `json:"schema,omitempty"` - // Content holds a map containing the representations for the parameter - Content map[string]*MediaType `json:"content,omitempty"` - // Example of the header - Example interface{} `json:"example,omitempty"` - // Examples of the header - Examples map[string]*Example `json:"examples,omitempty"` -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go b/src/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go deleted file mode 100644 index 828fd8dc5..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package spec3 - -import ( - "encoding/json" - "github.com/go-openapi/swag" - "k8s.io/kube-openapi/pkg/validation/spec" -) - -// MediaType a struct that allows you to specify content format, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#mediaTypeObject -// -// Note that this struct is actually a thin wrapper around MediaTypeProps to make it referable and extensible -type MediaType struct { - MediaTypeProps - spec.VendorExtensible -} - -// MarshalJSON is a custom marshal function that knows how to encode MediaType as JSON -func (m *MediaType) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(m.MediaTypeProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(m.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -func (m *MediaType) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &m.MediaTypeProps); err != nil { - return err - } - if err := json.Unmarshal(data, &m.VendorExtensible); err != nil { - return err - } - return nil -} - -// MediaTypeProps a struct that allows you to specify content format, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#mediaTypeObject -type MediaTypeProps struct { - // Schema holds the schema defining the type used for the media type - Schema *spec.Schema `json:"schema,omitempty"` - // Example of the media type - Example interface{} `json:"example,omitempty"` - // Examples of the media type. Each example object should match the media type and specific schema if present - Examples map[string]*Example `json:"examples,omitempty"` - // A map between a property name and its encoding information. The key, being the property name, MUST exist in the schema as a property. 
The encoding object SHALL only apply to requestBody objects when the media type is multipart or application/x-www-form-urlencoded - Encoding map[string]*Encoding `json:"encoding,omitempty"` -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go b/src/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go deleted file mode 100644 index de8aa4602..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package spec3 - -import ( - "encoding/json" - - "k8s.io/kube-openapi/pkg/validation/spec" - "github.com/go-openapi/swag" -) - -// Operation describes a single API operation on a path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#operationObject -// -// Note that this struct is actually a thin wrapper around OperationProps to make it referable and extensible -type Operation struct { - OperationProps - spec.VendorExtensible -} - -// MarshalJSON is a custom marshal function that knows how to encode Operation as JSON -func (o *Operation) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(o.OperationProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(o.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (o *Operation) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &o.OperationProps); err != nil { - return err - } - return json.Unmarshal(data, &o.VendorExtensible) -} - -// OperationProps describes a single API operation on a path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#operationObject -type OperationProps struct { - // Tags holds a list of tags for API documentation control - Tags []string `json:"tags,omitempty"` - // Summary holds a short summary of what the operation does - Summary string `json:"summary,omitempty"` - // Description holds a verbose explanation of the operation behavior - Description string `json:"description,omitempty"` - // ExternalDocs holds additional external documentation for this operation - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` - // OperationId holds a unique string used to identify the operation - OperationId string `json:"operationId,omitempty"` - // Parameters a list of parameters that are applicable for this operation - Parameters []*Parameter `json:"parameters,omitempty"` - // RequestBody holds the request body applicable for this operation - RequestBody *RequestBody `json:"requestBody,omitempty"` - // Responses holds the list of possible responses as they are returned from executing this operation - Responses *Responses `json:"responses,omitempty"` - // Deprecated declares this operation to be deprecated - Deprecated bool `json:"deprecated,omitempty"` - // SecurityRequirement holds a declaration of which security mechanisms can be used for this operation - 
SecurityRequirement []*SecurityRequirement `json:"security,omitempty"` - // Servers contains an alternative server array to service this operation - Servers []*Server `json:"servers,omitempty"` -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go b/src/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go deleted file mode 100644 index 0d7180e50..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package spec3 - -import ( - "encoding/json" - - "github.com/go-openapi/swag" - "k8s.io/kube-openapi/pkg/validation/spec" -) - -// Parameter a struct that describes a single operation parameter, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#parameterObject -// -// Note that this struct is actually a thin wrapper around ParameterProps to make it referable and extensible -type Parameter struct { - spec.Refable - ParameterProps - spec.VendorExtensible -} - -// MarshalJSON is a custom marshal function that knows how to encode Parameter as JSON -func (p *Parameter) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(p.Refable) - if err != nil { - return nil, err - } - b2, err := json.Marshal(p.ParameterProps) - if err != nil { - return nil, err - } - b3, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2, b3), nil -} - -func (p *Parameter) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &p.Refable); err != nil { - return err - } - if err := json.Unmarshal(data, &p.ParameterProps); err != nil { - return err - } - if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { - return err - } - - return nil -} - -// ParameterProps a struct that describes a single operation parameter, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#parameterObject -type ParameterProps struct { - // Name holds the name of the parameter - Name string `json:"name,omitempty"` - // In holds the location of the parameter - In string `json:"in,omitempty"` - // Description holds a brief description of the parameter - Description string `json:"description,omitempty"` - // Required determines whether this parameter is mandatory - Required bool `json:"required,omitempty"` - // Deprecated declares this operation to be deprecated - Deprecated bool `json:"deprecated,omitempty"` - // AllowEmptyValue sets the ability to pass empty-valued parameters - AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` - // Style describes how the parameter value will be serialized depending on the type of the parameter value - Style string `json:"style,omitempty"` - // Explode when true, parameter values of type array or object generate separate parameters for each value of the array or key-value pair of the map - Explode bool `json:"explode,omitempty"` - // AllowReserved determines whether the parameter value SHOULD allow reserved characters, as defined by 
RFC3986 - AllowReserved bool `json:"allowReserved,omitempty"` - // Schema holds the schema defining the type used for the parameter - Schema *spec.Schema `json:"schema,omitempty"` - // Content holds a map containing the representations for the parameter - Content map[string]*MediaType `json:"content,omitempty"` - // Example of the parameter's potential value - Example interface{} `json:"example,omitempty"` - // Examples of the parameter's potential value. Each example SHOULD contain a value in the correct format as specified in the parameter encoding - Examples map[string]*Example `json:"examples,omitempty"` -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/spec3/path.go b/src/vendor/k8s.io/kube-openapi/pkg/spec3/path.go deleted file mode 100644 index bc48c504d..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/spec3/path.go +++ /dev/null @@ -1,142 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package spec3 - -import ( - "encoding/json" - "strings" - - "k8s.io/kube-openapi/pkg/validation/spec" - "github.com/go-openapi/swag" -) - -// Paths describes the available paths and operations for the API, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathsObject -type Paths struct { - Paths map[string]*Path - spec.VendorExtensible -} - -// MarshalJSON is a custom marshal function that knows how to encode Paths as JSON -func (p *Paths) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(p.Paths) - if err != nil { - return nil, err - } - b2, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (p *Paths) UnmarshalJSON(data []byte) error { - var res map[string]json.RawMessage - if err := json.Unmarshal(data, &res); err != nil { - return err - } - for k, v := range res { - if strings.HasPrefix(strings.ToLower(k), "x-") { - if p.Extensions == nil { - p.Extensions = make(map[string]interface{}) - } - var d interface{} - if err := json.Unmarshal(v, &d); err != nil { - return err - } - p.Extensions[k] = d - } - if strings.HasPrefix(k, "/") { - if p.Paths == nil { - p.Paths = make(map[string]*Path) - } - var pi *Path - if err := json.Unmarshal(v, &pi); err != nil { - return err - } - p.Paths[k] = pi - } - } - return nil -} - -// Path describes the operations available on a single path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathItemObject -// -// Note that this struct is actually a thin wrapper around PathProps to make it referable and extensible -type Path struct { - spec.Refable - PathProps - spec.VendorExtensible -} - -// MarshalJSON is a custom marshal function that knows how to encode Path as JSON -func (p *Path) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(p.Refable) - if err != nil { - return nil, err - } - b2, err := json.Marshal(p.PathProps) - if err != nil { - return nil, err - } - b3, err := 
json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2, b3), nil -} - -func (p *Path) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &p.Refable); err != nil { - return err - } - if err := json.Unmarshal(data, &p.PathProps); err != nil { - return err - } - if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { - return err - } - return nil -} - -// PathProps describes the operations available on a single path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathItemObject -type PathProps struct { - // Summary holds a summary for all operations in this path - Summary string `json:"summary,omitempty"` - // Description holds a description for all operations in this path - Description string `json:"description,omitempty"` - // Get defines GET operation - Get *Operation `json:"get,omitempty"` - // Put defines PUT operation - Put *Operation `json:"put,omitempty"` - // Post defines POST operation - Post *Operation `json:"post,omitempty"` - // Delete defines DELETE operation - Delete *Operation `json:"delete,omitempty"` - // Options defines OPTIONS operation - Options *Operation `json:"options,omitempty"` - // Head defines HEAD operation - Head *Operation `json:"head,omitempty"` - // Patch defines PATCH operation - Patch *Operation `json:"patch,omitempty"` - // Trace defines TRACE operation - Trace *Operation `json:"trace,omitempty"` - // Servers is an alternative server array to service all operations in this path - Servers []*Server `json:"servers,omitempty"` - // Parameters a list of parameters that are applicable for this operation - Parameters []*Parameter `json:"parameters,omitempty"` -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go b/src/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go deleted file mode 100644 index 0adc62826..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package spec3 - -import ( - "encoding/json" - - "k8s.io/kube-openapi/pkg/validation/spec" - "github.com/go-openapi/swag" -) - -// RequestBody describes a single request body, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#requestBodyObject -// -// Note that this struct is actually a thin wrapper around RequestBodyProps to make it referable and extensible -type RequestBody struct { - spec.Refable - RequestBodyProps - spec.VendorExtensible -} - -// MarshalJSON is a custom marshal function that knows how to encode RequestBody as JSON -func (r *RequestBody) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(r.Refable) - if err != nil { - return nil, err - } - b2, err := json.Marshal(r.RequestBodyProps) - if err != nil { - return nil, err - } - b3, err := json.Marshal(r.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2, b3), nil -} - -func (r *RequestBody) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &r.Refable); err != nil { - return err - } - if err := json.Unmarshal(data, &r.RequestBodyProps); err != nil { - return err - } - if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { - return err - } - return nil -} - -// RequestBodyProps describes a single request body, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#requestBodyObject -type RequestBodyProps struct { - // Description holds a brief description of the request body - Description string `json:"description,omitempty"` - // Content is the content of the request body. The key is a media type or media type range and the value describes it - Content map[string]*MediaType `json:"content,omitempty"` - // Required determines if the request body is required in the request - Required bool `json:"required,omitempty"` -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/spec3/response.go b/src/vendor/k8s.io/kube-openapi/pkg/spec3/response.go deleted file mode 100644 index ccd73369f..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/spec3/response.go +++ /dev/null @@ -1,203 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package spec3 - -import ( - "encoding/json" - "strconv" - - "k8s.io/kube-openapi/pkg/validation/spec" - "github.com/go-openapi/swag" -) - -// Responses holds the list of possible responses as they are returned from executing this operation -// -// Note that this struct is actually a thin wrapper around ResponsesProps to make it referable and extensible -type Responses struct { - ResponsesProps - spec.VendorExtensible -} - -// MarshalJSON is a custom marshal function that knows how to encode Responses as JSON -func (r *Responses) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(r.ResponsesProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(r.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -func (r *Responses) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { - return err - } - if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { - return err - } - - return nil -} - -// ResponsesProps holds the list of possible responses as they are returned from executing this operation -type ResponsesProps struct { - // Default holds the documentation of responses other than the ones declared for specific HTTP response codes. Use this field to cover undeclared responses - Default *Response `json:"-"` - // StatusCodeResponses holds a map of any HTTP status code to the response definition - StatusCodeResponses map[int]*Response `json:"-"` -} - -// MarshalJSON is a custom marshal function that knows how to encode ResponsesProps as JSON -func (r ResponsesProps) MarshalJSON() ([]byte, error) { - toser := map[string]*Response{} - if r.Default != nil { - toser["default"] = r.Default - } - for k, v := range r.StatusCodeResponses { - toser[strconv.Itoa(k)] = v - } - return json.Marshal(toser) -} - -// UnmarshalJSON unmarshals responses from JSON -func (r *ResponsesProps) UnmarshalJSON(data []byte) error { - var res map[string]*Response - if err := json.Unmarshal(data, &res); err != nil { - return nil - } - if v, ok := res["default"]; ok { - r.Default = v - delete(res, "default") - } - for k, v := range res { - if nk, err := strconv.Atoi(k); err == nil { - if r.StatusCodeResponses == nil { - r.StatusCodeResponses = map[int]*Response{} - } - r.StatusCodeResponses[nk] = v - } - } - return nil -} - -// Response describes a single response from an API Operation, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#responseObject -// -// Note that this struct is actually a thin wrapper around ResponseProps to make it referable and extensible -type Response struct { - spec.Refable - ResponseProps - spec.VendorExtensible -} - -// MarshalJSON is a custom marshal function that knows how to encode Response as JSON -func (r *Response) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(r.Refable) - if err != nil { - return nil, err - } - b2, err := json.Marshal(r.ResponseProps) - if err != nil { - return nil, err - } - b3, err := json.Marshal(r.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2, b3), nil -} - -func (r *Response) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &r.Refable); err != nil { - return err - } - if err := json.Unmarshal(data, &r.ResponseProps); err != nil { - return err - } - if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { - return err - } - - return nil -} - -// ResponseProps describes a single response from an API Operation, more at 
https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#responseObject -type ResponseProps struct { - // Description holds a short description of the response - Description string `json:"description,omitempty"` - // Headers holds a maps of a headers name to its definition - Headers map[string]*Header `json:"headers,omitempty"` - // Content holds a map containing descriptions of potential response payloads - Content map[string]*MediaType `json:"content,omitempty"` - // Links is a map of operations links that can be followed from the response - Links map[string]*Link `json:"links,omitempty"` -} - - -// Link represents a possible design-time link for a response, more at https://swagger.io/specification/#link-object -type Link struct { - spec.Refable - LinkProps - spec.VendorExtensible -} - -// MarshalJSON is a custom marshal function that knows how to encode Link as JSON -func (r *Link) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(r.Refable) - if err != nil { - return nil, err - } - b2, err := json.Marshal(r.LinkProps) - if err != nil { - return nil, err - } - b3, err := json.Marshal(r.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2, b3), nil -} - -func (r *Link) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &r.Refable); err != nil { - return err - } - if err := json.Unmarshal(data, &r.LinkProps); err != nil { - return err - } - if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { - return err - } - - return nil -} - -// LinkProps describes a single response from an API Operation, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#responseObject -type LinkProps struct { - // OperationId is the name of an existing, resolvable OAS operation - OperationId string `json:"operationId,omitempty"` - // Parameters is a map representing parameters to pass to an operation as specified with operationId or identified via operationRef - Parameters map[string]interface{} `json:"parameters,omitempty"` - // Description holds a description of the link - Description string `json:"description,omitempty"` - // RequestBody is a literal value or expresion to use as a request body when calling the target operation - RequestBody interface{} `json:"requestBody,omitempty"` - // Server holds a server object used by the target operation - Server *Server `json:"server,omitempty"` -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go b/src/vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go deleted file mode 100644 index 0ce8924ef..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package spec3 - -import ( - "encoding/json" - - "k8s.io/kube-openapi/pkg/validation/spec" - "github.com/go-openapi/swag" -) - -// SecurityRequirementProps describes the required security schemes to execute an operation, more at https://swagger.io/specification/#security-requirement-object -// -// Note that this struct is actually a thin wrapper around SecurityRequirementProps to make it referable and extensible -type SecurityRequirement struct { - SecurityRequirementProps - spec.VendorExtensible -} - -// MarshalJSON is a custom marshal function that knows how to encode SecurityRequirement as JSON -func (s *SecurityRequirement) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SecurityRequirementProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (s *SecurityRequirement) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &s.SecurityRequirementProps); err != nil { - return err - } - return json.Unmarshal(data, &s.VendorExtensible) -} - -// SecurityRequirementProps describes the required security schemes to execute an operation, more at https://swagger.io/specification/#security-requirement-object -type SecurityRequirementProps map[string][]string diff --git a/src/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go b/src/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go deleted file mode 100644 index 9b1352f4e..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package spec3 - -import ( - "encoding/json" - - "k8s.io/kube-openapi/pkg/validation/spec" - "github.com/go-openapi/swag" -) - -// SecurityScheme defines reusable Security Scheme Object, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#securitySchemeObject -type SecurityScheme struct { - spec.Refable - SecuritySchemeProps - spec.VendorExtensible -} - -// MarshalJSON is a custom marshal function that knows how to encode SecurityScheme as JSON -func (s *SecurityScheme) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SecuritySchemeProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, err - } - b3, err := json.Marshal(s.Refable) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2, b3), nil -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (s *SecurityScheme) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { - return err - } - if err := json.Unmarshal(data, &s.VendorExtensible); err != nil { - return err - } - return json.Unmarshal(data, &s.Refable) -} - -// SecuritySchemeProps defines a security scheme that can be used by the operations -type SecuritySchemeProps struct { - // Type of the security scheme - Type string `json:"type,omitempty"` - // Description holds a short description for security scheme - Description string `json:"description,omitempty"` - // Name holds the name of the header, query or cookie parameter to be used - Name string `json:"name,omitempty"` - // In holds the location of the API key - In string `json:"in,omitempty"` - // Scheme holds the name of the HTTP Authorization scheme to be used in the Authorization header - Scheme string `json:"scheme,omitempty"` - // BearerFormat holds a hint to the client to identify how the bearer token is formatted - BearerFormat string `json:"bearerFormat,omitempty"` - // Flows contains configuration information for the flow types supported. - Flows map[string]*OAuthFlow `json:"flows,omitempty"` - // OpenIdConnectUrl holds an url to discover OAuth2 configuration values from - OpenIdConnectUrl string `json:"openIdConnectUrl,omitempty"` -} - -// OAuthFlow contains configuration information for the flow types supported. 
-type OAuthFlow struct { - OAuthFlowProps - spec.VendorExtensible -} - -// MarshalJSON is a custom marshal function that knows how to encode OAuthFlow as JSON -func (o *OAuthFlow) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(o.OAuthFlowProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(o.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (o *OAuthFlow) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &o.OAuthFlowProps); err != nil { - return err - } - return json.Unmarshal(data, &o.VendorExtensible) -} - -// OAuthFlowProps holds configuration details for a supported OAuth Flow -type OAuthFlowProps struct { - // AuthorizationUrl hold the authorization URL to be used for this flow - AuthorizationUrl string `json:"authorizationUrl,omitempty"` - // TokenUrl holds the token URL to be used for this flow - TokenUrl string `json:"tokenUrl,omitempty"` - // RefreshUrl holds the URL to be used for obtaining refresh tokens - RefreshUrl string `json:"refreshUrl,omitempty"` - // Scopes holds the available scopes for the OAuth2 security scheme - Scopes map[string]string `json:"scopes,omitempty"` -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/spec3/server.go b/src/vendor/k8s.io/kube-openapi/pkg/spec3/server.go deleted file mode 100644 index a505fb221..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/spec3/server.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package spec3 - -import ( - "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" - "github.com/go-openapi/swag" - -) - -type Server struct { - ServerProps - spec.VendorExtensible -} - -type ServerProps struct { - // Description is a short description of the target documentation. CommonMark syntax MAY be used for rich text representation. - Description string `json:"description,omitempty"` - // URL is the URL for the target documentation. - URL string `json:"url"` - // Variables contains a map between a variable name and its value. 
The value is used for substitution in the server's URL templeate - Variables map[string]*ServerVariable `json:"variables,omitempty"` -} - -// MarshalJSON is a custom marshal function that knows how to encode Responses as JSON -func (s *Server) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.ServerProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -func (s *Server) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &s.ServerProps); err != nil { - return err - } - if err := json.Unmarshal(data, &s.VendorExtensible); err != nil { - return err - } - return nil -} - -type ServerVariable struct { - ServerVariableProps - spec.VendorExtensible -} - -type ServerVariableProps struct { - // Enum is an enumeration of string values to be used if the substitution options are from a limited set - Enum []string `json:"enum,omitempty"` - // Default is the default value to use for substitution, which SHALL be sent if an alternate value is not supplied - Default string `json:"default"` - // Description is a description for the server variable - Description string `json:"description,omitempty"` -} - -// MarshalJSON is a custom marshal function that knows how to encode Responses as JSON -func (s *ServerVariable) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.ServerVariableProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -func (s *ServerVariable) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &s.ServerVariableProps); err != nil { - return err - } - if err := json.Unmarshal(data, &s.VendorExtensible); err != nil { - return err - } - return nil -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go b/src/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go deleted file mode 100644 index 3ff48a3c3..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package spec3 - -import ( - "k8s.io/kube-openapi/pkg/validation/spec" -) - -// OpenAPI is an object that describes an API and conforms to the OpenAPI Specification. 
-type OpenAPI struct { - // Version represents the semantic version number of the OpenAPI Specification that this document uses - Version string `json:"openapi"` - // Info provides metadata about the API - Info *spec.Info `json:"info"` - // Paths holds the available target and operations for the API - Paths *Paths `json:"paths,omitempty"` - // Servers is an array of Server objects which provide connectivity information to a target server - Servers []*Server `json:"servers,omitempty"` - // Components hold various schemas for the specification - Components *Components `json:"components,omitempty"` - // ExternalDocs holds additional external documentation - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/.gitignore b/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/.gitignore deleted file mode 100644 index dd91ed6a0..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -secrets.yml -coverage.out diff --git a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/LICENSE b/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/contact_info.go b/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/contact_info.go deleted file mode 100644 index f285970aa..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/contact_info.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// ContactInfo contact information for the exposed API. 
-// -// For more information: http://goo.gl/8us55a#contactObject -type ContactInfo struct { - Name string `json:"name,omitempty"` - URL string `json:"url,omitempty"` - Email string `json:"email,omitempty"` -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/external_docs.go b/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/external_docs.go deleted file mode 100644 index 88add91b2..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/external_docs.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// ExternalDocumentation allows referencing an external resource for -// extended documentation. -// -// For more information: http://goo.gl/8us55a#externalDocumentationObject -type ExternalDocumentation struct { - Description string `json:"description,omitempty"` - URL string `json:"url,omitempty"` -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go b/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go deleted file mode 100644 index 35fd57920..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go +++ /dev/null @@ -1,1515 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package spec - -import ( - "errors" - "strconv" - - "github.com/go-openapi/jsonreference" - openapi_v2 "github.com/google/gnostic/openapiv2" -) - -// Interfaces -type GnosticCommonValidations interface { - GetMaximum() float64 - GetExclusiveMaximum() bool - GetMinimum() float64 - GetExclusiveMinimum() bool - GetMaxLength() int64 - GetMinLength() int64 - GetPattern() string - GetMaxItems() int64 - GetMinItems() int64 - GetUniqueItems() bool - GetMultipleOf() float64 - GetEnum() []*openapi_v2.Any -} - -func (k *CommonValidations) FromGnostic(g GnosticCommonValidations) error { - if g == nil { - return nil - } - - max := g.GetMaximum() - if max != 0 { - k.Maximum = &max - } - - k.ExclusiveMaximum = g.GetExclusiveMaximum() - - min := g.GetMinimum() - if min != 0 { - k.Minimum = &min - } - - k.ExclusiveMinimum = g.GetExclusiveMinimum() - - maxLen := g.GetMaxLength() - if maxLen != 0 { - k.MaxLength = &maxLen - } - - minLen := g.GetMinLength() - if minLen != 0 { - k.MinLength = &minLen - } - - k.Pattern = g.GetPattern() - - maxItems := g.GetMaxItems() - if maxItems != 0 { - k.MaxItems = &maxItems - } - - minItems := g.GetMinItems() - if minItems != 0 { - k.MinItems = &minItems - } - - k.UniqueItems = g.GetUniqueItems() - - multOf := g.GetMultipleOf() - if multOf != 0 { - k.MultipleOf = &multOf - } - - enums := g.GetEnum() - - if enums != nil { - k.Enum = make([]interface{}, len(enums)) - for i, v := range enums { - if v == nil { - continue - } - - var convert interface{} - if err := v.ToRawInfo().Decode(&convert); err != nil { - return err - } else { - k.Enum[i] = convert - } - } - } - - return nil -} - -type GnosticSimpleSchema interface { - GetType() string - GetFormat() string - GetItems() *openapi_v2.PrimitivesItems - GetCollectionFormat() string - GetDefault() *openapi_v2.Any -} - -func (k *SimpleSchema) FromGnostic(g GnosticSimpleSchema) error { - if g == nil { - return nil - } - - k.Type = g.GetType() - k.Format = g.GetFormat() - k.CollectionFormat = g.GetCollectionFormat() - - items := g.GetItems() - if items != nil { - k.Items = &Items{} - if err := k.Items.FromGnostic(items); err != nil { - return err - } - } - - def := g.GetDefault() - if def != nil { - var convert interface{} - if err := def.ToRawInfo().Decode(&convert); err != nil { - return err - } else { - k.Default = convert - } - } - - return nil -} - -func (k *Items) FromGnostic(g *openapi_v2.PrimitivesItems) error { - if g == nil { - return nil - } - - if err := k.SimpleSchema.FromGnostic(g); err != nil { - return err - } - - if err := k.CommonValidations.FromGnostic(g); err != nil { - return err - } - - if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { - return err - } - - return nil -} - -func (k *VendorExtensible) FromGnostic(g []*openapi_v2.NamedAny) error { - if len(g) == 0 { - return nil - } - - k.Extensions = make(Extensions, len(g)) - for _, v := range g { - if v == nil { - continue - } - - if v.Value == nil { - k.Extensions[v.Name] = nil - continue - } - - var iface interface{} - if err := v.Value.ToRawInfo().Decode(&iface); err != nil { - return err - } else { - k.Extensions[v.Name] = iface - } - } - return nil -} - -func (k *Refable) FromGnostic(g string) error { - return k.Ref.FromGnostic(g) -} - -func (k *Ref) FromGnostic(g string) error { - if g == "" { - return nil - } - - ref, err := jsonreference.New(g) - if err != nil { - return err - } - - *k = Ref{ - Ref: ref, - } - - return nil -} - -// Converts a gnostic v2 Document to a kube-openapi Swagger Document -// -// Caveats: -// -// 
- gnostic v2 documents treats zero as unspecified for numerical fields of -//CommonValidations fields such as Maximum, Minimum, MaximumItems, etc. -//There will always be data loss if one of the values of these fields is set to zero. -// -// Returns: -// -// - `ok`: `false` if a value was present in the gnostic document which cannot be -// roundtripped into kube-openapi types. In these instances, `ok` is set to -// `false` and the value is skipped. -// -// - `err`: an unexpected error occurred in the conversion from the gnostic type -// to kube-openapi type. -func (k *Swagger) FromGnostic(g *openapi_v2.Document) (ok bool, err error) { - ok = true - if g == nil { - return true, nil - } - - if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { - return false, err - } - - if nok, err := k.SwaggerProps.FromGnostic(g); err != nil { - return false, err - } else if !nok { - ok = false - } - - return ok, nil -} - -func (k *SwaggerProps) FromGnostic(g *openapi_v2.Document) (ok bool, err error) { - if g == nil { - return true, nil - } - - ok = true - - // openapi_v2.Document does not support "ID" field, so it will not be - // included - k.Consumes = g.Consumes - k.Produces = g.Produces - k.Schemes = g.Schemes - k.Swagger = g.Swagger - - if g.Info != nil { - k.Info = &Info{} - if nok, err := k.Info.FromGnostic(g.Info); err != nil { - return false, err - } else if !nok { - ok = false - } - } - - k.Host = g.Host - k.BasePath = g.BasePath - - if g.Paths != nil { - k.Paths = &Paths{} - if nok, err := k.Paths.FromGnostic(g.Paths); err != nil { - return false, err - } else if !nok { - ok = false - } - } - - if g.Definitions != nil { - k.Definitions = make(Definitions, len(g.Definitions.AdditionalProperties)) - for _, v := range g.Definitions.AdditionalProperties { - if v == nil { - continue - } - converted := Schema{} - if nok, err := converted.FromGnostic(v.Value); err != nil { - return false, err - } else if !nok { - ok = false - } - k.Definitions[v.Name] = converted - - } - } - - if g.Parameters != nil { - k.Parameters = make( - map[string]Parameter, - len(g.Parameters.AdditionalProperties)) - for _, v := range g.Parameters.AdditionalProperties { - if v == nil { - continue - } - p := Parameter{} - if nok, err := p.FromGnostic(v.Value); err != nil { - return false, err - } else if !nok { - ok = false - } - - k.Parameters[v.Name] = p - } - } - - if g.Responses != nil { - k.Responses = make( - map[string]Response, - len(g.Responses.AdditionalProperties)) - - for _, v := range g.Responses.AdditionalProperties { - if v == nil { - continue - } - p := Response{} - if nok, err := p.FromGnostic(v.Value); err != nil { - return false, err - } else if !nok { - ok = false - } - - k.Responses[v.Name] = p - } - } - - if g.SecurityDefinitions != nil { - k.SecurityDefinitions = make(SecurityDefinitions) - if err := k.SecurityDefinitions.FromGnostic(g.SecurityDefinitions); err != nil { - return false, err - } - } - - if g.Security != nil { - k.Security = make([]map[string][]string, len(g.Security)) - for i, v := range g.Security { - if v == nil || v.AdditionalProperties == nil { - continue - } - - k.Security[i] = make(map[string][]string, len(v.AdditionalProperties)) - converted := k.Security[i] - for _, p := range v.AdditionalProperties { - if p == nil { - continue - } - if p.Value != nil { - converted[p.Name] = p.Value.Value - } else { - converted[p.Name] = nil - } - } - } - } - - if g.Tags != nil { - k.Tags = make([]Tag, len(g.Tags)) - for i, v := range g.Tags { - if v == nil { - continue - } else 
if nok, err := k.Tags[i].FromGnostic(v); err != nil { - return false, err - } else if !nok { - ok = false - } - } - } - - if g.ExternalDocs != nil { - k.ExternalDocs = &ExternalDocumentation{} - if nok, err := k.ExternalDocs.FromGnostic(g.ExternalDocs); err != nil { - return false, err - } else if !nok { - ok = false - } - } - - return ok, nil -} - -// Info - -func (k *Info) FromGnostic(g *openapi_v2.Info) (ok bool, err error) { - ok = true - if g == nil { - return true, nil - } - - if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { - return false, err - } - - if nok, err := k.InfoProps.FromGnostic(g); err != nil { - return false, err - } else if !nok { - ok = false - } - - return ok, nil -} - -func (k *InfoProps) FromGnostic(g *openapi_v2.Info) (ok bool, err error) { - if g == nil { - return true, nil - } - - ok = true - - k.Description = g.Description - k.Title = g.Title - k.TermsOfService = g.TermsOfService - - if g.Contact != nil { - k.Contact = &ContactInfo{} - - if nok, err := k.Contact.FromGnostic(g.Contact); err != nil { - return false, err - } else if !nok { - ok = false - } - } - - if g.License != nil { - k.License = &License{} - if nok, err := k.License.FromGnostic(g.License); err != nil { - return false, err - } else if !nok { - ok = false - } - } - - k.Version = g.Version - return ok, nil -} - -func (k *License) FromGnostic(g *openapi_v2.License) (ok bool, err error) { - if g == nil { - return true, nil - } - - ok = true - - k.Name = g.Name - k.URL = g.Url - - // License does not embed to VendorExtensible! - // data loss from g.VendorExtension - if len(g.VendorExtension) != 0 { - ok = false - } - - return ok, nil -} - -func (k *ContactInfo) FromGnostic(g *openapi_v2.Contact) (ok bool, err error) { - if g == nil { - return true, nil - } - - ok = true - - k.Name = g.Name - k.URL = g.Url - k.Email = g.Email - - // ContactInfo does not embed to VendorExtensible! 
- // data loss from g.VendorExtension - if len(g.VendorExtension) != 0 { - ok = false - } - - return ok, nil -} - -// Paths - -func (k *Paths) FromGnostic(g *openapi_v2.Paths) (ok bool, err error) { - if g == nil { - return true, nil - } - - ok = true - - if g.Path != nil { - k.Paths = make(map[string]PathItem, len(g.Path)) - for _, v := range g.Path { - if v == nil { - continue - } - - converted := PathItem{} - if nok, err := converted.FromGnostic(v.Value); err != nil { - return false, err - } else if !nok { - ok = false - } - - k.Paths[v.Name] = converted - } - } - - if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { - return false, err - } - return ok, nil -} - -func (k *PathItem) FromGnostic(g *openapi_v2.PathItem) (ok bool, err error) { - if g == nil { - return true, nil - } - ok = true - - if nok, err := k.PathItemProps.FromGnostic(g); err != nil { - return false, err - } else if !nok { - ok = false - } - - if err := k.Refable.FromGnostic(g.XRef); err != nil { - return false, err - } - - if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { - return false, err - } - return ok, nil -} - -func (k *PathItemProps) FromGnostic(g *openapi_v2.PathItem) (ok bool, err error) { - if g == nil { - return true, nil - } - ok = true - if g.Get != nil { - k.Get = &Operation{} - if nok, err := k.Get.FromGnostic(g.Get); err != nil { - return false, err - } else if !nok { - ok = false - } - } - - if g.Put != nil { - k.Put = &Operation{} - if nok, err := k.Put.FromGnostic(g.Put); err != nil { - return false, err - } else if !nok { - ok = false - } - } - - if g.Post != nil { - k.Post = &Operation{} - if nok, err := k.Post.FromGnostic(g.Post); err != nil { - return false, err - } else if !nok { - ok = false - } - } - - if g.Delete != nil { - k.Delete = &Operation{} - if nok, err := k.Delete.FromGnostic(g.Delete); err != nil { - return false, err - } else if !nok { - ok = false - } - } - - if g.Options != nil { - k.Options = &Operation{} - if nok, err := k.Options.FromGnostic(g.Options); err != nil { - return false, err - } else if !nok { - ok = false - } - } - - if g.Head != nil { - k.Head = &Operation{} - if nok, err := k.Head.FromGnostic(g.Head); err != nil { - return false, err - } else if !nok { - ok = false - } - } - - if g.Patch != nil { - k.Patch = &Operation{} - if nok, err := k.Patch.FromGnostic(g.Patch); err != nil { - return false, err - } else if !nok { - ok = false - } - } - - if g.Parameters != nil { - k.Parameters = make([]Parameter, len(g.Parameters)) - for i, v := range g.Parameters { - if v == nil { - continue - } else if nok, err := k.Parameters[i].FromGnosticParametersItem(v); err != nil { - return false, err - } else if !nok { - ok = false - } - } - } - - return ok, nil -} - -func (k *Operation) FromGnostic(g *openapi_v2.Operation) (ok bool, err error) { - if g == nil { - return true, nil - } - - ok = true - - if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { - return false, err - } - - if nok, err := k.OperationProps.FromGnostic(g); err != nil { - return false, err - } else if !nok { - ok = false - } - - return ok, nil -} - -func (k *OperationProps) FromGnostic(g *openapi_v2.Operation) (ok bool, err error) { - if g == nil { - return true, nil - } - - ok = true - - k.Description = g.Description - k.Consumes = g.Consumes - k.Produces = g.Produces - k.Schemes = g.Schemes - k.Tags = g.Tags - k.Summary = g.Summary - - if g.ExternalDocs != nil { - k.ExternalDocs = &ExternalDocumentation{} - if nok, err := 
k.ExternalDocs.FromGnostic(g.ExternalDocs); err != nil { - return false, err - } else if !nok { - ok = false - } - } - - k.ID = g.OperationId - k.Deprecated = g.Deprecated - - if g.Security != nil { - k.Security = make([]map[string][]string, len(g.Security)) - for i, v := range g.Security { - if v == nil || v.AdditionalProperties == nil { - continue - } - - k.Security[i] = make(map[string][]string, len(v.AdditionalProperties)) - converted := k.Security[i] - for _, p := range v.AdditionalProperties { - if p == nil { - continue - } - - if p.Value != nil { - converted[p.Name] = p.Value.Value - } else { - converted[p.Name] = nil - } - } - } - } - - if g.Parameters != nil { - k.Parameters = make([]Parameter, len(g.Parameters)) - for i, v := range g.Parameters { - if v == nil { - continue - } else if nok, err := k.Parameters[i].FromGnosticParametersItem(v); err != nil { - return false, err - } else if !nok { - ok = false - } - } - } - - if g.Responses != nil { - k.Responses = &Responses{} - if nok, err := k.Responses.FromGnostic(g.Responses); err != nil { - return false, err - } else if !nok { - ok = false - } - } - - return ok, nil -} - -// Responses - -func (k *Responses) FromGnostic(g *openapi_v2.Responses) (ok bool, err error) { - if g == nil { - return true, nil - } - ok = true - if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { - return false, err - } - - if nok, err := k.ResponsesProps.FromGnostic(g); err != nil { - return false, err - } else if !nok { - ok = false - } - - return ok, nil -} - -func (k *ResponsesProps) FromGnostic(g *openapi_v2.Responses) (ok bool, err error) { - if g == nil { - return true, nil - } else if g.ResponseCode == nil { - return ok, nil - } - - ok = true - for _, v := range g.ResponseCode { - if v == nil { - continue - } - if v.Name == "default" { - k.Default = &Response{} - if nok, err := k.Default.FromGnosticResponseValue(v.Value); err != nil { - return false, err - } else if !nok { - ok = false - } - } else if nk, err := strconv.Atoi(v.Name); err != nil { - // This should actually never fail, unless gnostic struct was - // manually/purposefully tampered with at runtime. 
- // Gnostic's ParseDocument validates that all StatusCodeResponses - // keys adhere to the following regex ^([0-9]{3})$|^(default)$ - ok = false - } else { - if k.StatusCodeResponses == nil { - k.StatusCodeResponses = map[int]Response{} - } - - res := Response{} - if nok, err := res.FromGnosticResponseValue(v.Value); err != nil { - return false, err - } else if !nok { - ok = false - } - k.StatusCodeResponses[nk] = res - } - } - - return ok, nil -} - -func (k *Response) FromGnostic(g *openapi_v2.Response) (ok bool, err error) { - if g == nil { - return true, nil - } - ok = true - // Refable case handled in FromGnosticResponseValue - - if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { - return false, err - } - - if nok, err := k.ResponseProps.FromGnostic(g); err != nil { - return false, err - } else if !nok { - ok = false - } - - return ok, nil -} - -func (k *Response) FromGnosticResponseValue(g *openapi_v2.ResponseValue) (ok bool, err error) { - ok = true - if ref := g.GetJsonReference(); ref != nil { - k.Description = ref.Description - - if err := k.Refable.FromGnostic(ref.XRef); err != nil { - return false, err - } - } else if nok, err := k.FromGnostic(g.GetResponse()); err != nil { - return false, err - } else if !nok { - ok = false - } - - return ok, nil -} - -func (k *ResponseProps) FromGnostic(g *openapi_v2.Response) (ok bool, err error) { - if g == nil { - return true, nil - } - ok = true - k.Description = g.Description - - if g.Schema != nil { - k.Schema = &Schema{} - if nok, err := k.Schema.FromGnosticSchemaItem(g.Schema); err != nil { - return false, err - } else if !nok { - ok = false - } - } - - if g.Headers != nil { - k.Headers = make(map[string]Header, len(g.Headers.AdditionalProperties)) - for _, v := range g.Headers.AdditionalProperties { - if v == nil { - continue - } - - converted := Header{} - if err := converted.FromGnostic(v.GetValue()); err != nil { - return false, err - } - - k.Headers[v.Name] = converted - } - } - - if g.Examples != nil { - k.Examples = make(map[string]interface{}, len(g.Examples.AdditionalProperties)) - for _, v := range g.Examples.AdditionalProperties { - if v == nil { - continue - } else if v.Value == nil { - k.Examples[v.Name] = nil - continue - } - - var iface interface{} - if err := v.Value.ToRawInfo().Decode(&iface); err != nil { - return false, err - } else { - k.Examples[v.Name] = iface - } - } - } - - return ok, nil -} - -// Header - -func (k *Header) FromGnostic(g *openapi_v2.Header) (err error) { - if g == nil { - return nil - } - - if err := k.CommonValidations.FromGnostic(g); err != nil { - return err - } - - if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { - return err - } - - if err := k.SimpleSchema.FromGnostic(g); err != nil { - return err - } - - if err := k.HeaderProps.FromGnostic(g); err != nil { - return err - } - - return nil -} - -func (k *HeaderProps) FromGnostic(g *openapi_v2.Header) error { - if g == nil { - return nil - } - - // All other fields of openapi_v2.Header are handled by - // the embeded fields, commonvalidations, etc. 
- k.Description = g.Description - return nil -} - -// Parameters - -func (k *Parameter) FromGnostic(g *openapi_v2.Parameter) (ok bool, err error) { - if g == nil { - return true, nil - } - ok = true - switch p := g.Oneof.(type) { - case *openapi_v2.Parameter_BodyParameter: - if nok, err := k.ParamProps.FromGnostic(p.BodyParameter); err != nil { - return false, err - } else if !nok { - ok = false - } - - if err := k.VendorExtensible.FromGnostic(p.BodyParameter.GetVendorExtension()); err != nil { - return false, err - } - - return ok, nil - case *openapi_v2.Parameter_NonBodyParameter: - switch nb := g.GetNonBodyParameter().Oneof.(type) { - case *openapi_v2.NonBodyParameter_HeaderParameterSubSchema: - if nok, err := k.ParamProps.FromGnostic(nb.HeaderParameterSubSchema); err != nil { - return false, err - } else if !nok { - ok = false - } - - if err := k.SimpleSchema.FromGnostic(nb.HeaderParameterSubSchema); err != nil { - return false, err - } - - if err := k.CommonValidations.FromGnostic(nb.HeaderParameterSubSchema); err != nil { - return false, err - } - - if err := k.VendorExtensible.FromGnostic(nb.HeaderParameterSubSchema.GetVendorExtension()); err != nil { - return false, err - } - - return ok, nil - case *openapi_v2.NonBodyParameter_FormDataParameterSubSchema: - if nok, err := k.ParamProps.FromGnostic(nb.FormDataParameterSubSchema); err != nil { - return false, err - } else if !nok { - ok = false - } - - if err := k.SimpleSchema.FromGnostic(nb.FormDataParameterSubSchema); err != nil { - return false, err - } - - if err := k.CommonValidations.FromGnostic(nb.FormDataParameterSubSchema); err != nil { - return false, err - } - - if err := k.VendorExtensible.FromGnostic(nb.FormDataParameterSubSchema.GetVendorExtension()); err != nil { - return false, err - } - - return ok, nil - case *openapi_v2.NonBodyParameter_QueryParameterSubSchema: - if nok, err := k.ParamProps.FromGnostic(nb.QueryParameterSubSchema); err != nil { - return false, err - } else if !nok { - ok = false - } - - if err := k.SimpleSchema.FromGnostic(nb.QueryParameterSubSchema); err != nil { - return false, err - } - - if err := k.CommonValidations.FromGnostic(nb.QueryParameterSubSchema); err != nil { - return false, err - } - - if err := k.VendorExtensible.FromGnostic(nb.QueryParameterSubSchema.GetVendorExtension()); err != nil { - return false, err - } - - return ok, nil - case *openapi_v2.NonBodyParameter_PathParameterSubSchema: - if nok, err := k.ParamProps.FromGnostic(nb.PathParameterSubSchema); err != nil { - return false, err - } else if !nok { - ok = false - } - - if err := k.SimpleSchema.FromGnostic(nb.PathParameterSubSchema); err != nil { - return false, err - } - - if err := k.CommonValidations.FromGnostic(nb.PathParameterSubSchema); err != nil { - return false, err - } - - if err := k.VendorExtensible.FromGnostic(nb.PathParameterSubSchema.GetVendorExtension()); err != nil { - return false, err - } - - return ok, nil - default: - return false, errors.New("unrecognized nonbody type for Parameter") - } - default: - return false, errors.New("unrecognized type for Parameter") - } -} - -type GnosticCommonParamProps interface { - GetName() string - GetRequired() bool - GetIn() string - GetDescription() string -} - -type GnosticCommonParamPropsBodyParameter interface { - GetSchema() *openapi_v2.Schema -} - -type GnosticCommonParamPropsFormData interface { - GetAllowEmptyValue() bool -} - -func (k *ParamProps) FromGnostic(g GnosticCommonParamProps) (ok bool, err error) { - ok = true - k.Description = g.GetDescription() - 
k.In = g.GetIn() - k.Name = g.GetName() - k.Required = g.GetRequired() - - if formDataParameter, success := g.(GnosticCommonParamPropsFormData); success { - k.AllowEmptyValue = formDataParameter.GetAllowEmptyValue() - } - - if bodyParameter, success := g.(GnosticCommonParamPropsBodyParameter); success { - if bodyParameter.GetSchema() != nil { - k.Schema = &Schema{} - if nok, err := k.Schema.FromGnostic(bodyParameter.GetSchema()); err != nil { - return false, err - } else if !nok { - ok = false - } - } - } - - return ok, nil -} - -// PB types use a different structure than we do for "refable". For PB, there is -// a wrappign oneof type that could be a ref or the type -func (k *Parameter) FromGnosticParametersItem(g *openapi_v2.ParametersItem) (ok bool, err error) { - if g == nil { - return true, nil - } - - ok = true - if ref := g.GetJsonReference(); ref != nil { - k.Description = ref.Description - - if err := k.Refable.FromGnostic(ref.XRef); err != nil { - return false, err - } - } else if nok, err := k.FromGnostic(g.GetParameter()); err != nil { - return false, err - } else if !nok { - ok = false - } - - return ok, nil -} - -// Schema - -func (k *Schema) FromGnostic(g *openapi_v2.Schema) (ok bool, err error) { - if g == nil { - return true, nil - } - ok = true - - if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { - return false, err - } - - // SwaggerSchemaProps - k.Discriminator = g.Discriminator - k.ReadOnly = g.ReadOnly - k.Description = g.Description - if g.ExternalDocs != nil { - k.ExternalDocs = &ExternalDocumentation{} - if nok, err := k.ExternalDocs.FromGnostic(g.ExternalDocs); err != nil { - return false, err - } else if !nok { - ok = false - } - } - - if g.Example != nil { - if err := g.Example.ToRawInfo().Decode(&k.Example); err != nil { - return false, err - } - } - - // SchemaProps - if err := k.Ref.FromGnostic(g.XRef); err != nil { - return false, err - } - k.Type = g.Type.GetValue() - k.Format = g.GetFormat() - k.Title = g.GetTitle() - - // These below fields are not available in gnostic types, so will never - // be populated. This means roundtrips which make use of these - // (non-official, kube-only) fields will lose information. - // - // Schema.ID is not available in official spec - // Schema.$schema - // Schema.Nullable - in openapiv3, not v2 - // Schema.AnyOf - in openapiv3, not v2 - // Schema.OneOf - in openapiv3, not v2 - // Schema.Not - in openapiv3, not v2 - // Schema.PatternProperties - in openapiv3, not v2 - // Schema.Dependencies - in openapiv3, not v2 - // Schema.AdditionalItems - // Schema.Definitions - not part of spec - // Schema.ExtraProps - gnostic parser rejects any keys it does not recognize - - if g.GetDefault() != nil { - if err := g.GetDefault().ToRawInfo().Decode(&k.Default); err != nil { - return false, err - } - } - - // These conditionals (!= 0) follow gnostic's logic for ToRawInfo - // The keys in gnostic source are only included if nonzero. 
- - if g.Maximum != 0.0 { - k.Maximum = &g.Maximum - } - - if g.Minimum != 0.0 { - k.Minimum = &g.Minimum - } - - k.ExclusiveMaximum = g.ExclusiveMaximum - k.ExclusiveMinimum = g.ExclusiveMinimum - - if g.MaxLength != 0 { - k.MaxLength = &g.MaxLength - } - - if g.MinLength != 0 { - k.MinLength = &g.MinLength - } - - k.Pattern = g.GetPattern() - - if g.MaxItems != 0 { - k.MaxItems = &g.MaxItems - } - - if g.MinItems != 0 { - k.MinItems = &g.MinItems - } - k.UniqueItems = g.UniqueItems - - if g.MultipleOf != 0 { - k.MultipleOf = &g.MultipleOf - } - - for _, v := range g.GetEnum() { - if v == nil { - continue - } - - var convert interface{} - if err := v.ToRawInfo().Decode(&convert); err != nil { - return false, err - } - k.Enum = append(k.Enum, convert) - } - - if g.MaxProperties != 0 { - k.MaxProperties = &g.MaxProperties - } - - if g.MinProperties != 0 { - k.MinProperties = &g.MinProperties - } - - k.Required = g.Required - - if g.GetItems() != nil { - k.Items = &SchemaOrArray{} - for _, v := range g.Items.GetSchema() { - if v == nil { - continue - } - - schema := Schema{} - if nok, err := schema.FromGnostic(v); err != nil { - return false, err - } else if !nok { - ok = false - } - k.Items.Schemas = append(k.Items.Schemas, schema) - } - - if len(k.Items.Schemas) == 1 { - k.Items.Schema = &k.Items.Schemas[0] - k.Items.Schemas = nil - } - } - - for i, v := range g.GetAllOf() { - if v == nil { - continue - } - - k.AllOf = append(k.AllOf, Schema{}) - if nok, err := k.AllOf[i].FromGnostic(v); err != nil { - return false, err - } else if !nok { - ok = false - } - } - - if g.Properties != nil { - k.Properties = make(map[string]Schema) - for _, namedSchema := range g.Properties.AdditionalProperties { - if namedSchema == nil { - continue - } - val := &Schema{} - if nok, err := val.FromGnostic(namedSchema.Value); err != nil { - return false, err - } else if !nok { - ok = false - } - - k.Properties[namedSchema.Name] = *val - } - } - - if g.AdditionalProperties != nil { - k.AdditionalProperties = &SchemaOrBool{} - if g.AdditionalProperties.GetSchema() == nil { - k.AdditionalProperties.Allows = g.AdditionalProperties.GetBoolean() - } else { - k.AdditionalProperties.Schema = &Schema{} - if nok, err := k.AdditionalProperties.Schema.FromGnostic(g.AdditionalProperties.GetSchema()); err != nil { - return false, err - } else if !nok { - ok = false - } - } - } - - return ok, nil -} - -func (k *Schema) FromGnosticSchemaItem(g *openapi_v2.SchemaItem) (ok bool, err error) { - if g == nil { - return true, nil - } - ok = true - - switch p := g.Oneof.(type) { - case *openapi_v2.SchemaItem_FileSchema: - fileSchema := p.FileSchema - - if err := k.VendorExtensible.FromGnostic(fileSchema.VendorExtension); err != nil { - return false, err - } - - k.Format = fileSchema.Format - k.Title = fileSchema.Title - k.Description = fileSchema.Description - k.Required = fileSchema.Required - k.Type = []string{fileSchema.Type} - k.ReadOnly = fileSchema.ReadOnly - - if fileSchema.ExternalDocs != nil { - k.ExternalDocs = &ExternalDocumentation{} - if nok, err := k.ExternalDocs.FromGnostic(fileSchema.ExternalDocs); err != nil { - return false, err - } else if !nok { - ok = false - } - } - - if fileSchema.Example != nil { - if err := fileSchema.Example.ToRawInfo().Decode(&k.Example); err != nil { - return false, err - } - } - - if fileSchema.Default != nil { - if err := fileSchema.Default.ToRawInfo().Decode(&k.Default); err != nil { - return false, err - } - } - - case *openapi_v2.SchemaItem_Schema: - schema := p.Schema - - if nok, err 
:= k.FromGnostic(schema); err != nil { - return false, err - } else if !nok { - ok = false - } - default: - return false, errors.New("unrecognized type for SchemaItem") - } - - return ok, nil -} - -// SecurityDefinitions - -func (k SecurityDefinitions) FromGnostic(g *openapi_v2.SecurityDefinitions) error { - for _, v := range g.GetAdditionalProperties() { - if v == nil { - continue - } - secScheme := &SecurityScheme{} - if err := secScheme.FromGnostic(v.Value); err != nil { - return err - } - k[v.Name] = secScheme - } - - return nil -} - -type GnosticCommonSecurityDefinition interface { - GetType() string - GetDescription() string -} - -func (k *SecuritySchemeProps) FromGnostic(g GnosticCommonSecurityDefinition) error { - k.Type = g.GetType() - k.Description = g.GetDescription() - - if hasName, success := g.(interface{ GetName() string }); success { - k.Name = hasName.GetName() - } - - if hasIn, success := g.(interface{ GetIn() string }); success { - k.In = hasIn.GetIn() - } - - if hasFlow, success := g.(interface{ GetFlow() string }); success { - k.Flow = hasFlow.GetFlow() - } - - if hasAuthURL, success := g.(interface{ GetAuthorizationUrl() string }); success { - k.AuthorizationURL = hasAuthURL.GetAuthorizationUrl() - } - - if hasTokenURL, success := g.(interface{ GetTokenUrl() string }); success { - k.TokenURL = hasTokenURL.GetTokenUrl() - } - - if hasScopes, success := g.(interface { - GetScopes() *openapi_v2.Oauth2Scopes - }); success { - scopes := hasScopes.GetScopes() - if scopes != nil { - k.Scopes = make(map[string]string, len(scopes.AdditionalProperties)) - for _, v := range scopes.AdditionalProperties { - if v == nil { - continue - } - - k.Scopes[v.Name] = v.Value - } - } - } - - return nil -} - -func (k *SecurityScheme) FromGnostic(g *openapi_v2.SecurityDefinitionsItem) error { - if g == nil { - return nil - } - - switch s := g.Oneof.(type) { - case *openapi_v2.SecurityDefinitionsItem_ApiKeySecurity: - if err := k.SecuritySchemeProps.FromGnostic(s.ApiKeySecurity); err != nil { - return err - } - if err := k.VendorExtensible.FromGnostic(s.ApiKeySecurity.VendorExtension); err != nil { - return err - } - return nil - case *openapi_v2.SecurityDefinitionsItem_BasicAuthenticationSecurity: - if err := k.SecuritySchemeProps.FromGnostic(s.BasicAuthenticationSecurity); err != nil { - return err - } - if err := k.VendorExtensible.FromGnostic(s.BasicAuthenticationSecurity.VendorExtension); err != nil { - return err - } - return nil - case *openapi_v2.SecurityDefinitionsItem_Oauth2AccessCodeSecurity: - if err := k.SecuritySchemeProps.FromGnostic(s.Oauth2AccessCodeSecurity); err != nil { - return err - } - if err := k.VendorExtensible.FromGnostic(s.Oauth2AccessCodeSecurity.VendorExtension); err != nil { - return err - } - return nil - case *openapi_v2.SecurityDefinitionsItem_Oauth2ApplicationSecurity: - if err := k.SecuritySchemeProps.FromGnostic(s.Oauth2ApplicationSecurity); err != nil { - return err - } - if err := k.VendorExtensible.FromGnostic(s.Oauth2ApplicationSecurity.VendorExtension); err != nil { - return err - } - return nil - case *openapi_v2.SecurityDefinitionsItem_Oauth2ImplicitSecurity: - if err := k.SecuritySchemeProps.FromGnostic(s.Oauth2ImplicitSecurity); err != nil { - return err - } - if err := k.VendorExtensible.FromGnostic(s.Oauth2ImplicitSecurity.VendorExtension); err != nil { - return err - } - return nil - case *openapi_v2.SecurityDefinitionsItem_Oauth2PasswordSecurity: - if err := k.SecuritySchemeProps.FromGnostic(s.Oauth2PasswordSecurity); err != nil { - return err 
- } - if err := k.VendorExtensible.FromGnostic(s.Oauth2PasswordSecurity.VendorExtension); err != nil { - return err - } - return nil - default: - return errors.New("unrecognized SecurityDefinitionsItem") - } -} - -// Tag - -func (k *Tag) FromGnostic(g *openapi_v2.Tag) (ok bool, err error) { - if g == nil { - return true, nil - } - - ok = true - - if nok, err := k.TagProps.FromGnostic(g); err != nil { - return false, err - } else if !nok { - ok = false - } - - if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { - return false, err - } - return ok, nil -} - -func (k *TagProps) FromGnostic(g *openapi_v2.Tag) (ok bool, err error) { - if g == nil { - return true, nil - } - ok = true - k.Description = g.Description - k.Name = g.Name - - if g.ExternalDocs != nil { - k.ExternalDocs = &ExternalDocumentation{} - if nok, err := k.ExternalDocs.FromGnostic(g.ExternalDocs); err != nil { - return false, err - } else if !nok { - ok = false - } - } - - return ok, nil -} - -// ExternalDocumentation - -func (k *ExternalDocumentation) FromGnostic(g *openapi_v2.ExternalDocs) (ok bool, err error) { - if g == nil { - return true, nil - } - ok = true - k.Description = g.Description - k.URL = g.Url - - // data loss! g.VendorExtension - if len(g.VendorExtension) != 0 { - ok = false - } - - return ok, nil -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go b/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go deleted file mode 100644 index 597fc9631..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
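
The FromGnostic converters above probe the incoming gnostic value by asserting it against anonymous interfaces, so optional getters (GetName, GetIn, GetFlow, ...) are only read when the generated openapi_v2 type actually provides them. A minimal, self-contained sketch of that pattern, using a made-up stand-in type instead of the generated gnostic structs:

package main

import "fmt"

// apiKeySecurity is a hypothetical stand-in for a generated gnostic security
// definition; only some of those generated types expose GetName.
type apiKeySecurity struct{ name string }

func (a apiKeySecurity) GetType() string { return "apiKey" }
func (a apiKeySecurity) GetName() string { return a.name }

// describe accepts anything with GetType and probes for optional getters,
// mirroring how SecuritySchemeProps.FromGnostic fills Name, In, Flow, etc.
func describe(g interface{ GetType() string }) {
	fmt.Println("type:", g.GetType())
	if withName, ok := g.(interface{ GetName() string }); ok {
		fmt.Println("name:", withName.GetName())
	}
}

func main() {
	describe(apiKeySecurity{name: "X-Api-Key"})
}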
- -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/swag" -) - -const ( - jsonArray = "array" -) - -// HeaderProps describes a response header -type HeaderProps struct { - Description string `json:"description,omitempty"` -} - -// Header describes a header for a response of the API -// -// For more information: http://goo.gl/8us55a#headerObject -type Header struct { - CommonValidations - SimpleSchema - VendorExtensible - HeaderProps -} - -// MarshalJSON marshal this to JSON -func (h Header) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(h.CommonValidations) - if err != nil { - return nil, err - } - b2, err := json.Marshal(h.SimpleSchema) - if err != nil { - return nil, err - } - b3, err := json.Marshal(h.HeaderProps) - if err != nil { - return nil, err - } - b4, err := json.Marshal(h.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2, b3, b4), nil -} - -// UnmarshalJSON unmarshals this header from JSON -func (h *Header) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &h.CommonValidations); err != nil { - return err - } - if err := json.Unmarshal(data, &h.SimpleSchema); err != nil { - return err - } - if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { - return err - } - return json.Unmarshal(data, &h.HeaderProps) -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go b/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go deleted file mode 100644 index 51a2f5781..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "strings" - - "github.com/go-openapi/swag" -) - -// Extensions vendor specific extensions -type Extensions map[string]interface{} - -// Add adds a value to these extensions -func (e Extensions) Add(key string, value interface{}) { - realKey := strings.ToLower(key) - e[realKey] = value -} - -// GetString gets a string value from the extensions -func (e Extensions) GetString(key string) (string, bool) { - if v, ok := e[strings.ToLower(key)]; ok { - str, ok := v.(string) - return str, ok - } - return "", false -} - -// GetBool gets a string value from the extensions -func (e Extensions) GetBool(key string) (bool, bool) { - if v, ok := e[strings.ToLower(key)]; ok { - str, ok := v.(bool) - return str, ok - } - return false, false -} - -// GetStringSlice gets a string value from the extensions -func (e Extensions) GetStringSlice(key string) ([]string, bool) { - if v, ok := e[strings.ToLower(key)]; ok { - arr, isSlice := v.([]interface{}) - if !isSlice { - return nil, false - } - var strs []string - for _, iface := range arr { - str, isString := iface.(string) - if !isString { - return nil, false - } - strs = append(strs, str) - } - return strs, ok - } - return nil, false -} - -// GetObject gets the object value from the extensions. 
-// out must be a json serializable type; the json go struct -// tags of out are used to populate it. -func (e Extensions) GetObject(key string, out interface{}) error { - // This json serialization/deserialization could be replaced with - // an approach using reflection if the optimization becomes justified. - if v, ok := e[strings.ToLower(key)]; ok { - b, err := json.Marshal(v) - if err != nil { - return err - } - err = json.Unmarshal(b, out) - if err != nil { - return err - } - } - return nil -} - -// VendorExtensible composition block. -type VendorExtensible struct { - Extensions Extensions -} - -// AddExtension adds an extension to this extensible object -func (v *VendorExtensible) AddExtension(key string, value interface{}) { - if value == nil { - return - } - if v.Extensions == nil { - v.Extensions = make(map[string]interface{}) - } - v.Extensions.Add(key, value) -} - -// MarshalJSON marshals the extensions to json -func (v VendorExtensible) MarshalJSON() ([]byte, error) { - toser := make(map[string]interface{}) - for k, v := range v.Extensions { - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-") { - toser[k] = v - } - } - return json.Marshal(toser) -} - -// UnmarshalJSON for this extensible object -func (v *VendorExtensible) UnmarshalJSON(data []byte) error { - var d map[string]interface{} - if err := json.Unmarshal(data, &d); err != nil { - return err - } - for k, vv := range d { - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-") { - if v.Extensions == nil { - v.Extensions = map[string]interface{}{} - } - v.Extensions[k] = vv - } - } - return nil -} - -// InfoProps the properties for an info definition -type InfoProps struct { - Description string `json:"description,omitempty"` - Title string `json:"title,omitempty"` - TermsOfService string `json:"termsOfService,omitempty"` - Contact *ContactInfo `json:"contact,omitempty"` - License *License `json:"license,omitempty"` - Version string `json:"version,omitempty"` -} - -// Info object provides metadata about the API. -// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience. -// -// For more information: http://goo.gl/8us55a#infoObject -type Info struct { - VendorExtensible - InfoProps -} - -// MarshalJSON marshal this to JSON -func (i Info) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(i.InfoProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(i.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON marshal this from JSON -func (i *Info) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &i.InfoProps); err != nil { - return err - } - return json.Unmarshal(data, &i.VendorExtensible) -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go b/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go deleted file mode 100644 index b75aefe16..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/swag" -) - -const ( - jsonRef = "$ref" -) - -// SimpleSchema describe swagger simple schemas for parameters and headers -type SimpleSchema struct { - Type string `json:"type,omitempty"` - Nullable bool `json:"nullable,omitempty"` - Format string `json:"format,omitempty"` - Items *Items `json:"items,omitempty"` - CollectionFormat string `json:"collectionFormat,omitempty"` - Default interface{} `json:"default,omitempty"` - Example interface{} `json:"example,omitempty"` -} - -// CommonValidations describe common JSON-schema validations -type CommonValidations struct { - Maximum *float64 `json:"maximum,omitempty"` - ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` - Minimum *float64 `json:"minimum,omitempty"` - ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` - MaxLength *int64 `json:"maxLength,omitempty"` - MinLength *int64 `json:"minLength,omitempty"` - Pattern string `json:"pattern,omitempty"` - MaxItems *int64 `json:"maxItems,omitempty"` - MinItems *int64 `json:"minItems,omitempty"` - UniqueItems bool `json:"uniqueItems,omitempty"` - MultipleOf *float64 `json:"multipleOf,omitempty"` - Enum []interface{} `json:"enum,omitempty"` -} - -// Items a limited subset of JSON-Schema's items object. -// It is used by parameter definitions that are not located in "body". 
-// -// For more information: http://goo.gl/8us55a#items-object -type Items struct { - Refable - CommonValidations - SimpleSchema - VendorExtensible -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (i *Items) UnmarshalJSON(data []byte) error { - var validations CommonValidations - if err := json.Unmarshal(data, &validations); err != nil { - return err - } - var ref Refable - if err := json.Unmarshal(data, &ref); err != nil { - return err - } - var simpleSchema SimpleSchema - if err := json.Unmarshal(data, &simpleSchema); err != nil { - return err - } - var vendorExtensible VendorExtensible - if err := json.Unmarshal(data, &vendorExtensible); err != nil { - return err - } - i.Refable = ref - i.CommonValidations = validations - i.SimpleSchema = simpleSchema - i.VendorExtensible = vendorExtensible - return nil -} - -// MarshalJSON converts this items object to JSON -func (i Items) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(i.CommonValidations) - if err != nil { - return nil, err - } - b2, err := json.Marshal(i.SimpleSchema) - if err != nil { - return nil, err - } - b3, err := json.Marshal(i.Refable) - if err != nil { - return nil, err - } - b4, err := json.Marshal(i.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b4, b3, b1, b2), nil -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/license.go b/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/license.go deleted file mode 100644 index f20961b4f..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/license.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// License information for the exposed API. -// -// For more information: http://goo.gl/8us55a#licenseObject -type License struct { - Name string `json:"name,omitempty"` - URL string `json:"url,omitempty"` -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go b/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go deleted file mode 100644 index c7acd8672..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
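
For reference, a rough sketch of how the Items type just shown flattens its embedded pieces into one JSON object. It assumes the k8s.io/kube-openapi/pkg/validation/spec package being removed from this vendor tree is still resolvable as an ordinary module dependency; the field values are made up.

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
	// Items merges CommonValidations, SimpleSchema, Refable and
	// VendorExtensible into a single JSON object via swag.ConcatJSON.
	maxLen := int64(64)
	items := spec.Items{
		SimpleSchema:      spec.SimpleSchema{Type: "string", Format: "date-time"},
		CommonValidations: spec.CommonValidations{MaxLength: &maxLen},
	}
	b, err := json.Marshal(items)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // e.g. {"maxLength":64,"type":"string","format":"date-time"}
}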
- -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/swag" -) - -// OperationProps describes an operation -// -// NOTES: -// - schemes, when present must be from [http, https, ws, wss]: see validate -// - Security is handled as a special case: see MarshalJSON function -type OperationProps struct { - Description string `json:"description,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Produces []string `json:"produces,omitempty"` - Schemes []string `json:"schemes,omitempty"` - Tags []string `json:"tags,omitempty"` - Summary string `json:"summary,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` - ID string `json:"operationId,omitempty"` - Deprecated bool `json:"deprecated,omitempty"` - Security []map[string][]string `json:"security,omitempty"` - Parameters []Parameter `json:"parameters,omitempty"` - Responses *Responses `json:"responses,omitempty"` -} - -// MarshalJSON takes care of serializing operation properties to JSON -// -// We use a custom marhaller here to handle a special cases related to -// the Security field. We need to preserve zero length slice -// while omitting the field when the value is nil/unset. -func (op OperationProps) MarshalJSON() ([]byte, error) { - type Alias OperationProps - if op.Security == nil { - return json.Marshal(&struct { - Security []map[string][]string `json:"security,omitempty"` - *Alias - }{ - Security: op.Security, - Alias: (*Alias)(&op), - }) - } - return json.Marshal(&struct { - Security []map[string][]string `json:"security"` - *Alias - }{ - Security: op.Security, - Alias: (*Alias)(&op), - }) -} - -// Operation describes a single API operation on a path. -// -// For more information: http://goo.gl/8us55a#operationObject -type Operation struct { - VendorExtensible - OperationProps -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (o *Operation) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &o.OperationProps); err != nil { - return err - } - return json.Unmarshal(data, &o.VendorExtensible) -} - -// MarshalJSON converts this items object to JSON -func (o Operation) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(o.OperationProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(o.VendorExtensible) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b1, b2) - return concated, nil -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go b/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go deleted file mode 100644 index 218513974..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
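
The custom marshaller on OperationProps exists to keep an explicitly empty security list (meaning "no auth required") distinct from an unset one. A small sketch of that difference, under the same assumption that the spec package shown here is importable; the operation id is hypothetical.

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
	// An empty, non-nil Security slice survives as "security":[] ...
	open := spec.Operation{OperationProps: spec.OperationProps{
		ID:       "listReferrers", // hypothetical operation id
		Security: []map[string][]string{},
	}}
	// ... while a nil Security slice is omitted from the output entirely.
	unset := spec.Operation{OperationProps: spec.OperationProps{ID: "listReferrers"}}

	a, _ := json.Marshal(open)
	b, _ := json.Marshal(unset)
	fmt.Println(string(a)) // contains "security":[]
	fmt.Println(string(b)) // has no "security" key
}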
- -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/swag" -) - -// ParamProps describes the specific attributes of an operation parameter -// -// NOTE: -// - Schema is defined when "in" == "body": see validate -// - AllowEmptyValue is allowed where "in" == "query" || "formData" -type ParamProps struct { - Description string `json:"description,omitempty"` - Name string `json:"name,omitempty"` - In string `json:"in,omitempty"` - Required bool `json:"required,omitempty"` - Schema *Schema `json:"schema,omitempty"` - AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` -} - -// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). -// -// There are five possible parameter types. -// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part -// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, -// the path parameter is `itemId`. -// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. -// * Header - Custom headers that are expected as part of the request. -// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be -// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for -// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist -// together for the same operation. -// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or -// `multipart/form-data` are used as the content type of the request (in Swagger's definition, -// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used -// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be -// declared together with a body parameter for the same operation. Form parameters have a different format based on -// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). -// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. -// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple -// parameters that are being transferred. -// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. -// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is -// `submit-name`. This type of form parameters is more commonly used for file transfers. 
-// -// For more information: http://goo.gl/8us55a#parameterObject -type Parameter struct { - Refable - CommonValidations - SimpleSchema - VendorExtensible - ParamProps -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (p *Parameter) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &p.CommonValidations); err != nil { - return err - } - if err := json.Unmarshal(data, &p.Refable); err != nil { - return err - } - if err := json.Unmarshal(data, &p.SimpleSchema); err != nil { - return err - } - if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { - return err - } - return json.Unmarshal(data, &p.ParamProps) -} - -// MarshalJSON converts this items object to JSON -func (p Parameter) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(p.CommonValidations) - if err != nil { - return nil, err - } - b2, err := json.Marshal(p.SimpleSchema) - if err != nil { - return nil, err - } - b3, err := json.Marshal(p.Refable) - if err != nil { - return nil, err - } - b4, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - b5, err := json.Marshal(p.ParamProps) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b3, b1, b2, b4, b5), nil -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go b/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go deleted file mode 100644 index 04de58f00..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/swag" -) - -// PathItemProps the path item specific properties -type PathItemProps struct { - Get *Operation `json:"get,omitempty"` - Put *Operation `json:"put,omitempty"` - Post *Operation `json:"post,omitempty"` - Delete *Operation `json:"delete,omitempty"` - Options *Operation `json:"options,omitempty"` - Head *Operation `json:"head,omitempty"` - Patch *Operation `json:"patch,omitempty"` - Parameters []Parameter `json:"parameters,omitempty"` -} - -// PathItem describes the operations available on a single path. -// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). -// The path itself is still exposed to the documentation viewer but they will -// not know which operations and parameters are available. 
-// -// For more information: http://goo.gl/8us55a#pathItemObject -type PathItem struct { - Refable - VendorExtensible - PathItemProps -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (p *PathItem) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &p.Refable); err != nil { - return err - } - if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { - return err - } - return json.Unmarshal(data, &p.PathItemProps) -} - -// MarshalJSON converts this items object to JSON -func (p PathItem) MarshalJSON() ([]byte, error) { - b3, err := json.Marshal(p.Refable) - if err != nil { - return nil, err - } - b4, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - b5, err := json.Marshal(p.PathItemProps) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b3, b4, b5) - return concated, nil -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go b/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go deleted file mode 100644 index 319aba879..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "strings" - - "github.com/go-openapi/swag" -) - -// Paths holds the relative paths to the individual endpoints. -// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order -// to construct the full URL. -// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). 
-// -// For more information: http://goo.gl/8us55a#pathsObject -type Paths struct { - VendorExtensible - Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/" -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (p *Paths) UnmarshalJSON(data []byte) error { - var res map[string]json.RawMessage - if err := json.Unmarshal(data, &res); err != nil { - return err - } - for k, v := range res { - if strings.HasPrefix(strings.ToLower(k), "x-") { - if p.Extensions == nil { - p.Extensions = make(map[string]interface{}) - } - var d interface{} - if err := json.Unmarshal(v, &d); err != nil { - return err - } - p.Extensions[k] = d - } - if strings.HasPrefix(k, "/") { - if p.Paths == nil { - p.Paths = make(map[string]PathItem) - } - var pi PathItem - if err := json.Unmarshal(v, &pi); err != nil { - return err - } - p.Paths[k] = pi - } - } - return nil -} - -// MarshalJSON converts this items object to JSON -func (p Paths) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - - pths := make(map[string]PathItem) - for k, v := range p.Paths { - if strings.HasPrefix(k, "/") { - pths[k] = v - } - } - b2, err := json.Marshal(pths) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b1, b2) - return concated, nil -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go b/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go deleted file mode 100644 index 1405bfd8e..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
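
The Paths (un)marshallers above only keep map keys that start with "/" and collect "x-" keys as vendor extensions; anything else is silently dropped. A minimal sketch, again assuming the spec package remains importable; the path, extension key, and values are made up.

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
	// "/healthz" becomes a PathItem, "x-owner" becomes a vendor extension,
	// and "ignored" is dropped by the custom unmarshaller.
	raw := []byte(`{"/healthz":{"get":{"operationId":"health"}},"x-owner":"team-a","ignored":true}`)
	var p spec.Paths
	if err := json.Unmarshal(raw, &p); err != nil {
		panic(err)
	}
	fmt.Println(len(p.Paths), p.Extensions["x-owner"]) // 1 team-a
}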
- -package spec - -import ( - "encoding/json" - "net/http" - "os" - "path/filepath" - - "github.com/go-openapi/jsonreference" -) - -// Refable is a struct for things that accept a $ref property -type Refable struct { - Ref Ref -} - -// MarshalJSON marshals the ref to json -func (r Refable) MarshalJSON() ([]byte, error) { - return r.Ref.MarshalJSON() -} - -// UnmarshalJSON unmarshalss the ref from json -func (r *Refable) UnmarshalJSON(d []byte) error { - return json.Unmarshal(d, &r.Ref) -} - -// Ref represents a json reference that is potentially resolved -type Ref struct { - jsonreference.Ref -} - -// RemoteURI gets the remote uri part of the ref -func (r *Ref) RemoteURI() string { - if r.String() == "" { - return r.String() - } - - u := *r.GetURL() - u.Fragment = "" - return u.String() -} - -// IsValidURI returns true when the url the ref points to can be found -func (r *Ref) IsValidURI(basepaths ...string) bool { - if r.String() == "" { - return true - } - - v := r.RemoteURI() - if v == "" { - return true - } - - if r.HasFullURL { - rr, err := http.Get(v) - if err != nil { - return false - } - - return rr.StatusCode/100 == 2 - } - - if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) { - return false - } - - // check for local file - pth := v - if r.HasURLPathOnly { - base := "." - if len(basepaths) > 0 { - base = filepath.Dir(filepath.Join(basepaths...)) - } - p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth))) - if e != nil { - return false - } - pth = p - } - - fi, err := os.Stat(filepath.ToSlash(pth)) - if err != nil { - return false - } - - return !fi.IsDir() -} - -// Inherits creates a new reference from a parent and a child -// If the child cannot inherit from the parent, an error is returned -func (r *Ref) Inherits(child Ref) (*Ref, error) { - ref, err := r.Ref.Inherits(child.Ref) - if err != nil { - return nil, err - } - return &Ref{Ref: *ref}, nil -} - -// NewRef creates a new instance of a ref object -// returns an error when the reference uri is an invalid uri -func NewRef(refURI string) (Ref, error) { - ref, err := jsonreference.New(refURI) - if err != nil { - return Ref{}, err - } - return Ref{Ref: ref}, nil -} - -// MustCreateRef creates a ref object but panics when refURI is invalid. -// Use the NewRef method for a version that returns an error. 
-func MustCreateRef(refURI string) Ref { - return Ref{Ref: jsonreference.MustCreateRef(refURI)} -} - -// MarshalJSON marshals this ref into a JSON object -func (r Ref) MarshalJSON() ([]byte, error) { - str := r.String() - if str == "" { - if r.IsRoot() { - return []byte(`{"$ref":""}`), nil - } - return []byte("{}"), nil - } - v := map[string]interface{}{"$ref": str} - return json.Marshal(v) -} - -// UnmarshalJSON unmarshals this ref from a JSON object -func (r *Ref) UnmarshalJSON(d []byte) error { - var v map[string]interface{} - if err := json.Unmarshal(d, &v); err != nil { - return err - } - return r.fromMap(v) -} - -func (r *Ref) fromMap(v map[string]interface{}) error { - if v == nil { - return nil - } - - if vv, ok := v["$ref"]; ok { - if str, ok := vv.(string); ok { - ref, err := jsonreference.New(str) - if err != nil { - return err - } - *r = Ref{Ref: ref} - } - } - - return nil -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go b/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go deleted file mode 100644 index 9fd717ec3..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/swag" -) - -// ResponseProps properties specific to a response -type ResponseProps struct { - Description string `json:"description,omitempty"` - Schema *Schema `json:"schema,omitempty"` - Headers map[string]Header `json:"headers,omitempty"` - Examples map[string]interface{} `json:"examples,omitempty"` -} - -// Response describes a single response from an API Operation. 
-// -// For more information: http://goo.gl/8us55a#responseObject -type Response struct { - Refable - ResponseProps - VendorExtensible -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (r *Response) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &r.ResponseProps); err != nil { - return err - } - if err := json.Unmarshal(data, &r.Refable); err != nil { - return err - } - return json.Unmarshal(data, &r.VendorExtensible) -} - -// MarshalJSON converts this items object to JSON -func (r Response) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(r.ResponseProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(r.Refable) - if err != nil { - return nil, err - } - b3, err := json.Marshal(r.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2, b3), nil -} - -// NewResponse creates a new response instance -func NewResponse() *Response { - return new(Response) -} - -// ResponseRef creates a response as a json reference -func ResponseRef(url string) *Response { - resp := NewResponse() - resp.Ref = MustCreateRef(url) - return resp -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go b/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go deleted file mode 100644 index b2c3883a9..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "reflect" - "strconv" - - "github.com/go-openapi/swag" -) - -// Responses is a container for the expected responses of an operation. -// The container maps a HTTP response code to the expected response. -// It is not expected from the documentation to necessarily cover all possible HTTP response codes, -// since they may not be known in advance. However, it is expected from the documentation to cover -// a successful operation response and any known errors. -// -// The `default` can be used a default response object for all HTTP codes that are not covered -// individually by the specification. -// -// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response -// for a successful operation call. 
-// -// For more information: http://goo.gl/8us55a#responsesObject -type Responses struct { - VendorExtensible - ResponsesProps -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (r *Responses) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { - return err - } - if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { - return err - } - if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) { - r.ResponsesProps = ResponsesProps{} - } - return nil -} - -// MarshalJSON converts this items object to JSON -func (r Responses) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(r.ResponsesProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(r.VendorExtensible) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b1, b2) - return concated, nil -} - -// ResponsesProps describes all responses for an operation. -// It tells what is the default response and maps all responses with a -// HTTP status code. -type ResponsesProps struct { - Default *Response - StatusCodeResponses map[int]Response -} - -// MarshalJSON marshals responses as JSON -func (r ResponsesProps) MarshalJSON() ([]byte, error) { - toser := map[string]Response{} - if r.Default != nil { - toser["default"] = *r.Default - } - for k, v := range r.StatusCodeResponses { - toser[strconv.Itoa(k)] = v - } - return json.Marshal(toser) -} - -// UnmarshalJSON unmarshals responses from JSON -func (r *ResponsesProps) UnmarshalJSON(data []byte) error { - var res map[string]Response - if err := json.Unmarshal(data, &res); err != nil { - return nil - } - if v, ok := res["default"]; ok { - r.Default = &v - delete(res, "default") - } - for k, v := range res { - if nk, err := strconv.Atoi(k); err == nil { - if r.StatusCodeResponses == nil { - r.StatusCodeResponses = map[int]Response{} - } - r.StatusCodeResponses[nk] = v - } - } - return nil -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go b/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go deleted file mode 100644 index b0aeeb0d0..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go +++ /dev/null @@ -1,513 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
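
A short sketch of how the Responses container just shown serializes its default entry alongside numeric status codes, with the same caveat about the import path; the "$ref" target is made up.

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
	// The default response is emitted under "default"; every entry of
	// StatusCodeResponses is emitted under its code rendered as a string key.
	ok := spec.Response{ResponseProps: spec.ResponseProps{Description: "OK"}}
	r := spec.Responses{
		ResponsesProps: spec.ResponsesProps{
			Default:             spec.ResponseRef("#/responses/genericError"),
			StatusCodeResponses: map[int]spec.Response{200: ok},
		},
	}
	b, _ := json.Marshal(r)
	fmt.Println(string(b)) // {"200":{"description":"OK"},"default":{"$ref":"#/responses/genericError"}}
}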
- -package spec - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "github.com/go-openapi/swag" -) - -// BooleanProperty creates a boolean property -func BooleanProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}} -} - -// BoolProperty creates a boolean property -func BoolProperty() *Schema { return BooleanProperty() } - -// StringProperty creates a string property -func StringProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} -} - -// CharProperty creates a string property -func CharProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} -} - -// Float64Property creates a float64/double property -func Float64Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}} -} - -// Float32Property creates a float32/float property -func Float32Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}} -} - -// Int8Property creates an int8 property -func Int8Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}} -} - -// Int16Property creates an int16 property -func Int16Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}} -} - -// Int32Property creates an int32 property -func Int32Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}} -} - -// Int64Property creates an int64 property -func Int64Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}} -} - -// StrFmtProperty creates a property for the named string format -func StrFmtProperty(format string) *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}} -} - -// DateProperty creates a date property -func DateProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}} -} - -// DateTimeProperty creates a date time property -func DateTimeProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}} -} - -// MapProperty creates a map property -func MapProperty(property *Schema) *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, - AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} -} - -// RefProperty creates a ref property -func RefProperty(name string) *Schema { - return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} -} - -// RefSchema creates a ref property -func RefSchema(name string) *Schema { - return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} -} - -// ArrayProperty creates an array property -func ArrayProperty(items *Schema) *Schema { - if items == nil { - return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}} - } - return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}} -} - -// ComposedSchema creates a schema with allOf -func ComposedSchema(schemas ...Schema) *Schema { - s := new(Schema) - s.AllOf = schemas - return s -} - -// SchemaURL represents a schema url -type SchemaURL string - -// MarshalJSON marshal this to JSON -func (r SchemaURL) MarshalJSON() ([]byte, error) { - if r == "" { - return []byte("{}"), nil - } - v := map[string]interface{}{"$schema": string(r)} - return json.Marshal(v) -} - -// UnmarshalJSON unmarshal this from JSON -func 
(r *SchemaURL) UnmarshalJSON(data []byte) error { - var v map[string]interface{} - if err := json.Unmarshal(data, &v); err != nil { - return err - } - return r.fromMap(v) -} - -func (r *SchemaURL) fromMap(v map[string]interface{}) error { - if v == nil { - return nil - } - if vv, ok := v["$schema"]; ok { - if str, ok := vv.(string); ok { - u, err := url.Parse(str) - if err != nil { - return err - } - - *r = SchemaURL(u.String()) - } - } - return nil -} - -// SchemaProps describes a JSON schema (draft 4) -type SchemaProps struct { - ID string `json:"id,omitempty"` - Ref Ref `json:"-"` - Schema SchemaURL `json:"-"` - Description string `json:"description,omitempty"` - Type StringOrArray `json:"type,omitempty"` - Nullable bool `json:"nullable,omitempty"` - Format string `json:"format,omitempty"` - Title string `json:"title,omitempty"` - Default interface{} `json:"default,omitempty"` - Maximum *float64 `json:"maximum,omitempty"` - ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` - Minimum *float64 `json:"minimum,omitempty"` - ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` - MaxLength *int64 `json:"maxLength,omitempty"` - MinLength *int64 `json:"minLength,omitempty"` - Pattern string `json:"pattern,omitempty"` - MaxItems *int64 `json:"maxItems,omitempty"` - MinItems *int64 `json:"minItems,omitempty"` - UniqueItems bool `json:"uniqueItems,omitempty"` - MultipleOf *float64 `json:"multipleOf,omitempty"` - Enum []interface{} `json:"enum,omitempty"` - MaxProperties *int64 `json:"maxProperties,omitempty"` - MinProperties *int64 `json:"minProperties,omitempty"` - Required []string `json:"required,omitempty"` - Items *SchemaOrArray `json:"items,omitempty"` - AllOf []Schema `json:"allOf,omitempty"` - OneOf []Schema `json:"oneOf,omitempty"` - AnyOf []Schema `json:"anyOf,omitempty"` - Not *Schema `json:"not,omitempty"` - Properties map[string]Schema `json:"properties,omitempty"` - AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitempty"` - PatternProperties map[string]Schema `json:"patternProperties,omitempty"` - Dependencies Dependencies `json:"dependencies,omitempty"` - AdditionalItems *SchemaOrBool `json:"additionalItems,omitempty"` - Definitions Definitions `json:"definitions,omitempty"` -} - -// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) -type SwaggerSchemaProps struct { - Discriminator string `json:"discriminator,omitempty"` - ReadOnly bool `json:"readOnly,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` - Example interface{} `json:"example,omitempty"` -} - -// Schema the schema object allows the definition of input and output data types. -// These types can be objects, but also primitives and arrays. -// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) -// and uses a predefined subset of it. -// On top of this subset, there are extensions provided by this specification to allow for more complete documentation. 
-// -// For more information: http://goo.gl/8us55a#schemaObject -type Schema struct { - VendorExtensible - SchemaProps - SwaggerSchemaProps - ExtraProps map[string]interface{} `json:"-"` -} - -// WithID sets the id for this schema, allows for chaining -func (s *Schema) WithID(id string) *Schema { - s.ID = id - return s -} - -// WithTitle sets the title for this schema, allows for chaining -func (s *Schema) WithTitle(title string) *Schema { - s.Title = title - return s -} - -// WithDescription sets the description for this schema, allows for chaining -func (s *Schema) WithDescription(description string) *Schema { - s.Description = description - return s -} - -// WithProperties sets the properties for this schema -func (s *Schema) WithProperties(schemas map[string]Schema) *Schema { - s.Properties = schemas - return s -} - -// SetProperty sets a property on this schema -func (s *Schema) SetProperty(name string, schema Schema) *Schema { - if s.Properties == nil { - s.Properties = make(map[string]Schema) - } - s.Properties[name] = schema - return s -} - -// WithAllOf sets the all of property -func (s *Schema) WithAllOf(schemas ...Schema) *Schema { - s.AllOf = schemas - return s -} - -// WithMaxProperties sets the max number of properties an object can have -func (s *Schema) WithMaxProperties(max int64) *Schema { - s.MaxProperties = &max - return s -} - -// WithMinProperties sets the min number of properties an object must have -func (s *Schema) WithMinProperties(min int64) *Schema { - s.MinProperties = &min - return s -} - -// Typed sets the type of this schema for a single value item -func (s *Schema) Typed(tpe, format string) *Schema { - s.Type = []string{tpe} - s.Format = format - return s -} - -// AddType adds a type with potential format to the types for this schema -func (s *Schema) AddType(tpe, format string) *Schema { - s.Type = append(s.Type, tpe) - if format != "" { - s.Format = format - } - return s -} - -// AsNullable flags this schema as nullable. -func (s *Schema) AsNullable() *Schema { - s.Nullable = true - return s -} - -// CollectionOf a fluent builder method for an array parameter -func (s *Schema) CollectionOf(items Schema) *Schema { - s.Type = []string{jsonArray} - s.Items = &SchemaOrArray{Schema: &items} - return s -} - -// WithDefault sets the default value on this parameter -func (s *Schema) WithDefault(defaultValue interface{}) *Schema { - s.Default = defaultValue - return s -} - -// WithRequired flags this parameter as required -func (s *Schema) WithRequired(items ...string) *Schema { - s.Required = items - return s -} - -// AddRequired adds field names to the required properties array -func (s *Schema) AddRequired(items ...string) *Schema { - s.Required = append(s.Required, items...) 
- return s -} - -// WithMaxLength sets a max length value -func (s *Schema) WithMaxLength(max int64) *Schema { - s.MaxLength = &max - return s -} - -// WithMinLength sets a min length value -func (s *Schema) WithMinLength(min int64) *Schema { - s.MinLength = &min - return s -} - -// WithPattern sets a pattern value -func (s *Schema) WithPattern(pattern string) *Schema { - s.Pattern = pattern - return s -} - -// WithMultipleOf sets a multiple of value -func (s *Schema) WithMultipleOf(number float64) *Schema { - s.MultipleOf = &number - return s -} - -// WithMaximum sets a maximum number value -func (s *Schema) WithMaximum(max float64, exclusive bool) *Schema { - s.Maximum = &max - s.ExclusiveMaximum = exclusive - return s -} - -// WithMinimum sets a minimum number value -func (s *Schema) WithMinimum(min float64, exclusive bool) *Schema { - s.Minimum = &min - s.ExclusiveMinimum = exclusive - return s -} - -// WithEnum sets a the enum values (replace) -func (s *Schema) WithEnum(values ...interface{}) *Schema { - s.Enum = append([]interface{}{}, values...) - return s -} - -// WithMaxItems sets the max items -func (s *Schema) WithMaxItems(size int64) *Schema { - s.MaxItems = &size - return s -} - -// WithMinItems sets the min items -func (s *Schema) WithMinItems(size int64) *Schema { - s.MinItems = &size - return s -} - -// UniqueValues dictates that this array can only have unique items -func (s *Schema) UniqueValues() *Schema { - s.UniqueItems = true - return s -} - -// AllowDuplicates this array can have duplicates -func (s *Schema) AllowDuplicates() *Schema { - s.UniqueItems = false - return s -} - -// AddToAllOf adds a schema to the allOf property -func (s *Schema) AddToAllOf(schemas ...Schema) *Schema { - s.AllOf = append(s.AllOf, schemas...) - return s -} - -// WithDiscriminator sets the name of the discriminator field -func (s *Schema) WithDiscriminator(discriminator string) *Schema { - s.Discriminator = discriminator - return s -} - -// AsReadOnly flags this schema as readonly -func (s *Schema) AsReadOnly() *Schema { - s.ReadOnly = true - return s -} - -// AsWritable flags this schema as writeable (not read-only) -func (s *Schema) AsWritable() *Schema { - s.ReadOnly = false - return s -} - -// WithExample sets the example for this schema -func (s *Schema) WithExample(example interface{}) *Schema { - s.Example = example - return s -} - -// WithExternalDocs sets/removes the external docs for/from this schema. -// When you pass empty strings as params the external documents will be removed. -// When you pass non-empty string as one value then those values will be used on the external docs object. -// So when you pass a non-empty description, you should also pass the url and vice versa. 
-func (s *Schema) WithExternalDocs(description, url string) *Schema { - if description == "" && url == "" { - s.ExternalDocs = nil - return s - } - - if s.ExternalDocs == nil { - s.ExternalDocs = &ExternalDocumentation{} - } - s.ExternalDocs.Description = description - s.ExternalDocs.URL = url - return s -} - -// MarshalJSON marshal this to JSON -func (s Schema) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SchemaProps) - if err != nil { - return nil, fmt.Errorf("schema props %v", err) - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, fmt.Errorf("vendor props %v", err) - } - b3, err := s.Ref.MarshalJSON() - if err != nil { - return nil, fmt.Errorf("ref prop %v", err) - } - b4, err := s.Schema.MarshalJSON() - if err != nil { - return nil, fmt.Errorf("schema prop %v", err) - } - b5, err := json.Marshal(s.SwaggerSchemaProps) - if err != nil { - return nil, fmt.Errorf("common validations %v", err) - } - var b6 []byte - if s.ExtraProps != nil { - jj, err := json.Marshal(s.ExtraProps) - if err != nil { - return nil, fmt.Errorf("extra props %v", err) - } - b6 = jj - } - return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil -} - -// UnmarshalJSON marshal this from JSON -func (s *Schema) UnmarshalJSON(data []byte) error { - props := struct { - SchemaProps - SwaggerSchemaProps - }{} - if err := json.Unmarshal(data, &props); err != nil { - return err - } - - sch := Schema{ - SchemaProps: props.SchemaProps, - SwaggerSchemaProps: props.SwaggerSchemaProps, - } - - var d map[string]interface{} - if err := json.Unmarshal(data, &d); err != nil { - return err - } - - _ = sch.Ref.fromMap(d) - _ = sch.Schema.fromMap(d) - - delete(d, "$ref") - delete(d, "$schema") - for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) { - delete(d, pn) - } - - for k, vv := range d { - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-") { - if sch.Extensions == nil { - sch.Extensions = map[string]interface{}{} - } - sch.Extensions[k] = vv - continue - } - if sch.ExtraProps == nil { - sch.ExtraProps = map[string]interface{}{} - } - sch.ExtraProps[k] = vv - } - - *s = sch - - return nil -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go b/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go deleted file mode 100644 index 563b9b95e..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
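
The property constructors and fluent With*/As* helpers above are meant to be chained when building schemas programmatically. A brief sketch, same assumption about the spec package; the description text is made up.

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
	// Build "an array of at least one unique date-time string" by chaining
	// the fluent helpers defined on *Schema.
	s := spec.ArrayProperty(spec.StrFmtProperty("date-time")).
		WithMinItems(1).
		UniqueValues().
		WithDescription("timestamps of recorded events")

	b, _ := json.MarshalIndent(s, "", "  ")
	fmt.Println(string(b))
}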
- -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/swag" -) - -// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section -type SecuritySchemeProps struct { - Description string `json:"description,omitempty"` - Type string `json:"type"` - Name string `json:"name,omitempty"` // api key - In string `json:"in,omitempty"` // api key - Flow string `json:"flow,omitempty"` // oauth2 - AuthorizationURL string `json:"authorizationUrl,omitempty"` // oauth2 - TokenURL string `json:"tokenUrl,omitempty"` // oauth2 - Scopes map[string]string `json:"scopes,omitempty"` // oauth2 -} - -// SecurityScheme allows the definition of a security scheme that can be used by the operations. -// Supported schemes are basic authentication, an API key (either as a header or as a query parameter) -// and OAuth2's common flows (implicit, password, application and access code). -// -// For more information: http://goo.gl/8us55a#securitySchemeObject -type SecurityScheme struct { - VendorExtensible - SecuritySchemeProps -} - -// MarshalJSON marshal this to JSON -func (s SecurityScheme) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SecuritySchemeProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON marshal this from JSON -func (s *SecurityScheme) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { - return err - } - return json.Unmarshal(data, &s.VendorExtensible) -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go b/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go deleted file mode 100644 index be66d1ddd..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go +++ /dev/null @@ -1,286 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" - - "github.com/go-openapi/swag" -) - -// Swagger this is the root document object for the API specification. -// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) -// together into one document. 
-// -// For more information: http://goo.gl/8us55a#swagger-object- -type Swagger struct { - VendorExtensible - SwaggerProps -} - -// MarshalJSON marshals this swagger structure to json -func (s Swagger) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SwaggerProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON unmarshals a swagger spec from json -func (s *Swagger) UnmarshalJSON(data []byte) error { - var sw Swagger - if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { - return err - } - if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil { - return err - } - *s = sw - return nil -} - -// SwaggerProps captures the top-level properties of an Api specification -// -// NOTE: validation rules -// - the scheme, when present must be from [http, https, ws, wss] -// - BasePath must start with a leading "/" -// - Paths is required -type SwaggerProps struct { - ID string `json:"id,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Produces []string `json:"produces,omitempty"` - Schemes []string `json:"schemes,omitempty"` - Swagger string `json:"swagger,omitempty"` - Info *Info `json:"info,omitempty"` - Host string `json:"host,omitempty"` - BasePath string `json:"basePath,omitempty"` - Paths *Paths `json:"paths"` - Definitions Definitions `json:"definitions,omitempty"` - Parameters map[string]Parameter `json:"parameters,omitempty"` - Responses map[string]Response `json:"responses,omitempty"` - SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"` - Security []map[string][]string `json:"security,omitempty"` - Tags []Tag `json:"tags,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` -} - -// Dependencies represent a dependencies property -type Dependencies map[string]SchemaOrStringArray - -// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property -type SchemaOrBool struct { - Allows bool - Schema *Schema -} - -var jsTrue = []byte("true") -var jsFalse = []byte("false") - -// MarshalJSON convert this object to JSON -func (s SchemaOrBool) MarshalJSON() ([]byte, error) { - if s.Schema != nil { - return json.Marshal(s.Schema) - } - - if s.Schema == nil && !s.Allows { - return jsFalse, nil - } - return jsTrue, nil -} - -// UnmarshalJSON converts this bool or schema object from a JSON structure -func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { - var nw SchemaOrBool - if len(data) >= 4 { - if data[0] == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e') - } - *s = nw - return nil -} - -// SchemaOrStringArray represents a schema or a string array -type SchemaOrStringArray struct { - Schema *Schema - Property []string -} - -// MarshalJSON converts this schema object or array into JSON structure -func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { - if len(s.Property) > 0 { - return json.Marshal(s.Property) - } - if s.Schema != nil { - return json.Marshal(s.Schema) - } - return []byte("null"), nil -} - -// UnmarshalJSON converts this schema object or array from a JSON structure -func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { - var first byte - if len(data) > 1 { - first = data[0] - } - var nw SchemaOrStringArray - if first == '{' { 
- var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - if first == '[' { - if err := json.Unmarshal(data, &nw.Property); err != nil { - return err - } - } - *s = nw - return nil -} - -// Definitions contains the models explicitly defined in this spec -// An object to hold data types that can be consumed and produced by operations. -// These data types can be primitives, arrays or models. -// -// For more information: http://goo.gl/8us55a#definitionsObject -type Definitions map[string]Schema - -// SecurityDefinitions a declaration of the security schemes available to be used in the specification. -// This does not enforce the security schemes on the operations and only serves to provide -// the relevant details for each scheme. -// -// For more information: http://goo.gl/8us55a#securityDefinitionsObject -type SecurityDefinitions map[string]*SecurityScheme - -// StringOrArray represents a value that can either be a string -// or an array of strings. Mainly here for serialization purposes -type StringOrArray []string - -// Contains returns true when the value is contained in the slice -func (s StringOrArray) Contains(value string) bool { - for _, str := range s { - if str == value { - return true - } - } - return false -} - -// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string -func (s *StringOrArray) UnmarshalJSON(data []byte) error { - var first byte - if len(data) > 1 { - first = data[0] - } - - if first == '[' { - var parsed []string - if err := json.Unmarshal(data, &parsed); err != nil { - return err - } - *s = StringOrArray(parsed) - return nil - } - - var single interface{} - if err := json.Unmarshal(data, &single); err != nil { - return err - } - if single == nil { - return nil - } - switch v := single.(type) { - case string: - *s = StringOrArray([]string{v}) - return nil - default: - return fmt.Errorf("only string or array is allowed, not %T", single) - } -} - -// MarshalJSON converts this string or array to a JSON array or JSON string -func (s StringOrArray) MarshalJSON() ([]byte, error) { - if len(s) == 1 { - return json.Marshal([]string(s)[0]) - } - return json.Marshal([]string(s)) -} - -// SchemaOrArray represents a value that can either be a Schema -// or an array of Schema. 
Mainly here for serialization purposes -type SchemaOrArray struct { - Schema *Schema - Schemas []Schema -} - -// Len returns the number of schemas in this property -func (s SchemaOrArray) Len() int { - if s.Schema != nil { - return 1 - } - return len(s.Schemas) -} - -// ContainsType returns true when one of the schemas is of the specified type -func (s *SchemaOrArray) ContainsType(name string) bool { - if s.Schema != nil { - return s.Schema.Type != nil && s.Schema.Type.Contains(name) - } - return false -} - -// MarshalJSON converts this schema object or array into JSON structure -func (s SchemaOrArray) MarshalJSON() ([]byte, error) { - if len(s.Schemas) > 0 { - return json.Marshal(s.Schemas) - } - return json.Marshal(s.Schema) -} - -// UnmarshalJSON converts this schema object or array from a JSON structure -func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { - var nw SchemaOrArray - var first byte - if len(data) > 1 { - first = data[0] - } - if first == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - if first == '[' { - if err := json.Unmarshal(data, &nw.Schemas); err != nil { - return err - } - } - *s = nw - return nil -} diff --git a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go b/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go deleted file mode 100644 index ddd1eac7e..000000000 --- a/src/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/swag" -) - -// TagProps describe a tag entry in the top level tags section of a swagger spec -type TagProps struct { - Description string `json:"description,omitempty"` - Name string `json:"name,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` -} - -// Tag allows adding meta data to a single tag that is used by the -// [Operation Object](http://goo.gl/8us55a#operationObject). -// It is not mandatory to have a Tag Object per tag used there. 
-// -// For more information: http://goo.gl/8us55a#tagObject -type Tag struct { - VendorExtensible - TagProps -} - -// MarshalJSON marshal this to JSON -func (t Tag) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(t.TagProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(t.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON marshal this from JSON -func (t *Tag) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &t.TagProps); err != nil { - return err - } - return json.Unmarshal(data, &t.VendorExtensible) -} diff --git a/src/vendor/k8s.io/utils/pointer/OWNERS b/src/vendor/k8s.io/utils/pointer/OWNERS deleted file mode 100644 index 0d6392752..000000000 --- a/src/vendor/k8s.io/utils/pointer/OWNERS +++ /dev/null @@ -1,10 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- apelisse -- stewart-yu -- thockin -reviewers: -- apelisse -- stewart-yu -- thockin diff --git a/src/vendor/k8s.io/utils/pointer/README.md b/src/vendor/k8s.io/utils/pointer/README.md deleted file mode 100644 index 2ca8073dc..000000000 --- a/src/vendor/k8s.io/utils/pointer/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Pointer - -This package provides some functions for pointer-based operations. diff --git a/src/vendor/k8s.io/utils/pointer/pointer.go b/src/vendor/k8s.io/utils/pointer/pointer.go deleted file mode 100644 index e745a81be..000000000 --- a/src/vendor/k8s.io/utils/pointer/pointer.go +++ /dev/null @@ -1,300 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package pointer - -import ( - "fmt" - "reflect" - "time" -) - -// AllPtrFieldsNil tests whether all pointer fields in a struct are nil. This is useful when, -// for example, an API struct is handled by plugins which need to distinguish -// "no plugin accepted this spec" from "this spec is empty". -// -// This function is only valid for structs and pointers to structs. Any other -// type will cause a panic. Passing a typed nil pointer will return true. -func AllPtrFieldsNil(obj interface{}) bool { - v := reflect.ValueOf(obj) - if !v.IsValid() { - panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj)) - } - if v.Kind() == reflect.Ptr { - if v.IsNil() { - return true - } - v = v.Elem() - } - for i := 0; i < v.NumField(); i++ { - if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() { - return false - } - } - return true -} - -// Int returns a pointer to an int -func Int(i int) *int { - return &i -} - -// IntPtr is a function variable referring to Int. -// Deprecated: Use Int instead. -var IntPtr = Int // for back-compat - -// IntDeref dereferences the int ptr and returns it if not nil, or else -// returns def. -func IntDeref(ptr *int, def int) int { - if ptr != nil { - return *ptr - } - return def -} - -// IntPtrDerefOr is a function variable referring to IntDeref. -// Deprecated: Use IntDeref instead. 
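A minimal sketch (hypothetical Widget type, assuming it lives in the same spec package so VendorExtensible and go-openapi's swag.ConcatJSON are in scope) of the pattern shared by the removed Swagger and Tag types above: the typed *Props struct and the vendor-extension map are marshaled separately and the two JSON objects are concatenated.

package spec

import (
	"encoding/json"

	"github.com/go-openapi/swag"
)

// WidgetProps is a hypothetical typed portion of a spec object.
type WidgetProps struct {
	Name string `json:"name,omitempty"`
}

// Widget embeds the typed props plus vendor extensions ("x-..." keys),
// mirroring how Swagger and Tag are composed in the removed files.
type Widget struct {
	VendorExtensible
	WidgetProps
}

// MarshalJSON marshals each half and concatenates the two JSON objects.
func (w Widget) MarshalJSON() ([]byte, error) {
	b1, err := json.Marshal(w.WidgetProps)
	if err != nil {
		return nil, err
	}
	b2, err := json.Marshal(w.VendorExtensible)
	if err != nil {
		return nil, err
	}
	return swag.ConcatJSON(b1, b2), nil
}

// UnmarshalJSON lets each half pick the keys it understands from the same document.
func (w *Widget) UnmarshalJSON(data []byte) error {
	if err := json.Unmarshal(data, &w.WidgetProps); err != nil {
		return err
	}
	return json.Unmarshal(data, &w.VendorExtensible)
}

Both halves read the same bytes: known keys fill the props, while "x-..." keys are collected by VendorExtensible.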
-var IntPtrDerefOr = IntDeref // for back-compat - -// Int32 returns a pointer to an int32. -func Int32(i int32) *int32 { - return &i -} - -// Int32Ptr is a function variable referring to Int32. -// Deprecated: Use Int32 instead. -var Int32Ptr = Int32 // for back-compat - -// Int32Deref dereferences the int32 ptr and returns it if not nil, or else -// returns def. -func Int32Deref(ptr *int32, def int32) int32 { - if ptr != nil { - return *ptr - } - return def -} - -// Int32PtrDerefOr is a function variable referring to Int32Deref. -// Deprecated: Use Int32Deref instead. -var Int32PtrDerefOr = Int32Deref // for back-compat - -// Int32Equal returns true if both arguments are nil or both arguments -// dereference to the same value. -func Int32Equal(a, b *int32) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} - -// Int64 returns a pointer to an int64. -func Int64(i int64) *int64 { - return &i -} - -// Int64Ptr is a function variable referring to Int64. -// Deprecated: Use Int64 instead. -var Int64Ptr = Int64 // for back-compat - -// Int64Deref dereferences the int64 ptr and returns it if not nil, or else -// returns def. -func Int64Deref(ptr *int64, def int64) int64 { - if ptr != nil { - return *ptr - } - return def -} - -// Int64PtrDerefOr is a function variable referring to Int64Deref. -// Deprecated: Use Int64Deref instead. -var Int64PtrDerefOr = Int64Deref // for back-compat - -// Int64Equal returns true if both arguments are nil or both arguments -// dereference to the same value. -func Int64Equal(a, b *int64) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} - -// Bool returns a pointer to a bool. -func Bool(b bool) *bool { - return &b -} - -// BoolPtr is a function variable referring to Bool. -// Deprecated: Use Bool instead. -var BoolPtr = Bool // for back-compat - -// BoolDeref dereferences the bool ptr and returns it if not nil, or else -// returns def. -func BoolDeref(ptr *bool, def bool) bool { - if ptr != nil { - return *ptr - } - return def -} - -// BoolPtrDerefOr is a function variable referring to BoolDeref. -// Deprecated: Use BoolDeref instead. -var BoolPtrDerefOr = BoolDeref // for back-compat - -// BoolEqual returns true if both arguments are nil or both arguments -// dereference to the same value. -func BoolEqual(a, b *bool) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} - -// String returns a pointer to a string. -func String(s string) *string { - return &s -} - -// StringPtr is a function variable referring to String. -// Deprecated: Use String instead. -var StringPtr = String // for back-compat - -// StringDeref dereferences the string ptr and returns it if not nil, or else -// returns def. -func StringDeref(ptr *string, def string) string { - if ptr != nil { - return *ptr - } - return def -} - -// StringPtrDerefOr is a function variable referring to StringDeref. -// Deprecated: Use StringDeref instead. -var StringPtrDerefOr = StringDeref // for back-compat - -// StringEqual returns true if both arguments are nil or both arguments -// dereference to the same value. -func StringEqual(a, b *string) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} - -// Float32 returns a pointer to a float32. -func Float32(i float32) *float32 { - return &i -} - -// Float32Ptr is a function variable referring to Float32. 
-// Deprecated: Use Float32 instead. -var Float32Ptr = Float32 - -// Float32Deref dereferences the float32 ptr and returns it if not nil, or else -// returns def. -func Float32Deref(ptr *float32, def float32) float32 { - if ptr != nil { - return *ptr - } - return def -} - -// Float32PtrDerefOr is a function variable referring to Float32Deref. -// Deprecated: Use Float32Deref instead. -var Float32PtrDerefOr = Float32Deref // for back-compat - -// Float32Equal returns true if both arguments are nil or both arguments -// dereference to the same value. -func Float32Equal(a, b *float32) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} - -// Float64 returns a pointer to a float64. -func Float64(i float64) *float64 { - return &i -} - -// Float64Ptr is a function variable referring to Float64. -// Deprecated: Use Float64 instead. -var Float64Ptr = Float64 - -// Float64Deref dereferences the float64 ptr and returns it if not nil, or else -// returns def. -func Float64Deref(ptr *float64, def float64) float64 { - if ptr != nil { - return *ptr - } - return def -} - -// Float64PtrDerefOr is a function variable referring to Float64Deref. -// Deprecated: Use Float64Deref instead. -var Float64PtrDerefOr = Float64Deref // for back-compat - -// Float64Equal returns true if both arguments are nil or both arguments -// dereference to the same value. -func Float64Equal(a, b *float64) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} - -// Duration returns a pointer to a time.Duration. -func Duration(d time.Duration) *time.Duration { - return &d -} - -// DurationDeref dereferences the time.Duration ptr and returns it if not nil, or else -// returns def. -func DurationDeref(ptr *time.Duration, def time.Duration) time.Duration { - if ptr != nil { - return *ptr - } - return def -} - -// DurationEqual returns true if both arguments are nil or both arguments -// dereference to the same value. 
-func DurationEqual(a, b *time.Duration) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} diff --git a/src/vendor/modules.txt b/src/vendor/modules.txt index df417346b..3517e18a0 100644 --- a/src/vendor/modules.txt +++ b/src/vendor/modules.txt @@ -5,10 +5,6 @@ cloud.google.com/go/compute/metadata ## explicit github.com/Azure/azure-sdk-for-go/storage github.com/Azure/azure-sdk-for-go/version -# github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 -## explicit; go 1.16 -github.com/Azure/go-ansiterm -github.com/Azure/go-ansiterm/winterm # github.com/Azure/go-autorest v14.2.0+incompatible => github.com/Azure/go-autorest v14.2.0+incompatible ## explicit github.com/Azure/go-autorest @@ -45,6 +41,8 @@ github.com/Masterminds/semver # github.com/Masterminds/semver/v3 v3.1.1 ## explicit; go 1.12 github.com/Masterminds/semver/v3 +# github.com/Microsoft/go-winio v0.5.1 +## explicit; go 1.12 # github.com/PuerkitoBio/purell v1.1.1 ## explicit github.com/PuerkitoBio/purell @@ -128,14 +126,11 @@ github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/beego/beego/v2 v2.0.6 ## explicit; go 1.18 github.com/beego/beego/v2 -github.com/beego/beego/v2/client/cache -github.com/beego/beego/v2/client/cache/redis github.com/beego/beego/v2/client/orm github.com/beego/beego/v2/client/orm/clauses github.com/beego/beego/v2/client/orm/clauses/order_clause github.com/beego/beego/v2/client/orm/hints github.com/beego/beego/v2/core/admin -github.com/beego/beego/v2/core/berror github.com/beego/beego/v2/core/config github.com/beego/beego/v2/core/logs github.com/beego/beego/v2/core/utils @@ -179,30 +174,9 @@ github.com/cenkalti/backoff/v4 github.com/cespare/xxhash/v2 # github.com/cloudflare/cfssl v0.0.0-20190510060611-9c027c93ba9e ## explicit -# github.com/containerd/containerd v1.6.6 -## explicit; go 1.17 -github.com/containerd/containerd/archive/compression -github.com/containerd/containerd/content -github.com/containerd/containerd/content/local -github.com/containerd/containerd/errdefs -github.com/containerd/containerd/filters -github.com/containerd/containerd/images -github.com/containerd/containerd/labels -github.com/containerd/containerd/log -github.com/containerd/containerd/platforms -github.com/containerd/containerd/reference -github.com/containerd/containerd/remotes -github.com/containerd/containerd/remotes/docker -github.com/containerd/containerd/remotes/docker/auth -github.com/containerd/containerd/remotes/docker/schema1 -github.com/containerd/containerd/remotes/errors -github.com/containerd/containerd/version # github.com/coreos/go-oidc/v3 v3.0.0 ## explicit; go 1.14 github.com/coreos/go-oidc/v3/oidc -# github.com/cyphar/filepath-securejoin v0.2.3 -## explicit; go 1.13 -github.com/cyphar/filepath-securejoin # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew @@ -219,12 +193,6 @@ github.com/dghubble/sling github.com/dgryski/go-rendezvous # github.com/dnaeon/go-vcr v1.2.0 ## explicit; go 1.15 -# github.com/docker/cli v20.10.17+incompatible -## explicit -github.com/docker/cli/cli/config -github.com/docker/cli/cli/config/configfile -github.com/docker/cli/cli/config/credentials -github.com/docker/cli/cli/config/types # github.com/docker/distribution v2.8.1+incompatible => github.com/distribution/distribution v2.8.1+incompatible ## explicit github.com/docker/distribution @@ -240,16 +208,12 @@ github.com/docker/distribution/manifest/schema2 github.com/docker/distribution/metrics github.com/docker/distribution/reference 
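The k8s.io/utils/pointer helpers removed above all follow one shape: a constructor that returns a pointer to a literal, a nil-safe Deref that falls back to a default, and a nil-aware Equal. A short usage sketch with made-up values:

package main

import (
	"fmt"

	"k8s.io/utils/pointer"
)

func main() {
	// Constructor: take the address of a literal.
	replicas := pointer.Int32(3)

	// Deref: dereference with a fallback when the pointer may be nil.
	var unset *int32
	fmt.Println(pointer.Int32Deref(replicas, 1)) // 3
	fmt.Println(pointer.Int32Deref(unset, 1))    // 1

	// Equal: true only if both are nil or both point at equal values.
	fmt.Println(pointer.Int32Equal(replicas, pointer.Int32(3))) // true
	fmt.Println(pointer.Int32Equal(replicas, unset))            // false
}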
github.com/docker/distribution/registry/api/errcode -github.com/docker/distribution/registry/api/v2 github.com/docker/distribution/registry/auth github.com/docker/distribution/registry/auth/token -github.com/docker/distribution/registry/client -github.com/docker/distribution/registry/client/auth github.com/docker/distribution/registry/client/auth/challenge github.com/docker/distribution/registry/client/transport github.com/docker/distribution/registry/storage github.com/docker/distribution/registry/storage/cache -github.com/docker/distribution/registry/storage/cache/memory github.com/docker/distribution/registry/storage/driver github.com/docker/distribution/registry/storage/driver/azure github.com/docker/distribution/registry/storage/driver/base @@ -266,54 +230,15 @@ github.com/docker/distribution/registry/storage/driver/swift github.com/docker/distribution/testutil github.com/docker/distribution/uuid github.com/docker/distribution/version -# github.com/docker/docker v20.10.17+incompatible -## explicit -github.com/docker/docker/api/types -github.com/docker/docker/api/types/blkiodev -github.com/docker/docker/api/types/container -github.com/docker/docker/api/types/filters -github.com/docker/docker/api/types/mount -github.com/docker/docker/api/types/network -github.com/docker/docker/api/types/registry -github.com/docker/docker/api/types/strslice -github.com/docker/docker/api/types/swarm -github.com/docker/docker/api/types/swarm/runtime -github.com/docker/docker/api/types/versions -github.com/docker/docker/errdefs -github.com/docker/docker/pkg/homedir -github.com/docker/docker/pkg/ioutils -github.com/docker/docker/pkg/jsonmessage -github.com/docker/docker/pkg/longpath -github.com/docker/docker/pkg/stringid -github.com/docker/docker/registry -github.com/docker/docker/rootless -# github.com/docker/docker-credential-helpers v0.6.4 -## explicit; go 1.13 -github.com/docker/docker-credential-helpers/client -github.com/docker/docker-credential-helpers/credentials # github.com/docker/go v0.0.0-20160303222718-d30aec9fd63c ## explicit github.com/docker/go/canonical/json -# github.com/docker/go-connections v0.4.0 -## explicit -github.com/docker/go-connections/nat -github.com/docker/go-connections/tlsconfig # github.com/docker/go-metrics v0.0.1 ## explicit; go 1.11 github.com/docker/go-metrics -# github.com/docker/go-units v0.4.0 -## explicit -github.com/docker/go-units # github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 ## explicit github.com/docker/libtrust -# github.com/emicklei/go-restful/v3 v3.8.0 -## explicit; go 1.13 -github.com/emicklei/go-restful/v3 -github.com/emicklei/go-restful/v3/log -# github.com/evanphx/json-patch v5.6.0+incompatible -## explicit -github.com/evanphx/json-patch # github.com/felixge/httpsnoop v1.0.2 ## explicit; go 1.13 github.com/felixge/httpsnoop @@ -326,9 +251,6 @@ github.com/ghodss/yaml # github.com/go-asn1-ber/asn1-ber v1.5.1 ## explicit; go 1.13 github.com/go-asn1-ber/asn1-ber -# github.com/go-errors/errors v1.0.1 -## explicit -github.com/go-errors/errors # github.com/go-ldap/ldap/v3 v3.2.4 ## explicit; go 1.13 github.com/go-ldap/ldap/v3 @@ -427,18 +349,8 @@ github.com/golang/protobuf/ptypes/wrappers ## explicit github.com/gomodule/redigo/internal github.com/gomodule/redigo/redis -# github.com/google/btree v1.0.1 -## explicit; go 1.12 -github.com/google/btree # github.com/google/certificate-transparency-go v1.0.21 ## explicit -# github.com/google/gnostic v0.5.7-v3refs -## explicit; go 1.12 -github.com/google/gnostic/compiler 
-github.com/google/gnostic/extensions -github.com/google/gnostic/jsonschema -github.com/google/gnostic/openapiv2 -github.com/google/gnostic/openapiv3 # github.com/google/go-querystring v1.0.0 ## explicit github.com/google/go-querystring/query @@ -446,9 +358,6 @@ github.com/google/go-querystring/query ## explicit; go 1.12 github.com/google/gofuzz github.com/google/gofuzz/bytesource -# github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 -## explicit; go 1.13 -github.com/google/shlex # github.com/google/uuid v1.3.0 ## explicit github.com/google/uuid @@ -467,9 +376,6 @@ github.com/gorilla/securecookie # github.com/graph-gophers/dataloader v5.0.0+incompatible ## explicit github.com/graph-gophers/dataloader -# github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 -## explicit -github.com/gregjones/httpcache # github.com/grpc-ecosystem/grpc-gateway v1.16.0 ## explicit; go 1.14 github.com/grpc-ecosystem/grpc-gateway/internal @@ -500,12 +406,6 @@ github.com/hashicorp/hcl/hcl/token github.com/hashicorp/hcl/json/parser github.com/hashicorp/hcl/json/scanner github.com/hashicorp/hcl/json/token -# github.com/imdario/mergo v0.3.12 -## explicit; go 1.13 -github.com/imdario/mergo -# github.com/inconshreveable/mousetrap v1.0.0 -## explicit -github.com/inconshreveable/mousetrap # github.com/jackc/chunkreader/v2 v2.0.1 ## explicit; go 1.12 github.com/jackc/chunkreader/v2 @@ -551,17 +451,6 @@ github.com/jpillora/backoff # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/klauspost/compress v1.13.6 -## explicit; go 1.15 -github.com/klauspost/compress -github.com/klauspost/compress/fse -github.com/klauspost/compress/huff0 -github.com/klauspost/compress/internal/snapref -github.com/klauspost/compress/zstd -github.com/klauspost/compress/zstd/internal/xxhash -# github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de -## explicit -github.com/liggitt/tabwriter # github.com/magiconair/properties v1.8.5 ## explicit; go 1.13 github.com/magiconair/properties @@ -579,37 +468,15 @@ github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/miekg/pkcs11 v1.1.1 ## explicit; go 1.12 github.com/miekg/pkcs11 -# github.com/mitchellh/copystructure v1.2.0 -## explicit; go 1.15 -github.com/mitchellh/copystructure # github.com/mitchellh/mapstructure v1.4.3 ## explicit; go 1.14 github.com/mitchellh/mapstructure -# github.com/mitchellh/reflectwalk v1.0.2 -## explicit -github.com/mitchellh/reflectwalk -# github.com/moby/locker v1.0.1 -## explicit; go 1.13 -github.com/moby/locker -# github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 -## explicit; go 1.13 -github.com/moby/term -github.com/moby/term/windows # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd ## explicit github.com/modern-go/concurrent # github.com/modern-go/reflect2 v1.0.2 ## explicit; go 1.12 github.com/modern-go/reflect2 -# github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 -## explicit -github.com/monochromegane/go-gitignore -# github.com/morikuni/aec v1.0.0 -## explicit -github.com/morikuni/aec -# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 -## explicit -github.com/munnerz/goautoneg # github.com/ncw/swift v1.0.49 ## explicit github.com/ncw/swift @@ -622,8 +489,8 @@ github.com/olekukonko/tablewriter # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest -# github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 -## explicit +# 
github.com/opencontainers/image-spec v1.1.0-rc2 +## explicit; go 1.17 github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 # github.com/opentracing/opentracing-go v1.2.0 @@ -633,9 +500,6 @@ github.com/opentracing/opentracing-go/log # github.com/pelletier/go-toml v1.9.3 ## explicit; go 1.12 github.com/pelletier/go-toml -# github.com/peterbourgon/diskv v2.0.1+incompatible -## explicit -github.com/peterbourgon/diskv # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors @@ -682,9 +546,6 @@ github.com/spf13/afero/mem # github.com/spf13/cast v1.4.1 ## explicit github.com/spf13/cast -# github.com/spf13/cobra v1.5.0 -## explicit; go 1.15 -github.com/spf13/cobra # github.com/spf13/jwalterweatherman v1.1.0 ## explicit github.com/spf13/jwalterweatherman @@ -738,18 +599,6 @@ github.com/vmihailenco/msgpack/v5/msgpcode github.com/vmihailenco/tagparser github.com/vmihailenco/tagparser/internal github.com/vmihailenco/tagparser/internal/parser -# github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f -## explicit -github.com/xeipuuv/gojsonpointer -# github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 -## explicit -github.com/xeipuuv/gojsonreference -# github.com/xeipuuv/gojsonschema v1.2.0 -## explicit -github.com/xeipuuv/gojsonschema -# github.com/xlab/treeprint v1.1.0 -## explicit; go 1.13 -github.com/xlab/treeprint # go.mongodb.org/mongo-driver v1.7.0 ## explicit; go 1.10 go.mongodb.org/mongo-driver/bson @@ -825,14 +674,6 @@ go.opentelemetry.io/proto/otlp/collector/trace/v1 go.opentelemetry.io/proto/otlp/common/v1 go.opentelemetry.io/proto/otlp/resource/v1 go.opentelemetry.io/proto/otlp/trace/v1 -# go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 -## explicit; go 1.13 -go.starlark.net/internal/compile -go.starlark.net/internal/spell -go.starlark.net/resolve -go.starlark.net/starlark -go.starlark.net/starlarkstruct -go.starlark.net/syntax # go.uber.org/atomic v1.7.0 ## explicit; go 1.13 go.uber.org/atomic @@ -843,16 +684,8 @@ go.uber.org/ratelimit ## explicit; go 1.17 golang.org/x/crypto/acme golang.org/x/crypto/acme/autocert -golang.org/x/crypto/cast5 golang.org/x/crypto/ed25519 golang.org/x/crypto/md4 -golang.org/x/crypto/openpgp -golang.org/x/crypto/openpgp/armor -golang.org/x/crypto/openpgp/clearsign -golang.org/x/crypto/openpgp/elgamal -golang.org/x/crypto/openpgp/errors -golang.org/x/crypto/openpgp/packet -golang.org/x/crypto/openpgp/s2k golang.org/x/crypto/pbkdf2 golang.org/x/crypto/pkcs12 golang.org/x/crypto/pkcs12/internal/rc2 @@ -876,13 +709,8 @@ golang.org/x/oauth2/google/internal/externalaccount golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 -## explicit -golang.org/x/sync/errgroup -golang.org/x/sync/semaphore # golang.org/x/sys v0.2.0 ## explicit; go 1.17 -golang.org/x/sys/execabs golang.org/x/sys/internal/unsafeheader golang.org/x/sys/plan9 golang.org/x/sys/unix @@ -894,15 +722,10 @@ golang.org/x/term # golang.org/x/text v0.4.0 ## explicit; go 1.17 golang.org/x/text/cases -golang.org/x/text/encoding -golang.org/x/text/encoding/internal -golang.org/x/text/encoding/internal/identifier -golang.org/x/text/encoding/unicode golang.org/x/text/internal golang.org/x/text/internal/language golang.org/x/text/internal/language/compact golang.org/x/text/internal/tag -golang.org/x/text/internal/utf8internal golang.org/x/text/language golang.org/x/text/runes golang.org/x/text/secure/bidirule @@ -1058,25 +881,10 @@ gopkg.in/yaml.v2 
gopkg.in/yaml.v3 # helm.sh/helm/v3 v3.10.3 ## explicit; go 1.18 -helm.sh/helm/v3/cmd/helm/search -helm.sh/helm/v3/internal/fileutil helm.sh/helm/v3/internal/ignore helm.sh/helm/v3/internal/sympath -helm.sh/helm/v3/internal/third_party/dep/fs -helm.sh/helm/v3/internal/tlsutil -helm.sh/helm/v3/internal/urlutil -helm.sh/helm/v3/internal/version helm.sh/helm/v3/pkg/chart helm.sh/helm/v3/pkg/chart/loader -helm.sh/helm/v3/pkg/chartutil -helm.sh/helm/v3/pkg/cli -helm.sh/helm/v3/pkg/getter -helm.sh/helm/v3/pkg/helmpath -helm.sh/helm/v3/pkg/helmpath/xdg -helm.sh/helm/v3/pkg/plugin -helm.sh/helm/v3/pkg/provenance -helm.sh/helm/v3/pkg/registry -helm.sh/helm/v3/pkg/repo # k8s.io/api v0.25.2 ## explicit; go 1.19 k8s.io/api/admissionregistration/v1 @@ -1125,20 +933,12 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.25.2 -## explicit; go 1.19 -k8s.io/apiextensions-apiserver/pkg/apis/apiextensions -k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 -k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 # k8s.io/apimachinery v0.25.2 ## explicit; go 1.19 -k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors -k8s.io/apimachinery/pkg/api/meta k8s.io/apimachinery/pkg/api/resource k8s.io/apimachinery/pkg/apis/meta/v1 k8s.io/apimachinery/pkg/apis/meta/v1/unstructured -k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme k8s.io/apimachinery/pkg/conversion k8s.io/apimachinery/pkg/conversion/queryparams k8s.io/apimachinery/pkg/fields @@ -1153,7 +953,6 @@ k8s.io/apimachinery/pkg/runtime/serializer/streaming k8s.io/apimachinery/pkg/runtime/serializer/versioning k8s.io/apimachinery/pkg/selection k8s.io/apimachinery/pkg/types -k8s.io/apimachinery/pkg/util/duration k8s.io/apimachinery/pkg/util/errors k8s.io/apimachinery/pkg/util/framer k8s.io/apimachinery/pkg/util/intstr @@ -1169,19 +968,9 @@ k8s.io/apimachinery/pkg/util/yaml k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/cli-runtime v0.25.2 -## explicit; go 1.19 -k8s.io/cli-runtime/pkg/genericclioptions -k8s.io/cli-runtime/pkg/printers -k8s.io/cli-runtime/pkg/resource # k8s.io/client-go v0.25.2 ## explicit; go 1.19 -k8s.io/client-go/discovery -k8s.io/client-go/discovery/cached/disk -k8s.io/client-go/dynamic k8s.io/client-go/kubernetes/scheme -k8s.io/client-go/openapi -k8s.io/client-go/openapi/cached k8s.io/client-go/pkg/apis/clientauthentication k8s.io/client-go/pkg/apis/clientauthentication/install k8s.io/client-go/pkg/apis/clientauthentication/v1 @@ -1190,20 +979,12 @@ k8s.io/client-go/pkg/version k8s.io/client-go/plugin/pkg/client/auth/exec k8s.io/client-go/rest k8s.io/client-go/rest/watch -k8s.io/client-go/restmapper -k8s.io/client-go/third_party/forked/golang/template -k8s.io/client-go/tools/auth -k8s.io/client-go/tools/clientcmd k8s.io/client-go/tools/clientcmd/api -k8s.io/client-go/tools/clientcmd/api/latest -k8s.io/client-go/tools/clientcmd/api/v1 k8s.io/client-go/tools/metrics k8s.io/client-go/transport k8s.io/client-go/util/cert k8s.io/client-go/util/connrotation k8s.io/client-go/util/flowcontrol -k8s.io/client-go/util/homedir -k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/workqueue # k8s.io/klog/v2 v2.70.1 @@ -1214,16 +995,6 @@ k8s.io/klog/v2/internal/clock k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity -# k8s.io/kube-openapi 
v0.0.0-20220803162953-67bda5d908f1 -## explicit; go 1.18 -k8s.io/kube-openapi/pkg/builder3/util -k8s.io/kube-openapi/pkg/common -k8s.io/kube-openapi/pkg/handler3 -k8s.io/kube-openapi/pkg/internal/handler -k8s.io/kube-openapi/pkg/openapiconv -k8s.io/kube-openapi/pkg/schemamutation -k8s.io/kube-openapi/pkg/spec3 -k8s.io/kube-openapi/pkg/validation/spec # k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed ## explicit; go 1.12 k8s.io/utils/clock @@ -1231,109 +1002,11 @@ k8s.io/utils/clock/testing k8s.io/utils/integer k8s.io/utils/internal/third_party/forked/golang/net k8s.io/utils/net -k8s.io/utils/pointer k8s.io/utils/strings/slices -# oras.land/oras-go v1.2.0 -## explicit; go 1.17 -oras.land/oras-go/pkg/artifact -oras.land/oras-go/pkg/auth -oras.land/oras-go/pkg/auth/docker -oras.land/oras-go/pkg/content -oras.land/oras-go/pkg/context -oras.land/oras-go/pkg/oras -oras.land/oras-go/pkg/registry -oras.land/oras-go/pkg/registry/remote -oras.land/oras-go/pkg/registry/remote/auth -oras.land/oras-go/pkg/registry/remote/internal/errutil -oras.land/oras-go/pkg/registry/remote/internal/syncutil -oras.land/oras-go/pkg/target # sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 ## explicit; go 1.18 sigs.k8s.io/json sigs.k8s.io/json/internal/golang/encoding/json -# sigs.k8s.io/kustomize/api v0.12.1 -## explicit; go 1.18 -sigs.k8s.io/kustomize/api/filters/annotations -sigs.k8s.io/kustomize/api/filters/fieldspec -sigs.k8s.io/kustomize/api/filters/filtersutil -sigs.k8s.io/kustomize/api/filters/fsslice -sigs.k8s.io/kustomize/api/filters/iampolicygenerator -sigs.k8s.io/kustomize/api/filters/imagetag -sigs.k8s.io/kustomize/api/filters/labels -sigs.k8s.io/kustomize/api/filters/nameref -sigs.k8s.io/kustomize/api/filters/namespace -sigs.k8s.io/kustomize/api/filters/patchjson6902 -sigs.k8s.io/kustomize/api/filters/patchstrategicmerge -sigs.k8s.io/kustomize/api/filters/prefix -sigs.k8s.io/kustomize/api/filters/refvar -sigs.k8s.io/kustomize/api/filters/replacement -sigs.k8s.io/kustomize/api/filters/replicacount -sigs.k8s.io/kustomize/api/filters/suffix -sigs.k8s.io/kustomize/api/filters/valueadd -sigs.k8s.io/kustomize/api/hasher -sigs.k8s.io/kustomize/api/ifc -sigs.k8s.io/kustomize/api/image -sigs.k8s.io/kustomize/api/internal/accumulator -sigs.k8s.io/kustomize/api/internal/builtins -sigs.k8s.io/kustomize/api/internal/generators -sigs.k8s.io/kustomize/api/internal/git -sigs.k8s.io/kustomize/api/internal/kusterr -sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig -sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers -sigs.k8s.io/kustomize/api/internal/plugins/execplugin -sigs.k8s.io/kustomize/api/internal/plugins/fnplugin -sigs.k8s.io/kustomize/api/internal/plugins/loader -sigs.k8s.io/kustomize/api/internal/plugins/utils -sigs.k8s.io/kustomize/api/internal/target -sigs.k8s.io/kustomize/api/internal/utils -sigs.k8s.io/kustomize/api/internal/validate -sigs.k8s.io/kustomize/api/konfig -sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts -sigs.k8s.io/kustomize/api/krusty -sigs.k8s.io/kustomize/api/kv -sigs.k8s.io/kustomize/api/loader -sigs.k8s.io/kustomize/api/provenance -sigs.k8s.io/kustomize/api/provider -sigs.k8s.io/kustomize/api/resmap -sigs.k8s.io/kustomize/api/resource -sigs.k8s.io/kustomize/api/types -# sigs.k8s.io/kustomize/kyaml v0.13.9 -## explicit; go 1.18 -sigs.k8s.io/kustomize/kyaml/comments -sigs.k8s.io/kustomize/kyaml/errors -sigs.k8s.io/kustomize/kyaml/ext -sigs.k8s.io/kustomize/kyaml/fieldmeta -sigs.k8s.io/kustomize/kyaml/filesys -sigs.k8s.io/kustomize/kyaml/fn/runtime/container 
-sigs.k8s.io/kustomize/kyaml/fn/runtime/exec -sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil -sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark -sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml -sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util -sigs.k8s.io/kustomize/kyaml/kio -sigs.k8s.io/kustomize/kyaml/kio/filters -sigs.k8s.io/kustomize/kyaml/kio/kioutil -sigs.k8s.io/kustomize/kyaml/openapi -sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi -sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1212 -sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi -sigs.k8s.io/kustomize/kyaml/order -sigs.k8s.io/kustomize/kyaml/resid -sigs.k8s.io/kustomize/kyaml/runfn -sigs.k8s.io/kustomize/kyaml/sets -sigs.k8s.io/kustomize/kyaml/sliceutil -sigs.k8s.io/kustomize/kyaml/utils -sigs.k8s.io/kustomize/kyaml/yaml -sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels -sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/selection -sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/errors -sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets -sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation -sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/field -sigs.k8s.io/kustomize/kyaml/yaml/merge2 -sigs.k8s.io/kustomize/kyaml/yaml/merge3 -sigs.k8s.io/kustomize/kyaml/yaml/schema -sigs.k8s.io/kustomize/kyaml/yaml/walk # sigs.k8s.io/structured-merge-diff/v4 v4.2.3 ## explicit; go 1.13 sigs.k8s.io/structured-merge-diff/v4/value diff --git a/src/vendor/oras.land/oras-go/LICENSE b/src/vendor/oras.land/oras-go/LICENSE deleted file mode 100644 index a67d16938..000000000 --- a/src/vendor/oras.land/oras-go/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2021 ORAS Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/src/vendor/oras.land/oras-go/pkg/artifact/consts.go b/src/vendor/oras.land/oras-go/pkg/artifact/consts.go deleted file mode 100644 index 929348723..000000000 --- a/src/vendor/oras.land/oras-go/pkg/artifact/consts.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package artifact - -const ( - // UnknownConfigMediaType is the default mediaType used when no - // config media type is specified. 
- UnknownConfigMediaType = "application/vnd.unknown.config.v1+json" -) diff --git a/src/vendor/oras.land/oras-go/pkg/auth/client.go b/src/vendor/oras.land/oras-go/pkg/auth/client.go deleted file mode 100644 index 2c3be4a61..000000000 --- a/src/vendor/oras.land/oras-go/pkg/auth/client.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package auth - -import ( - "context" - "errors" - "net/http" - - "github.com/containerd/containerd/remotes" -) - -// Common errors -var ( - ErrNotLoggedIn = errors.New("not logged in") -) - -// Client provides authentication operations for remotes. -type Client interface { - // Login logs in to a remote server identified by the hostname. - // Deprecated: use LoginWithOpts - Login(ctx context.Context, hostname, username, secret string, insecure bool) error - // LoginWithOpts logs in to a remote server identified by the hostname with custom options - LoginWithOpts(options ...LoginOption) error - // Logout logs out from a remote server identified by the hostname. - Logout(ctx context.Context, hostname string) error - // Resolver returns a new authenticated resolver. - // Deprecated: use ResolverWithOpts - Resolver(ctx context.Context, client *http.Client, plainHTTP bool) (remotes.Resolver, error) - // ResolverWithOpts returns a new authenticated resolver with custom options. - ResolverWithOpts(options ...ResolverOption) (remotes.Resolver, error) -} diff --git a/src/vendor/oras.land/oras-go/pkg/auth/client_opts.go b/src/vendor/oras.land/oras-go/pkg/auth/client_opts.go deleted file mode 100644 index f385b2408..000000000 --- a/src/vendor/oras.land/oras-go/pkg/auth/client_opts.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package auth - -import ( - "context" - "net/http" -) - -type ( - // LoginOption allows specifying various settings on login. - LoginOption func(*LoginSettings) - - // LoginSettings represent all the various settings on login. - LoginSettings struct { - Context context.Context - Hostname string - Username string - Secret string - CertFile string - KeyFile string - CAFile string - Insecure bool - UserAgent string - } -) - -// WithLoginContext returns a function that sets the Context setting on login. -func WithLoginContext(context context.Context) LoginOption { - return func(settings *LoginSettings) { - settings.Context = context - } -} - -// WithLoginHostname returns a function that sets the Hostname setting on login. 
-func WithLoginHostname(hostname string) LoginOption { - return func(settings *LoginSettings) { - settings.Hostname = hostname - } -} - -// WithLoginUsername returns a function that sets the Username setting on login. -func WithLoginUsername(username string) LoginOption { - return func(settings *LoginSettings) { - settings.Username = username - } -} - -// WithLoginSecret returns a function that sets the Secret setting on login. -func WithLoginSecret(secret string) LoginOption { - return func(settings *LoginSettings) { - settings.Secret = secret - } -} - -// WithLoginInsecure returns a function that sets the Insecure setting to true on login. -func WithLoginInsecure() LoginOption { - return func(settings *LoginSettings) { - settings.Insecure = true - } -} - -// WithLoginTLS returns a function that sets the tls settings on login. -func WithLoginTLS(certFile, keyFile, caFile string) LoginOption { - return func(settings *LoginSettings) { - settings.CertFile = certFile - settings.KeyFile = keyFile - settings.CAFile = caFile - } -} - -// WithLoginUserAgent returns a function that sets the UserAgent setting on login. -func WithLoginUserAgent(userAgent string) LoginOption { - return func(settings *LoginSettings) { - settings.UserAgent = userAgent - } -} - -type ( - // ResolverOption allows specifying various settings on the resolver. - ResolverOption func(*ResolverSettings) - - // ResolverSettings represent all the various settings on a resolver. - ResolverSettings struct { - Client *http.Client - PlainHTTP bool - Headers http.Header - } -) - -// WithResolverClient returns a function that sets the Client setting on resolver. -func WithResolverClient(client *http.Client) ResolverOption { - return func(settings *ResolverSettings) { - settings.Client = client - } -} - -// WithResolverPlainHTTP returns a function that sets the PlainHTTP setting to true on resolver. -func WithResolverPlainHTTP() ResolverOption { - return func(settings *ResolverSettings) { - settings.PlainHTTP = true - } -} - -// WithResolverHeaders returns a function that sets the Headers setting on resolver. -func WithResolverHeaders(headers http.Header) ResolverOption { - return func(settings *ResolverSettings) { - settings.Headers = headers - } -} diff --git a/src/vendor/oras.land/oras-go/pkg/auth/docker/client.go b/src/vendor/oras.land/oras-go/pkg/auth/docker/client.go deleted file mode 100644 index b225c89c0..000000000 --- a/src/vendor/oras.land/oras-go/pkg/auth/docker/client.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package docker - -import ( - "os" - - "github.com/docker/cli/cli/config" - "github.com/docker/cli/cli/config/configfile" - "github.com/docker/cli/cli/config/credentials" - "github.com/pkg/errors" - - "oras.land/oras-go/pkg/auth" -) - -// Client provides authentication operations for docker registries. -type Client struct { - configs []*configfile.ConfigFile -} - -// NewClient creates a new auth client based on provided config paths. 
-// If not config path is provided, the default path is used. -// Credentials are read from the first config and fall backs to next. -// All changes will only be written to the first config file. -func NewClient(configPaths ...string) (auth.Client, error) { - if len(configPaths) == 0 { - cfg, err := config.Load(config.Dir()) - if err != nil { - return nil, err - } - if !cfg.ContainsAuth() { - cfg.CredentialsStore = credentials.DetectDefaultStore(cfg.CredentialsStore) - } - - return &Client{ - configs: []*configfile.ConfigFile{cfg}, - }, nil - } - - var configs []*configfile.ConfigFile - for _, path := range configPaths { - cfg, err := loadConfigFile(path) - if err != nil { - return nil, errors.Wrap(err, path) - } - configs = append(configs, cfg) - } - - return &Client{ - configs: configs, - }, nil -} - -// NewClientWithDockerFallback creates a new auth client -// which falls back on Docker's default config path. -// This allows support for ~/.docker/config.json as a fallback, -// as well as support for the DOCKER_CONFIG environment variable. -func NewClientWithDockerFallback(configPaths ...string) (auth.Client, error) { - if len(configPaths) == 0 { - return NewClient() - } - - var configs []*configfile.ConfigFile - for _, path := range configPaths { - cfg, err := loadConfigFile(path) - if err != nil { - return nil, errors.Wrap(err, path) - } - configs = append(configs, cfg) - } - - // Add the Docker default config last - dockerFallbackCfg, err := config.Load(config.Dir()) - if err != nil { - return nil, err - } - if !dockerFallbackCfg.ContainsAuth() { - dockerFallbackCfg.CredentialsStore = credentials.DetectDefaultStore(dockerFallbackCfg.CredentialsStore) - } - configs = append(configs, dockerFallbackCfg) - - return &Client{ - configs: configs, - }, nil -} - -func (c *Client) primaryCredentialsStore(hostname string) credentials.Store { - return c.configs[0].GetCredentialsStore(hostname) -} - -// loadConfigFile reads the configuration files from the given path. -func loadConfigFile(path string) (*configfile.ConfigFile, error) { - cfg := configfile.New(path) - if _, err := os.Stat(path); err == nil { - file, err := os.Open(path) - if err != nil { - return nil, err - } - defer file.Close() - if err := cfg.LoadFromReader(file); err != nil { - return nil, err - } - } else if !os.IsNotExist(err) { - return nil, err - } - if !cfg.ContainsAuth() { - cfg.CredentialsStore = credentials.DetectDefaultStore(cfg.CredentialsStore) - } - return cfg, nil -} diff --git a/src/vendor/oras.land/oras-go/pkg/auth/docker/login.go b/src/vendor/oras.land/oras-go/pkg/auth/docker/login.go deleted file mode 100644 index bd155e530..000000000 --- a/src/vendor/oras.land/oras-go/pkg/auth/docker/login.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package docker - -import ( - "context" - - ctypes "github.com/docker/cli/cli/config/types" - "github.com/docker/docker/api/types" - "github.com/docker/docker/registry" - - iface "oras.land/oras-go/pkg/auth" -) - -const IndexHostname = "index.docker.io" - -// Login logs in to a docker registry identified by the hostname. -// Deprecated: use LoginWithOpts -func (c *Client) Login(ctx context.Context, hostname, username, secret string, insecure bool) error { - settings := &iface.LoginSettings{ - Context: ctx, - Hostname: hostname, - Username: username, - Secret: secret, - Insecure: insecure, - } - return c.login(settings) -} - -// LoginWithOpts logs in to a docker registry identified by the hostname with custom options. -func (c *Client) LoginWithOpts(options ...iface.LoginOption) error { - settings := &iface.LoginSettings{} - for _, option := range options { - option(settings) - } - return c.login(settings) -} - -func (c *Client) login(settings *iface.LoginSettings) error { - hostname := resolveHostname(settings.Hostname) - cred := types.AuthConfig{ - Username: settings.Username, - ServerAddress: hostname, - } - if settings.Username == "" { - cred.IdentityToken = settings.Secret - } else { - cred.Password = settings.Secret - } - - opts := registry.ServiceOptions{} - - if settings.Insecure { - opts.InsecureRegistries = []string{hostname} - } - - // Login to ensure valid credential - remote, err := registry.NewService(opts) - if err != nil { - return err - } - ctx := settings.Context - if ctx == nil { - ctx = context.Background() - } - userAgent := settings.UserAgent - if userAgent == "" { - userAgent = "oras" - } - - var token string - if (settings.CertFile != "" && settings.KeyFile != "") || settings.CAFile != "" { - _, token, err = c.loginWithTLS(ctx, remote, settings.CertFile, settings.KeyFile, settings.CAFile, &cred, userAgent) - } else { - _, token, err = remote.Auth(ctx, &cred, userAgent) - } - - if err != nil { - return err - } - - if token != "" { - cred.Username = "" - cred.Password = "" - cred.IdentityToken = token - } - - // Store credential - return c.primaryCredentialsStore(hostname).Store(ctypes.AuthConfig(cred)) -} diff --git a/src/vendor/oras.land/oras-go/pkg/auth/docker/login_tls.go b/src/vendor/oras.land/oras-go/pkg/auth/docker/login_tls.go deleted file mode 100644 index cb92df818..000000000 --- a/src/vendor/oras.land/oras-go/pkg/auth/docker/login_tls.go +++ /dev/null @@ -1,220 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
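For the login path above, a hedged sketch of calling LoginWithOpts with the functional options from pkg/auth; it assumes the auth.Client interface exposes LoginWithOpts, as the oras-go v1 code removed here does. Hostname and credentials are placeholders:

    package main

    import (
        "log"

        "oras.land/oras-go/pkg/auth"
        "oras.land/oras-go/pkg/auth/docker"
    )

    func main() {
        cli, err := docker.NewClient()
        if err != nil {
            log.Fatal(err)
        }
        // Each WithLogin* option mutates the same LoginSettings value before
        // the login attempt is made against the Docker registry service.
        if err := cli.LoginWithOpts(
            auth.WithLoginHostname("registry.example.com"), // placeholder
            auth.WithLoginUsername("admin"),                // placeholder
            auth.WithLoginSecret("password"),               // placeholder
            auth.WithLoginUserAgent("harbor"),
        ); err != nil {
            log.Fatal(err)
        }
    }
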
-*/ - -package docker - -import ( - "context" - "crypto/tls" - "fmt" - "net" - "net/http" - "net/url" - "strings" - "time" - - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/api/types" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/registry" - "github.com/docker/go-connections/tlsconfig" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// The following functions are adapted from github.com/docker/docker/registry -// We need these to support passing in a transport that has custom TLS configuration -// They are not exposed in the docker/registry package that's why they are copied here - -type loginCredentialStore struct { - authConfig *types.AuthConfig -} - -func (lcs loginCredentialStore) Basic(*url.URL) (string, string) { - return lcs.authConfig.Username, lcs.authConfig.Password -} - -func (lcs loginCredentialStore) RefreshToken(*url.URL, string) string { - return lcs.authConfig.IdentityToken -} - -func (lcs loginCredentialStore) SetRefreshToken(u *url.URL, service, token string) { - lcs.authConfig.IdentityToken = token -} - -// loginWithTLS tries to login to the v2 registry server. -// A custom tls.Config is used to override the default TLS configuration of the different registry endpoints. -// The tls.Config is created using the provided certificate, certificate key and certificate authority. -func (c *Client) loginWithTLS(ctx context.Context, service registry.Service, certFile, keyFile, caFile string, authConfig *types.AuthConfig, userAgent string) (string, string, error) { - tlsConfig, err := tlsconfig.Client(tlsconfig.Options{CAFile: caFile, CertFile: certFile, KeyFile: keyFile}) - if err != nil { - return "", "", err - } - - endpoints, err := c.getEndpoints(authConfig.ServerAddress, service) - if err != nil { - return "", "", err - } - - var status, token string - for _, endpoint := range endpoints { - endpoint.TLSConfig = tlsConfig - status, token, err = loginV2(authConfig, endpoint, userAgent) - - if err != nil { - if isNotAuthorizedError(err) { - return "", "", err - } - - logrus.WithError(err).Infof("Error logging in to endpoint, trying next endpoint") - continue - } - - return status, token, nil - } - - return "", "", err -} - -// getEndpoints returns the endpoints for the given hostname. -func (c *Client) getEndpoints(address string, service registry.Service) ([]registry.APIEndpoint, error) { - var registryHostName = IndexHostname - - if address != "" { - if !strings.HasPrefix(address, "https://") && !strings.HasPrefix(address, "http://") { - address = fmt.Sprintf("https://%s", address) - } - u, err := url.Parse(address) - if err != nil { - return nil, errdefs.InvalidParameter(errors.Wrapf(err, "unable to parse server address")) - } - registryHostName = u.Host - } - - // Lookup endpoints for authentication using "LookupPushEndpoints", which - // excludes mirrors to prevent sending credentials of the upstream registry - // to a mirror. - endpoints, err := service.LookupPushEndpoints(registryHostName) - if err != nil { - return nil, errdefs.InvalidParameter(err) - } - - return endpoints, nil -} - -// loginV2 tries to login to the v2 registry server. The given registry -// endpoint will be pinged to get authorization challenges. These challenges -// will be used to authenticate against the registry to validate credentials. 
-func loginV2(authConfig *types.AuthConfig, endpoint registry.APIEndpoint, userAgent string) (string, string, error) { - var ( - endpointStr = strings.TrimRight(endpoint.URL.String(), "/") + "/v2/" - modifiers = registry.Headers(userAgent, nil) - authTransport = transport.NewTransport(newTransport(endpoint.TLSConfig), modifiers...) - credentialAuthConfig = *authConfig - creds = loginCredentialStore{authConfig: &credentialAuthConfig} - ) - - logrus.Debugf("attempting v2 login to registry endpoint %s", endpointStr) - - loginClient, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil) - if err != nil { - return "", "", err - } - - req, err := http.NewRequest(http.MethodGet, endpointStr, nil) - if err != nil { - return "", "", err - } - - resp, err := loginClient.Do(req) - if err != nil { - err = translateV2AuthError(err) - return "", "", err - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusOK { - return "Login Succeeded", credentialAuthConfig.IdentityToken, nil - } - - // TODO(dmcgowan): Attempt to further interpret result, status code and error code string - return "", "", errors.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode)) -} - -// newTransport returns a new HTTP transport. If tlsConfig is nil, it uses the -// default TLS configuration. -func newTransport(tlsConfig *tls.Config) *http.Transport { - if tlsConfig == nil { - tlsConfig = tlsconfig.ServerDefault() - } - - direct := &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - } - - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: direct.DialContext, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: tlsConfig, - // TODO(dmcgowan): Call close idle connections when complete and use keep alive - DisableKeepAlives: true, - } -} - -func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, error) { - challengeManager, _, err := registry.PingV2Registry(endpoint, authTransport) - if err != nil { - return nil, err - } - - tokenHandlerOptions := auth.TokenHandlerOptions{ - Transport: authTransport, - Credentials: creds, - OfflineAccess: true, - ClientID: registry.AuthClientID, - Scopes: scopes, - } - tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) - basicHandler := auth.NewBasicHandler(creds) - modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) - - return &http.Client{ - Transport: transport.NewTransport(authTransport, modifiers...), - Timeout: 15 * time.Second, - }, nil -} - -func translateV2AuthError(err error) error { - switch e := err.(type) { - case *url.Error: - switch e2 := e.Err.(type) { - case errcode.Error: - switch e2.Code { - case errcode.ErrorCodeUnauthorized: - return errdefs.Unauthorized(err) - } - } - } - - return err -} - -func isNotAuthorizedError(err error) bool { - return strings.Contains(err.Error(), "401 Unauthorized") -} diff --git a/src/vendor/oras.land/oras-go/pkg/auth/docker/logout.go b/src/vendor/oras.land/oras-go/pkg/auth/docker/logout.go deleted file mode 100644 index ada1f2687..000000000 --- a/src/vendor/oras.land/oras-go/pkg/auth/docker/logout.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
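The TLS branch above (loginWithTLS over the looked-up push endpoints) is reached only when certificate files are supplied. A sketch of driving it through the public options, with placeholder file paths and credentials:

    package main

    import (
        "log"

        "oras.land/oras-go/pkg/auth"
        "oras.land/oras-go/pkg/auth/docker"
    )

    func main() {
        cli, err := docker.NewClient()
        if err != nil {
            log.Fatal(err)
        }
        // Supplying cert/key/CA switches (*Client).login onto loginWithTLS,
        // which builds a tls.Config and pings each push endpoint in turn.
        if err := cli.LoginWithOpts(
            auth.WithLoginHostname("registry.example.com"),           // placeholder
            auth.WithLoginUsername("admin"),                          // placeholder
            auth.WithLoginSecret("password"),                         // placeholder
            auth.WithLoginTLS("client.crt", "client.key", "ca.crt"), // placeholder paths
        ); err != nil {
            log.Fatal(err)
        }
    }
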
-You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package docker - -import ( - "context" - - "github.com/docker/cli/cli/config/configfile" - - "oras.land/oras-go/pkg/auth" -) - -// Logout logs out from a docker registry identified by the hostname. -func (c *Client) Logout(_ context.Context, hostname string) error { - hostname = resolveHostname(hostname) - - var configs []*configfile.ConfigFile - for _, config := range c.configs { - if _, ok := config.AuthConfigs[hostname]; ok { - configs = append(configs, config) - } - } - if len(configs) == 0 { - return auth.ErrNotLoggedIn - } - - // Log out form the primary config only as backups are read-only. - return c.primaryCredentialsStore(hostname).Erase(hostname) -} diff --git a/src/vendor/oras.land/oras-go/pkg/auth/docker/resolver.go b/src/vendor/oras.land/oras-go/pkg/auth/docker/resolver.go deleted file mode 100644 index e749c2d2c..000000000 --- a/src/vendor/oras.land/oras-go/pkg/auth/docker/resolver.go +++ /dev/null @@ -1,86 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package docker - -import ( - "context" - "net/http" - - "github.com/containerd/containerd/remotes" - "github.com/containerd/containerd/remotes/docker" - ctypes "github.com/docker/cli/cli/config/types" - "github.com/docker/docker/registry" - - iface "oras.land/oras-go/pkg/auth" -) - -// Resolver returns a new authenticated resolver. -// Deprecated: use ResolverWithOpts -func (c *Client) Resolver(_ context.Context, client *http.Client, plainHTTP bool) (remotes.Resolver, error) { - return docker.NewResolver(docker.ResolverOptions{ - Credentials: c.Credential, - Client: client, - PlainHTTP: plainHTTP, - }), nil -} - -// ResolverWithOpts returns a new authenticated resolver with custom options. -func (c *Client) ResolverWithOpts(options ...iface.ResolverOption) (remotes.Resolver, error) { - settings := &iface.ResolverSettings{} - for _, option := range options { - option(settings) - } - return docker.NewResolver(docker.ResolverOptions{ - Credentials: c.Credential, - Client: settings.Client, - PlainHTTP: settings.PlainHTTP, - Headers: settings.Headers, - }), nil -} - -// Credential returns the login credential of the request host. 
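Resolver construction above follows the same options pattern. A sketch of obtaining an authenticated resolver with a custom HTTP client and extra headers; the header value is illustrative, and ResolverWithOpts is assumed to be on the auth.Client interface as in the deleted oras-go v1 code:

    package main

    import (
        "log"
        "net/http"

        "oras.land/oras-go/pkg/auth"
        "oras.land/oras-go/pkg/auth/docker"
    )

    func main() {
        cli, err := docker.NewClient()
        if err != nil {
            log.Fatal(err)
        }
        headers := http.Header{}
        headers.Set("User-Agent", "harbor") // illustrative header
        resolver, err := cli.ResolverWithOpts(
            auth.WithResolverClient(http.DefaultClient),
            auth.WithResolverHeaders(headers),
        )
        if err != nil {
            log.Fatal(err)
        }
        _ = resolver // usable with containerd remotes helpers (Resolve/Fetch/Push)
    }
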
-func (c *Client) Credential(hostname string) (string, string, error) { - hostname = resolveHostname(hostname) - var ( - auth ctypes.AuthConfig - err error - ) - for _, cfg := range c.configs { - auth, err = cfg.GetAuthConfig(hostname) - if err != nil { - // fall back to next config - continue - } - if auth.IdentityToken != "" { - return "", auth.IdentityToken, nil - } - if auth.Username == "" && auth.Password == "" { - // fall back to next config - continue - } - return auth.Username, auth.Password, nil - } - return "", "", err -} - -// resolveHostname resolves Docker specific hostnames -func resolveHostname(hostname string) string { - switch hostname { - case registry.IndexHostname, registry.IndexName, registry.DefaultV2Registry.Host: - return registry.IndexServer - } - return hostname -} diff --git a/src/vendor/oras.land/oras-go/pkg/content/consts.go b/src/vendor/oras.land/oras-go/pkg/content/consts.go deleted file mode 100644 index ae59fdaa4..000000000 --- a/src/vendor/oras.land/oras-go/pkg/content/consts.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package content - -import ( - "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -const ( - // DefaultBlobMediaType specifies the default blob media type - DefaultBlobMediaType = ocispec.MediaTypeImageLayer - // DefaultBlobDirMediaType specifies the default blob directory media type - DefaultBlobDirMediaType = ocispec.MediaTypeImageLayerGzip -) - -const ( - // TempFilePattern specifies the pattern to create temporary files - TempFilePattern = "oras" -) - -const ( - // AnnotationDigest is the annotation key for the digest of the uncompressed content - AnnotationDigest = "io.deis.oras.content.digest" - // AnnotationUnpack is the annotation key for indication of unpacking - AnnotationUnpack = "io.deis.oras.content.unpack" -) - -const ( - // OCIImageIndexFile is the file name of the index from the OCI Image Layout Specification - // Reference: https://github.com/opencontainers/image-spec/blob/master/image-layout.md#indexjson-file - OCIImageIndexFile = "index.json" -) - -const ( - // DefaultBlocksize default size of each slice of bytes read in each write through in gunzipand untar. - // Simply uses the same size as io.Copy() - DefaultBlocksize = 32768 -) - -const ( - // what you get for a blank digest - BlankHash = digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") -) diff --git a/src/vendor/oras.land/oras-go/pkg/content/decompress.go b/src/vendor/oras.land/oras-go/pkg/content/decompress.go deleted file mode 100644 index ecde9c9a5..000000000 --- a/src/vendor/oras.land/oras-go/pkg/content/decompress.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
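The BlankHash constant in consts.go above is simply the canonical digest of zero bytes; a one-liner confirms it (go-digest's canonical algorithm is SHA-256):

    package main

    import (
        "fmt"

        "github.com/opencontainers/go-digest"
    )

    func main() {
        // Prints sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,
        // matching the BlankHash constant removed above.
        fmt.Println(digest.FromBytes(nil))
    }
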
-You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package content - -import ( - "context" - "errors" - "strings" - - ctrcontent "github.com/containerd/containerd/content" - "github.com/containerd/containerd/remotes" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// Decompress store to decompress content and extract from tar, if needed, wrapping -// another store. By default, a FileStore will simply take each artifact and write it to -// a file, as a MemoryStore will do into memory. If the artifact is gzipped or tarred, -// you might want to store the actual object inside tar or gzip. Wrap your Store -// with Decompress, and it will check the media-type and, if relevant, -// gunzip and/or untar. -// -// For example: -// -// fileStore := NewFileStore(rootPath) -// Decompress := store.NewDecompress(fileStore, WithBlocksize(blocksize)) -// -// The above example works if there is no tar, i.e. each artifact is just a single file, perhaps gzipped, -// or if there is only one file in each tar archive. In other words, when each content.Writer has only one target output stream. -// However, if you have multiple files in each tar archive, each archive of which is an artifact layer, then -// you need a way to select how to handle each file in the tar archive. In other words, when each content.Writer has more than one -// target output stream. In that case, use the following example: -// -// multiStore := NewMultiStore(rootPath) // some store that can handle different filenames -// Decompress := store.NewDecompress(multiStore, WithBlocksize(blocksize), WithMultiWriterIngester()) -// -type Decompress struct { - pusher remotes.Pusher - blocksize int - multiWriterIngester bool -} - -func NewDecompress(pusher remotes.Pusher, opts ...WriterOpt) Decompress { - // we have to reprocess the opts to find the blocksize - var wOpts WriterOpts - for _, opt := range opts { - if err := opt(&wOpts); err != nil { - // TODO: we probably should handle errors here - continue - } - } - - return Decompress{pusher, wOpts.Blocksize, wOpts.MultiWriterIngester} -} - -// Push get a content.Writer -func (d Decompress) Push(ctx context.Context, desc ocispec.Descriptor) (ctrcontent.Writer, error) { - // the logic is straightforward: - // - if there is a desc in the opts, and the mediatype is tar or tar+gzip, then pass the correct decompress writer - // - else, pass the regular writer - var ( - writer ctrcontent.Writer - err error - multiIngester MultiWriterPusher - ok bool - ) - - // check to see if we are supposed to use a MultiWriterIngester - if d.multiWriterIngester { - multiIngester, ok = d.pusher.(MultiWriterPusher) - if !ok { - return nil, errors.New("configured to use multiwriter ingester, but ingester does not implement multiwriter") - } - } - - // figure out if compression and/or archive exists - // before we pass it down, we need to strip anything we are removing here - // and possibly update the digest, since the store indexes things by digest - hasGzip, hasTar, modifiedMediaType := checkCompression(desc.MediaType) - desc.MediaType = modifiedMediaType - // determine if we pass it blocksize, only if positive - writerOpts := []WriterOpt{} - if 
d.blocksize > 0 { - writerOpts = append(writerOpts, WithBlocksize(d.blocksize)) - } - - writer, err = d.pusher.Push(ctx, desc) - if err != nil { - return nil, err - } - - // do we need to wrap with an untar writer? - if hasTar { - // if not multiingester, get a regular writer - if multiIngester == nil { - writer = NewUntarWriter(writer, writerOpts...) - } else { - writers, err := multiIngester.Pushers(ctx, desc) - if err != nil { - return nil, err - } - writer = NewUntarWriterByName(writers, writerOpts...) - } - } - if hasGzip { - if writer == nil { - writer, err = d.pusher.Push(ctx, desc) - if err != nil { - return nil, err - } - } - writer = NewGunzipWriter(writer, writerOpts...) - } - return writer, nil -} - -// checkCompression check if the mediatype uses gzip compression or tar. -// Returns if it has gzip and/or tar, as well as the base media type without -// those suffixes. -func checkCompression(mediaType string) (gzip, tar bool, mt string) { - mt = mediaType - gzipSuffix := "+gzip" - gzipAltSuffix := ".gzip" - tarSuffix := ".tar" - switch { - case strings.HasSuffix(mt, gzipSuffix): - mt = mt[:len(mt)-len(gzipSuffix)] - gzip = true - case strings.HasSuffix(mt, gzipAltSuffix): - mt = mt[:len(mt)-len(gzipAltSuffix)] - gzip = true - } - - if strings.HasSuffix(mt, tarSuffix) { - mt = mt[:len(mt)-len(tarSuffix)] - tar = true - } - return -} diff --git a/src/vendor/oras.land/oras-go/pkg/content/errors.go b/src/vendor/oras.land/oras-go/pkg/content/errors.go deleted file mode 100644 index 72b2e727a..000000000 --- a/src/vendor/oras.land/oras-go/pkg/content/errors.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package content - -import "errors" - -// Common errors -var ( - ErrNotFound = errors.New("not_found") - ErrNoName = errors.New("no_name") - ErrUnsupportedSize = errors.New("unsupported_size") - ErrUnsupportedVersion = errors.New("unsupported_version") - ErrInvalidReference = errors.New("invalid_reference") -) - -// FileStore errors -var ( - ErrPathTraversalDisallowed = errors.New("path_traversal_disallowed") - ErrOverwriteDisallowed = errors.New("overwrite_disallowed") -) diff --git a/src/vendor/oras.land/oras-go/pkg/content/file.go b/src/vendor/oras.land/oras-go/pkg/content/file.go deleted file mode 100644 index 70f46d1ae..000000000 --- a/src/vendor/oras.land/oras-go/pkg/content/file.go +++ /dev/null @@ -1,534 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
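The media-type handling in checkCompression above can be illustrated with a standalone sketch that mirrors (rather than calls) the unexported helper, run against a few representative OCI layer types:

    package main

    import (
        "fmt"
        "strings"
    )

    // stripCompression mirrors the logic of checkCompression in the deleted
    // oras-go code: it reports whether a media type carries gzip and/or tar
    // suffixes and returns the base type with those suffixes removed.
    func stripCompression(mediaType string) (gzip, tar bool, base string) {
        base = mediaType
        switch {
        case strings.HasSuffix(base, "+gzip"):
            base, gzip = strings.TrimSuffix(base, "+gzip"), true
        case strings.HasSuffix(base, ".gzip"):
            base, gzip = strings.TrimSuffix(base, ".gzip"), true
        }
        if strings.HasSuffix(base, ".tar") {
            base, tar = strings.TrimSuffix(base, ".tar"), true
        }
        return
    }

    func main() {
        for _, mt := range []string{
            "application/vnd.oci.image.layer.v1.tar+gzip",
            "application/vnd.oci.image.layer.v1.tar",
            "application/vnd.oci.image.config.v1+json",
        } {
            gz, tar, base := stripCompression(mt)
            fmt.Printf("%s -> gzip=%v tar=%v base=%s\n", mt, gz, tar, base)
        }
    }
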
-*/ - -package content - -import ( - "bytes" - "compress/gzip" - "context" - _ "crypto/sha256" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/remotes" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// File provides content via files from the file system -type File struct { - DisableOverwrite bool - AllowPathTraversalOnWrite bool - - // Reproducible enables stripping times from added files - Reproducible bool - - root string - descriptor *sync.Map // map[digest.Digest]ocispec.Descriptor - pathMap *sync.Map // map[name string](file string) - memoryMap *sync.Map // map[digest.Digest]([]byte) - refMap *sync.Map // map[string]ocispec.Descriptor - tmpFiles *sync.Map - ignoreNoName bool -} - -// NewFile creats a new file target. It represents a single root reference and all of its components. -func NewFile(rootPath string, opts ...WriterOpt) *File { - // we have to process the opts to find if they told us to change defaults - wOpts := DefaultWriterOpts() - for _, opt := range opts { - if err := opt(&wOpts); err != nil { - continue - } - } - return &File{ - root: rootPath, - descriptor: &sync.Map{}, - pathMap: &sync.Map{}, - memoryMap: &sync.Map{}, - refMap: &sync.Map{}, - tmpFiles: &sync.Map{}, - ignoreNoName: wOpts.IgnoreNoName, - } -} - -func (s *File) Resolver() remotes.Resolver { - return s -} - -func (s *File) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) { - desc, ok := s.getRef(ref) - if !ok { - return "", ocispec.Descriptor{}, fmt.Errorf("unknown reference: %s", ref) - } - return ref, desc, nil -} - -func (s *File) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) { - if _, ok := s.refMap.Load(ref); !ok { - return nil, fmt.Errorf("unknown reference: %s", ref) - } - return s, nil -} - -// Fetch get an io.ReadCloser for the specific content -func (s *File) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { - // first see if it is in the in-memory manifest map - manifest, ok := s.getMemory(desc) - if ok { - return ioutil.NopCloser(bytes.NewReader(manifest)), nil - } - desc, ok = s.get(desc) - if !ok { - return nil, ErrNotFound - } - name, ok := ResolveName(desc) - if !ok { - return nil, ErrNoName - } - path := s.ResolvePath(name) - return os.Open(path) -} - -func (s *File) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) { - var tag, hash string - parts := strings.SplitN(ref, "@", 2) - if len(parts) > 0 { - tag = parts[0] - } - if len(parts) > 1 { - hash = parts[1] - } - return &filePusher{ - store: s, - ref: tag, - hash: hash, - }, nil -} - -type filePusher struct { - store *File - ref string - hash string -} - -func (s *filePusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) { - name, ok := ResolveName(desc) - now := time.Now() - if !ok { - // if we were not told to ignore NoName, then return an error - if !s.store.ignoreNoName { - return nil, ErrNoName - } - - // just return a nil writer - we do not want to calculate the hash, so just use - // whatever was passed in the descriptor - return NewIoContentWriter(ioutil.Discard, WithOutputHash(desc.Digest)), nil - } - path, err := s.store.resolveWritePath(name) - if err != nil { - return nil, err - } - file, afterCommit, err := s.store.createWritePath(path, 
desc, name) - if err != nil { - return nil, err - } - - return &fileWriter{ - store: s.store, - file: file, - desc: desc, - digester: digest.Canonical.Digester(), - status: content.Status{ - Ref: name, - Total: desc.Size, - StartedAt: now, - UpdatedAt: now, - }, - afterCommit: afterCommit, - }, nil -} - -// Add adds a file reference from a path, either directory or single file, -// and returns the reference descriptor. -func (s *File) Add(name, mediaType, path string) (ocispec.Descriptor, error) { - if path == "" { - path = name - } - path = s.MapPath(name, path) - - fileInfo, err := os.Stat(path) - if err != nil { - return ocispec.Descriptor{}, err - } - - var desc ocispec.Descriptor - if fileInfo.IsDir() { - desc, err = s.descFromDir(name, mediaType, path) - } else { - desc, err = s.descFromFile(fileInfo, mediaType, path) - } - if err != nil { - return ocispec.Descriptor{}, err - } - if desc.Annotations == nil { - desc.Annotations = make(map[string]string) - } - desc.Annotations[ocispec.AnnotationTitle] = name - - s.set(desc) - return desc, nil -} - -// Load is a lower-level memory-only version of Add. Rather than taking a path, -// generating a descriptor and creating a reference, it takes raw data and a descriptor -// that describes that data and stores it in memory. It will disappear at process -// termination. -// -// It is especially useful for adding ephemeral data, such as config, that must -// exist in order to walk a manifest. -func (s *File) Load(desc ocispec.Descriptor, data []byte) error { - s.memoryMap.Store(desc.Digest, data) - return nil -} - -// Ref gets a reference's descriptor and content -func (s *File) Ref(ref string) (ocispec.Descriptor, []byte, error) { - desc, ok := s.getRef(ref) - if !ok { - return ocispec.Descriptor{}, nil, ErrNotFound - } - // first see if it is in the in-memory manifest map - manifest, ok := s.getMemory(desc) - if !ok { - return ocispec.Descriptor{}, nil, ErrNotFound - } - return desc, manifest, nil -} - -func (s *File) descFromFile(info os.FileInfo, mediaType, path string) (ocispec.Descriptor, error) { - file, err := os.Open(path) - if err != nil { - return ocispec.Descriptor{}, err - } - defer file.Close() - digest, err := digest.FromReader(file) - if err != nil { - return ocispec.Descriptor{}, err - } - - if mediaType == "" { - mediaType = DefaultBlobMediaType - } - return ocispec.Descriptor{ - MediaType: mediaType, - Digest: digest, - Size: info.Size(), - }, nil -} - -func (s *File) descFromDir(name, mediaType, root string) (ocispec.Descriptor, error) { - // generate temp file - file, err := s.tempFile() - if err != nil { - return ocispec.Descriptor{}, err - } - defer file.Close() - s.MapPath(name, file.Name()) - - // compress directory - digester := digest.Canonical.Digester() - zw := gzip.NewWriter(io.MultiWriter(file, digester.Hash())) - defer zw.Close() - tarDigester := digest.Canonical.Digester() - if err := tarDirectory(root, name, io.MultiWriter(zw, tarDigester.Hash()), s.Reproducible); err != nil { - return ocispec.Descriptor{}, err - } - - // flush all - if err := zw.Close(); err != nil { - return ocispec.Descriptor{}, err - } - if err := file.Sync(); err != nil { - return ocispec.Descriptor{}, err - } - - // generate descriptor - if mediaType == "" { - mediaType = DefaultBlobDirMediaType - } - info, err := file.Stat() - if err != nil { - return ocispec.Descriptor{}, err - } - return ocispec.Descriptor{ - MediaType: mediaType, - Digest: digester.Digest(), - Size: info.Size(), - Annotations: map[string]string{ - AnnotationDigest: 
tarDigester.Digest().String(), - AnnotationUnpack: "true", - }, - }, nil -} - -func (s *File) tempFile() (*os.File, error) { - file, err := ioutil.TempFile("", TempFilePattern) - if err != nil { - return nil, err - } - s.tmpFiles.Store(file.Name(), file) - return file, nil -} - -// Close frees up resources used by the file store -func (s *File) Close() error { - var errs []string - s.tmpFiles.Range(func(name, _ interface{}) bool { - if err := os.Remove(name.(string)); err != nil { - errs = append(errs, err.Error()) - } - return true - }) - if len(errs) > 0 { - return errors.New(strings.Join(errs, "; ")) - } - return nil -} - -func (s *File) resolveWritePath(name string) (string, error) { - path := s.ResolvePath(name) - if !s.AllowPathTraversalOnWrite { - base, err := filepath.Abs(s.root) - if err != nil { - return "", err - } - target, err := filepath.Abs(path) - if err != nil { - return "", err - } - rel, err := filepath.Rel(base, target) - if err != nil { - return "", ErrPathTraversalDisallowed - } - rel = filepath.ToSlash(rel) - if strings.HasPrefix(rel, "../") || rel == ".." { - return "", ErrPathTraversalDisallowed - } - } - if s.DisableOverwrite { - if _, err := os.Stat(path); err == nil { - return "", ErrOverwriteDisallowed - } else if !os.IsNotExist(err) { - return "", err - } - } - return path, nil -} - -func (s *File) createWritePath(path string, desc ocispec.Descriptor, prefix string) (*os.File, func() error, error) { - if value, ok := desc.Annotations[AnnotationUnpack]; !ok || value != "true" { - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return nil, nil, err - } - file, err := os.Create(path) - return file, nil, err - } - - if err := os.MkdirAll(path, 0755); err != nil { - return nil, nil, err - } - file, err := s.tempFile() - checksum := desc.Annotations[AnnotationDigest] - afterCommit := func() error { - return extractTarGzip(path, prefix, file.Name(), checksum) - } - return file, afterCommit, err -} - -// MapPath maps name to path -func (s *File) MapPath(name, path string) string { - path = s.resolvePath(path) - s.pathMap.Store(name, path) - return path -} - -// ResolvePath returns the path by name -func (s *File) ResolvePath(name string) string { - if value, ok := s.pathMap.Load(name); ok { - if path, ok := value.(string); ok { - return path - } - } - - // using the name as a fallback solution - return s.resolvePath(name) -} - -func (s *File) resolvePath(path string) string { - if filepath.IsAbs(path) { - return path - } - return filepath.Join(s.root, path) -} - -func (s *File) set(desc ocispec.Descriptor) { - s.descriptor.Store(desc.Digest, desc) -} - -func (s *File) get(desc ocispec.Descriptor) (ocispec.Descriptor, bool) { - value, ok := s.descriptor.Load(desc.Digest) - if !ok { - return ocispec.Descriptor{}, false - } - desc, ok = value.(ocispec.Descriptor) - return desc, ok -} - -func (s *File) getMemory(desc ocispec.Descriptor) ([]byte, bool) { - value, ok := s.memoryMap.Load(desc.Digest) - if !ok { - return nil, false - } - content, ok := value.([]byte) - return content, ok -} - -func (s *File) getRef(ref string) (ocispec.Descriptor, bool) { - value, ok := s.refMap.Load(ref) - if !ok { - return ocispec.Descriptor{}, false - } - desc, ok := value.(ocispec.Descriptor) - return desc, ok -} - -// StoreManifest stores a manifest linked to by the provided ref. The children of the -// manifest, such as layers and config, should already exist in the file store, either -// as files linked via Add(), or via Load(). 
If they do not exist, then a typical -// Fetcher that walks the manifest will hit an unresolved hash. -// -// StoreManifest does *not* validate their presence. -func (s *File) StoreManifest(ref string, desc ocispec.Descriptor, manifest []byte) error { - s.refMap.Store(ref, desc) - s.memoryMap.Store(desc.Digest, manifest) - return nil -} - -type fileWriter struct { - store *File - file *os.File - desc ocispec.Descriptor - digester digest.Digester - status content.Status - afterCommit func() error -} - -func (w *fileWriter) Status() (content.Status, error) { - return w.status, nil -} - -// Digest returns the current digest of the content, up to the current write. -// -// Cannot be called concurrently with `Write`. -func (w *fileWriter) Digest() digest.Digest { - return w.digester.Digest() -} - -// Write p to the transaction. -func (w *fileWriter) Write(p []byte) (n int, err error) { - n, err = w.file.Write(p) - w.digester.Hash().Write(p[:n]) - w.status.Offset += int64(len(p)) - w.status.UpdatedAt = time.Now() - return n, err -} - -func (w *fileWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { - var base content.Info - for _, opt := range opts { - if err := opt(&base); err != nil { - return err - } - } - - if w.file == nil { - return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer") - } - file := w.file - w.file = nil - - if err := file.Sync(); err != nil { - file.Close() - return errors.Wrap(err, "sync failed") - } - - fileInfo, err := file.Stat() - if err != nil { - file.Close() - return errors.Wrap(err, "stat failed") - } - if err := file.Close(); err != nil { - return errors.Wrap(err, "failed to close file") - } - - if size > 0 && size != fileInfo.Size() { - return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", fileInfo.Size(), size) - } - if dgst := w.digester.Digest(); expected != "" && expected != dgst { - return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected) - } - - w.store.set(w.desc) - if w.afterCommit != nil { - return w.afterCommit() - } - return nil -} - -// Close the writer, flushing any unwritten data and leaving the progress in -// tact. -func (w *fileWriter) Close() error { - if w.file == nil { - return nil - } - - w.file.Sync() - err := w.file.Close() - w.file = nil - return err -} - -func (w *fileWriter) Truncate(size int64) error { - if size != 0 { - return ErrUnsupportedSize - } - w.status.Offset = 0 - w.digester.Hash().Reset() - if _, err := w.file.Seek(0, io.SeekStart); err != nil { - return err - } - return w.file.Truncate(0) -} diff --git a/src/vendor/oras.land/oras-go/pkg/content/gunzip.go b/src/vendor/oras.land/oras-go/pkg/content/gunzip.go deleted file mode 100644 index 65c4646f2..000000000 --- a/src/vendor/oras.land/oras-go/pkg/content/gunzip.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
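As a usage note for the File store removed above, a minimal sketch of adding a local file and reading back the generated descriptor; the root directory, file name, and media type are placeholders:

    package main

    import (
        "log"

        ocispec "github.com/opencontainers/image-spec/specs-go/v1"
        "oras.land/oras-go/pkg/content"
    )

    func main() {
        // Assumption: ./artifacts/report.json already exists on disk.
        store := content.NewFile("./artifacts")
        defer store.Close()

        desc, err := store.Add("report.json", "application/json", "")
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("added %s (%d bytes) titled %q",
            desc.Digest, desc.Size, desc.Annotations[ocispec.AnnotationTitle])
    }
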
-*/ - -package content - -import ( - "compress/gzip" - "fmt" - "io" - - "github.com/containerd/containerd/content" -) - -// NewGunzipWriter wrap a writer with a gunzip, so that the stream is gunzipped -// -// By default, it calculates the hash when writing. If the option `skipHash` is true, -// it will skip doing the hash. Skipping the hash is intended to be used only -// if you are confident about the validity of the data being passed to the writer, -// and wish to save on the hashing time. -func NewGunzipWriter(writer content.Writer, opts ...WriterOpt) content.Writer { - // process opts for default - wOpts := DefaultWriterOpts() - for _, opt := range opts { - if err := opt(&wOpts); err != nil { - return nil - } - } - return NewPassthroughWriter(writer, func(r io.Reader, w io.Writer, done chan<- error) { - gr, err := gzip.NewReader(r) - if err != nil { - done <- fmt.Errorf("error creating gzip reader: %v", err) - return - } - // write out the uncompressed data - b := make([]byte, wOpts.Blocksize, wOpts.Blocksize) - for { - var n int - n, err = gr.Read(b) - if err != nil && err != io.EOF { - err = fmt.Errorf("GunzipWriter data read error: %v\n", err) - break - } - l := n - if n > len(b) { - l = len(b) - } - if _, err2 := w.Write(b[:l]); err2 != nil { - err = fmt.Errorf("GunzipWriter: error writing to underlying writer: %v", err2) - break - } - if err == io.EOF { - // clear the error - err = nil - break - } - } - gr.Close() - done <- err - }, opts...) -} diff --git a/src/vendor/oras.land/oras-go/pkg/content/interface.go b/src/vendor/oras.land/oras-go/pkg/content/interface.go deleted file mode 100644 index f5e312cca..000000000 --- a/src/vendor/oras.land/oras-go/pkg/content/interface.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package content - -import ( - "github.com/containerd/containerd/remotes" -) - -// ProvideIngester is the interface that groups the basic Read and Write methods. -type Store interface { - remotes.Pusher - remotes.Fetcher -} diff --git a/src/vendor/oras.land/oras-go/pkg/content/iowriter.go b/src/vendor/oras.land/oras-go/pkg/content/iowriter.go deleted file mode 100644 index e9e2ed8bf..000000000 --- a/src/vendor/oras.land/oras-go/pkg/content/iowriter.go +++ /dev/null @@ -1,112 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
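NewGunzipWriter above decompresses whatever is written to it before handing the bytes to the wrapped writer. A standard-library-only sketch of the same pipe-plus-goroutine plumbing it builds on (this illustrates the idea, not the deleted API itself):

    package main

    import (
        "compress/gzip"
        "io"
        "log"
        "os"
    )

    func main() {
        pr, pw := io.Pipe()
        done := make(chan error, 1)

        // Consumer side: gunzip whatever arrives on the pipe and stream it
        // to the "underlying writer" (stdout here).
        go func() {
            gr, err := gzip.NewReader(pr)
            if err != nil {
                done <- err
                return
            }
            defer gr.Close()
            _, err = io.Copy(os.Stdout, gr)
            done <- err
        }()

        // Producer side: write gzipped data into the pipe.
        gw := gzip.NewWriter(pw)
        if _, err := gw.Write([]byte("hello, gunzip writer\n")); err != nil {
            log.Fatal(err)
        }
        gw.Close()
        pw.Close()

        if err := <-done; err != nil {
            log.Fatal(err)
        }
    }
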
-*/ - -package content - -import ( - "context" - "io" - "io/ioutil" - - "github.com/containerd/containerd/content" - "github.com/opencontainers/go-digest" -) - -// IoContentWriter writer that wraps an io.Writer, so the results can be streamed to -// an open io.Writer. For example, can be used to pull a layer and write it to a file, or device. -type IoContentWriter struct { - writer io.Writer - digester digest.Digester - size int64 - hash *digest.Digest -} - -// NewIoContentWriter create a new IoContentWriter. -// -// By default, it calculates the hash when writing. If the option `skipHash` is true, -// it will skip doing the hash. Skipping the hash is intended to be used only -// if you are confident about the validity of the data being passed to the writer, -// and wish to save on the hashing time. -func NewIoContentWriter(writer io.Writer, opts ...WriterOpt) content.Writer { - w := writer - if w == nil { - w = ioutil.Discard - } - // process opts for default - wOpts := DefaultWriterOpts() - for _, opt := range opts { - if err := opt(&wOpts); err != nil { - return nil - } - } - ioc := &IoContentWriter{ - writer: w, - digester: digest.Canonical.Digester(), - // we take the OutputHash, since the InputHash goes to the passthrough writer, - // which then passes the processed output to us - hash: wOpts.OutputHash, - } - return NewPassthroughWriter(ioc, func(r io.Reader, w io.Writer, done chan<- error) { - // write out the data to the io writer - var ( - err error - ) - // we could use io.Copy, but calling it with the default blocksize is identical to - // io.CopyBuffer. Otherwise, we would need some way to let the user flag "I want to use - // io.Copy", when it should not matter to them - b := make([]byte, wOpts.Blocksize, wOpts.Blocksize) - _, err = io.CopyBuffer(w, r, b) - done <- err - }, opts...) -} - -func (w *IoContentWriter) Write(p []byte) (n int, err error) { - n, err = w.writer.Write(p) - if err != nil { - return 0, err - } - w.size += int64(n) - if w.hash == nil { - w.digester.Hash().Write(p[:n]) - } - return -} - -func (w *IoContentWriter) Close() error { - return nil -} - -// Digest may return empty digest or panics until committed. -func (w *IoContentWriter) Digest() digest.Digest { - return w.digester.Digest() -} - -// Commit commits the blob (but no roll-back is guaranteed on an error). -// size and expected can be zero-value when unknown. -// Commit always closes the writer, even on error. -// ErrAlreadyExists aborts the writer. -func (w *IoContentWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { - return nil -} - -// Status returns the current state of write -func (w *IoContentWriter) Status() (content.Status, error) { - return content.Status{}, nil -} - -// Truncate updates the size of the target blob -func (w *IoContentWriter) Truncate(size int64) error { - return nil -} diff --git a/src/vendor/oras.land/oras-go/pkg/content/manifest.go b/src/vendor/oras.land/oras-go/pkg/content/manifest.go deleted file mode 100644 index 9830920b4..000000000 --- a/src/vendor/oras.land/oras-go/pkg/content/manifest.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
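A hedged sketch of IoContentWriter usage: wrap any io.Writer (here a local file) so it satisfies containerd's content.Writer. Per the doc comment above, size and expected digest may be zero-valued when unknown; the output path is a placeholder, and in real use the bytes would come from a Fetcher:

    package main

    import (
        "context"
        "log"
        "os"

        "oras.land/oras-go/pkg/content"
    )

    func main() {
        f, err := os.Create("layer.bin") // placeholder output path
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        w := content.NewIoContentWriter(f)
        if _, err := w.Write([]byte("example blob contents")); err != nil {
            log.Fatal(err)
        }
        // Zero size and empty digest are accepted when they are not known up front.
        if err := w.Commit(context.Background(), 0, ""); err != nil {
            log.Fatal(err)
        }
    }
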
-You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package content - -import ( - "encoding/json" - "sort" - - "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - artifact "oras.land/oras-go/pkg/artifact" -) - -// GenerateManifest generates a manifest. The manifest will include the provided config, -// and descs as layers. Raw bytes will be returned. -func GenerateManifest(config *ocispec.Descriptor, annotations map[string]string, descs ...ocispec.Descriptor) ([]byte, ocispec.Descriptor, error) { - // Config - either it was set, or we have to set it - if config == nil { - _, configGen, err := GenerateConfig(nil) - if err != nil { - return nil, ocispec.Descriptor{}, err - } - config = &configGen - } - return pack(*config, annotations, descs) -} - -// GenerateConfig generates a blank config with optional annotations. -func GenerateConfig(annotations map[string]string) ([]byte, ocispec.Descriptor, error) { - configBytes := []byte("{}") - dig := digest.FromBytes(configBytes) - config := ocispec.Descriptor{ - MediaType: artifact.UnknownConfigMediaType, - Digest: dig, - Size: int64(len(configBytes)), - Annotations: annotations, - } - return configBytes, config, nil -} - -// GenerateManifestAndConfig generates a config and then a manifest. Raw bytes will be returned. -func GenerateManifestAndConfig(manifestAnnotations map[string]string, configAnnotations map[string]string, descs ...ocispec.Descriptor) (manifest []byte, manifestDesc ocispec.Descriptor, config []byte, configDesc ocispec.Descriptor, err error) { - config, configDesc, err = GenerateConfig(configAnnotations) - if err != nil { - return nil, ocispec.Descriptor{}, nil, ocispec.Descriptor{}, err - } - manifest, manifestDesc, err = GenerateManifest(&configDesc, manifestAnnotations, descs...) - if err != nil { - return nil, ocispec.Descriptor{}, nil, ocispec.Descriptor{}, err - } - return -} - -// pack given a bunch of descriptors, create a manifest that references all of them -func pack(config ocispec.Descriptor, annotations map[string]string, descriptors []ocispec.Descriptor) ([]byte, ocispec.Descriptor, error) { - if descriptors == nil { - descriptors = []ocispec.Descriptor{} // make it an empty array to prevent potential server-side bugs - } - // sort descriptors alphanumerically by sha hash so it always is consistent - sort.Slice(descriptors, func(i, j int) bool { - return descriptors[i].Digest < descriptors[j].Digest - }) - manifest := ocispec.Manifest{ - Versioned: specs.Versioned{ - SchemaVersion: 2, // historical value. 
does not pertain to OCI or docker version - }, - Config: config, - Layers: descriptors, - Annotations: annotations, - } - manifestBytes, err := json.Marshal(manifest) - if err != nil { - return nil, ocispec.Descriptor{}, err - } - manifestDescriptor := ocispec.Descriptor{ - MediaType: ocispec.MediaTypeImageManifest, - Digest: digest.FromBytes(manifestBytes), - Size: int64(len(manifestBytes)), - } - - return manifestBytes, manifestDescriptor, nil -} diff --git a/src/vendor/oras.land/oras-go/pkg/content/memory.go b/src/vendor/oras.land/oras-go/pkg/content/memory.go deleted file mode 100644 index 5c194b15a..000000000 --- a/src/vendor/oras.land/oras-go/pkg/content/memory.go +++ /dev/null @@ -1,284 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package content - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "strings" - "sync" - "time" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/remotes" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// Memory provides content from the memory -type Memory struct { - descriptor map[digest.Digest]ocispec.Descriptor - content map[digest.Digest][]byte - nameMap map[string]ocispec.Descriptor - refMap map[string]ocispec.Descriptor - lock *sync.Mutex -} - -// NewMemory creats a new memory store -func NewMemory() *Memory { - return &Memory{ - descriptor: make(map[digest.Digest]ocispec.Descriptor), - content: make(map[digest.Digest][]byte), - nameMap: make(map[string]ocispec.Descriptor), - refMap: make(map[string]ocispec.Descriptor), - lock: &sync.Mutex{}, - } -} - -func (s *Memory) Resolver() remotes.Resolver { - return s -} - -func (s *Memory) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) { - desc, ok := s.refMap[ref] - if !ok { - return "", ocispec.Descriptor{}, fmt.Errorf("unknown reference: %s", ref) - } - return ref, desc, nil -} - -func (s *Memory) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) { - if _, ok := s.refMap[ref]; !ok { - return nil, fmt.Errorf("unknown reference: %s", ref) - } - return s, nil -} - -// Fetch get an io.ReadCloser for the specific content -func (s *Memory) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { - _, content, ok := s.Get(desc) - if !ok { - return nil, ErrNotFound - } - return ioutil.NopCloser(bytes.NewReader(content)), nil -} - -func (s *Memory) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) { - var tag, hash string - parts := strings.SplitN(ref, "@", 2) - if len(parts) > 0 { - tag = parts[0] - } - if len(parts) > 1 { - hash = parts[1] - } - return &memoryPusher{ - store: s, - ref: tag, - hash: hash, - }, nil -} - -type memoryPusher struct { - store *Memory - ref string - hash string -} - -func (s *memoryPusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) { - name, _ := 
ResolveName(desc) - now := time.Now() - // is this the root? - if desc.Digest.String() == s.hash { - s.store.refMap[s.ref] = desc - } - return &memoryWriter{ - store: s.store, - buffer: bytes.NewBuffer(nil), - desc: desc, - digester: digest.Canonical.Digester(), - status: content.Status{ - Ref: name, - Total: desc.Size, - StartedAt: now, - UpdatedAt: now, - }, - }, nil -} - -// Add adds content, generating a descriptor and returning it. -func (s *Memory) Add(name, mediaType string, content []byte) (ocispec.Descriptor, error) { - var annotations map[string]string - if name != "" { - annotations = map[string]string{ - ocispec.AnnotationTitle: name, - } - } - - if mediaType == "" { - mediaType = DefaultBlobMediaType - } - - desc := ocispec.Descriptor{ - MediaType: mediaType, - Digest: digest.FromBytes(content), - Size: int64(len(content)), - Annotations: annotations, - } - - s.Set(desc, content) - return desc, nil -} - -// Set adds the content to the store -func (s *Memory) Set(desc ocispec.Descriptor, content []byte) { - s.lock.Lock() - defer s.lock.Unlock() - - s.descriptor[desc.Digest] = desc - s.content[desc.Digest] = content - - if name, ok := ResolveName(desc); ok && name != "" { - s.nameMap[name] = desc - } -} - -// Get finds the content from the store -func (s *Memory) Get(desc ocispec.Descriptor) (ocispec.Descriptor, []byte, bool) { - s.lock.Lock() - defer s.lock.Unlock() - - desc, ok := s.descriptor[desc.Digest] - if !ok { - return ocispec.Descriptor{}, nil, false - } - content, ok := s.content[desc.Digest] - return desc, content, ok -} - -// GetByName finds the content from the store by name (i.e. AnnotationTitle) -func (s *Memory) GetByName(name string) (ocispec.Descriptor, []byte, bool) { - s.lock.Lock() - defer s.lock.Unlock() - - desc, ok := s.nameMap[name] - if !ok { - return ocispec.Descriptor{}, nil, false - } - content, ok := s.content[desc.Digest] - return desc, content, ok -} - -// StoreManifest stores a manifest linked to by the provided ref. The children of the -// manifest, such as layers and config, should already exist in the file store, either -// as files linked via Add(), or via Set(). If they do not exist, then a typical -// Fetcher that walks the manifest will hit an unresolved hash. -// -// StoreManifest does *not* validate their presence. -func (s *Memory) StoreManifest(ref string, desc ocispec.Descriptor, manifest []byte) error { - s.refMap[ref] = desc - s.Add("", desc.MediaType, manifest) - return nil -} - -func descFromBytes(b []byte, mediaType string) (ocispec.Descriptor, error) { - digest, err := digest.FromReader(bytes.NewReader(b)) - if err != nil { - return ocispec.Descriptor{}, err - } - - if mediaType == "" { - mediaType = DefaultBlobMediaType - } - return ocispec.Descriptor{ - MediaType: mediaType, - Digest: digest, - Size: int64(len(b)), - }, nil -} - -type memoryWriter struct { - store *Memory - buffer *bytes.Buffer - desc ocispec.Descriptor - digester digest.Digester - status content.Status -} - -func (w *memoryWriter) Status() (content.Status, error) { - return w.status, nil -} - -// Digest returns the current digest of the content, up to the current write. -// -// Cannot be called concurrently with `Write`. -func (w *memoryWriter) Digest() digest.Digest { - return w.digester.Digest() -} - -// Write p to the transaction. 
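Tying the manifest helpers and the Memory store together, a sketch that builds a config and manifest for one in-memory layer and registers the manifest under a tag; the reference string and layer contents are placeholders:

    package main

    import (
        "log"

        ocispec "github.com/opencontainers/image-spec/specs-go/v1"
        "oras.land/oras-go/pkg/content"
    )

    func main() {
        store := content.NewMemory()

        // Add a single layer held in memory; the descriptor is generated for us.
        layerDesc, err := store.Add("hello.txt", ocispec.MediaTypeImageLayer, []byte("hello referrers\n"))
        if err != nil {
            log.Fatal(err)
        }

        // Build a blank config and a manifest referencing the layer.
        config, configDesc, err := content.GenerateConfig(nil)
        if err != nil {
            log.Fatal(err)
        }
        store.Set(configDesc, config)

        manifest, manifestDesc, err := content.GenerateManifest(&configDesc, nil, layerDesc)
        if err != nil {
            log.Fatal(err)
        }
        if err := store.StoreManifest("registry.example.com/demo:v1", manifestDesc, manifest); err != nil {
            log.Fatal(err)
        }
        log.Printf("manifest %s stored", manifestDesc.Digest)
    }
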
-func (w *memoryWriter) Write(p []byte) (n int, err error) { - n, err = w.buffer.Write(p) - w.digester.Hash().Write(p[:n]) - w.status.Offset += int64(len(p)) - w.status.UpdatedAt = time.Now() - return n, err -} - -func (w *memoryWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { - var base content.Info - for _, opt := range opts { - if err := opt(&base); err != nil { - return err - } - } - - if w.buffer == nil { - return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer") - } - content := w.buffer.Bytes() - w.buffer = nil - - if size > 0 && size != int64(len(content)) { - return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", len(content), size) - } - if dgst := w.digester.Digest(); expected != "" && expected != dgst { - return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected) - } - - w.store.Set(w.desc, content) - return nil -} - -func (w *memoryWriter) Close() error { - w.buffer = nil - return nil -} - -func (w *memoryWriter) Truncate(size int64) error { - if size != 0 { - return ErrUnsupportedSize - } - w.status.Offset = 0 - w.digester.Hash().Reset() - w.buffer.Truncate(0) - return nil -} diff --git a/src/vendor/oras.land/oras-go/pkg/content/multireader.go b/src/vendor/oras.land/oras-go/pkg/content/multireader.go deleted file mode 100644 index 8dba773da..000000000 --- a/src/vendor/oras.land/oras-go/pkg/content/multireader.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package content - -import ( - "context" - "fmt" - "io" - - "github.com/containerd/containerd/remotes" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// MultiReader store to read content from multiple stores. It finds the content by asking each underlying -// store to find the content, which it does based on the hash. -// -// Example: -// fileStore := NewFileStore(rootPath) -// memoryStore := NewMemoryStore() -// // load up content in fileStore and memoryStore -// multiStore := MultiReader([]content.Provider{fileStore, memoryStore}) -// -// You now can use multiStore anywhere that content.Provider is accepted -type MultiReader struct { - stores []remotes.Fetcher -} - -// AddStore add a store to read from -func (m *MultiReader) AddStore(store ...remotes.Fetcher) { - m.stores = append(m.stores, store...) 
-} - -// ReaderAt get a reader -func (m MultiReader) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { - for _, store := range m.stores { - r, err := store.Fetch(ctx, desc) - if r != nil && err == nil { - return r, nil - } - } - // we did not find any - return nil, fmt.Errorf("not found") -} diff --git a/src/vendor/oras.land/oras-go/pkg/content/multiwriter.go b/src/vendor/oras.land/oras-go/pkg/content/multiwriter.go deleted file mode 100644 index 1e69bcdf3..000000000 --- a/src/vendor/oras.land/oras-go/pkg/content/multiwriter.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package content - -import ( - "context" - - ctrcontent "github.com/containerd/containerd/content" - "github.com/containerd/containerd/remotes" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// MultiWriterIngester an ingester that can provide a single writer or multiple writers for a single -// descriptor. Useful when the target of a descriptor can have multiple items within it, e.g. a layer -// that is a tar file with multiple files, each of which should go to a different stream, some of which -// should not be handled at all. -type MultiWriterIngester interface { - ctrcontent.Ingester - Writers(ctx context.Context, opts ...ctrcontent.WriterOpt) (func(string) (ctrcontent.Writer, error), error) -} - -// MultiWriterPusher a pusher that can provide a single writer or multiple writers for a single -// descriptor. Useful when the target of a descriptor can have multiple items within it, e.g. a layer -// that is a tar file with multiple files, each of which should go to a different stream, some of which -// should not be handled at all. -type MultiWriterPusher interface { - remotes.Pusher - Pushers(ctx context.Context, desc ocispec.Descriptor) (func(string) (ctrcontent.Writer, error), error) -} diff --git a/src/vendor/oras.land/oras-go/pkg/content/oci.go b/src/vendor/oras.land/oras-go/pkg/content/oci.go deleted file mode 100644 index b643f61f4..000000000 --- a/src/vendor/oras.land/oras-go/pkg/content/oci.go +++ /dev/null @@ -1,336 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package content - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/content/local" - "github.com/containerd/containerd/remotes" - "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// OCI provides content from the file system with the OCI-Image layout. -// Reference: https://github.com/opencontainers/image-spec/blob/master/image-layout.md -type OCI struct { - content.Store - - root string - index *ocispec.Index - nameMap map[string]ocispec.Descriptor -} - -// NewOCI creates a new OCI store -func NewOCI(rootPath string) (*OCI, error) { - fileStore, err := local.NewStore(rootPath) - if err != nil { - return nil, err - } - - store := &OCI{ - Store: fileStore, - root: rootPath, - } - if err := store.validateOCILayoutFile(); err != nil { - return nil, err - } - if err := store.LoadIndex(); err != nil { - return nil, err - } - - return store, nil -} - -// LoadIndex reads the index.json from the file system -func (s *OCI) LoadIndex() error { - path := filepath.Join(s.root, OCIImageIndexFile) - indexFile, err := os.Open(path) - if err != nil { - if !os.IsNotExist(err) { - return err - } - s.index = &ocispec.Index{ - Versioned: specs.Versioned{ - SchemaVersion: 2, // historical value - }, - } - s.nameMap = make(map[string]ocispec.Descriptor) - - return nil - } - defer indexFile.Close() - - if err := json.NewDecoder(indexFile).Decode(&s.index); err != nil { - return err - } - - s.nameMap = make(map[string]ocispec.Descriptor) - for _, desc := range s.index.Manifests { - if name := desc.Annotations[ocispec.AnnotationRefName]; name != "" { - s.nameMap[name] = desc - } - } - - return nil -} - -// SaveIndex writes the index.json to the file system -func (s *OCI) SaveIndex() error { - // first need to update the index - var descs []ocispec.Descriptor - for name, desc := range s.nameMap { - if desc.Annotations == nil { - desc.Annotations = map[string]string{} - } - desc.Annotations[ocispec.AnnotationRefName] = name - descs = append(descs, desc) - } - s.index.Manifests = descs - indexJSON, err := json.Marshal(s.index) - if err != nil { - return err - } - - path := filepath.Join(s.root, OCIImageIndexFile) - return ioutil.WriteFile(path, indexJSON, 0644) -} - -func (s *OCI) Resolver() remotes.Resolver { - return s -} - -func (s *OCI) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) { - if err := s.LoadIndex(); err != nil { - return "", ocispec.Descriptor{}, err - } - desc, ok := s.nameMap[ref] - if !ok { - return "", ocispec.Descriptor{}, fmt.Errorf("reference %s not in store", ref) - } - return ref, desc, nil -} - -func (s *OCI) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) { - if err := s.LoadIndex(); err != nil { - return nil, err - } - if _, ok := s.nameMap[ref]; !ok { - return nil, fmt.Errorf("reference %s not in store", ref) - } - return s, nil -} - -// Fetch get an io.ReadCloser for the specific content -func (s *OCI) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { - readerAt, err := s.Store.ReaderAt(ctx, desc) - if err != nil { - return nil, err - } - // just wrap the ReaderAt with a Reader - return ioutil.NopCloser(&ReaderAtWrapper{readerAt: readerAt}), nil -} - -// Pusher get a remotes.Pusher for the given ref -func (s *OCI) Pusher(ctx 
context.Context, ref string) (remotes.Pusher, error) { - // separate the tag based ref from the hash - var ( - baseRef, hash string - ) - parts := strings.SplitN(ref, "@", 2) - baseRef = parts[0] - if len(parts) > 1 { - hash = parts[1] - } - return &ociPusher{oci: s, ref: baseRef, digest: hash}, nil -} - -// AddReference adds or updates an reference to index. -func (s *OCI) AddReference(name string, desc ocispec.Descriptor) { - if desc.Annotations == nil { - desc.Annotations = map[string]string{ - ocispec.AnnotationRefName: name, - } - } else { - desc.Annotations[ocispec.AnnotationRefName] = name - } - - if _, ok := s.nameMap[name]; ok { - s.nameMap[name] = desc - - for i, ref := range s.index.Manifests { - if name == ref.Annotations[ocispec.AnnotationRefName] { - s.index.Manifests[i] = desc - return - } - } - - // Process should not reach here. - // Fallthrough to `Add` scenario and recover. - s.index.Manifests = append(s.index.Manifests, desc) - return - } - - s.index.Manifests = append(s.index.Manifests, desc) - s.nameMap[name] = desc -} - -// DeleteReference deletes an reference from index. -func (s *OCI) DeleteReference(name string) { - if _, ok := s.nameMap[name]; !ok { - return - } - - delete(s.nameMap, name) - for i, desc := range s.index.Manifests { - if name == desc.Annotations[ocispec.AnnotationRefName] { - s.index.Manifests[i] = s.index.Manifests[len(s.index.Manifests)-1] - s.index.Manifests = s.index.Manifests[:len(s.index.Manifests)-1] - return - } - } -} - -// ListReferences lists all references in index. -func (s *OCI) ListReferences() map[string]ocispec.Descriptor { - return s.nameMap -} - -// validateOCILayoutFile ensures the `oci-layout` file -func (s *OCI) validateOCILayoutFile() error { - layoutFilePath := filepath.Join(s.root, ocispec.ImageLayoutFile) - layoutFile, err := os.Open(layoutFilePath) - if err != nil { - if !os.IsNotExist(err) { - return err - } - - layout := ocispec.ImageLayout{ - Version: ocispec.ImageLayoutVersion, - } - layoutJSON, err := json.Marshal(layout) - if err != nil { - return err - } - - return ioutil.WriteFile(layoutFilePath, layoutJSON, 0644) - } - defer layoutFile.Close() - - var layout *ocispec.ImageLayout - err = json.NewDecoder(layoutFile).Decode(&layout) - if err != nil { - return err - } - if layout.Version != ocispec.ImageLayoutVersion { - return ErrUnsupportedVersion - } - - return nil -} - -// TODO: implement (needed to create a content.Store) -// TODO: do not return empty content.Info -// Abort completely cancels the ingest operation targeted by ref. -func (s *OCI) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { - return content.Info{}, nil -} - -// TODO: implement (needed to create a content.Store) -// Update updates mutable information related to content. -// If one or more fieldpaths are provided, only those -// fields will be updated. -// Mutable fields: -// labels.* -func (s *OCI) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { - return content.Info{}, errors.New("not yet implemented: Update (content.Store interface)") -} - -// TODO: implement (needed to create a content.Store) -// Walk will call fn for each item in the content store which -// match the provided filters. If no filters are given all -// items will be walked. -func (s *OCI) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error { - return errors.New("not yet implemented: Walk (content.Store interface)") -} - -// Delete removes the content from the store. 
-func (s *OCI) Delete(ctx context.Context, dgst digest.Digest) error { - return s.Store.Delete(ctx, dgst) -} - -// TODO: implement (needed to create a content.Store) -func (s *OCI) Status(ctx context.Context, ref string) (content.Status, error) { - // Status returns the status of the provided ref. - return content.Status{}, errors.New("not yet implemented: Status (content.Store interface)") -} - -// TODO: implement (needed to create a content.Store) -// ListStatuses returns the status of any active ingestions whose ref match the -// provided regular expression. If empty, all active ingestions will be -// returned. -func (s *OCI) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) { - return []content.Status{}, errors.New("not yet implemented: ListStatuses (content.Store interface)") -} - -// TODO: implement (needed to create a content.Store) -// Abort completely cancels the ingest operation targeted by ref. -func (s *OCI) Abort(ctx context.Context, ref string) error { - return errors.New("not yet implemented: Abort (content.Store interface)") -} - -// ReaderAt provides contents -func (s *OCI) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { - return s.Store.ReaderAt(ctx, desc) -} - -// ociPusher to push content for a single referencem can handle multiple descriptors. -// Needs to be able to recognize when a root manifest is being pushed and to create the tag -// for it. -type ociPusher struct { - oci *OCI - ref string - digest string -} - -// Push get a writer for a single Descriptor -func (p *ociPusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) { - // do we need to create a tag? - switch desc.MediaType { - case ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: - // if the hash of the content matches that which was provided as the hash for the root, mark it - if p.digest != "" && p.digest == desc.Digest.String() { - if err := p.oci.LoadIndex(); err != nil { - return nil, err - } - p.oci.nameMap[p.ref] = desc - if err := p.oci.SaveIndex(); err != nil { - return nil, err - } - } - } - - return p.oci.Store.Writer(ctx, content.WithDescriptor(desc), content.WithRef(p.ref)) -} diff --git a/src/vendor/oras.land/oras-go/pkg/content/opts.go b/src/vendor/oras.land/oras-go/pkg/content/opts.go deleted file mode 100644 index f6eaac692..000000000 --- a/src/vendor/oras.land/oras-go/pkg/content/opts.go +++ /dev/null @@ -1,112 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package content - -import ( - "errors" - - "github.com/opencontainers/go-digest" -) - -type WriterOpts struct { - InputHash *digest.Digest - OutputHash *digest.Digest - Blocksize int - MultiWriterIngester bool - IgnoreNoName bool -} - -type WriterOpt func(*WriterOpts) error - -func DefaultWriterOpts() WriterOpts { - return WriterOpts{ - InputHash: nil, - OutputHash: nil, - Blocksize: DefaultBlocksize, - IgnoreNoName: true, - } -} - -// WithInputHash provide the expected input hash to a writer. 
Writers -// may suppress their own calculation of a hash on the stream, taking this -// hash instead. If the Writer processes the data before passing it on to another -// Writer layer, this is the hash of the *input* stream. -// -// To have a blank hash, use WithInputHash(BlankHash). -func WithInputHash(hash digest.Digest) WriterOpt { - return func(w *WriterOpts) error { - w.InputHash = &hash - return nil - } -} - -// WithOutputHash provide the expected output hash to a writer. Writers -// may suppress their own calculation of a hash on the stream, taking this -// hash instead. If the Writer processes the data before passing it on to another -// Writer layer, this is the hash of the *output* stream. -// -// To have a blank hash, use WithInputHash(BlankHash). -func WithOutputHash(hash digest.Digest) WriterOpt { - return func(w *WriterOpts) error { - w.OutputHash = &hash - return nil - } -} - -// WithBlocksize set the blocksize used by the processor of data. -// The default is DefaultBlocksize, which is the same as that used by io.Copy. -// Includes a safety check to ensure the caller doesn't actively set it to <= 0. -func WithBlocksize(blocksize int) WriterOpt { - return func(w *WriterOpts) error { - if blocksize <= 0 { - return errors.New("blocksize must be greater than or equal to 0") - } - w.Blocksize = blocksize - return nil - } -} - -// WithMultiWriterIngester the passed ingester also implements MultiWriter -// and should be used as such. If this is set to true, but the ingester does not -// implement MultiWriter, calling Writer should return an error. -func WithMultiWriterIngester() WriterOpt { - return func(w *WriterOpts) error { - w.MultiWriterIngester = true - return nil - } -} - -// WithErrorOnNoName some ingesters, when creating a Writer, do not return an error if -// the descriptor does not have a valid name on the descriptor. Passing WithErrorOnNoName -// tells the writer to return an error instead of passing the data to a nil writer. -func WithErrorOnNoName() WriterOpt { - return func(w *WriterOpts) error { - w.IgnoreNoName = false - return nil - } -} - -// WithIgnoreNoName some ingesters, when creating a Writer, return an error if -// the descriptor does not have a valid name on the descriptor. Passing WithIgnoreNoName -// tells the writer not to return an error, but rather to pass the data to a nil writer. -// -// Deprecated: Use WithErrorOnNoName -func WithIgnoreNoName() WriterOpt { - return func(w *WriterOpts) error { - w.IgnoreNoName = true - return nil - } -} diff --git a/src/vendor/oras.land/oras-go/pkg/content/passthrough.go b/src/vendor/oras.land/oras-go/pkg/content/passthrough.go deleted file mode 100644 index d54c4256c..000000000 --- a/src/vendor/oras.land/oras-go/pkg/content/passthrough.go +++ /dev/null @@ -1,286 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package content - -import ( - "context" - "errors" - "io" - "time" - - "github.com/containerd/containerd/content" - "github.com/opencontainers/go-digest" -) - -// PassthroughWriter takes an input stream and passes it through to an underlying writer, -// while providing the ability to manipulate the stream before it gets passed through -type PassthroughWriter struct { - writer content.Writer - pipew *io.PipeWriter - digester digest.Digester - size int64 - underlyingWriter *underlyingWriter - reader *io.PipeReader - hash *digest.Digest - done chan error -} - -// NewPassthroughWriter creates a pass-through writer that allows for processing -// the content via an arbitrary function. The function should do whatever processing it -// wants, reading from the Reader to the Writer. When done, it must indicate via -// sending an error or nil to the Done -func NewPassthroughWriter(writer content.Writer, f func(r io.Reader, w io.Writer, done chan<- error), opts ...WriterOpt) content.Writer { - // process opts for default - wOpts := DefaultWriterOpts() - for _, opt := range opts { - if err := opt(&wOpts); err != nil { - return nil - } - } - - r, w := io.Pipe() - pw := &PassthroughWriter{ - writer: writer, - pipew: w, - digester: digest.Canonical.Digester(), - underlyingWriter: &underlyingWriter{ - writer: writer, - digester: digest.Canonical.Digester(), - hash: wOpts.OutputHash, - }, - reader: r, - hash: wOpts.InputHash, - done: make(chan error, 1), - } - go f(r, pw.underlyingWriter, pw.done) - return pw -} - -func (pw *PassthroughWriter) Write(p []byte) (n int, err error) { - n, err = pw.pipew.Write(p) - if pw.hash == nil { - pw.digester.Hash().Write(p[:n]) - } - pw.size += int64(n) - return -} - -func (pw *PassthroughWriter) Close() error { - if pw.pipew != nil { - pw.pipew.Close() - } - pw.writer.Close() - return nil -} - -// Digest may return empty digest or panics until committed. -func (pw *PassthroughWriter) Digest() digest.Digest { - if pw.hash != nil { - return *pw.hash - } - return pw.digester.Digest() -} - -// Commit commits the blob (but no roll-back is guaranteed on an error). -// size and expected can be zero-value when unknown. -// Commit always closes the writer, even on error. -// ErrAlreadyExists aborts the writer. -func (pw *PassthroughWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { - if pw.pipew != nil { - pw.pipew.Close() - } - err := <-pw.done - if pw.reader != nil { - pw.reader.Close() - } - if err != nil && err != io.EOF { - return err - } - - // Some underlying writers will validate an expected digest, so we need the option to pass it - // that digest. That is why we caluclate the digest of the underlying writer throughout the write process. - return pw.writer.Commit(ctx, pw.underlyingWriter.size, pw.underlyingWriter.Digest(), opts...) 
-} - -// Status returns the current state of write -func (pw *PassthroughWriter) Status() (content.Status, error) { - return pw.writer.Status() -} - -// Truncate updates the size of the target blob -func (pw *PassthroughWriter) Truncate(size int64) error { - return pw.writer.Truncate(size) -} - -// underlyingWriter implementation of io.Writer to write to the underlying -// io.Writer -type underlyingWriter struct { - writer content.Writer - digester digest.Digester - size int64 - hash *digest.Digest -} - -// Write write to the underlying writer -func (u *underlyingWriter) Write(p []byte) (int, error) { - n, err := u.writer.Write(p) - if err != nil { - return 0, err - } - - if u.hash == nil { - u.digester.Hash().Write(p) - } - u.size += int64(len(p)) - return n, nil -} - -// Size get total size written -func (u *underlyingWriter) Size() int64 { - return u.size -} - -// Digest may return empty digest or panics until committed. -func (u *underlyingWriter) Digest() digest.Digest { - if u.hash != nil { - return *u.hash - } - return u.digester.Digest() -} - -// PassthroughMultiWriter single writer that passes through to multiple writers, allowing the passthrough -// function to select which writer to use. -type PassthroughMultiWriter struct { - writers []*PassthroughWriter - pipew *io.PipeWriter - digester digest.Digester - size int64 - reader *io.PipeReader - hash *digest.Digest - done chan error - startedAt time.Time - updatedAt time.Time -} - -func NewPassthroughMultiWriter(writers func(name string) (content.Writer, error), f func(r io.Reader, getwriter func(name string) io.Writer, done chan<- error), opts ...WriterOpt) content.Writer { - // process opts for default - wOpts := DefaultWriterOpts() - for _, opt := range opts { - if err := opt(&wOpts); err != nil { - return nil - } - } - - r, w := io.Pipe() - - pmw := &PassthroughMultiWriter{ - startedAt: time.Now(), - updatedAt: time.Now(), - done: make(chan error, 1), - digester: digest.Canonical.Digester(), - hash: wOpts.InputHash, - pipew: w, - reader: r, - } - - // get our output writers - getwriter := func(name string) io.Writer { - writer, err := writers(name) - if err != nil || writer == nil { - return nil - } - pw := &PassthroughWriter{ - writer: writer, - digester: digest.Canonical.Digester(), - underlyingWriter: &underlyingWriter{ - writer: writer, - digester: digest.Canonical.Digester(), - hash: wOpts.OutputHash, - }, - done: make(chan error, 1), - } - pmw.writers = append(pmw.writers, pw) - return pw.underlyingWriter - } - go f(r, getwriter, pmw.done) - return pmw -} - -func (pmw *PassthroughMultiWriter) Write(p []byte) (n int, err error) { - n, err = pmw.pipew.Write(p) - if pmw.hash == nil { - pmw.digester.Hash().Write(p[:n]) - } - pmw.size += int64(n) - pmw.updatedAt = time.Now() - return -} - -func (pmw *PassthroughMultiWriter) Close() error { - pmw.pipew.Close() - for _, w := range pmw.writers { - w.Close() - } - return nil -} - -// Digest may return empty digest or panics until committed. -func (pmw *PassthroughMultiWriter) Digest() digest.Digest { - if pmw.hash != nil { - return *pmw.hash - } - return pmw.digester.Digest() -} - -// Commit commits the blob (but no roll-back is guaranteed on an error). -// size and expected can be zero-value when unknown. -// Commit always closes the writer, even on error. -// ErrAlreadyExists aborts the writer. 
-func (pmw *PassthroughMultiWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { - pmw.pipew.Close() - err := <-pmw.done - if pmw.reader != nil { - pmw.reader.Close() - } - if err != nil && err != io.EOF { - return err - } - - // Some underlying writers will validate an expected digest, so we need the option to pass it - // that digest. That is why we caluclate the digest of the underlying writer throughout the write process. - for _, w := range pmw.writers { - // maybe this should be Commit(ctx, pw.underlyingWriter.size, pw.underlyingWriter.Digest(), opts...) - w.done <- err - if err := w.Commit(ctx, size, expected, opts...); err != nil { - return err - } - } - return nil -} - -// Status returns the current state of write -func (pmw *PassthroughMultiWriter) Status() (content.Status, error) { - return content.Status{ - StartedAt: pmw.startedAt, - UpdatedAt: pmw.updatedAt, - Total: pmw.size, - }, nil -} - -// Truncate updates the size of the target blob, but cannot do anything with a multiwriter -func (pmw *PassthroughMultiWriter) Truncate(size int64) error { - return errors.New("truncate unavailable on multiwriter") -} diff --git a/src/vendor/oras.land/oras-go/pkg/content/readerat.go b/src/vendor/oras.land/oras-go/pkg/content/readerat.go deleted file mode 100644 index 7e2cb9b80..000000000 --- a/src/vendor/oras.land/oras-go/pkg/content/readerat.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package content - -import ( - "io" - - "github.com/containerd/containerd/content" -) - -// ensure interface -var ( - _ content.ReaderAt = sizeReaderAt{} -) - -type readAtCloser interface { - io.ReaderAt - io.Closer -} - -type sizeReaderAt struct { - readAtCloser - size int64 -} - -func (ra sizeReaderAt) Size() int64 { - return ra.size -} - -func NopCloserAt(r io.ReaderAt) nopCloserAt { - return nopCloserAt{r} -} - -type nopCloserAt struct { - io.ReaderAt -} - -func (n nopCloserAt) Close() error { - return nil -} - -// readerAtWrapper wraps a ReaderAt to give a Reader -type ReaderAtWrapper struct { - offset int64 - readerAt io.ReaderAt -} - -func (r *ReaderAtWrapper) Read(p []byte) (n int, err error) { - n, err = r.readerAt.ReadAt(p, r.offset) - r.offset += int64(n) - return -} - -func NewReaderAtWrapper(readerAt io.ReaderAt) *ReaderAtWrapper { - return &ReaderAtWrapper{readerAt: readerAt} -} diff --git a/src/vendor/oras.land/oras-go/pkg/content/registry.go b/src/vendor/oras.land/oras-go/pkg/content/registry.go deleted file mode 100644 index aa22567c6..000000000 --- a/src/vendor/oras.land/oras-go/pkg/content/registry.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package content - -import ( - "context" - "crypto/tls" - "fmt" - "net/http" - "os" - - auth "oras.land/oras-go/pkg/auth/docker" - - "github.com/containerd/containerd/remotes" - "github.com/containerd/containerd/remotes/docker" -) - -// RegistryOptions provide configuration options to a Registry -type RegistryOptions struct { - Configs []string - Username string - Password string - Insecure bool - PlainHTTP bool -} - -// Registry provides content from a spec-compliant registry. Create an use a new one for each -// registry with unique configuration of RegistryOptions. -type Registry struct { - remotes.Resolver -} - -// NewRegistry creates a new Registry store -func NewRegistry(opts RegistryOptions) (*Registry, error) { - return &Registry{ - Resolver: newResolver(opts.Username, opts.Password, opts.Insecure, opts.PlainHTTP, opts.Configs...), - }, nil -} - -func newResolver(username, password string, insecure bool, plainHTTP bool, configs ...string) remotes.Resolver { - - opts := docker.ResolverOptions{ - PlainHTTP: plainHTTP, - } - - client := http.DefaultClient - if insecure { - client.Transport = &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, - }, - } - } - opts.Client = client - - if username != "" || password != "" { - opts.Credentials = func(hostName string) (string, string, error) { - return username, password, nil - } - return docker.NewResolver(opts) - } - cli, err := auth.NewClient(configs...) - if err != nil { - fmt.Fprintf(os.Stderr, "WARNING: Error loading auth file: %v\n", err) - } - resolver, err := cli.Resolver(context.Background(), client, plainHTTP) - if err != nil { - fmt.Fprintf(os.Stderr, "WARNING: Error loading resolver: %v\n", err) - resolver = docker.NewResolver(opts) - } - return resolver -} diff --git a/src/vendor/oras.land/oras-go/pkg/content/untar.go b/src/vendor/oras.land/oras-go/pkg/content/untar.go deleted file mode 100644 index fe95a81a4..000000000 --- a/src/vendor/oras.land/oras-go/pkg/content/untar.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package content - -import ( - "archive/tar" - "fmt" - "io" - - "github.com/containerd/containerd/content" -) - -// NewUntarWriter wrap a writer with an untar, so that the stream is untarred -// -// By default, it calculates the hash when writing. If the option `skipHash` is true, -// it will skip doing the hash. Skipping the hash is intended to be used only -// if you are confident about the validity of the data being passed to the writer, -// and wish to save on the hashing time. 
-func NewUntarWriter(writer content.Writer, opts ...WriterOpt) content.Writer { - // process opts for default - wOpts := DefaultWriterOpts() - for _, opt := range opts { - if err := opt(&wOpts); err != nil { - return nil - } - } - - return NewPassthroughWriter(writer, func(r io.Reader, w io.Writer, done chan<- error) { - tr := tar.NewReader(r) - var err error - for { - _, err := tr.Next() - if err == io.EOF { - // clear the error, since we do not pass an io.EOF - err = nil - break // End of archive - } - if err != nil { - // pass the error on - err = fmt.Errorf("UntarWriter tar file header read error: %v", err) - break - } - // write out the untarred data - // we can handle io.EOF, just go to the next file - // any other errors should stop and get reported - b := make([]byte, wOpts.Blocksize, wOpts.Blocksize) - for { - var n int - n, err = tr.Read(b) - if err != nil && err != io.EOF { - err = fmt.Errorf("UntarWriter file data read error: %v\n", err) - break - } - l := n - if n > len(b) { - l = len(b) - } - if _, err2 := w.Write(b[:l]); err2 != nil { - err = fmt.Errorf("UntarWriter error writing to underlying writer: %v", err2) - break - } - if err == io.EOF { - // go to the next file - break - } - } - // did we break with a non-nil and non-EOF error? - if err != nil && err != io.EOF { - break - } - } - done <- err - }, opts...) -} - -// NewUntarWriterByName wrap multiple writers with an untar, so that the stream is untarred and passed -// to the appropriate writer, based on the filename. If a filename is not found, it is up to the called func -// to determine how to process it. -func NewUntarWriterByName(writers func(string) (content.Writer, error), opts ...WriterOpt) content.Writer { - // process opts for default - wOpts := DefaultWriterOpts() - for _, opt := range opts { - if err := opt(&wOpts); err != nil { - return nil - } - } - - // need a PassthroughMultiWriter here - return NewPassthroughMultiWriter(writers, func(r io.Reader, getwriter func(name string) io.Writer, done chan<- error) { - tr := tar.NewReader(r) - var err error - for { - header, err := tr.Next() - if err == io.EOF { - // clear the error, since we do not pass an io.EOF - err = nil - break // End of archive - } - if err != nil { - // pass the error on - err = fmt.Errorf("UntarWriter tar file header read error: %v", err) - break - } - // get the filename - filename := header.Name - - // get the writer for this filename - w := getwriter(filename) - if w == nil { - continue - } - - // write out the untarred data - // we can handle io.EOF, just go to the next file - // any other errors should stop and get reported - b := make([]byte, wOpts.Blocksize, wOpts.Blocksize) - for { - var n int - n, err = tr.Read(b) - if err != nil && err != io.EOF { - err = fmt.Errorf("UntarWriter file data read error: %v\n", err) - break - } - l := n - if n > len(b) { - l = len(b) - } - if _, err2 := w.Write(b[:l]); err2 != nil { - err = fmt.Errorf("UntarWriter error writing to underlying writer at for name '%s': %v", filename, err2) - break - } - if err == io.EOF { - // go to the next file - break - } - } - // did we break with a non-nil and non-EOF error? - if err != nil && err != io.EOF { - break - } - } - done <- err - }, opts...) -} diff --git a/src/vendor/oras.land/oras-go/pkg/content/utils.go b/src/vendor/oras.land/oras-go/pkg/content/utils.go deleted file mode 100644 index c31c54aae..000000000 --- a/src/vendor/oras.land/oras-go/pkg/content/utils.go +++ /dev/null @@ -1,223 +0,0 @@ -/* -Copyright The ORAS Authors. 
-Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package content - -import ( - "archive/tar" - "compress/gzip" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "time" - - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// ResolveName resolves name from descriptor -func ResolveName(desc ocispec.Descriptor) (string, bool) { - name, ok := desc.Annotations[ocispec.AnnotationTitle] - return name, ok -} - -// tarDirectory walks the directory specified by path, and tar those files with a new -// path prefix. -func tarDirectory(root, prefix string, w io.Writer, stripTimes bool) error { - tw := tar.NewWriter(w) - defer tw.Close() - if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rename path - name, err := filepath.Rel(root, path) - if err != nil { - return err - } - name = filepath.Join(prefix, name) - name = filepath.ToSlash(name) - - // Generate header - var link string - mode := info.Mode() - if mode&os.ModeSymlink != 0 { - if link, err = os.Readlink(path); err != nil { - return err - } - } - header, err := tar.FileInfoHeader(info, link) - if err != nil { - return errors.Wrap(err, path) - } - header.Name = name - header.Uid = 0 - header.Gid = 0 - header.Uname = "" - header.Gname = "" - - if stripTimes { - header.ModTime = time.Time{} - header.AccessTime = time.Time{} - header.ChangeTime = time.Time{} - } - - // Write file - if err := tw.WriteHeader(header); err != nil { - return errors.Wrap(err, "tar") - } - if mode.IsRegular() { - file, err := os.Open(path) - if err != nil { - return err - } - defer file.Close() - if _, err := io.Copy(tw, file); err != nil { - return errors.Wrap(err, path) - } - } - - return nil - }); err != nil { - return err - } - return nil -} - -// extractTarDirectory extracts tar file to a directory specified by the `root` -// parameter. The file name prefix is ensured to be the string specified by the -// `prefix` parameter and is trimmed. 
-func extractTarDirectory(root, prefix string, r io.Reader) error { - tr := tar.NewReader(r) - for { - header, err := tr.Next() - if err != nil { - if err == io.EOF { - return nil - } - return err - } - - // Name check - name := header.Name - path, err := ensureBasePath(root, prefix, name) - if err != nil { - return err - } - path = filepath.Join(root, path) - - // Link check - switch header.Typeflag { - case tar.TypeLink, tar.TypeSymlink: - link := header.Linkname - if !filepath.IsAbs(link) { - link = filepath.Join(filepath.Dir(name), link) - } - if _, err := ensureBasePath(root, prefix, link); err != nil { - return err - } - } - - // Create content - switch header.Typeflag { - case tar.TypeReg: - err = writeFile(path, tr, header.FileInfo().Mode()) - case tar.TypeDir: - err = os.MkdirAll(path, header.FileInfo().Mode()) - case tar.TypeLink: - err = os.Link(header.Linkname, path) - case tar.TypeSymlink: - err = os.Symlink(header.Linkname, path) - default: - continue // Non-regular files are skipped - } - if err != nil { - return err - } - - // Change access time and modification time if possible (error ignored) - os.Chtimes(path, header.AccessTime, header.ModTime) - } -} - -// ensureBasePath ensures the target path is in the base path, -// returning its relative path to the base path. -func ensureBasePath(root, base, target string) (string, error) { - path, err := filepath.Rel(base, target) - if err != nil { - return "", err - } - cleanPath := filepath.ToSlash(filepath.Clean(path)) - if cleanPath == ".." || strings.HasPrefix(cleanPath, "../") { - return "", fmt.Errorf("%q is outside of %q", target, base) - } - - // No symbolic link allowed in the relative path - dir := filepath.Dir(path) - for dir != "." { - if info, err := os.Lstat(filepath.Join(root, dir)); err != nil { - if !os.IsNotExist(err) { - return "", err - } - } else if info.Mode()&os.ModeSymlink != 0 { - return "", fmt.Errorf("no symbolic link allowed between %q and %q", base, target) - } - dir = filepath.Dir(dir) - } - - return path, nil -} - -func writeFile(path string, r io.Reader, perm os.FileMode) error { - file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - defer file.Close() - _, err = io.Copy(file, r) - return err -} - -func extractTarGzip(root, prefix, filename, checksum string) error { - file, err := os.Open(filename) - if err != nil { - return err - } - defer file.Close() - zr, err := gzip.NewReader(file) - if err != nil { - return err - } - defer zr.Close() - var r io.Reader = zr - var verifier digest.Verifier - if checksum != "" { - if digest, err := digest.Parse(checksum); err == nil { - verifier = digest.Verifier() - r = io.TeeReader(r, verifier) - } - } - if err := extractTarDirectory(root, prefix, r); err != nil { - return err - } - if verifier != nil && !verifier.Verified() { - return errors.New("content digest mismatch") - } - return nil -} diff --git a/src/vendor/oras.land/oras-go/pkg/context/context.go b/src/vendor/oras.land/oras-go/pkg/context/context.go deleted file mode 100644 index af0ac8469..000000000 --- a/src/vendor/oras.land/oras-go/pkg/context/context.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package context - -import "context" - -// Background returns a default context with logger discarded. -func Background() context.Context { - ctx := context.Background() - return WithLoggerDiscarded(ctx) -} diff --git a/src/vendor/oras.land/oras-go/pkg/context/logger.go b/src/vendor/oras.land/oras-go/pkg/context/logger.go deleted file mode 100644 index 2fb2e5271..000000000 --- a/src/vendor/oras.land/oras-go/pkg/context/logger.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package context - -import ( - "context" - "io" - "io/ioutil" - - "github.com/containerd/containerd/log" - "github.com/sirupsen/logrus" -) - -// WithLogger returns a new context with the provided logger. -// This method wraps github.com/containerd/containerd/log.WithLogger() -func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context { - return log.WithLogger(ctx, logger) -} - -// WithLoggerFromWriter returns a new context with the logger, writting to the provided logger. -func WithLoggerFromWriter(ctx context.Context, writer io.Writer) context.Context { - logger := logrus.New() - logger.Out = writer - entry := logrus.NewEntry(logger) - return WithLogger(ctx, entry) -} - -// WithLoggerDiscarded returns a new context with the logger, writting to nothing. -func WithLoggerDiscarded(ctx context.Context) context.Context { - return WithLoggerFromWriter(ctx, ioutil.Discard) -} - -// GetLogger retrieves the current logger from the context. -// This method wraps github.com/containerd/containerd/log.GetLogger() -func GetLogger(ctx context.Context) *logrus.Entry { - return log.GetLogger(ctx) -} diff --git a/src/vendor/oras.land/oras-go/pkg/oras/copy.go b/src/vendor/oras.land/oras-go/pkg/oras/copy.go deleted file mode 100644 index ebf63276c..000000000 --- a/src/vendor/oras.land/oras-go/pkg/oras/copy.go +++ /dev/null @@ -1,213 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package oras - -import ( - "bytes" - "context" - "fmt" - "sync" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/remotes" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "oras.land/oras-go/pkg/target" -) - -// Copy copy a ref from one target.Target to a ref in another target.Target. If toRef is blank, reuses fromRef -// Returns the root -// Descriptor of the copied item. Can use the root to retrieve child elements from target.Target. -func Copy(ctx context.Context, from target.Target, fromRef string, to target.Target, toRef string, opts ...CopyOpt) (ocispec.Descriptor, error) { - if from == nil { - return ocispec.Descriptor{}, ErrFromTargetUndefined - } - if to == nil { - return ocispec.Descriptor{}, ErrToTargetUndefined - } - // blank toRef - if toRef == "" { - toRef = fromRef - } - opt := copyOptsDefaults() - for _, o := range opts { - if err := o(opt); err != nil { - return ocispec.Descriptor{}, err - } - } - - if from == nil { - return ocispec.Descriptor{}, ErrFromResolverUndefined - } - if to == nil { - return ocispec.Descriptor{}, ErrToResolverUndefined - } - - // for the "from", we resolve the ref, then use resolver.Fetcher to fetch the various content blobs - // for the "to", we simply use resolver.Pusher to push the various content blobs - - _, desc, err := from.Resolve(ctx, fromRef) - if err != nil { - return ocispec.Descriptor{}, err - } - - fetcher, err := from.Fetcher(ctx, fromRef) - if err != nil { - return ocispec.Descriptor{}, err - } - // construct the reference we send to the pusher using the digest, so it knows what the root is - pushRef := fmt.Sprintf("%s@%s", toRef, desc.Digest.String()) - pusher, err := to.Pusher(ctx, pushRef) - if err != nil { - return ocispec.Descriptor{}, err - } - - if err := transferContent(ctx, desc, fetcher, pusher, opt); err != nil { - return ocispec.Descriptor{}, err - } - return desc, nil -} - -func transferContent(ctx context.Context, desc ocispec.Descriptor, fetcher remotes.Fetcher, pusher remotes.Pusher, opts *copyOpts) error { - var descriptors, manifests []ocispec.Descriptor - lock := &sync.Mutex{} - picker := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - if isAllowedMediaType(desc.MediaType, opts.allowedMediaTypes...) { - if opts.filterName(desc) { - lock.Lock() - defer lock.Unlock() - descriptors = append(descriptors, desc) - } - return nil, nil - } - return nil, nil - }) - - // we use a hybrid store - a cache wrapping the underlying pusher - for two reasons: - // 1. so that we can cache the manifests as pushing them, then retrieve them later to push in reverse order after the blobs - // 2. 
so that we can retrieve them to analyze and find children in the Dispatch routine - store := opts.contentProvideIngesterPusherFetcher - if store == nil { - store = newHybridStoreFromPusher(pusher, opts.cachedMediaTypes, true) - } - - // fetchHandler pushes to the *store*, which may or may not cache it - baseFetchHandler := func(p remotes.Pusher, f remotes.Fetcher) images.HandlerFunc { - return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - cw, err := p.Push(ctx, desc) - if err != nil { - if !errdefs.IsAlreadyExists(err) { - return nil, err - } - - return nil, nil - } - defer cw.Close() - - rc, err := f.Fetch(ctx, desc) - if err != nil { - return nil, err - } - defer rc.Close() - return nil, content.Copy(ctx, cw, rc, desc.Size, desc.Digest) - }) - } - - // track all of our manifests that will be cached - fetchHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - if isAllowedMediaType(desc.MediaType, opts.cachedMediaTypes...) { - lock.Lock() - defer lock.Unlock() - manifests = append(manifests, desc) - } - return baseFetchHandler(store, fetcher)(ctx, desc) - }) - - handlers := []images.Handler{ - filterHandler(opts, opts.allowedMediaTypes...), - } - handlers = append(handlers, opts.baseHandlers...) - handlers = append(handlers, - fetchHandler, - picker, - images.ChildrenHandler(&ProviderWrapper{Fetcher: store}), - ) - handlers = append(handlers, opts.callbackHandlers...) - - if err := opts.dispatch(ctx, images.Handlers(handlers...), nil, desc); err != nil { - return err - } - - // we cached all of the manifests, so push those out - // Iterate in reverse order as seen, parent always uploaded after child - for i := len(manifests) - 1; i >= 0; i-- { - _, err := baseFetchHandler(pusher, store)(ctx, manifests[i]) - if err != nil { - return err - } - } - - // if the option to request the root manifest was passed, accommodate it - if opts.saveManifest != nil && len(manifests) > 0 { - rc, err := store.Fetch(ctx, manifests[0]) - if err != nil { - return fmt.Errorf("could not get root manifest to save based on CopyOpt: %v", err) - } - defer rc.Close() - buf := new(bytes.Buffer) - if _, err := buf.ReadFrom(rc); err != nil { - return fmt.Errorf("unable to read data for root manifest to save based on CopyOpt: %v", err) - } - // get the root manifest from the store - opts.saveManifest(buf.Bytes()) - } - - // if the option to request the layers was passed, accommodate it - if opts.saveLayers != nil && len(descriptors) > 0 { - opts.saveLayers(descriptors) - } - return nil -} - -func filterHandler(opts *copyOpts, allowedMediaTypes ...string) images.HandlerFunc { - return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - switch { - case isAllowedMediaType(desc.MediaType, ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex): - return nil, nil - case isAllowedMediaType(desc.MediaType, allowedMediaTypes...): - if opts.filterName(desc) { - return nil, nil - } - log.G(ctx).Warnf("blob no name: %v", desc.Digest) - default: - log.G(ctx).Warnf("unknown type: %v", desc.MediaType) - } - return nil, images.ErrStopHandler - } -} - -func isAllowedMediaType(mediaType string, allowedMediaTypes ...string) bool { - if len(allowedMediaTypes) == 0 { - return true - } - for _, allowedMediaType := range allowedMediaTypes { - if mediaType == allowedMediaType { - return true - } - } - return false -} diff --git a/src/vendor/oras.land/oras-go/pkg/oras/errors.go 
b/src/vendor/oras.land/oras-go/pkg/oras/errors.go deleted file mode 100644 index 6c5f9f240..000000000 --- a/src/vendor/oras.land/oras-go/pkg/oras/errors.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package oras - -import ( - "errors" - "fmt" -) - -// Common errors -var ( - ErrResolverUndefined = errors.New("resolver undefined") - ErrFromResolverUndefined = errors.New("from target resolver undefined") - ErrToResolverUndefined = errors.New("to target resolver undefined") - ErrFromTargetUndefined = errors.New("from target undefined") - ErrToTargetUndefined = errors.New("from target undefined") -) - -// Path validation related errors -var ( - ErrDirtyPath = errors.New("dirty path") - ErrPathNotSlashSeparated = errors.New("path not slash separated") - ErrAbsolutePathDisallowed = errors.New("absolute path disallowed") - ErrPathTraversalDisallowed = errors.New("path traversal disallowed") -) - -// ErrStopProcessing is used to stop processing an oras operation. -// This error only makes sense in sequential pulling operation. -var ErrStopProcessing = fmt.Errorf("stop processing") diff --git a/src/vendor/oras.land/oras-go/pkg/oras/opts.go b/src/vendor/oras.land/oras-go/pkg/oras/opts.go deleted file mode 100644 index 042a775b0..000000000 --- a/src/vendor/oras.land/oras-go/pkg/oras/opts.go +++ /dev/null @@ -1,254 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package oras - -import ( - "context" - "fmt" - "io" - "path/filepath" - "strings" - "sync" - - "github.com/containerd/containerd/images" - "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "golang.org/x/sync/semaphore" - orascontent "oras.land/oras-go/pkg/content" -) - -func copyOptsDefaults() *copyOpts { - return ©Opts{ - dispatch: images.Dispatch, - filterName: filterName, - cachedMediaTypes: []string{ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex}, - validateName: ValidateNameAsPath, - } -} - -type CopyOpt func(o *copyOpts) error - -type copyOpts struct { - allowedMediaTypes []string - dispatch func(context.Context, images.Handler, *semaphore.Weighted, ...ocispec.Descriptor) error - baseHandlers []images.Handler - callbackHandlers []images.Handler - contentProvideIngesterPusherFetcher orascontent.Store - filterName func(ocispec.Descriptor) bool - cachedMediaTypes []string - - saveManifest func([]byte) - saveLayers func([]ocispec.Descriptor) - validateName func(desc ocispec.Descriptor) error - - userAgent string -} - -// ValidateNameAsPath validates name in the descriptor as file path in order -// to generate good packages intended to be pulled using the FileStore or -// the oras cli. -// For cross-platform considerations, only unix paths are accepted. -func ValidateNameAsPath(desc ocispec.Descriptor) error { - // no empty name - path, ok := orascontent.ResolveName(desc) - if !ok || path == "" { - return orascontent.ErrNoName - } - - // path should be clean - if target := filepath.ToSlash(filepath.Clean(path)); target != path { - return errors.Wrap(ErrDirtyPath, path) - } - - // path should be slash-separated - if strings.Contains(path, "\\") { - return errors.Wrap(ErrPathNotSlashSeparated, path) - } - - // disallow absolute path: covers unix and windows format - if strings.HasPrefix(path, "/") { - return errors.Wrap(ErrAbsolutePathDisallowed, path) - } - if len(path) > 2 { - c := path[0] - if path[1] == ':' && path[2] == '/' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') { - return errors.Wrap(ErrAbsolutePathDisallowed, path) - } - } - - // disallow path traversal - if strings.HasPrefix(path, "../") || path == ".." { - return errors.Wrap(ErrPathTraversalDisallowed, path) - } - - return nil -} - -// dispatchBFS behaves the same as images.Dispatch() but in sequence with breath-first search. -func dispatchBFS(ctx context.Context, handler images.Handler, weighted *semaphore.Weighted, descs ...ocispec.Descriptor) error { - for i := 0; i < len(descs); i++ { - desc := descs[i] - children, err := handler.Handle(ctx, desc) - if err != nil { - switch err := errors.Cause(err); err { - case images.ErrSkipDesc: - continue // don't traverse the children. - case ErrStopProcessing: - return nil - } - return err - } - descs = append(descs, children...) - } - return nil -} - -func filterName(desc ocispec.Descriptor) bool { - // needs to be filled in - return true -} - -// WithAdditionalCachedMediaTypes adds media types normally cached in memory when pulling. -// This does not replace the default media types, but appends to them -func WithAdditionalCachedMediaTypes(cachedMediaTypes ...string) CopyOpt { - return func(o *copyOpts) error { - o.cachedMediaTypes = append(o.cachedMediaTypes, cachedMediaTypes...) 
- return nil - } -} - -// WithAllowedMediaType sets the allowed media types -func WithAllowedMediaType(allowedMediaTypes ...string) CopyOpt { - return func(o *copyOpts) error { - o.allowedMediaTypes = append(o.allowedMediaTypes, allowedMediaTypes...) - return nil - } -} - -// WithAllowedMediaTypes sets the allowed media types -func WithAllowedMediaTypes(allowedMediaTypes []string) CopyOpt { - return func(o *copyOpts) error { - o.allowedMediaTypes = append(o.allowedMediaTypes, allowedMediaTypes...) - return nil - } -} - -// WithPullByBFS opt to pull in sequence with breath-first search -func WithPullByBFS(o *copyOpts) error { - o.dispatch = dispatchBFS - return nil -} - -// WithPullBaseHandler provides base handlers, which will be called before -// any pull specific handlers. -func WithPullBaseHandler(handlers ...images.Handler) CopyOpt { - return func(o *copyOpts) error { - o.baseHandlers = append(o.baseHandlers, handlers...) - return nil - } -} - -// WithPullCallbackHandler provides callback handlers, which will be called after -// any pull specific handlers. -func WithPullCallbackHandler(handlers ...images.Handler) CopyOpt { - return func(o *copyOpts) error { - o.callbackHandlers = append(o.callbackHandlers, handlers...) - return nil - } -} - -// WithContentProvideIngester opt to the provided Provider and Ingester -// for file system I/O, including caches. -func WithContentStore(store orascontent.Store) CopyOpt { - return func(o *copyOpts) error { - o.contentProvideIngesterPusherFetcher = store - return nil - } -} - -// WithPullEmptyNameAllowed allows pulling blobs with empty name. -func WithPullEmptyNameAllowed() CopyOpt { - return func(o *copyOpts) error { - o.filterName = func(ocispec.Descriptor) bool { - return true - } - return nil - } -} - -// WithPullStatusTrack report results to stdout -func WithPullStatusTrack(writer io.Writer) CopyOpt { - return WithPullCallbackHandler(pullStatusTrack(writer)) -} - -func pullStatusTrack(writer io.Writer) images.Handler { - var printLock sync.Mutex - return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - if name, ok := orascontent.ResolveName(desc); ok { - digestString := desc.Digest.String() - if err := desc.Digest.Validate(); err == nil { - if algo := desc.Digest.Algorithm(); algo == digest.SHA256 { - digestString = desc.Digest.Encoded()[:12] - } - } - printLock.Lock() - defer printLock.Unlock() - fmt.Fprintln(writer, "Downloaded", digestString, name) - } - return nil, nil - }) -} - -// WithNameValidation validates the image title in the descriptor. -// Pass nil to disable name validation. -func WithNameValidation(validate func(desc ocispec.Descriptor) error) CopyOpt { - return func(o *copyOpts) error { - o.validateName = validate - return nil - } -} - -// WithUserAgent set the user agent string in http communications -func WithUserAgent(agent string) CopyOpt { - return func(o *copyOpts) error { - o.userAgent = agent - return nil - } -} - -// WithLayerDescriptors passes the slice of Descriptors for layers to the -// provided func. If the passed parameter is nil, returns an error. -func WithLayerDescriptors(save func([]ocispec.Descriptor)) CopyOpt { - return func(o *copyOpts) error { - if save == nil { - return errors.New("layers save func must be non-nil") - } - o.saveLayers = save - return nil - } -} - -// WithRootManifest passes the root manifest for the artifacts to the provided -// func. If the passed parameter is nil, returns an error. 
-func WithRootManifest(save func(b []byte)) CopyOpt { - return func(o *copyOpts) error { - if save == nil { - return errors.New("manifest save func must be non-nil") - } - o.saveManifest = save - return nil - } -} diff --git a/src/vendor/oras.land/oras-go/pkg/oras/provider.go b/src/vendor/oras.land/oras-go/pkg/oras/provider.go deleted file mode 100644 index 97736ad3c..000000000 --- a/src/vendor/oras.land/oras-go/pkg/oras/provider.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package oras - -import ( - "context" - "errors" - "io" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/remotes" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// ProviderWrapper wraps a remote.Fetcher to make a content.Provider, which is useful for things -type ProviderWrapper struct { - Fetcher remotes.Fetcher -} - -func (p *ProviderWrapper) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { - if p.Fetcher == nil { - return nil, errors.New("no Fetcher provided") - } - return &fetcherReaderAt{ - ctx: ctx, - fetcher: p.Fetcher, - desc: desc, - offset: 0, - }, nil -} - -type fetcherReaderAt struct { - ctx context.Context - fetcher remotes.Fetcher - desc ocispec.Descriptor - rc io.ReadCloser - offset int64 -} - -func (f *fetcherReaderAt) Close() error { - if f.rc == nil { - return nil - } - return f.rc.Close() -} - -func (f *fetcherReaderAt) Size() int64 { - return f.desc.Size -} - -func (f *fetcherReaderAt) ReadAt(p []byte, off int64) (n int, err error) { - // if we do not have a readcloser, get it - if f.rc == nil || f.offset != off { - rc, err := f.fetcher.Fetch(f.ctx, f.desc) - if err != nil { - return 0, err - } - f.rc = rc - } - - n, err = io.ReadFull(f.rc, p) - if err != nil { - return n, err - } - f.offset += int64(n) - return n, err -} diff --git a/src/vendor/oras.land/oras-go/pkg/oras/store.go b/src/vendor/oras.land/oras-go/pkg/oras/store.go deleted file mode 100644 index edb835625..000000000 --- a/src/vendor/oras.land/oras-go/pkg/oras/store.go +++ /dev/null @@ -1,213 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
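For context on the CopyOpt functional options removed above (opts.go), here is a minimal sketch of how they compose. The buildOpts helper is hypothetical and would have to live inside the oras package, since copyOpts is unexported; only copyOptsDefaults and the With* constructors shown above are taken from the removed code.

    // buildOpts applies user-supplied options on top of the defaults, the
    // same way a pull or copy entry point would.
    func buildOpts(opts ...CopyOpt) (*copyOpts, error) {
        o := copyOptsDefaults() // defaults shown above
        for _, opt := range opts {
            if err := opt(o); err != nil {
                return nil, err
            }
        }
        return o, nil
    }

    // A caller might then combine options like:
    //   o, err := buildOpts(
    //       WithAllowedMediaTypes([]string{ocispec.MediaTypeImageManifest}),
    //       WithUserAgent("example-agent"),
    //       WithPullEmptyNameAllowed(),
    //   )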
-*/ - -package oras - -import ( - "context" - "io" - "io/ioutil" - "time" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/remotes" - "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/sync/errgroup" - - orascontent "oras.land/oras-go/pkg/content" -) - -type hybridStore struct { - cache *orascontent.Memory - cachedMediaTypes []string - cacheOnly bool - provider content.Provider - ingester content.Ingester -} - -func newHybridStoreFromPusher(pusher remotes.Pusher, cachedMediaTypes []string, cacheOnly bool) *hybridStore { - // construct an ingester from a pusher - ingester := pusherIngester{ - pusher: pusher, - } - return &hybridStore{ - cache: orascontent.NewMemory(), - cachedMediaTypes: cachedMediaTypes, - ingester: ingester, - cacheOnly: cacheOnly, - } -} - -func (s *hybridStore) Set(desc ocispec.Descriptor, content []byte) { - s.cache.Set(desc, content) -} - -func (s *hybridStore) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { - reader, err := s.cache.Fetch(ctx, desc) - if err == nil { - return reader, err - } - if s.provider != nil { - rat, err := s.provider.ReaderAt(ctx, desc) - return ioutil.NopCloser(orascontent.NewReaderAtWrapper(rat)), err - } - return nil, err -} - -func (s *hybridStore) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) { - return s.Writer(ctx, content.WithDescriptor(desc)) -} - -// Writer begins or resumes the active writer identified by desc -func (s *hybridStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { - var wOpts content.WriterOpts - for _, opt := range opts { - if err := opt(&wOpts); err != nil { - return nil, err - } - } - - if isAllowedMediaType(wOpts.Desc.MediaType, s.cachedMediaTypes...) || s.ingester == nil { - pusher, err := s.cache.Pusher(ctx, "") - if err != nil { - return nil, err - } - cacheWriter, err := pusher.Push(ctx, wOpts.Desc) - if err != nil { - return nil, err - } - // if we cache it only, do not pass it through - if s.cacheOnly { - return cacheWriter, nil - } - ingesterWriter, err := s.ingester.Writer(ctx, opts...) - switch { - case err == nil: - return newTeeWriter(wOpts.Desc, cacheWriter, ingesterWriter), nil - case errdefs.IsAlreadyExists(err): - return cacheWriter, nil - } - return nil, err - } - return s.ingester.Writer(ctx, opts...) 
-} - -// teeWriter tees the content to one or more content.Writer -type teeWriter struct { - writers []content.Writer - digester digest.Digester - status content.Status -} - -func newTeeWriter(desc ocispec.Descriptor, writers ...content.Writer) *teeWriter { - now := time.Now() - return &teeWriter{ - writers: writers, - digester: digest.Canonical.Digester(), - status: content.Status{ - Total: desc.Size, - StartedAt: now, - UpdatedAt: now, - }, - } -} - -func (t *teeWriter) Close() error { - g := new(errgroup.Group) - for _, w := range t.writers { - w := w // closure issues, see https://golang.org/doc/faq#closures_and_goroutines - g.Go(func() error { - return w.Close() - }) - } - return g.Wait() -} - -func (t *teeWriter) Write(p []byte) (n int, err error) { - g := new(errgroup.Group) - for _, w := range t.writers { - w := w // closure issues, see https://golang.org/doc/faq#closures_and_goroutines - g.Go(func() error { - n, err := w.Write(p[:]) - if err != nil { - return err - } - if n != len(p) { - return io.ErrShortWrite - } - return nil - }) - } - err = g.Wait() - n = len(p) - if err != nil { - return n, err - } - _, _ = t.digester.Hash().Write(p[:n]) - t.status.Offset += int64(len(p)) - t.status.UpdatedAt = time.Now() - - return n, nil -} - -// Digest may return empty digest or panics until committed. -func (t *teeWriter) Digest() digest.Digest { - return t.digester.Digest() -} - -func (t *teeWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { - g := new(errgroup.Group) - for _, w := range t.writers { - w := w // closure issues, see https://golang.org/doc/faq#closures_and_goroutines - g.Go(func() error { - return w.Commit(ctx, size, expected, opts...) - }) - } - return g.Wait() -} - -// Status returns the current state of write -func (t *teeWriter) Status() (content.Status, error) { - return t.status, nil -} - -// Truncate updates the size of the target blob -func (t *teeWriter) Truncate(size int64) error { - g := new(errgroup.Group) - for _, w := range t.writers { - w := w // closure issues, see https://golang.org/doc/faq#closures_and_goroutines - g.Go(func() error { - return w.Truncate(size) - }) - } - return g.Wait() -} - -// pusherIngester simple wrapper to get an ingester from a pusher -type pusherIngester struct { - pusher remotes.Pusher -} - -func (p pusherIngester) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { - var wOpts content.WriterOpts - for _, opt := range opts { - if err := opt(&wOpts); err != nil { - return nil, err - } - } - return p.pusher.Push(ctx, wOpts.Desc) -} diff --git a/src/vendor/oras.land/oras-go/pkg/registry/reference.go b/src/vendor/oras.land/oras-go/pkg/registry/reference.go deleted file mode 100644 index 299651cd1..000000000 --- a/src/vendor/oras.land/oras-go/pkg/registry/reference.go +++ /dev/null @@ -1,177 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
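The teeWriter removed above duplicates every write to all underlying writers through an errgroup. Below is the same fan-out pattern in isolation, a minimal sketch that uses plain io.Writer values rather than containerd's content.Writer, so it is illustrative only.

    package main

    import (
        "bytes"
        "fmt"
        "io"

        "golang.org/x/sync/errgroup"
    )

    // fanOutWrite mirrors teeWriter.Write: send the same buffer to every
    // writer concurrently and fail on the first error or short write.
    func fanOutWrite(writers []io.Writer, p []byte) error {
        g := new(errgroup.Group)
        for _, w := range writers {
            w := w // capture the loop variable for the goroutine
            g.Go(func() error {
                n, err := w.Write(p)
                if err != nil {
                    return err
                }
                if n != len(p) {
                    return io.ErrShortWrite
                }
                return nil
            })
        }
        return g.Wait()
    }

    func main() {
        var a, b bytes.Buffer
        if err := fanOutWrite([]io.Writer{&a, &b}, []byte("hello")); err != nil {
            panic(err)
        }
        fmt.Println(a.String(), b.String()) // hello hello
    }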
-*/ -package registry - -import ( - "fmt" - "net/url" - "regexp" - "strings" - - "github.com/opencontainers/go-digest" - errdef "oras.land/oras-go/pkg/content" -) - -// regular expressions for components. -var ( - // repositoryRegexp is adapted from the distribution implementation. - // The repository name set under OCI distribution spec is a subset of the - // the docker spec. For maximum compability, the docker spec is verified at - // the client side. Further check is left to the server side. - // References: - // - https://github.com/distribution/distribution/blob/v2.7.1/reference/regexp.go#L53 - // - https://github.com/opencontainers/distribution-spec/blob/main/spec.md#pulling-manifests - repositoryRegexp = regexp.MustCompile(`^[a-z0-9]+(?:(?:[._]|__|[-]*)[a-z0-9]+)*(?:/[a-z0-9]+(?:(?:[._]|__|[-]*)[a-z0-9]+)*)*$`) - - // tagRegexp checks the tag name. - // The docker and OCI spec have the same regular expression. - // Reference: https://github.com/opencontainers/distribution-spec/blob/main/spec.md#pulling-manifests - tagRegexp = regexp.MustCompile(`^[\w][\w.-]{0,127}$`) -) - -// Reference references to a descriptor in the registry. -type Reference struct { - // Registry is the name of the registry. - // It is usually the domain name of the registry optionally with a port. - Registry string - - // Repository is the name of the repository. - Repository string - - // Reference is the reference of the object in the repository. - // A reference can be a tag or a digest. - Reference string -} - -// ParseReference parses a string into a artifact reference. -// If the reference contains both the tag and the digest, the tag will be -// dropped. -// Digest is recognized only if the corresponding algorithm is available. -func ParseReference(raw string) (Reference, error) { - parts := strings.SplitN(raw, "/", 2) - if len(parts) == 1 { - return Reference{}, fmt.Errorf("%w: missing repository", errdef.ErrInvalidReference) - } - registry, path := parts[0], parts[1] - var repository string - var reference string - if index := strings.Index(path, "@"); index != -1 { - // digest found - repository = path[:index] - reference = path[index+1:] - - // drop tag since the digest is present. - if index := strings.Index(repository, ":"); index != -1 { - repository = repository[:index] - } - } else if index := strings.Index(path, ":"); index != -1 { - // tag found - repository = path[:index] - reference = path[index+1:] - } else { - // empty reference - repository = path - } - res := Reference{ - Registry: registry, - Repository: repository, - Reference: reference, - } - if err := res.Validate(); err != nil { - return Reference{}, err - } - return res, nil -} - -// Validate validates the entire reference. -func (r Reference) Validate() error { - err := r.ValidateRegistry() - if err != nil { - return err - } - err = r.ValidateRepository() - if err != nil { - return err - } - return r.ValidateReference() -} - -// ValidateRegistry validates the registry. -func (r Reference) ValidateRegistry() error { - uri, err := url.ParseRequestURI("dummy://" + r.Registry) - if err != nil || uri.Host != r.Registry { - return fmt.Errorf("%w: invalid registry", errdef.ErrInvalidReference) - } - return nil -} - -// ValidateRepository validates the repository. -func (r Reference) ValidateRepository() error { - if !repositoryRegexp.MatchString(r.Repository) { - return fmt.Errorf("%w: invalid repository", errdef.ErrInvalidReference) - } - return nil -} - -// ValidateReference validates the reference. 
-func (r Reference) ValidateReference() error { - if r.Reference == "" { - return nil - } - if _, err := r.Digest(); err == nil { - return nil - } - if !tagRegexp.MatchString(r.Reference) { - return fmt.Errorf("%w: invalid tag", errdef.ErrInvalidReference) - } - return nil -} - -// Host returns the host name of the registry. -func (r Reference) Host() string { - if r.Registry == "docker.io" { - return "registry-1.docker.io" - } - return r.Registry -} - -// ReferenceOrDefault returns the reference or the default reference if empty. -func (r Reference) ReferenceOrDefault() string { - if r.Reference == "" { - return "latest" - } - return r.Reference -} - -// Digest returns the reference as a digest. -func (r Reference) Digest() (digest.Digest, error) { - return digest.Parse(r.Reference) -} - -// String implements `fmt.Stringer` and returns the reference string. -// The resulted string is meaningful only if the reference is valid. -func (r Reference) String() string { - if r.Repository == "" { - return r.Registry - } - ref := r.Registry + "/" + r.Repository - if r.Reference == "" { - return ref - } - if d, err := r.Digest(); err == nil { - return ref + "@" + d.String() - } - return ref + ":" + r.Reference -} diff --git a/src/vendor/oras.land/oras-go/pkg/registry/remote/auth/cache.go b/src/vendor/oras.land/oras-go/pkg/registry/remote/auth/cache.go deleted file mode 100644 index 070588373..000000000 --- a/src/vendor/oras.land/oras-go/pkg/registry/remote/auth/cache.go +++ /dev/null @@ -1,158 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package auth - -import ( - "context" - "strings" - "sync" - - errdef "oras.land/oras-go/pkg/content" - "oras.land/oras-go/pkg/registry/remote/internal/syncutil" -) - -// DefaultCache is the sharable cache used by DefaultClient. -var DefaultCache Cache = NewCache() - -// Cache caches the auth-scheme and auth-token for the "Authorization" header in -// accessing the remote registry. -// Precisely, the header is `Authorization: auth-scheme auth-token`. -// The `auth-token` is a generic term as `token68` in RFC 7235 section 2.1. -type Cache interface { - // GetScheme returns the auth-scheme part cached for the given registry. - // A single registry is assumed to have a consistent scheme. - // If a registry has different schemes per path, the auth client is still - // workable. However, the cache may not be effective as the cache cannot - // correctly guess the scheme. - GetScheme(ctx context.Context, registry string) (Scheme, error) - - // GetToken returns the auth-token part cached for the given registry of a - // given scheme. - // The underlying implementation MAY cache the token for all schemes for the - // given registry. - GetToken(ctx context.Context, registry string, scheme Scheme, key string) (string, error) - - // Set fetches the token using the given fetch function and caches the token - // for the given scheme with the given key for the given registry. - // The return values of the fetch function is returned by this function. 
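A minimal usage sketch of the Reference type removed above, assuming the pre-removal import path oras.land/oras-go/pkg/registry; the registry and repository names are placeholders.

    package main

    import (
        "fmt"

        "oras.land/oras-go/pkg/registry"
    )

    func main() {
        // Parse a fully qualified reference; the tag is kept because no
        // digest is present.
        ref, err := registry.ParseReference("registry.example.com/library/hello-world:v1")
        if err != nil {
            panic(err)
        }
        fmt.Println(ref.Registry)   // registry.example.com
        fmt.Println(ref.Repository) // library/hello-world
        fmt.Println(ref.Reference)  // v1
        fmt.Println(ref.Host())     // registry.example.com (docker.io maps to registry-1.docker.io)
        fmt.Println(ref.String())   // registry.example.com/library/hello-world:v1
    }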
- // The underlying implementation MAY combine the fetch operation if the Set - // function is invoked multiple times at the same time. - Set(ctx context.Context, registry string, scheme Scheme, key string, fetch func(context.Context) (string, error)) (string, error) -} - -// cacheEntry is a cache entry for a single registry. -type cacheEntry struct { - scheme Scheme - tokens sync.Map // map[string]string -} - -// concurrentCache is a cache suitable for concurrent invocation. -type concurrentCache struct { - status sync.Map // map[string]*syncutil.Once - cache sync.Map // map[string]*cacheEntry -} - -// NewCache creates a new go-routine safe cache instance. -func NewCache() Cache { - return &concurrentCache{} -} - -// GetScheme returns the auth-scheme part cached for the given registry. -func (cc *concurrentCache) GetScheme(ctx context.Context, registry string) (Scheme, error) { - entry, ok := cc.cache.Load(registry) - if !ok { - return SchemeUnknown, errdef.ErrNotFound - } - return entry.(*cacheEntry).scheme, nil -} - -// GetToken returns the auth-token part cached for the given registry of a given -// scheme. -func (cc *concurrentCache) GetToken(ctx context.Context, registry string, scheme Scheme, key string) (string, error) { - entryValue, ok := cc.cache.Load(registry) - if !ok { - return "", errdef.ErrNotFound - } - entry := entryValue.(*cacheEntry) - if entry.scheme != scheme { - return "", errdef.ErrNotFound - } - if token, ok := entry.tokens.Load(key); ok { - return token.(string), nil - } - return "", errdef.ErrNotFound -} - -// Set fetches the token using the given fetch function and caches the token -// for the given scheme with the given key for the given registry. -// Set combines the fetch operation if the Set is invoked multiple times at the -// same time. -func (cc *concurrentCache) Set(ctx context.Context, registry string, scheme Scheme, key string, fetch func(context.Context) (string, error)) (string, error) { - // fetch token - statusKey := strings.Join([]string{ - registry, - scheme.String(), - key, - }, " ") - statusValue, _ := cc.status.LoadOrStore(statusKey, syncutil.NewOnce()) - fetchOnce := statusValue.(*syncutil.Once) - fetchedFirst, result, err := fetchOnce.Do(ctx, func() (interface{}, error) { - return fetch(ctx) - }) - if fetchedFirst { - cc.status.Delete(statusKey) - } - if err != nil { - return "", err - } - token := result.(string) - if !fetchedFirst { - return token, nil - } - - // cache token - newEntry := &cacheEntry{ - scheme: scheme, - } - entryValue, exists := cc.cache.LoadOrStore(registry, newEntry) - entry := entryValue.(*cacheEntry) - if exists && entry.scheme != scheme { - // there is a scheme change, which is not expected in most scenarios. - // force invalidating all previous cache. - entry = newEntry - cc.cache.Store(registry, entry) - } - entry.tokens.Store(key, token) - - return token, nil -} - -// noCache is a cache implementation that does not do cache at all. -type noCache struct{} - -// GetScheme always returns not found error as it has no cache. -func (noCache) GetScheme(ctx context.Context, registry string) (Scheme, error) { - return SchemeUnknown, errdef.ErrNotFound -} - -// GetToken always returns not found error as it has no cache. -func (noCache) GetToken(ctx context.Context, registry string, scheme Scheme, key string) (string, error) { - return "", errdef.ErrNotFound -} - -// Set calls fetch directly without caching. 
-func (noCache) Set(ctx context.Context, registry string, scheme Scheme, key string, fetch func(context.Context) (string, error)) (string, error) { - return fetch(ctx) -} diff --git a/src/vendor/oras.land/oras-go/pkg/registry/remote/auth/challenge.go b/src/vendor/oras.land/oras-go/pkg/registry/remote/auth/challenge.go deleted file mode 100644 index e2d28a11e..000000000 --- a/src/vendor/oras.land/oras-go/pkg/registry/remote/auth/challenge.go +++ /dev/null @@ -1,166 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package auth - -import ( - "strconv" - "strings" -) - -// Scheme define the authentication method. -type Scheme byte - -const ( - // SchemeUnknown represents unknown or unsupported schemes - SchemeUnknown Scheme = iota - - // SchemeBasic represents the "Basic" HTTP authentication scheme. - // Reference: https://tools.ietf.org/html/rfc7617 - SchemeBasic - - // SchemeBearer represents the Bearer token in OAuth 2.0. - // Reference: https://tools.ietf.org/html/rfc6750 - SchemeBearer -) - -// parseScheme parse the authentication scheme from the given string -// case-insensitively. -func parseScheme(scheme string) Scheme { - switch { - case strings.EqualFold(scheme, "basic"): - return SchemeBasic - case strings.EqualFold(scheme, "bearer"): - return SchemeBearer - } - return SchemeUnknown -} - -// String return the string for the scheme. -func (s Scheme) String() string { - switch s { - case SchemeBasic: - return "Basic" - case SchemeBearer: - return "Bearer" - } - return "Unknown" -} - -// parseChallenge parses the "WWW-Authenticate" header returned by the remote -// registry, and extracts parameters if scheme is Bearer. -// References: -// - https://docs.docker.com/registry/spec/auth/token/#how-to-authenticate -// - https://tools.ietf.org/html/rfc7235#section-2.1 -func parseChallenge(header string) (scheme Scheme, params map[string]string) { - // as defined in RFC 7235 section 2.1, we have - // challenge = auth-scheme [ 1*SP ( token68 / #auth-param ) ] - // auth-scheme = token - // auth-param = token BWS "=" BWS ( token / quoted-string ) - // - // since we focus parameters only on Bearer, we have - // challenge = auth-scheme [ 1*SP #auth-param ] - schemeString, rest := parseToken(header) - scheme = parseScheme(schemeString) - - // fast path for non bearer challenge - if scheme != SchemeBearer { - return - } - - // parse params for bearer auth. 
- // combining RFC 7235 section 2.1 with RFC 7230 section 7, we have - // #auth-param => auth-param *( OWS "," OWS auth-param ) - var key, value string - for { - key, rest = parseToken(skipSpace(rest)) - if key == "" { - return - } - - rest = skipSpace(rest) - if rest == "" || rest[0] != '=' { - return - } - rest = skipSpace(rest[1:]) - if rest == "" { - return - } - - if rest[0] == '"' { - prefix, err := strconv.QuotedPrefix(rest) - if err != nil { - return - } - value, err = strconv.Unquote(prefix) - if err != nil { - return - } - rest = rest[len(prefix):] - } else { - value, rest = parseToken(rest) - if value == "" { - return - } - } - if params == nil { - params = map[string]string{ - key: value, - } - } else { - params[key] = value - } - - rest = skipSpace(rest) - if rest == "" || rest[0] != ',' { - return - } - rest = rest[1:] - } -} - -// isNotTokenChar reports whether rune is not a `tchar` defined in RFC 7230 -// section 3.2.6. -func isNotTokenChar(r rune) bool { - // tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" - // / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" - // / DIGIT / ALPHA - // ; any VCHAR, except delimiters - return (r < 'A' || r > 'Z') && (r < 'a' || r > 'z') && - (r < '0' || r > '9') && !strings.ContainsRune("!#$%&'*+-.^_`|~", r) -} - -// parseToken finds the next token from the given string. If no token found, -// an empty token is returned and the whole of the input is returned in rest. -// Note: Since token = 1*tchar, empty string is not a valid token. -func parseToken(s string) (token, rest string) { - if i := strings.IndexFunc(s, isNotTokenChar); i != -1 { - return s[:i], s[i:] - } - return s, "" -} - -// skipSpace skips "bad" whitespace (BWS) defined in RFC 7230 section 3.2.3. -func skipSpace(s string) string { - // OWS = *( SP / HTAB ) - // ; optional whitespace - // BWS = OWS - // ; "bad" whitespace - if i := strings.IndexFunc(s, func(r rune) bool { - return r != ' ' && r != '\t' - }); i != -1 { - return s[i:] - } - return s -} diff --git a/src/vendor/oras.land/oras-go/pkg/registry/remote/auth/client.go b/src/vendor/oras.land/oras-go/pkg/registry/remote/auth/client.go deleted file mode 100644 index ef1c9479d..000000000 --- a/src/vendor/oras.land/oras-go/pkg/registry/remote/auth/client.go +++ /dev/null @@ -1,367 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package auth - -import ( - "context" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strings" - - "oras.land/oras-go/pkg/registry/remote/internal/errutil" -) - -// DefaultClient is the default auth-decorated client. -var DefaultClient = &Client{ - Header: http.Header{ - "User-Agent": {"oras-go"}, - }, - Cache: DefaultCache, -} - -// maxResponseBytes specifies the default limit on how many response bytes are -// allowed in the server's response from authorization service servers. -// A typical response message from authorization service servers is around 1 to -// 4 KiB. 
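The parseChallenge function above decides which scheme the registry demands and, for Bearer, which parameters feed the token fetch. A small in-package illustration (parseChallenge is unexported, so this would sit in the auth package; the header value is a typical example, not taken from this patch):

    header := `Bearer realm="https://auth.example.com/token",service="registry.example.com",scope="repository:library/hello-world:pull"`
    scheme, params := parseChallenge(header)
    // scheme == SchemeBearer
    // params["realm"]   == "https://auth.example.com/token"
    // params["service"] == "registry.example.com"
    // params["scope"]   == "repository:library/hello-world:pull"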
Since the size of a token must be smaller than the HTTP header size -// limit, which is usually 16 KiB. As specified by the distribution, the -// response may contain 2 identical tokens, that is, 16 x 2 = 32 KiB. -// Hence, 128 KiB should be sufficient. -// References: https://docs.docker.com/registry/spec/auth/token/ -var maxResponseBytes int64 = 128 * 1024 // 128 KiB - -// defaultClientID specifies the default client ID used in OAuth2. -// See also ClientID. -var defaultClientID = "oras-go" - -// Client is an auth-decorated HTTP client. -// Its zero value is a usable client that uses http.DefaultClient with no cache. -type Client struct { - // Client is the underlying HTTP client used to access the remote - // server. - // If nil, http.DefaultClient is used. - Client *http.Client - - // Header contains the custom headers to be added to each request. - Header http.Header - - // Credential specifies the function for resolving the credential for the - // given registry (i.e. host:port). - // `EmptyCredential` is a valid return value and should not be considered as - // an error. - // If nil, the credential is always resolved to `EmptyCredential`. - Credential func(context.Context, string) (Credential, error) - - // Cache caches credentials for direct accessing the remote registry. - // If nil, no cache is used. - Cache Cache - - // ClientID used in fetching OAuth2 token as a required field. - // If empty, a default client ID is used. - // Reference: https://docs.docker.com/registry/spec/auth/oauth/#getting-a-token - ClientID string - - // ForceAttemptOAuth2 controls whether to follow OAuth2 with password grant - // instead the distribution spec when authenticating using username and - // password. - // References: - // - https://docs.docker.com/registry/spec/auth/jwt/ - // - https://docs.docker.com/registry/spec/auth/oauth/ - ForceAttemptOAuth2 bool -} - -// client returns an HTTP client used to access the remote registry. -// http.DefaultClient is return if the client is not configured. -func (c *Client) client() *http.Client { - if c.Client == nil { - return http.DefaultClient - } - return c.Client -} - -// send adds headers to the request and sends the request to the remote server. -func (c *Client) send(req *http.Request) (*http.Response, error) { - for key, values := range c.Header { - req.Header[key] = append(req.Header[key], values...) - } - return c.client().Do(req) -} - -// credential resolves the credential for the given registry. -func (c *Client) credential(ctx context.Context, reg string) (Credential, error) { - if c.Credential == nil { - return EmptyCredential, nil - } - return c.Credential(ctx, reg) -} - -// cache resolves the cache. -// noCache is return if the cache is not configured. -func (c *Client) cache() Cache { - if c.Cache == nil { - return noCache{} - } - return c.Cache -} - -// SetUserAgent sets the user agent for all out-going requests. -func (c *Client) SetUserAgent(userAgent string) { - if c.Header == nil { - c.Header = http.Header{} - } - c.Header.Set("User-Agent", userAgent) -} - -// Do sends the request to the remote server with resolving authentication -// attempted. -// On authentication failure due to bad credential, -// - Do returns error if it fails to fetch token for bearer auth. -// - Do returns the registry response without error for basic auth. 
-func (c *Client) Do(originalReq *http.Request) (*http.Response, error) { - ctx := originalReq.Context() - req := originalReq.Clone(ctx) - - // attempt cached auth token - var attemptedKey string - cache := c.cache() - registry := originalReq.Host - scheme, err := cache.GetScheme(ctx, registry) - if err == nil { - switch scheme { - case SchemeBasic: - token, err := cache.GetToken(ctx, registry, SchemeBasic, "") - if err == nil { - req.Header.Set("Authorization", "Basic "+token) - } - case SchemeBearer: - scopes := GetScopes(ctx) - attemptedKey = strings.Join(scopes, " ") - token, err := cache.GetToken(ctx, registry, SchemeBearer, attemptedKey) - if err == nil { - req.Header.Set("Authorization", "Bearer "+token) - } - } - } - - resp, err := c.send(req) - if err != nil { - return nil, err - } - if resp.StatusCode != http.StatusUnauthorized { - return resp, nil - } - - // attempt again with credentials for recognized schemes - challenge := resp.Header.Get("Www-Authenticate") - scheme, params := parseChallenge(challenge) - switch scheme { - case SchemeBasic: - resp.Body.Close() - - token, err := cache.Set(ctx, registry, SchemeBasic, "", func(ctx context.Context) (string, error) { - return c.fetchBasicAuth(ctx, registry) - }) - if err != nil { - return nil, fmt.Errorf("%s %q: %w", resp.Request.Method, resp.Request.URL, err) - } - - req = originalReq.Clone(ctx) - req.Header.Set("Authorization", "Basic "+token) - case SchemeBearer: - resp.Body.Close() - - // merge hinted scopes with challenged scopes - scopes := GetScopes(ctx) - if scope := params["scope"]; scope != "" { - scopes = append(scopes, strings.Split(scope, " ")...) - scopes = CleanScopes(scopes) - } - key := strings.Join(scopes, " ") - - // attempt the cache again if there is a scope change - if key != attemptedKey { - if token, err := cache.GetToken(ctx, registry, SchemeBearer, key); err == nil { - req = originalReq.Clone(ctx) - req.Header.Set("Authorization", "Bearer "+token) - - resp, err := c.send(req) - if err != nil { - return nil, err - } - if resp.StatusCode != http.StatusUnauthorized { - return resp, nil - } - resp.Body.Close() - } - } - - // attempt with credentials - realm := params["realm"] - service := params["service"] - token, err := cache.Set(ctx, registry, SchemeBearer, key, func(ctx context.Context) (string, error) { - return c.fetchBearerToken(ctx, registry, realm, service, scopes) - }) - if err != nil { - return nil, fmt.Errorf("%s %q: %w", resp.Request.Method, resp.Request.URL, err) - } - - req = originalReq.Clone(ctx) - req.Header.Set("Authorization", "Bearer "+token) - default: - return resp, nil - } - - return c.send(req) -} - -// fetchBasicAuth fetches a basic auth token for the basic challenge. -func (c *Client) fetchBasicAuth(ctx context.Context, registry string) (string, error) { - cred, err := c.credential(ctx, registry) - if err != nil { - return "", fmt.Errorf("failed to resolve credential: %w", err) - } - if cred == EmptyCredential { - return "", errors.New("credential required for basic auth") - } - if cred.Username == "" || cred.Password == "" { - return "", errors.New("missing username or password for basic auth") - } - auth := cred.Username + ":" + cred.Password - return base64.StdEncoding.EncodeToString([]byte(auth)), nil -} - -// fetchBearerToken fetches an access token for the bearer challenge. 
-func (c *Client) fetchBearerToken(ctx context.Context, registry, realm, service string, scopes []string) (string, error) { - cred, err := c.credential(ctx, registry) - if err != nil { - return "", err - } - if cred.AccessToken != "" { - return cred.AccessToken, nil - } - if cred == EmptyCredential || (cred.RefreshToken == "" && !c.ForceAttemptOAuth2) { - return c.fetchDistributionToken(ctx, realm, service, scopes, cred.Username, cred.Password) - } - return c.fetchOAuth2Token(ctx, realm, service, scopes, cred) -} - -// fetchDistributionToken fetches an access token as defined by the distribution -// specification. -// It fetches anonymous tokens if no credential is provided. -// References: -// - https://docs.docker.com/registry/spec/auth/jwt/ -// - https://docs.docker.com/registry/spec/auth/token/ -func (c *Client) fetchDistributionToken(ctx context.Context, realm, service string, scopes []string, username, password string) (string, error) { - req, err := http.NewRequestWithContext(ctx, http.MethodGet, realm, nil) - if err != nil { - return "", err - } - if username != "" || password != "" { - req.SetBasicAuth(username, password) - } - q := req.URL.Query() - if service != "" { - q.Set("service", service) - } - for _, scope := range scopes { - q.Add("scope", scope) - } - req.URL.RawQuery = q.Encode() - - resp, err := c.send(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return "", errutil.ParseErrorResponse(resp) - } - - // As specified in https://docs.docker.com/registry/spec/auth/token/ section - // "Token Response Fields", the token is either in `token` or - // `access_token`. If both present, they are identical. - var result struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - } - lr := io.LimitReader(resp.Body, maxResponseBytes) - if err := json.NewDecoder(lr).Decode(&result); err != nil { - return "", fmt.Errorf("%s %q: failed to decode response: %w", resp.Request.Method, resp.Request.URL, err) - } - if result.AccessToken != "" { - return result.AccessToken, nil - } - if result.Token != "" { - return result.Token, nil - } - return "", fmt.Errorf("%s %q: empty token returned", resp.Request.Method, resp.Request.URL) -} - -// fetchOAuth2Token fetches an OAuth2 access token. 
-// Reference: https://docs.docker.com/registry/spec/auth/oauth/ -func (c *Client) fetchOAuth2Token(ctx context.Context, realm, service string, scopes []string, cred Credential) (string, error) { - form := url.Values{} - if cred.RefreshToken != "" { - form.Set("grant_type", "refresh_token") - form.Set("refresh_token", cred.RefreshToken) - } else if cred.Username != "" && cred.Password != "" { - form.Set("grant_type", "password") - form.Set("username", cred.Username) - form.Set("password", cred.Password) - } else { - return "", errors.New("missing username or password for bearer auth") - } - form.Set("service", service) - clientID := c.ClientID - if clientID == "" { - clientID = defaultClientID - } - form.Set("client_id", clientID) - if len(scopes) != 0 { - form.Set("scope", strings.Join(scopes, " ")) - } - body := strings.NewReader(form.Encode()) - - req, err := http.NewRequestWithContext(ctx, http.MethodPost, realm, body) - if err != nil { - return "", err - } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - - resp, err := c.send(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return "", errutil.ParseErrorResponse(resp) - } - - var result struct { - AccessToken string `json:"access_token"` - } - lr := io.LimitReader(resp.Body, maxResponseBytes) - if err := json.NewDecoder(lr).Decode(&result); err != nil { - return "", fmt.Errorf("%s %q: failed to decode response: %w", resp.Request.Method, resp.Request.URL, err) - } - if result.AccessToken != "" { - return result.AccessToken, nil - } - return "", fmt.Errorf("%s %q: empty token returned", resp.Request.Method, resp.Request.URL) -} diff --git a/src/vendor/oras.land/oras-go/pkg/registry/remote/auth/credential.go b/src/vendor/oras.land/oras-go/pkg/registry/remote/auth/credential.go deleted file mode 100644 index 2cd9f774b..000000000 --- a/src/vendor/oras.land/oras-go/pkg/registry/remote/auth/credential.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package auth - -// EmptyCredential represents an empty credential. -var EmptyCredential Credential - -// Credential contains authentication credentials used to access remote -// registries. -type Credential struct { - // Username is the name of the user for the remote registry. - Username string - - // Password is the secret associated with the username. - Password string - - // RefreshToken is a bearer token to be sent to the authorization service - // for fetching access tokens. - // A refresh token is often referred as an identity token. - // Reference: https://docs.docker.com/registry/spec/auth/oauth/ - RefreshToken string - - // AccessToken is a bearer token to be sent to the registry. - // An access token is often referred as a registry token. 
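A minimal sketch of wiring the Client and Credential types removed above; the registry host, user name, and password are placeholders, and the request will simply fail unless such a registry exists.

    package main

    import (
        "context"
        "fmt"
        "net/http"

        "oras.land/oras-go/pkg/registry/remote/auth"
    )

    func main() {
        client := &auth.Client{
            Cache: auth.NewCache(),
            Credential: func(ctx context.Context, registry string) (auth.Credential, error) {
                // Per-registry credential lookup; EmptyCredential means anonymous.
                if registry == "registry.example.com" {
                    return auth.Credential{Username: "user", Password: "secret"}, nil
                }
                return auth.EmptyCredential, nil
            },
        }
        client.SetUserAgent("example-agent")

        // Do answers Basic/Bearer challenges using Credential and caches the
        // resulting tokens in Cache.
        req, err := http.NewRequestWithContext(context.Background(), http.MethodGet,
            "https://registry.example.com/v2/", nil)
        if err != nil {
            panic(err)
        }
        resp, err := client.Do(req)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        fmt.Println(resp.Status)
    }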
- // Reference: https://docs.docker.com/registry/spec/auth/token/ - AccessToken string -} diff --git a/src/vendor/oras.land/oras-go/pkg/registry/remote/auth/scope.go b/src/vendor/oras.land/oras-go/pkg/registry/remote/auth/scope.go deleted file mode 100644 index 3b865dd60..000000000 --- a/src/vendor/oras.land/oras-go/pkg/registry/remote/auth/scope.go +++ /dev/null @@ -1,231 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package auth - -import ( - "context" - "sort" - "strings" -) - -// Actions used in scopes. -// Reference: https://docs.docker.com/registry/spec/auth/scope/ -const ( - // ActionPull represents generic read access for resources of the repository - // type. - ActionPull = "pull" - - // ActionPush represents generic write access for resources of the - // repository type. - ActionPush = "push" - - // ActionDelete represents the delete permission for resources of the - // repository type. - ActionDelete = "delete" -) - -// ScopeRegistryCatalog is the scope for registry catalog access. -const ScopeRegistryCatalog = "registry:catalog:*" - -// ScopeRepository returns a repository scope with given actions. -// Reference: https://docs.docker.com/registry/spec/auth/scope/ -func ScopeRepository(repository string, actions ...string) string { - actions = cleanActions(actions) - if repository == "" || len(actions) == 0 { - return "" - } - return strings.Join([]string{ - "repository", - repository, - strings.Join(actions, ","), - }, ":") -} - -// scopesContextKey is the context key for scopes. -type scopesContextKey struct{} - -// WithScopes returns a context with scopes added. Scopes are de-duplicated. -// Scopes are used as hints for the auth client to fetch bearer tokens with -// larger scopes. -// For example, uploading blob to the repository "hello-world" does HEAD request -// first then POST and PUT. The HEAD request will return a challenge for scope -// `repository:hello-world:pull`, and the auth client will fetch a token for -// that challenge. Later, the POST request will return a challenge for scope -// `repository:hello-world:push`, and the auth client will fetch a token for -// that challenge again. By invoking `WithScopes()` with the scope -// `repository:hello-world:pull,push`, the auth client with cache is hinted to -// fetch a token via a single token fetch request for all the HEAD, POST, PUT -// requests. -// Passing an empty list of scopes will virtually remove the scope hints in the -// context. -// Reference: https://docs.docker.com/registry/spec/auth/scope/ -func WithScopes(ctx context.Context, scopes ...string) context.Context { - scopes = CleanScopes(scopes) - return context.WithValue(ctx, scopesContextKey{}, scopes) -} - -// AppendScopes appends additional scopes to the existing scopes in the context -// and returns a new context. The resulted scopes are de-duplicated. -// The append operation does modify the existing scope in the context passed in. 
-func AppendScopes(ctx context.Context, scopes ...string) context.Context { - if len(scopes) == 0 { - return ctx - } - return WithScopes(ctx, append(GetScopes(ctx), scopes...)...) -} - -// GetScopes returns the scopes in the context. -func GetScopes(ctx context.Context) []string { - if scopes, ok := ctx.Value(scopesContextKey{}).([]string); ok { - return append([]string(nil), scopes...) - } - return nil -} - -// CleanScopes merges and sort the actions in ascending order if the scopes have -// the same resource type and name. The final scopes are sorted in ascending -// order. In other words, the scopes passed in are de-duplicated and sorted. -// Therefore, the output of this function is deterministic. -// If there is a wildcard `*` in the action, other actions in the same resource -// type and name are ignored. -func CleanScopes(scopes []string) []string { - // fast paths - switch len(scopes) { - case 0: - return nil - case 1: - scope := scopes[0] - i := strings.LastIndex(scope, ":") - if i == -1 { - return []string{scope} - } - actionList := strings.Split(scope[i+1:], ",") - actionList = cleanActions(actionList) - if len(actionList) == 0 { - return nil - } - actions := strings.Join(actionList, ",") - scope = scope[:i+1] + actions - return []string{scope} - } - - // slow path - var result []string - - // merge recognizable scopes - resourceTypes := make(map[string]map[string]map[string]struct{}) - for _, scope := range scopes { - // extract resource type - i := strings.Index(scope, ":") - if i == -1 { - result = append(result, scope) - continue - } - resourceType := scope[:i] - - // extract resource name and actions - rest := scope[i+1:] - i = strings.LastIndex(rest, ":") - if i == -1 { - result = append(result, scope) - continue - } - resourceName := rest[:i] - actions := rest[i+1:] - if actions == "" { - // drop scope since no action found - continue - } - - // add to the intermediate map for de-duplication - namedActions := resourceTypes[resourceType] - if namedActions == nil { - namedActions = make(map[string]map[string]struct{}) - resourceTypes[resourceType] = namedActions - } - actionSet := namedActions[resourceName] - if actionSet == nil { - actionSet = make(map[string]struct{}) - namedActions[resourceName] = actionSet - } - for _, action := range strings.Split(actions, ",") { - if action != "" { - actionSet[action] = struct{}{} - } - } - } - - // reconstruct scopes - for resourceType, namedActions := range resourceTypes { - for resourceName, actionSet := range namedActions { - if len(actionSet) == 0 { - continue - } - var actions []string - for action := range actionSet { - if action == "*" { - actions = []string{"*"} - break - } - actions = append(actions, action) - } - sort.Strings(actions) - scope := resourceType + ":" + resourceName + ":" + strings.Join(actions, ",") - result = append(result, scope) - } - } - - // sort and return - sort.Strings(result) - return result -} - -// cleanActions removes the duplicated actions and sort in ascending order. -// If there is a wildcard `*` in the action, other actions are ignored. 
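A minimal sketch of the scope helpers removed above; the repository name is a placeholder.

    package main

    import (
        "context"
        "fmt"

        "oras.land/oras-go/pkg/registry/remote/auth"
    )

    func main() {
        // Hint that both pull and push will be needed for this repository, so
        // one token fetch can cover a whole upload sequence.
        scope := auth.ScopeRepository("library/hello-world", auth.ActionPull, auth.ActionPush)
        fmt.Println(scope) // repository:library/hello-world:pull,push

        ctx := auth.WithScopes(context.Background(), scope)
        fmt.Println(auth.GetScopes(ctx)) // [repository:library/hello-world:pull,push]

        // Overlapping scopes are merged and sorted deterministically.
        fmt.Println(auth.CleanScopes([]string{
            "repository:library/hello-world:push",
            "repository:library/hello-world:pull",
        })) // [repository:library/hello-world:pull,push]
    }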
-func cleanActions(actions []string) []string { - // fast paths - switch len(actions) { - case 0: - return nil - case 1: - if actions[0] == "" { - return nil - } - return actions - } - - // slow path - sort.Strings(actions) - n := 0 - for i := 0; i < len(actions); i++ { - if actions[i] == "*" { - return []string{"*"} - } - if actions[i] != actions[n] { - n++ - if n != i { - actions[n] = actions[i] - } - } - } - n++ - if actions[0] == "" { - if n == 1 { - return nil - } - return actions[1:n] - } - return actions[:n] -} diff --git a/src/vendor/oras.land/oras-go/pkg/registry/remote/internal/errutil/errors.go b/src/vendor/oras.land/oras-go/pkg/registry/remote/internal/errutil/errors.go deleted file mode 100644 index 201ecce89..000000000 --- a/src/vendor/oras.land/oras-go/pkg/registry/remote/internal/errutil/errors.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package errutil - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "strings" - "unicode" -) - -// maxErrorBytes specifies the default limit on how many response bytes are -// allowed in the server's error response. -// A typical error message is around 200 bytes. Hence, 8 KiB should be -// sufficient. -var maxErrorBytes int64 = 8 * 1024 // 8 KiB - -// requestError contains a single error. -type requestError struct { - Code string `json:"code"` - Message string `json:"message"` -} - -// Error returns a error string describing the error. -func (e requestError) Error() string { - code := strings.Map(func(r rune) rune { - if r == '_' { - return ' ' - } - return unicode.ToLower(r) - }, e.Code) - if e.Message == "" { - return code - } - return fmt.Sprintf("%s: %s", code, e.Message) -} - -// requestErrors is a bundle of requestError. -type requestErrors []requestError - -// Error returns a error string describing the error. -func (errs requestErrors) Error() string { - switch len(errs) { - case 0: - return "" - case 1: - return errs[0].Error() - } - var errmsgs []string - for _, err := range errs { - errmsgs = append(errmsgs, err.Error()) - } - return strings.Join(errmsgs, "; ") -} - -// ParseErrorResponse parses the error returned by the remote registry. -func ParseErrorResponse(resp *http.Response) error { - var errmsg string - var body struct { - Errors requestErrors `json:"errors"` - } - lr := io.LimitReader(resp.Body, maxErrorBytes) - if err := json.NewDecoder(lr).Decode(&body); err == nil && len(body.Errors) > 0 { - errmsg = body.Errors.Error() - } else { - errmsg = http.StatusText(resp.StatusCode) - } - return fmt.Errorf("%s %q: unexpected status code %d: %s", resp.Request.Method, resp.Request.URL, resp.StatusCode, errmsg) -} diff --git a/src/vendor/oras.land/oras-go/pkg/registry/remote/internal/syncutil/once.go b/src/vendor/oras.land/oras-go/pkg/registry/remote/internal/syncutil/once.go deleted file mode 100644 index a685360b6..000000000 --- a/src/vendor/oras.land/oras-go/pkg/registry/remote/internal/syncutil/once.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Copyright The ORAS Authors. 
-Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package syncutil - -import "context" - -// Once is an object that will perform exactly one action. -// Unlike sync.Once, this Once allowes the action to have return values. -type Once struct { - result interface{} - err error - status chan bool -} - -// NewOnce creates a new Once instance. -func NewOnce() *Once { - status := make(chan bool, 1) - status <- true - return &Once{ - status: status, - } -} - -// Do calls the function f if and only if Do is being called first time or all -// previous function calls are cancelled, deadline exceeded, or panicking. -// When `once.Do(ctx, f)` is called multiple times, the return value of the -// first call of the function f is stored, and is directly returned for other -// calls. -// Besides the return value of the function f, including the error, Do returns -// true if the function f passed is called first and is not cancelled, deadline -// exceeded, or panicking. Otherwise, returns false. -func (o *Once) Do(ctx context.Context, f func() (interface{}, error)) (bool, interface{}, error) { - defer func() { - if r := recover(); r != nil { - o.status <- true - panic(r) - } - }() - for { - select { - case inProgress := <-o.status: - if !inProgress { - return false, o.result, o.err - } - result, err := f() - if err == context.Canceled || err == context.DeadlineExceeded { - o.status <- true - return false, nil, err - } - o.result, o.err = result, err - close(o.status) - return true, result, err - case <-ctx.Done(): - return false, nil, ctx.Err() - } - } -} diff --git a/src/vendor/oras.land/oras-go/pkg/registry/remote/repository.go b/src/vendor/oras.land/oras-go/pkg/registry/remote/repository.go deleted file mode 100644 index 0fbd37b66..000000000 --- a/src/vendor/oras.land/oras-go/pkg/registry/remote/repository.go +++ /dev/null @@ -1,171 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package remote - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "strconv" - - errdef "oras.land/oras-go/pkg/content" - "oras.land/oras-go/pkg/registry" - "oras.land/oras-go/pkg/registry/remote/auth" - "oras.land/oras-go/pkg/registry/remote/internal/errutil" -) - -// Client is an interface for a HTTP client. -type Client interface { - // Do sends an HTTP request and returns an HTTP response. - // - // Unlike http.RoundTripper, Client can attempt to interpret the response - // and handle higher-level protocol details such as redirects and - // authentication. 
- // - // Like http.RoundTripper, Client should not modify the request, and must - // always close the request body. - Do(*http.Request) (*http.Response, error) -} - -// Repository is an HTTP client to a remote repository. -type Repository struct { - // Client is the underlying HTTP client used to access the remote registry. - // If nil, auth.DefaultClient is used. - Client Client - - // Reference references the remote repository. - Reference registry.Reference - - // PlainHTTP signals the transport to access the remote repository via HTTP - // instead of HTTPS. - PlainHTTP bool - - // ManifestMediaTypes is used in `Accept` header for resolving manifests from - // references. It is also used in identifying manifests and blobs from - // descriptors. - // If an empty list is present, default manifest media types are used. - ManifestMediaTypes []string - - // TagListPageSize specifies the page size when invoking the tag list API. - // If zero, the page size is determined by the remote registry. - // Reference: https://docs.docker.com/registry/spec/api/#tags - TagListPageSize int - - // ReferrerListPageSize specifies the page size when invoking the Referrers - // API. - // If zero, the page size is determined by the remote registry. - // Reference: https://github.com/oras-project/artifacts-spec/blob/main/manifest-referrers-api.md - ReferrerListPageSize int - - // MaxMetadataBytes specifies a limit on how many response bytes are allowed - // in the server's response to the metadata APIs, such as catalog list, tag - // list, and referrers list. - // If zero, a default (currently 4MiB) is used. - MaxMetadataBytes int64 -} - -// NewRepository creates a client to the remote repository identified by a -// reference. -// Example: localhost:5000/hello-world -func NewRepository(reference string) (*Repository, error) { - ref, err := registry.ParseReference(reference) - if err != nil { - return nil, err - } - return &Repository{ - Reference: ref, - }, nil -} - -// client returns an HTTP client used to access the remote repository. -// A default HTTP client is return if the client is not configured. -func (r *Repository) client() Client { - if r.Client == nil { - return auth.DefaultClient - } - return r.Client -} - -// parseReference validates the reference. -// Both simplified or fully qualified references are accepted as input. -// A fully qualified reference is returned on success. -func (r *Repository) parseReference(reference string) (registry.Reference, error) { - ref, err := registry.ParseReference(reference) - if err != nil { - ref = registry.Reference{ - Registry: r.Reference.Registry, - Repository: r.Reference.Repository, - Reference: reference, - } - if err = ref.ValidateReference(); err != nil { - return registry.Reference{}, err - } - return ref, nil - } - if ref.Registry == r.Reference.Registry && ref.Repository == r.Reference.Repository { - return ref, nil - } - return registry.Reference{}, fmt.Errorf("%w %q: expect %q", errdef.ErrInvalidReference, ref, r.Reference) -} - -// Tags lists the tags available in the repository. -func (r *Repository) Tags(ctx context.Context, fn func(tags []string) error) error { - ctx = withScopeHint(ctx, r.Reference, auth.ActionPull) - url := buildRepositoryTagListURL(r.PlainHTTP, r.Reference) - var err error - for err == nil { - url, err = r.tags(ctx, fn, url) - } - if err != errNoLink { - return err - } - return nil -} - -// tags returns a single page of tag list with the next link. 
-func (r *Repository) tags(ctx context.Context, fn func(tags []string) error, url string) (string, error) { - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) - if err != nil { - return "", err - } - if r.TagListPageSize > 0 { - q := req.URL.Query() - q.Set("n", strconv.Itoa(r.TagListPageSize)) - req.URL.RawQuery = q.Encode() - } - - resp, err := r.client().Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return "", errutil.ParseErrorResponse(resp) - } - var page struct { - Tags []string `json:"tags"` - } - lr := limitReader(resp.Body, r.MaxMetadataBytes) - if err := json.NewDecoder(lr).Decode(&page); err != nil { - return "", fmt.Errorf("%s %q: failed to decode response: %w", resp.Request.Method, resp.Request.URL, err) - } - if err := fn(page.Tags); err != nil { - return "", err - } - - return parseLink(resp) -} diff --git a/src/vendor/oras.land/oras-go/pkg/registry/remote/url.go b/src/vendor/oras.land/oras-go/pkg/registry/remote/url.go deleted file mode 100644 index f01752020..000000000 --- a/src/vendor/oras.land/oras-go/pkg/registry/remote/url.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package remote - -import ( - "fmt" - - "oras.land/oras-go/pkg/registry" -) - -// buildScheme returns HTTP scheme used to access the remote registry. -func buildScheme(plainHTTP bool) string { - if plainHTTP { - return "http" - } - return "https" -} - -// buildRepositoryBaseURL builds the base endpoint of the remote repository. -// Format: :///v2/ -func buildRepositoryBaseURL(plainHTTP bool, ref registry.Reference) string { - return fmt.Sprintf("%s://%s/v2/%s", buildScheme(plainHTTP), ref.Host(), ref.Repository) -} - -// buildRepositoryTagListURL builds the URL for accessing the tag list API. -// Format: :///v2//tags/list -// Reference: https://docs.docker.com/registry/spec/api/#tags -func buildRepositoryTagListURL(plainHTTP bool, ref registry.Reference) string { - return buildRepositoryBaseURL(plainHTTP, ref) + "/tags/list" -} diff --git a/src/vendor/oras.land/oras-go/pkg/registry/remote/utils.go b/src/vendor/oras.land/oras-go/pkg/registry/remote/utils.go deleted file mode 100644 index eaa84f0f4..000000000 --- a/src/vendor/oras.land/oras-go/pkg/registry/remote/utils.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
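A minimal sketch of listing tags with the remote Repository client removed above. The reference "localhost:5000/hello-world" follows the example given in the NewRepository doc comment; PlainHTTP and the page size are arbitrary choices for a local, TLS-less registry.

    package main

    import (
        "context"
        "fmt"

        "oras.land/oras-go/pkg/registry/remote"
    )

    func main() {
        // NewRepository only parses the reference; no connection is made yet.
        repo, err := remote.NewRepository("localhost:5000/hello-world")
        if err != nil {
            panic(err)
        }
        repo.PlainHTTP = true     // local registry without TLS
        repo.TagListPageSize = 50 // ask the registry for 50 tags per page

        // Tags walks every page; the callback runs once per page.
        err = repo.Tags(context.Background(), func(tags []string) error {
            for _, tag := range tags {
                fmt.Println(tag)
            }
            return nil
        })
        if err != nil {
            panic(err)
        }
    }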
-*/ -package remote - -import ( - "context" - "errors" - "fmt" - "io" - "net/http" - "strings" - - "oras.land/oras-go/pkg/registry" - "oras.land/oras-go/pkg/registry/remote/auth" -) - -// defaultMaxMetadataBytes specifies the default limit on how many response -// bytes are allowed in the server's response to the metadata APIs. -// See also: Repository.MaxMetadataBytes -var defaultMaxMetadataBytes int64 = 4 * 1024 * 1024 // 4 MiB - -// errNoLink is returned by parseLink() when no Link header is present. -var errNoLink = errors.New("no Link header in response") - -// parseLink returns the URL of the response's "Link" header, if present. -func parseLink(resp *http.Response) (string, error) { - link := resp.Header.Get("Link") - if link == "" { - return "", errNoLink - } - if link[0] != '<' { - return "", fmt.Errorf("invalid next link %q: missing '<'", link) - } - if i := strings.IndexByte(link, '>'); i == -1 { - return "", fmt.Errorf("invalid next link %q: missing '>'", link) - } else { - link = link[1:i] - } - - linkURL, err := resp.Request.URL.Parse(link) - if err != nil { - return "", err - } - return linkURL.String(), nil -} - -// limitReader returns a Reader that reads from r but stops with EOF after n -// bytes. If n is zero, defaultMaxMetadataBytes is used. -func limitReader(r io.Reader, n int64) io.Reader { - if n == 0 { - n = defaultMaxMetadataBytes - } - return io.LimitReader(r, n) -} - -// withScopeHint adds a hinted scope to the context. -func withScopeHint(ctx context.Context, ref registry.Reference, actions ...string) context.Context { - scope := auth.ScopeRepository(ref.Repository, actions...) - return auth.AppendScopes(ctx, scope) -} diff --git a/src/vendor/oras.land/oras-go/pkg/registry/repository.go b/src/vendor/oras.land/oras-go/pkg/registry/repository.go deleted file mode 100644 index bcffbe8d2..000000000 --- a/src/vendor/oras.land/oras-go/pkg/registry/repository.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package registry - -import ( - "context" -) - -// Repository is an ORAS target and an union of the blob and the manifest CASs. -// As specified by https://docs.docker.com/registry/spec/api/, it is natural to -// assume that content.Resolver interface only works for manifests. Tagging a -// blob may be resulted in an `ErrUnsupported` error. However, this interface -// does not restrict tagging blobs. -// Since a repository is an union of the blob and the manifest CASs, all -// operations defined in the `BlobStore` are executed depending on the media -// type of the given descriptor accordingly. -// Furthurmore, this interface also provides the ability to enforce the -// separation of the blob and the manifests CASs. -type Repository interface { - - // Tags lists the tags available in the repository. - // Since the returned tag list may be paginated by the underlying - // implementation, a function should be passed in to process the paginated - // tag list. 
- // Note: When implemented by a remote registry, the tags API is called. - // However, not all registries supports pagination or conforms the - // specification. - // References: - // - https://github.com/opencontainers/distribution-spec/blob/main/spec.md#content-discovery - // - https://docs.docker.com/registry/spec/api/#tags - // See also `Tags()` in this package. - Tags(ctx context.Context, fn func(tags []string) error) error -} - -// Tags lists the tags available in the repository. -func Tags(ctx context.Context, repo Repository) ([]string, error) { - var res []string - if err := repo.Tags(ctx, func(tags []string) error { - res = append(res, tags...) - return nil - }); err != nil { - return nil, err - } - return res, nil -} diff --git a/src/vendor/oras.land/oras-go/pkg/target/target.go b/src/vendor/oras.land/oras-go/pkg/target/target.go deleted file mode 100644 index 4398f14eb..000000000 --- a/src/vendor/oras.land/oras-go/pkg/target/target.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Copyright The ORAS Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package target - -import ( - "github.com/containerd/containerd/remotes" -) - -// Target represents a place to which one can send/push or retrieve/pull artifacts. -// Anything that implements the Target interface can be used as a place to send or -// retrieve artifacts. -type Target interface { - remotes.Resolver -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/LICENSE b/src/vendor/sigs.k8s.io/kustomize/api/LICENSE deleted file mode 100644 index 8dada3eda..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/annotations/annotations.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/annotations/annotations.go deleted file mode 100644 index 4998f5a3e..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/annotations/annotations.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package annotations - -import ( - "sigs.k8s.io/kustomize/api/filters/filtersutil" - "sigs.k8s.io/kustomize/api/filters/fsslice" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -type annoMap map[string]string - -type Filter struct { - // Annotations is the set of annotations to apply to the inputs - Annotations annoMap `yaml:"annotations,omitempty"` - - // FsSlice contains the FieldSpecs to locate the namespace field - FsSlice types.FsSlice - - trackableSetter filtersutil.TrackableSetter -} - -var _ kio.Filter = Filter{} -var _ kio.TrackableFilter = &Filter{} - -// WithMutationTracker registers a callback which will be invoked each time a field is mutated -func (f *Filter) WithMutationTracker(callback func(key, value, tag string, node *yaml.RNode)) { - f.trackableSetter.WithMutationTracker(callback) -} - -func (f Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { - keys := yaml.SortedMapKeys(f.Annotations) - _, err := kio.FilterAll(yaml.FilterFunc( - func(node *yaml.RNode) (*yaml.RNode, error) { - for _, k := range keys { - if err := node.PipeE(fsslice.Filter{ - FsSlice: f.FsSlice, - SetValue: f.trackableSetter.SetEntry( - k, f.Annotations[k], yaml.NodeTagString), - CreateKind: yaml.MappingNode, // Annotations are MappingNodes. - CreateTag: yaml.NodeTagMap, - }); err != nil { - return nil, err - } - } - return node, nil - })).Filter(nodes) - return nodes, err -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/annotations/doc.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/annotations/doc.go deleted file mode 100644 index b1f6a0b66..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/annotations/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package annotations contains a kio.Filter implementation of the kustomize -// annotations transformer. -package annotations diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/fieldspec/doc.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/fieldspec/doc.go deleted file mode 100644 index 6f643630a..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/fieldspec/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package fieldspec contains a yaml.Filter to modify a resource -// that matches the FieldSpec. -package fieldspec diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/fieldspec/fieldspec.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/fieldspec/fieldspec.go deleted file mode 100644 index 8e4e78ca6..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/fieldspec/fieldspec.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package fieldspec - -import ( - "fmt" - "strings" - - "sigs.k8s.io/kustomize/api/filters/filtersutil" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/resid" - "sigs.k8s.io/kustomize/kyaml/utils" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -var _ yaml.Filter = Filter{} - -// Filter possibly mutates its object argument using a FieldSpec. -// If the object matches the FieldSpec, and the node found -// by following the fieldSpec's path is non-null, this filter calls -// the setValue function on the node at the end of the path. 
-// If any part of the path doesn't exist, the filter returns -// without doing anything and without error, unless it was set -// to create the path. If set to create, it creates a tree of maps -// along the path, and the leaf node gets the setValue called on it. -// Error on GVK mismatch, empty or poorly formed path. -// Filter expect kustomize style paths, not JSON paths. -// Filter stores internal state and should not be reused -type Filter struct { - // FieldSpec contains the path to the value to set. - FieldSpec types.FieldSpec `yaml:"fieldSpec"` - - // Set the field using this function - SetValue filtersutil.SetFn - - // CreateKind defines the type of node to create if the field is not found - CreateKind yaml.Kind - - CreateTag string - - // path keeps internal state about the current path - path []string -} - -func (fltr Filter) Filter(obj *yaml.RNode) (*yaml.RNode, error) { - // check if the FieldSpec applies to the object - if match := isMatchGVK(fltr.FieldSpec, obj); !match { - return obj, nil - } - fltr.path = utils.PathSplitter(fltr.FieldSpec.Path, "/") - if err := fltr.filter(obj); err != nil { - return nil, errors.WrapPrefixf(err, - "considering field '%s' of object %s", fltr.FieldSpec.Path, resid.FromRNode(obj)) - } - return obj, nil -} - -// Recursively called. -func (fltr Filter) filter(obj *yaml.RNode) error { - if len(fltr.path) == 0 { - // found the field -- set its value - return fltr.SetValue(obj) - } - if obj.IsTaggedNull() || obj.IsNil() { - return nil - } - switch obj.YNode().Kind { - case yaml.SequenceNode: - return fltr.handleSequence(obj) - case yaml.MappingNode: - return fltr.handleMap(obj) - case yaml.AliasNode: - return fltr.filter(yaml.NewRNode(obj.YNode().Alias)) - default: - return errors.Errorf("expected sequence or mapping node") - } -} - -// handleMap calls filter on the map field matching the next path element -func (fltr Filter) handleMap(obj *yaml.RNode) error { - fieldName, isSeq := isSequenceField(fltr.path[0]) - if fieldName == "" { - return fmt.Errorf("cannot set or create an empty field name") - } - // lookup the field matching the next path element - var operation yaml.Filter - var kind yaml.Kind - tag := yaml.NodeTagEmpty - switch { - case !fltr.FieldSpec.CreateIfNotPresent || fltr.CreateKind == 0 || isSeq: - // don't create the field if we don't find it - operation = yaml.Lookup(fieldName) - if isSeq { - // The query path thinks this field should be a sequence; - // accept this hint for use later if the tag is NodeTagNull. - kind = yaml.SequenceNode - } - case len(fltr.path) <= 1: - // create the field if it is missing: use the provided node kind - operation = yaml.LookupCreate(fltr.CreateKind, fieldName) - kind = fltr.CreateKind - tag = fltr.CreateTag - default: - // create the field if it is missing: must be a mapping node - operation = yaml.LookupCreate(yaml.MappingNode, fieldName) - kind = yaml.MappingNode - tag = yaml.NodeTagMap - } - - // locate (or maybe create) the field - field, err := obj.Pipe(operation) - if err != nil { - return errors.WrapPrefixf(err, "fieldName: %s", fieldName) - } - if field == nil { - // No error if field not found. 
- return nil - } - - // if the value exists, but is null and kind is set, - // then change it to the creation type - // TODO: update yaml.LookupCreate to support this - if field.YNode().Tag == yaml.NodeTagNull && yaml.IsCreate(kind) { - field.YNode().Kind = kind - field.YNode().Tag = tag - } - - // copy the current fltr and change the path on the copy - var next = fltr - // call filter for the next path element on the matching field - next.path = fltr.path[1:] - return next.filter(field) -} - -// seq calls filter on all sequence elements -func (fltr Filter) handleSequence(obj *yaml.RNode) error { - if err := obj.VisitElements(func(node *yaml.RNode) error { - // set an accurate FieldPath for nested elements - node.AppendToFieldPath(obj.FieldPath()...) - // recurse on each element -- re-allocating a Filter is - // not strictly required, but is more consistent with field - // and less likely to have side effects - // keep the entire path -- it does not contain parts for sequences - return fltr.filter(node) - }); err != nil { - return errors.WrapPrefixf(err, - "visit traversal on path: %v", fltr.path) - } - return nil -} - -// isSequenceField returns true if the path element is for a sequence field. -// isSequence also returns the path element with the '[]' suffix trimmed -func isSequenceField(name string) (string, bool) { - shorter := strings.TrimSuffix(name, "[]") - return shorter, shorter != name -} - -// isMatchGVK returns true if the fs.GVK matches the obj GVK. -func isMatchGVK(fs types.FieldSpec, obj *yaml.RNode) bool { - if kind := obj.GetKind(); fs.Kind != "" && fs.Kind != kind { - // kind doesn't match - return false - } - - // parse the group and version from the apiVersion field - group, version := resid.ParseGroupVersion(obj.GetApiVersion()) - - if fs.Group != "" && fs.Group != group { - // group doesn't match - return false - } - - if fs.Version != "" && fs.Version != version { - // version doesn't match - return false - } - - return true -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/filtersutil/setters.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/filtersutil/setters.go deleted file mode 100644 index f77666785..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/filtersutil/setters.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2022 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package filtersutil - -import ( - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// SetFn is a function that accepts an RNode to possibly modify. -type SetFn func(*yaml.RNode) error - -// SetScalar returns a SetFn to set a scalar value -func SetScalar(value string) SetFn { - return SetEntry("", value, yaml.NodeTagEmpty) -} - -// SetEntry returns a SetFn to set a field or a map entry to a value. -// It can be used with an empty name to set both a value and a tag on a scalar node. -// When setting only a value on a scalar node, use SetScalar instead. 
-func SetEntry(name, value, tag string) SetFn { - n := &yaml.Node{ - Kind: yaml.ScalarNode, - Value: value, - Tag: tag, - } - return func(node *yaml.RNode) error { - return node.PipeE(yaml.FieldSetter{ - Name: name, - Value: yaml.NewRNode(n), - }) - } -} - -type TrackableSetter struct { - // SetValueCallback will be invoked each time a field is set - setValueCallback func(name, value, tag string, node *yaml.RNode) -} - -// WithMutationTracker registers a callback which will be invoked each time a field is mutated -func (s *TrackableSetter) WithMutationTracker(callback func(key, value, tag string, node *yaml.RNode)) *TrackableSetter { - s.setValueCallback = callback - return s -} - -// SetScalar returns a SetFn to set a scalar value. -// if a mutation tracker has been registered, the tracker will be invoked each -// time a scalar is set -func (s TrackableSetter) SetScalar(value string) SetFn { - return s.SetEntry("", value, yaml.NodeTagEmpty) -} - -// SetScalarIfEmpty returns a SetFn to set a scalar value only if it isn't already set. -// If a mutation tracker has been registered, the tracker will be invoked each -// time a scalar is actually set. -func (s TrackableSetter) SetScalarIfEmpty(value string) SetFn { - return s.SetEntryIfEmpty("", value, yaml.NodeTagEmpty) -} - -// SetEntry returns a SetFn to set a field or a map entry to a value. -// It can be used with an empty name to set both a value and a tag on a scalar node. -// When setting only a value on a scalar node, use SetScalar instead. -// If a mutation tracker has been registered, the tracker will be invoked each -// time an entry is set. -func (s TrackableSetter) SetEntry(name, value, tag string) SetFn { - origSetEntry := SetEntry(name, value, tag) - return func(node *yaml.RNode) error { - if s.setValueCallback != nil { - s.setValueCallback(name, value, tag, node) - } - return origSetEntry(node) - } -} - -// SetEntryIfEmpty returns a SetFn to set a field or a map entry to a value only if it isn't already set. -// It can be used with an empty name to set both a value and a tag on a scalar node. -// When setting only a value on a scalar node, use SetScalar instead. -// If a mutation tracker has been registered, the tracker will be invoked each -// time an entry is actually set. -func (s TrackableSetter) SetEntryIfEmpty(key, value, tag string) SetFn { - origSetEntry := SetEntry(key, value, tag) - return func(node *yaml.RNode) error { - if hasExistingValue(node, key) { - return nil - } - if s.setValueCallback != nil { - s.setValueCallback(key, value, tag, node) - } - return origSetEntry(node) - } -} - -func hasExistingValue(node *yaml.RNode, key string) bool { - if node.IsNilOrEmpty() { - return false - } - if err := yaml.ErrorIfInvalid(node, yaml.ScalarNode); err == nil { - return yaml.GetValue(node) != "" - } - entry := node.Field(key) - if entry.IsNilOrEmpty() { - return false - } - return yaml.GetValue(entry.Value) != "" -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/fsslice/doc.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/fsslice/doc.go deleted file mode 100644 index b0f197722..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/fsslice/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package fsslice contains a yaml.Filter to modify a resource if -// it matches one or more FieldSpec entries. 
-package fsslice diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/fsslice/fsslice.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/fsslice/fsslice.go deleted file mode 100644 index 9eb5c1313..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/fsslice/fsslice.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package fsslice - -import ( - "sigs.k8s.io/kustomize/api/filters/fieldspec" - "sigs.k8s.io/kustomize/api/filters/filtersutil" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -var _ yaml.Filter = Filter{} - -// Filter ranges over an FsSlice to modify fields on a single object. -// An FsSlice is a range of FieldSpecs. A FieldSpec is a GVK plus a path. -type Filter struct { - // FieldSpecList list of FieldSpecs to set - FsSlice types.FsSlice `yaml:"fsSlice"` - - // SetValue is called on each field that matches one of the FieldSpecs - SetValue filtersutil.SetFn - - // CreateKind is used to create fields that do not exist - CreateKind yaml.Kind - - // CreateTag is used to set the tag if encountering a null field - CreateTag string -} - -func (fltr Filter) Filter(obj *yaml.RNode) (*yaml.RNode, error) { - for i := range fltr.FsSlice { - // apply this FieldSpec - // create a new filter for each iteration because they - // store internal state about the field paths - _, err := (&fieldspec.Filter{ - FieldSpec: fltr.FsSlice[i], - SetValue: fltr.SetValue, - CreateKind: fltr.CreateKind, - CreateTag: fltr.CreateTag, - }).Filter(obj) - if err != nil { - return nil, err - } - } - return obj, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/iampolicygenerator/doc.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/iampolicygenerator/doc.go deleted file mode 100644 index 3fe20a6d6..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/iampolicygenerator/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2022 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package gkesagenerator contains a kio.Filter that that generates a -// iampolicy-related resources for a given cloud provider -package iampolicygenerator diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/iampolicygenerator/iampolicygenerator.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/iampolicygenerator/iampolicygenerator.go deleted file mode 100644 index 97ea31693..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/iampolicygenerator/iampolicygenerator.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2021 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package iampolicygenerator - -import ( - "fmt" - - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -type Filter struct { - IAMPolicyGenerator types.IAMPolicyGeneratorArgs `json:",inline,omitempty" yaml:",inline,omitempty"` -} - -// Filter adds a GKE service account object to nodes -func (f Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { - switch f.IAMPolicyGenerator.Cloud { - case types.GKE: - IAMPolicyResources, err := f.generateGkeIAMPolicyResources() - if err != nil { - return nil, err - } - nodes = append(nodes, IAMPolicyResources...) 
- default: - return nil, fmt.Errorf("cloud provider %s not supported yet", f.IAMPolicyGenerator.Cloud) - } - return nodes, nil -} - -func (f Filter) generateGkeIAMPolicyResources() ([]*yaml.RNode, error) { - var result []*yaml.RNode - input := fmt.Sprintf(` -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - iam.gke.io/gcp-service-account: %s@%s.iam.gserviceaccount.com - name: %s -`, f.IAMPolicyGenerator.ServiceAccount.Name, - f.IAMPolicyGenerator.ProjectId, - f.IAMPolicyGenerator.KubernetesService.Name) - - if f.IAMPolicyGenerator.Namespace != "" { - input += fmt.Sprintf("\n namespace: %s", f.IAMPolicyGenerator.Namespace) - } - - sa, err := yaml.Parse(input) - if err != nil { - return nil, err - } - - return append(result, sa), nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/doc.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/doc.go deleted file mode 100644 index d919491dd..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package imagetag contains two kio.Filter implementations to cover the -// functionality of the kustomize imagetag transformer. -// -// Filter updates fields based on a FieldSpec and an ImageTag. -// -// LegacyFilter doesn't use a FieldSpec, and instead only updates image -// references if the field is name image and it is underneath a field called -// either containers or initContainers. -package imagetag diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/imagetag.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/imagetag.go deleted file mode 100644 index 24ab99f74..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/imagetag.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package imagetag - -import ( - "sigs.k8s.io/kustomize/api/filters/filtersutil" - "sigs.k8s.io/kustomize/api/filters/fsslice" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// Filter modifies an "image tag", the value used to specify the -// name, tag, version digest etc. of (docker) container images -// used by a pod template. -type Filter struct { - // imageTag is the tag we want to apply to the inputs - // The name of the image is used as a key, and other fields - // can specify a new name, tag, etc. - ImageTag types.Image `json:"imageTag,omitempty" yaml:"imageTag,omitempty"` - - // FsSlice contains the FieldSpecs to locate an image field, - // e.g. Path: "spec/myContainers[]/image" - FsSlice types.FsSlice `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"` - - trackableSetter filtersutil.TrackableSetter -} - -var _ kio.Filter = Filter{} -var _ kio.TrackableFilter = &Filter{} - -// WithMutationTracker registers a callback which will be invoked each time a field is mutated -func (f *Filter) WithMutationTracker(callback func(key, value, tag string, node *yaml.RNode)) { - f.trackableSetter.WithMutationTracker(callback) -} - -func (f Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { - _, err := kio.FilterAll(yaml.FilterFunc(f.filter)).Filter(nodes) - return nodes, err -} - -func (f Filter) filter(node *yaml.RNode) (*yaml.RNode, error) { - // FsSlice is an allowlist, not a denyList, so to deny - // something via configuration a new config mechanism is - // needed. Until then, hardcode it. 
- if f.isOnDenyList(node) { - return node, nil - } - if err := node.PipeE(fsslice.Filter{ - FsSlice: f.FsSlice, - SetValue: imageTagUpdater{ - ImageTag: f.ImageTag, - trackableSetter: f.trackableSetter, - }.SetImageValue, - }); err != nil { - return nil, err - } - return node, nil -} - -func (f Filter) isOnDenyList(node *yaml.RNode) bool { - meta, err := node.GetMeta() - if err != nil { - // A missing 'meta' field will cause problems elsewhere; - // ignore it here to keep the signature simple. - return false - } - // Ignore CRDs - // https://github.com/kubernetes-sigs/kustomize/issues/890 - return meta.Kind == `CustomResourceDefinition` -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/legacy.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/legacy.go deleted file mode 100644 index d6f5b33f2..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/legacy.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package imagetag - -import ( - "sigs.k8s.io/kustomize/api/internal/utils" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// LegacyFilter is an implementation of the kio.Filter interface -// that scans through the provided kyaml data structure and updates -// any values of any image fields that is inside a sequence under -// a field called either containers or initContainers. The field is only -// update if it has a value that matches and image reference and the name -// of the image is a match with the provided ImageTag. -type LegacyFilter struct { - ImageTag types.Image `json:"imageTag,omitempty" yaml:"imageTag,omitempty"` -} - -var _ kio.Filter = LegacyFilter{} - -func (lf LegacyFilter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { - return kio.FilterAll(yaml.FilterFunc(lf.filter)).Filter(nodes) -} - -func (lf LegacyFilter) filter(node *yaml.RNode) (*yaml.RNode, error) { - meta, err := node.GetMeta() - if err != nil { - return nil, err - } - - // We do not make any changes if the type of the resource - // is CustomResourceDefinition. - if meta.Kind == `CustomResourceDefinition` { - return node, nil - } - - fff := findFieldsFilter{ - fields: []string{"containers", "initContainers"}, - fieldCallback: checkImageTagsFn(lf.ImageTag), - } - if err := node.PipeE(fff); err != nil { - return nil, err - } - return node, nil -} - -type fieldCallback func(node *yaml.RNode) error - -// findFieldsFilter is an implementation of the kio.Filter -// interface. It will walk the data structure and look for fields -// that matches the provided list of field names. For each match, -// the value of the field will be passed in as a parameter to the -// provided fieldCallback. 
-// TODO: move this to kyaml/filterutils -type findFieldsFilter struct { - fields []string - - fieldCallback fieldCallback -} - -func (f findFieldsFilter) Filter(obj *yaml.RNode) (*yaml.RNode, error) { - return obj, f.walk(obj) -} - -func (f findFieldsFilter) walk(node *yaml.RNode) error { - switch node.YNode().Kind { - case yaml.MappingNode: - return node.VisitFields(func(n *yaml.MapNode) error { - err := f.walk(n.Value) - if err != nil { - return err - } - key := n.Key.YNode().Value - if utils.StringSliceContains(f.fields, key) { - return f.fieldCallback(n.Value) - } - return nil - }) - case yaml.SequenceNode: - return errors.Wrap(node.VisitElements(f.walk)) - } - return nil -} - -func checkImageTagsFn(imageTag types.Image) fieldCallback { - return func(node *yaml.RNode) error { - if node.YNode().Kind != yaml.SequenceNode { - return nil - } - - return node.VisitElements(func(n *yaml.RNode) error { - // Look up any fields on the provided node that is named - // image. - return n.PipeE(yaml.Get("image"), imageTagUpdater{ - ImageTag: imageTag, - }) - }) - } -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/updater.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/updater.go deleted file mode 100644 index d2a728498..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/imagetag/updater.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package imagetag - -import ( - "sigs.k8s.io/kustomize/api/filters/filtersutil" - - "sigs.k8s.io/kustomize/api/image" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// imageTagUpdater is an implementation of the kio.Filter interface -// that will update the value of the yaml node based on the provided -// ImageTag if the current value matches the format of an image reference. -type imageTagUpdater struct { - Kind string `yaml:"kind,omitempty"` - ImageTag types.Image `yaml:"imageTag,omitempty"` - trackableSetter filtersutil.TrackableSetter -} - -func (u imageTagUpdater) SetImageValue(rn *yaml.RNode) error { - if err := yaml.ErrorIfInvalid(rn, yaml.ScalarNode); err != nil { - return err - } - - value := rn.YNode().Value - - if !image.IsImageMatched(value, u.ImageTag.Name) { - return nil - } - - name, tag, digest := image.Split(value) - if u.ImageTag.NewName != "" { - name = u.ImageTag.NewName - } - - // overriding tag or digest will replace both original tag and digest values - switch { - case u.ImageTag.NewTag != "" && u.ImageTag.Digest != "": - tag = u.ImageTag.NewTag - digest = u.ImageTag.Digest - case u.ImageTag.NewTag != "": - tag = u.ImageTag.NewTag - digest = "" - case u.ImageTag.Digest != "": - tag = "" - digest = u.ImageTag.Digest - case u.ImageTag.TagSuffix != "": - tag += u.ImageTag.TagSuffix - digest = "" - } - - // build final image name - if tag != "" { - name += ":" + tag - } - if digest != "" { - name += "@" + digest - } - - return u.trackableSetter.SetScalar(name)(rn) -} - -func (u imageTagUpdater) Filter(rn *yaml.RNode) (*yaml.RNode, error) { - if err := u.SetImageValue(rn); err != nil { - return nil, err - } - return rn, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/labels/doc.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/labels/doc.go deleted file mode 100644 index 978033c7e..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/labels/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -// Package labels contains a kio.Filter implementation of the kustomize -// labels transformer. -package labels diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/labels/labels.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/labels/labels.go deleted file mode 100644 index b67d4d4b1..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/labels/labels.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package labels - -import ( - "sigs.k8s.io/kustomize/api/filters/filtersutil" - "sigs.k8s.io/kustomize/api/filters/fsslice" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -type labelMap map[string]string - -// Filter sets labels. -type Filter struct { - // Labels is the set of labels to apply to the inputs - Labels labelMap `yaml:"labels,omitempty"` - - // FsSlice identifies the label fields. - FsSlice types.FsSlice - - trackableSetter filtersutil.TrackableSetter -} - -var _ kio.Filter = Filter{} -var _ kio.TrackableFilter = &Filter{} - -// WithMutationTracker registers a callback which will be invoked each time a field is mutated -func (f *Filter) WithMutationTracker(callback func(key, value, tag string, node *yaml.RNode)) { - f.trackableSetter.WithMutationTracker(callback) -} - -func (f Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { - keys := yaml.SortedMapKeys(f.Labels) - _, err := kio.FilterAll(yaml.FilterFunc( - func(node *yaml.RNode) (*yaml.RNode, error) { - for _, k := range keys { - if err := node.PipeE(fsslice.Filter{ - FsSlice: f.FsSlice, - SetValue: f.trackableSetter.SetEntry( - k, f.Labels[k], yaml.NodeTagString), - CreateKind: yaml.MappingNode, // Labels are MappingNodes. - CreateTag: yaml.NodeTagMap, - }); err != nil { - return nil, err - } - } - return node, nil - })).Filter(nodes) - return nodes, err -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/nameref/doc.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/nameref/doc.go deleted file mode 100644 index 78f938933..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/nameref/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2022 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package nameref contains a kio.Filter implementation of the kustomize -// name reference transformer. -package nameref diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/nameref/nameref.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/nameref/nameref.go deleted file mode 100644 index 7549ab700..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/nameref/nameref.go +++ /dev/null @@ -1,413 +0,0 @@ -// Copyright 2022 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package nameref - -import ( - "fmt" - "strings" - - "github.com/pkg/errors" - "sigs.k8s.io/kustomize/api/filters/fieldspec" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/resource" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/resid" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// Filter updates a name references. -type Filter struct { - // Referrer refers to another resource X by X's name. - // E.g. A Deployment can refer to a ConfigMap. - // The Deployment is the Referrer, - // the ConfigMap is the ReferralTarget. - // This filter seeks to repair the reference in Deployment, given - // that the ConfigMap's name may have changed. 
- Referrer *resource.Resource - - // NameFieldToUpdate is the field in the Referrer - // that holds the name requiring an update. - // This is the field to write. - NameFieldToUpdate types.FieldSpec - - // ReferralTarget is the source of the new value for - // the name, always in the 'metadata/name' field. - // This is the field to read. - ReferralTarget resid.Gvk - - // Set of resources to scan to find the ReferralTarget. - ReferralCandidates resmap.ResMap -} - -// At time of writing, in practice this is called with a slice with only -// one entry, the node also referred to be the resource in the Referrer field. -func (f Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { - return kio.FilterAll(yaml.FilterFunc(f.run)).Filter(nodes) -} - -// The node passed in here is the same node as held in Referrer; -// that's how the referrer's name field is updated. -// Currently, however, this filter still needs the extra methods on Referrer -// to consult things like the resource Id, its namespace, etc. -// TODO(3455): No filter should use the Resource api; all information -// about names should come from annotations, with helper methods -// on the RNode object. Resource should get stupider, RNode smarter. -func (f Filter) run(node *yaml.RNode) (*yaml.RNode, error) { - if err := f.confirmNodeMatchesReferrer(node); err != nil { - // sanity check. - return nil, err - } - f.NameFieldToUpdate.Gvk = f.Referrer.GetGvk() - if err := node.PipeE(fieldspec.Filter{ - FieldSpec: f.NameFieldToUpdate, - SetValue: f.set, - }); err != nil { - return nil, errors.Wrapf( - err, "updating name reference in '%s' field of '%s'", - f.NameFieldToUpdate.Path, f.Referrer.CurId().String()) - } - return node, nil -} - -// This function is called on the node found at FieldSpec.Path. -// It's some node in the Referrer. -func (f Filter) set(node *yaml.RNode) error { - if yaml.IsMissingOrNull(node) { - return nil - } - switch node.YNode().Kind { - case yaml.ScalarNode: - return f.setScalar(node) - case yaml.MappingNode: - return f.setMapping(node) - case yaml.SequenceNode: - return applyFilterToSeq(seqFilter{ - setScalarFn: f.setScalar, - setMappingFn: f.setMapping, - }, node) - default: - return fmt.Errorf("node must be a scalar, sequence or map") - } -} - -// This method used when NameFieldToUpdate doesn't lead to -// one scalar field (typically called 'name'), but rather -// leads to a map field (called anything). In this case we -// must complete the field path, looking for both a 'name' -// and a 'namespace' field to help select the proper -// ReferralTarget to read the name and namespace from. -func (f Filter) setMapping(node *yaml.RNode) error { - if node.YNode().Kind != yaml.MappingNode { - return fmt.Errorf("expect a mapping node") - } - nameNode, err := node.Pipe(yaml.FieldMatcher{Name: "name"}) - if err != nil { - return errors.Wrap(err, "trying to match 'name' field") - } - if nameNode == nil { - // This is a _configuration_ error; the field path - // specified in NameFieldToUpdate.Path doesn't resolve - // to a map with a 'name' field, so we have no idea what - // field to update with a new name. - return fmt.Errorf("path config error; no 'name' field in node") - } - candidates, err := f.filterMapCandidatesByNamespace(node) - if err != nil { - return err - } - oldName := nameNode.YNode().Value - // use allNamesAndNamespacesAreTheSame to compare referral candidates for functional identity, - // because we source both name and namespace values from the referral in this case. 
- referral, err := f.selectReferral(oldName, candidates, allNamesAndNamespacesAreTheSame) - if err != nil || referral == nil { - // Nil referral means nothing to do. - return err - } - f.recordTheReferral(referral) - if referral.GetName() == oldName && referral.GetNamespace() == "" { - // The name has not changed, nothing to do. - return nil - } - if err = node.PipeE(yaml.FieldSetter{ - Name: "name", - StringValue: referral.GetName(), - }); err != nil { - return err - } - if referral.GetNamespace() == "" { - // Don't write an empty string into the namespace field, as - // it should not replace the value "default". The empty - // string is handled as a wild card here, not as an implicit - // specification of the "default" k8s namespace. - return nil - } - return node.PipeE(yaml.FieldSetter{ - Name: "namespace", - StringValue: referral.GetNamespace(), - }) -} - -func (f Filter) filterMapCandidatesByNamespace( - node *yaml.RNode) ([]*resource.Resource, error) { - namespaceNode, err := node.Pipe(yaml.FieldMatcher{Name: "namespace"}) - if err != nil { - return nil, errors.Wrap(err, "trying to match 'namespace' field") - } - if namespaceNode == nil { - return f.ReferralCandidates.Resources(), nil - } - namespace := namespaceNode.YNode().Value - nsMap := f.ReferralCandidates.GroupedByOriginalNamespace() - if candidates, ok := nsMap[namespace]; ok { - return candidates, nil - } - nsMap = f.ReferralCandidates.GroupedByCurrentNamespace() - // This could be nil, or an empty list. - return nsMap[namespace], nil -} - -func (f Filter) setScalar(node *yaml.RNode) error { - // use allNamesAreTheSame to compare referral candidates for functional identity, - // because we only source the name from the referral in this case. - referral, err := f.selectReferral( - node.YNode().Value, f.ReferralCandidates.Resources(), allNamesAreTheSame) - if err != nil || referral == nil { - // Nil referral means nothing to do. - return err - } - f.recordTheReferral(referral) - if referral.GetName() == node.YNode().Value { - // The name has not changed, nothing to do. - return nil - } - return node.PipeE(yaml.FieldSetter{StringValue: referral.GetName()}) -} - -// In the resource, make a note that it is referred to by the Referrer. -func (f Filter) recordTheReferral(referral *resource.Resource) { - referral.AppendRefBy(f.Referrer.CurId()) -} - -// getRoleRefGvk returns a Gvk in the roleRef field. Return error -// if the roleRef, roleRef/apiGroup or roleRef/kind is missing. -func getRoleRefGvk(n *resource.Resource) (*resid.Gvk, error) { - roleRef, err := n.Pipe(yaml.Lookup("roleRef")) - if err != nil { - return nil, err - } - if roleRef.IsNil() { - return nil, fmt.Errorf("roleRef cannot be found in %s", n.MustString()) - } - apiGroup, err := roleRef.Pipe(yaml.Lookup("apiGroup")) - if err != nil { - return nil, err - } - if apiGroup.IsNil() { - return nil, fmt.Errorf( - "apiGroup cannot be found in roleRef %s", roleRef.MustString()) - } - kind, err := roleRef.Pipe(yaml.Lookup("kind")) - if err != nil { - return nil, err - } - if kind.IsNil() { - return nil, fmt.Errorf( - "kind cannot be found in roleRef %s", roleRef.MustString()) - } - return &resid.Gvk{ - Group: apiGroup.YNode().Value, - Kind: kind.YNode().Value, - }, nil -} - -// sieveFunc returns true if the resource argument satisfies some criteria. -type sieveFunc func(*resource.Resource) bool - -// doSieve uses a function to accept or ignore resources from a list. -// If list is nil, returns immediately. -// It's a filter obviously, but that term is overloaded here. 
-func doSieve(list []*resource.Resource, fn sieveFunc) (s []*resource.Resource) { - for _, r := range list { - if fn(r) { - s = append(s, r) - } - } - return -} - -func acceptAll(r *resource.Resource) bool { - return true -} - -func previousNameMatches(name string) sieveFunc { - return func(r *resource.Resource) bool { - for _, id := range r.PrevIds() { - if id.Name == name { - return true - } - } - return false - } -} - -func previousIdSelectedByGvk(gvk *resid.Gvk) sieveFunc { - return func(r *resource.Resource) bool { - for _, id := range r.PrevIds() { - if id.IsSelected(gvk) { - return true - } - } - return false - } -} - -// If the we are updating a 'roleRef/name' field, the 'apiGroup' and 'kind' -// fields in the same 'roleRef' map must be considered. -// If either object is cluster-scoped, there can be a referral. -// E.g. a RoleBinding (which exists in a namespace) can refer -// to a ClusterRole (cluster-scoped) object. -// https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole -// Likewise, a ClusterRole can refer to a Secret (in a namespace). -// Objects in different namespaces generally cannot refer to other -// with some exceptions (e.g. RoleBinding and ServiceAccount are both -// namespaceable, but the former can refer to accounts in other namespaces). -func (f Filter) roleRefFilter() sieveFunc { - if !strings.HasSuffix(f.NameFieldToUpdate.Path, "roleRef/name") { - return acceptAll - } - roleRefGvk, err := getRoleRefGvk(f.Referrer) - if err != nil { - return acceptAll - } - return previousIdSelectedByGvk(roleRefGvk) -} - -func prefixSuffixEquals(other resource.ResCtx) sieveFunc { - return func(r *resource.Resource) bool { - return r.PrefixesSuffixesEquals(other) - } -} - -func (f Filter) sameCurrentNamespaceAsReferrer() sieveFunc { - referrerCurId := f.Referrer.CurId() - if referrerCurId.IsClusterScoped() { - // If the referrer is cluster-scoped, let anything through. - return acceptAll - } - return func(r *resource.Resource) bool { - if r.CurId().IsClusterScoped() { - // Allow cluster-scoped through. - return true - } - if r.GetKind() == "ServiceAccount" { - // Allow service accounts through, even though they - // are in a namespace. A RoleBinding in another namespace - // can reference them. - return true - } - return referrerCurId.IsNsEquals(r.CurId()) - } -} - -// selectReferral picks the best referral from a list of candidates. -func (f Filter) selectReferral( - // The name referral that may need to be updated. - oldName string, - candidates []*resource.Resource, - // function that returns whether two referrals are identical for the purposes of the transformation - candidatesIdentical func(resources []*resource.Resource) bool) (*resource.Resource, error) { - candidates = doSieve(candidates, previousNameMatches(oldName)) - candidates = doSieve(candidates, previousIdSelectedByGvk(&f.ReferralTarget)) - candidates = doSieve(candidates, f.roleRefFilter()) - candidates = doSieve(candidates, f.sameCurrentNamespaceAsReferrer()) - if len(candidates) == 1 { - return candidates[0], nil - } - candidates = doSieve(candidates, prefixSuffixEquals(f.Referrer)) - if len(candidates) == 1 { - return candidates[0], nil - } - if len(candidates) == 0 { - return nil, nil - } - if candidatesIdentical(candidates) { - // Just take the first one. 
- return candidates[0], nil - } - ids := getIds(candidates) - return nil, fmt.Errorf("found multiple possible referrals: %s\n%s", ids, f.failureDetails(candidates)) -} - -func (f Filter) failureDetails(resources []*resource.Resource) string { - msg := strings.Builder{} - msg.WriteString(fmt.Sprintf("\n**** Too many possible referral targets to referrer:\n%s\n", f.Referrer.MustYaml())) - for i, r := range resources { - msg.WriteString(fmt.Sprintf("--- possible referral %d:\n%s\n", i, r.MustYaml())) - } - return msg.String() -} - -func allNamesAreTheSame(resources []*resource.Resource) bool { - name := resources[0].GetName() - for i := 1; i < len(resources); i++ { - if name != resources[i].GetName() { - return false - } - } - return true -} - -func allNamesAndNamespacesAreTheSame(resources []*resource.Resource) bool { - name := resources[0].GetName() - namespace := resources[0].GetNamespace() - for i := 1; i < len(resources); i++ { - if name != resources[i].GetName() || namespace != resources[i].GetNamespace() { - return false - } - } - return true -} - -func getIds(rs []*resource.Resource) string { - var result []string - for _, r := range rs { - result = append(result, r.CurId().String()) - } - return strings.Join(result, ", ") -} - -func checkEqual(k, a, b string) error { - if a != b { - return fmt.Errorf( - "node-referrerOriginal '%s' mismatch '%s' != '%s'", - k, a, b) - } - return nil -} - -func (f Filter) confirmNodeMatchesReferrer(node *yaml.RNode) error { - meta, err := node.GetMeta() - if err != nil { - return err - } - gvk := f.Referrer.GetGvk() - if err = checkEqual( - "APIVersion", meta.APIVersion, gvk.ApiVersion()); err != nil { - return err - } - if err = checkEqual( - "Kind", meta.Kind, gvk.Kind); err != nil { - return err - } - if err = checkEqual( - "Name", meta.Name, f.Referrer.GetName()); err != nil { - return err - } - if err = checkEqual( - "Namespace", meta.Namespace, f.Referrer.GetNamespace()); err != nil { - return err - } - return nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/nameref/seqfilter.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/nameref/seqfilter.go deleted file mode 100644 index 0caab4c9b..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/nameref/seqfilter.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2022 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package nameref - -import ( - "fmt" - - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -type setFn func(*yaml.RNode) error - -type seqFilter struct { - setScalarFn setFn - setMappingFn setFn -} - -func (sf seqFilter) Filter(node *yaml.RNode) (*yaml.RNode, error) { - if yaml.IsMissingOrNull(node) { - return node, nil - } - switch node.YNode().Kind { - case yaml.ScalarNode: - // Kind: Role/ClusterRole - // FieldSpec is rules.resourceNames - err := sf.setScalarFn(node) - return node, err - case yaml.MappingNode: - // Kind: RoleBinding/ClusterRoleBinding - // FieldSpec is subjects - // Note: The corresponding fieldSpec had been changed from - // from path: subjects/name to just path: subjects. 
This is - // what get mutatefield to request the mapping of the whole - // map containing namespace and name instead of just a simple - // string field containing the name - err := sf.setMappingFn(node) - return node, err - default: - return node, fmt.Errorf( - "%#v is expected to be either a string or a map of string", node) - } -} - -// applyFilterToSeq will apply the filter to each element in the sequence node -func applyFilterToSeq(filter yaml.Filter, node *yaml.RNode) error { - if node.YNode().Kind != yaml.SequenceNode { - return fmt.Errorf("expect a sequence node but got %v", node.YNode().Kind) - } - - for _, elem := range node.Content() { - rnode := yaml.NewRNode(elem) - err := rnode.PipeE(filter) - if err != nil { - return err - } - } - - return nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/namespace/doc.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/namespace/doc.go deleted file mode 100644 index 539758b28..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/namespace/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package namespace contains a kio.Filter implementation of the kustomize -// namespace transformer. -// -// Special cases for known Kubernetes resources have been hardcoded in addition -// to those defined by the FsSlice. -package namespace diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/namespace/namespace.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/namespace/namespace.go deleted file mode 100644 index 5173a9554..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/namespace/namespace.go +++ /dev/null @@ -1,209 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package namespace - -import ( - "sigs.k8s.io/kustomize/api/filters/filtersutil" - "sigs.k8s.io/kustomize/api/filters/fsslice" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/resid" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -type Filter struct { - // Namespace is the namespace to apply to the inputs - Namespace string `yaml:"namespace,omitempty"` - - // FsSlice contains the FieldSpecs to locate the namespace field - FsSlice types.FsSlice `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"` - - // UnsetOnly means only blank namespace fields will be set - UnsetOnly bool `json:"unsetOnly" yaml:"unsetOnly"` - - // SetRoleBindingSubjects determines which subject fields in RoleBinding and ClusterRoleBinding - // objects will have their namespace fields set. Overrides field specs provided for these types, if any. - // - defaultOnly (default): namespace will be set only on subjects named "default". - // - allServiceAccounts: namespace will be set on all subjects with "kind: ServiceAccount" - // - none: all subjects will be skipped. 
- SetRoleBindingSubjects RoleBindingSubjectMode `json:"setRoleBindingSubjects" yaml:"setRoleBindingSubjects"` - - trackableSetter filtersutil.TrackableSetter -} - -type RoleBindingSubjectMode string - -const ( - DefaultSubjectsOnly RoleBindingSubjectMode = "defaultOnly" - SubjectModeUnspecified RoleBindingSubjectMode = "" - AllServiceAccountSubjects RoleBindingSubjectMode = "allServiceAccounts" - NoSubjects RoleBindingSubjectMode = "none" -) - -var _ kio.Filter = Filter{} -var _ kio.TrackableFilter = &Filter{} - -// WithMutationTracker registers a callback which will be invoked each time a field is mutated -func (ns *Filter) WithMutationTracker(callback func(key, value, tag string, node *yaml.RNode)) { - ns.trackableSetter.WithMutationTracker(callback) -} - -func (ns Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { - return kio.FilterAll(yaml.FilterFunc(ns.run)).Filter(nodes) -} - -// Run runs the filter on a single node rather than a slice -func (ns Filter) run(node *yaml.RNode) (*yaml.RNode, error) { - // Special handling for metadata.namespace -- :( - // never let SetEntry handle metadata.namespace--it will incorrectly include cluster-scoped resources - ns.FsSlice = ns.removeMetaNamespaceFieldSpecs(ns.FsSlice) - gvk := resid.GvkFromNode(node) - if err := ns.metaNamespaceHack(node, gvk); err != nil { - return nil, err - } - - // Special handling for (cluster) role binding subjects -- :( - if isRoleBinding(gvk.Kind) { - ns.FsSlice = ns.removeRoleBindingSubjectFieldSpecs(ns.FsSlice) - if err := ns.roleBindingHack(node); err != nil { - return nil, err - } - } - - // transformations based on data -- :) - err := node.PipeE(fsslice.Filter{ - FsSlice: ns.FsSlice, - SetValue: ns.fieldSetter(), - CreateKind: yaml.ScalarNode, // Namespace is a ScalarNode - CreateTag: yaml.NodeTagString, - }) - return node, err -} - -// metaNamespaceHack is a hack for implementing the namespace transform -// for the metadata.namespace field on namespace scoped resources. -func (ns Filter) metaNamespaceHack(obj *yaml.RNode, gvk resid.Gvk) error { - if gvk.IsClusterScoped() { - return nil - } - f := fsslice.Filter{ - FsSlice: []types.FieldSpec{ - {Path: types.MetadataNamespacePath, CreateIfNotPresent: true}, - }, - SetValue: ns.fieldSetter(), - CreateKind: yaml.ScalarNode, // Namespace is a ScalarNode - } - _, err := f.Filter(obj) - return err -} - -// roleBindingHack is a hack for implementing the transformer's SetRoleBindingSubjects option -// for RoleBinding and ClusterRoleBinding resource types. -// -// In NoSubjects mode, it does nothing. -// -// In AllServiceAccountSubjects mode, it sets the namespace on subjects with "kind: ServiceAccount". -// -// In DefaultSubjectsOnly mode (default mode), RoleBinding and ClusterRoleBinding have namespace set on -// elements of the "subjects" field if and only if the subject elements -// "name" is "default". Otherwise the namespace is not set. -// Example: -// -// kind: RoleBinding -// subjects: -// - name: "default" # this will have the namespace set -// ... -// - name: "something-else" # this will not have the namespace set -// ... 
-func (ns Filter) roleBindingHack(obj *yaml.RNode) error { - var visitor filtersutil.SetFn - switch ns.SetRoleBindingSubjects { - case NoSubjects: - return nil - case DefaultSubjectsOnly, SubjectModeUnspecified: - visitor = ns.setSubjectsNamedDefault - case AllServiceAccountSubjects: - visitor = ns.setServiceAccountNamespaces - default: - return errors.Errorf("invalid value %q for setRoleBindingSubjects: "+ - "must be one of %q, %q or %q", ns.SetRoleBindingSubjects, - DefaultSubjectsOnly, NoSubjects, AllServiceAccountSubjects) - } - - // Lookup the subjects field on all elements. - obj, err := obj.Pipe(yaml.Lookup(subjectsField)) - if err != nil || yaml.IsMissingOrNull(obj) { - return err - } - // Use the appropriate visitor to set the namespace field on the correct subset of subjects - return errors.WrapPrefixf(obj.VisitElements(visitor), "setting namespace on (cluster)role binding subjects") -} - -func isRoleBinding(kind string) bool { - return kind == roleBindingKind || kind == clusterRoleBindingKind -} - -func (ns Filter) setServiceAccountNamespaces(o *yaml.RNode) error { - name, err := o.Pipe(yaml.Lookup("kind"), yaml.Match("ServiceAccount")) - if err != nil || yaml.IsMissingOrNull(name) { - return errors.WrapPrefixf(err, "looking up kind on (cluster)role binding subject") - } - return setNamespaceField(o, ns.fieldSetter()) -} - -func (ns Filter) setSubjectsNamedDefault(o *yaml.RNode) error { - name, err := o.Pipe(yaml.Lookup("name"), yaml.Match("default")) - if err != nil || yaml.IsMissingOrNull(name) { - return errors.WrapPrefixf(err, "looking up name on (cluster)role binding subject") - } - return setNamespaceField(o, ns.fieldSetter()) -} - -func setNamespaceField(node *yaml.RNode, setter filtersutil.SetFn) error { - node, err := node.Pipe(yaml.LookupCreate(yaml.ScalarNode, "namespace")) - if err != nil { - return errors.WrapPrefixf(err, "setting namespace field on (cluster)role binding subject") - } - return setter(node) -} - -// removeRoleBindingSubjectFieldSpecs removes from the list fieldspecs that -// have hardcoded implementations -func (ns Filter) removeRoleBindingSubjectFieldSpecs(fs types.FsSlice) types.FsSlice { - var val types.FsSlice - for i := range fs { - if isRoleBinding(fs[i].Kind) && - (fs[i].Path == subjectsNamespacePath || fs[i].Path == subjectsField) { - continue - } - val = append(val, fs[i]) - } - return val -} - -func (ns Filter) removeMetaNamespaceFieldSpecs(fs types.FsSlice) types.FsSlice { - var val types.FsSlice - for i := range fs { - if fs[i].Path == types.MetadataNamespacePath { - continue - } - val = append(val, fs[i]) - } - return val -} - -func (ns *Filter) fieldSetter() filtersutil.SetFn { - if ns.UnsetOnly { - return ns.trackableSetter.SetEntryIfEmpty("", ns.Namespace, yaml.NodeTagString) - } - return ns.trackableSetter.SetEntry("", ns.Namespace, yaml.NodeTagString) -} - -const ( - subjectsField = "subjects" - subjectsNamespacePath = "subjects/namespace" - roleBindingKind = "RoleBinding" - clusterRoleBindingKind = "ClusterRoleBinding" -) diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/patchjson6902/doc.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/patchjson6902/doc.go deleted file mode 100644 index ec4cfa821..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/patchjson6902/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -// Package namespace contains a kio.Filter implementation of the kustomize -// patchjson6902 transformer -package patchjson6902 diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/patchjson6902/patchjson6902.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/patchjson6902/patchjson6902.go deleted file mode 100644 index 5749d6ddf..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/patchjson6902/patchjson6902.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package patchjson6902 - -import ( - "strings" - - jsonpatch "github.com/evanphx/json-patch" - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/yaml" - k8syaml "sigs.k8s.io/yaml" -) - -type Filter struct { - Patch string - - decodedPatch jsonpatch.Patch -} - -var _ kio.Filter = Filter{} - -func (pf Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { - decodedPatch, err := pf.decodePatch() - if err != nil { - return nil, err - } - pf.decodedPatch = decodedPatch - return kio.FilterAll(yaml.FilterFunc(pf.run)).Filter(nodes) -} - -func (pf Filter) decodePatch() (jsonpatch.Patch, error) { - patch := pf.Patch - // If the patch doesn't look like a JSON6902 patch, we - // try to parse it to json. - if !strings.HasPrefix(pf.Patch, "[") { - p, err := k8syaml.YAMLToJSON([]byte(patch)) - if err != nil { - return nil, err - } - patch = string(p) - } - decodedPatch, err := jsonpatch.DecodePatch([]byte(patch)) - if err != nil { - return nil, err - } - return decodedPatch, nil -} - -func (pf Filter) run(node *yaml.RNode) (*yaml.RNode, error) { - // We don't actually use the kyaml library for manipulating the - // yaml here. We just marshal it to json and rely on the - // jsonpatch library to take care of applying the patch. - // This means ordering might not be preserved with this filter. - b, err := node.MarshalJSON() - if err != nil { - return nil, err - } - res, err := pf.decodedPatch.Apply(b) - if err != nil { - return nil, err - } - err = node.UnmarshalJSON(res) - return node, err -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/patchstrategicmerge/doc.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/patchstrategicmerge/doc.go deleted file mode 100644 index 1733fd8a2..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/patchstrategicmerge/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package patchstrategicmerge contains a kio.Filter implementation of the -// kustomize strategic merge patch transformer. -package patchstrategicmerge diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/patchstrategicmerge/patchstrategicmerge.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/patchstrategicmerge/patchstrategicmerge.go deleted file mode 100644 index 1a70d19aa..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/patchstrategicmerge/patchstrategicmerge.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package patchstrategicmerge - -import ( - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/yaml" - "sigs.k8s.io/kustomize/kyaml/yaml/merge2" -) - -type Filter struct { - Patch *yaml.RNode -} - -var _ kio.Filter = Filter{} - -// Filter does a strategic merge patch, which can delete nodes. 
-func (pf Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { - var result []*yaml.RNode - for i := range nodes { - r, err := merge2.Merge( - pf.Patch, nodes[i], - yaml.MergeOptions{ - ListIncreaseDirection: yaml.MergeOptionsListPrepend, - }, - ) - if err != nil { - return nil, err - } - if r != nil { - result = append(result, r) - } - } - return result, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/prefix/doc.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/prefix/doc.go deleted file mode 100644 index 95236859f..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/prefix/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package prefix contains a kio.Filter implementation of the kustomize -// PrefixTransformer. -package prefix diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/prefix/prefix.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/prefix/prefix.go deleted file mode 100644 index daa375d1f..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/prefix/prefix.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package prefix - -import ( - "fmt" - - "sigs.k8s.io/kustomize/api/filters/fieldspec" - "sigs.k8s.io/kustomize/api/filters/filtersutil" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// Filter applies resource name prefix's using the fieldSpecs -type Filter struct { - Prefix string `json:"prefix,omitempty" yaml:"prefix,omitempty"` - - FieldSpec types.FieldSpec `json:"fieldSpec,omitempty" yaml:"fieldSpec,omitempty"` - - trackableSetter filtersutil.TrackableSetter -} - -var _ kio.Filter = Filter{} -var _ kio.TrackableFilter = &Filter{} - -// WithMutationTracker registers a callback which will be invoked each time a field is mutated -func (f *Filter) WithMutationTracker(callback func(key, value, tag string, node *yaml.RNode)) { - f.trackableSetter.WithMutationTracker(callback) -} - -func (f Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { - return kio.FilterAll(yaml.FilterFunc(f.run)).Filter(nodes) -} - -func (f Filter) run(node *yaml.RNode) (*yaml.RNode, error) { - err := node.PipeE(fieldspec.Filter{ - FieldSpec: f.FieldSpec, - SetValue: f.evaluateField, - CreateKind: yaml.ScalarNode, // Name is a ScalarNode - CreateTag: yaml.NodeTagString, - }) - return node, err -} - -func (f Filter) evaluateField(node *yaml.RNode) error { - return f.trackableSetter.SetScalar(fmt.Sprintf( - "%s%s", f.Prefix, node.YNode().Value))(node) -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/refvar/doc.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/refvar/doc.go deleted file mode 100644 index e30719871..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/refvar/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2022 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package refvar contains a kio.Filter implementation of the kustomize -// refvar transformer (find and replace $(FOO) style variables in strings). -package refvar diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/refvar/expand.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/refvar/expand.go deleted file mode 100644 index 3bcbd7a53..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/refvar/expand.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package refvar - -import ( - "fmt" - "log" - "strings" -) - -const ( - operator = '$' - referenceOpener = '(' - referenceCloser = ')' -) - -// syntaxWrap returns the input string wrapped by the expansion syntax. -func syntaxWrap(input string) string { - var sb strings.Builder - sb.WriteByte(operator) - sb.WriteByte(referenceOpener) - sb.WriteString(input) - sb.WriteByte(referenceCloser) - return sb.String() -} - -// MappingFunc maps a string to anything. -type MappingFunc func(string) interface{} - -// MakePrimitiveReplacer returns a MappingFunc that uses a map to do -// replacements, and a histogram to count map hits. -// -// Func behavior: -// -// If the input key is NOT found in the map, the key is wrapped up as -// as a variable declaration string and returned, e.g. key FOO becomes $(FOO). -// This string is presumably put back where it was found, and might get replaced -// later. -// -// If the key is found in the map, the value is returned if it is a primitive -// type (string, bool, number), and the hit is counted. -// -// If it's not a primitive type (e.g. a map, struct, func, etc.) then this -// function doesn't know what to do with it and it returns the key wrapped up -// again as if it had not been replaced. This should probably be an error. -func MakePrimitiveReplacer( - counts map[string]int, someMap map[string]interface{}) MappingFunc { - return func(key string) interface{} { - if value, ok := someMap[key]; ok { - switch typedV := value.(type) { - case string, int, int32, int64, float32, float64, bool: - counts[key]++ - return typedV - default: - // If the value is some complicated type (e.g. a map or struct), - // this function doesn't know how to jam it into a string, - // so just pretend it was a cache miss. - // Likely this should be an error instead of a silent failure, - // since the programmer passed an impossible value. - log.Printf( - "MakePrimitiveReplacer: bad replacement type=%T val=%v", - typedV, typedV) - return syntaxWrap(key) - } - } - // If unable to return the mapped variable, return it - // as it was found, and a later mapping might be able to - // replace it. - return syntaxWrap(key) - } -} - -// DoReplacements replaces variable references in the input string -// using the mapping function. 
-func DoReplacements(input string, mapping MappingFunc) interface{} { - var buf strings.Builder - checkpoint := 0 - for cursor := 0; cursor < len(input); cursor++ { - if input[cursor] == operator && cursor+1 < len(input) { - // Copy the portion of the input string since the last - // checkpoint into the buffer - buf.WriteString(input[checkpoint:cursor]) - - // Attempt to read the variable name as defined by the - // syntax from the input string - read, isVar, advance := tryReadVariableName(input[cursor+1:]) - - if isVar { - // We were able to read a variable name correctly; - // apply the mapping to the variable name and copy the - // bytes into the buffer - mapped := mapping(read) - if input == syntaxWrap(read) { - // Preserve the type of variable - return mapped - } - - // Variable is used in a middle of a string - buf.WriteString(fmt.Sprintf("%v", mapped)) - } else { - // Not a variable name; copy the read bytes into the buffer - buf.WriteString(read) - } - - // Advance the cursor in the input string to account for - // bytes consumed to read the variable name expression - cursor += advance - - // Advance the checkpoint in the input string - checkpoint = cursor + 1 - } - } - - // Return the buffer and any remaining unwritten bytes in the - // input string. - return buf.String() + input[checkpoint:] -} - -// tryReadVariableName attempts to read a variable name from the input -// string and returns the content read from the input, whether that content -// represents a variable name to perform mapping on, and the number of bytes -// consumed in the input string. -// -// The input string is assumed not to contain the initial operator. -func tryReadVariableName(input string) (string, bool, int) { - switch input[0] { - case operator: - // Escaped operator; return it. - return input[0:1], false, 1 - case referenceOpener: - // Scan to expression closer - for i := 1; i < len(input); i++ { - if input[i] == referenceCloser { - return input[1:i], true, i + 1 - } - } - - // Incomplete reference; return it. - return string(operator) + string(referenceOpener), false, 1 - default: - // Not the beginning of an expression, ie, an operator - // that doesn't begin an expression. Return the operator - // and the first rune in the string. - return string(operator) + string(input[0]), false, 1 - } -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/refvar/refvar.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/refvar/refvar.go deleted file mode 100644 index e00afafd7..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/refvar/refvar.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2022 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package refvar - -import ( - "fmt" - "strconv" - - "sigs.k8s.io/kustomize/api/filters/fieldspec" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// Filter updates $(VAR) style variables with values. -// The fieldSpecs are the places to look for occurrences of $(VAR). 
-type Filter struct { - MappingFunc MappingFunc `json:"mappingFunc,omitempty" yaml:"mappingFunc,omitempty"` - FieldSpec types.FieldSpec `json:"fieldSpec,omitempty" yaml:"fieldSpec,omitempty"` -} - -func (f Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { - return kio.FilterAll(yaml.FilterFunc(f.run)).Filter(nodes) -} - -func (f Filter) run(node *yaml.RNode) (*yaml.RNode, error) { - err := node.PipeE(fieldspec.Filter{ - FieldSpec: f.FieldSpec, - SetValue: f.set, - }) - return node, err -} - -func (f Filter) set(node *yaml.RNode) error { - if yaml.IsMissingOrNull(node) { - return nil - } - switch node.YNode().Kind { - case yaml.ScalarNode: - return f.setScalar(node) - case yaml.MappingNode: - return f.setMap(node) - case yaml.SequenceNode: - return f.setSeq(node) - default: - return fmt.Errorf("invalid type encountered %v", node.YNode().Kind) - } -} - -func updateNodeValue(node *yaml.Node, newValue interface{}) { - switch newValue := newValue.(type) { - case int: - node.Value = strconv.FormatInt(int64(newValue), 10) - node.Tag = yaml.NodeTagInt - case int32: - node.Value = strconv.FormatInt(int64(newValue), 10) - node.Tag = yaml.NodeTagInt - case int64: - node.Value = strconv.FormatInt(newValue, 10) - node.Tag = yaml.NodeTagInt - case bool: - node.SetString(strconv.FormatBool(newValue)) - node.Tag = yaml.NodeTagBool - case float32: - node.SetString(strconv.FormatFloat(float64(newValue), 'f', -1, 32)) - node.Tag = yaml.NodeTagFloat - case float64: - node.SetString(strconv.FormatFloat(newValue, 'f', -1, 64)) - node.Tag = yaml.NodeTagFloat - default: - node.SetString(newValue.(string)) - node.Tag = yaml.NodeTagString - } - node.Style = 0 -} - -func (f Filter) setScalar(node *yaml.RNode) error { - if !yaml.IsYNodeString(node.YNode()) { - return nil - } - v := DoReplacements(node.YNode().Value, f.MappingFunc) - updateNodeValue(node.YNode(), v) - return nil -} - -func (f Filter) setMap(node *yaml.RNode) error { - contents := node.YNode().Content - for i := 0; i < len(contents); i += 2 { - if !yaml.IsYNodeString(contents[i]) { - return fmt.Errorf( - "invalid map key: value='%s', tag='%s'", - contents[i].Value, contents[i].Tag) - } - if !yaml.IsYNodeString(contents[i+1]) { - continue - } - newValue := DoReplacements(contents[i+1].Value, f.MappingFunc) - updateNodeValue(contents[i+1], newValue) - } - return nil -} - -func (f Filter) setSeq(node *yaml.RNode) error { - for _, item := range node.YNode().Content { - if !yaml.IsYNodeString(item) { - return fmt.Errorf("invalid value type expect a string") - } - newValue := DoReplacements(item.Value, f.MappingFunc) - updateNodeValue(item, newValue) - } - return nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/replacement/doc.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/replacement/doc.go deleted file mode 100644 index 667c92893..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/replacement/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright 2022 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package replacement contains a kio.Filter implementation of the kustomize -// replacement transformer (accepts sources and looks for targets to replace -// their values with values from the sources). 
-package replacement diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/replacement/replacement.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/replacement/replacement.go deleted file mode 100644 index a9330fa14..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/replacement/replacement.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2021 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package replacement - -import ( - "errors" - "fmt" - "strings" - - "sigs.k8s.io/kustomize/api/internal/utils" - "sigs.k8s.io/kustomize/api/resource" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/resid" - kyaml_utils "sigs.k8s.io/kustomize/kyaml/utils" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -type Filter struct { - Replacements []types.Replacement `json:"replacements,omitempty" yaml:"replacements,omitempty"` -} - -// Filter replaces values of targets with values from sources -func (f Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { - for i, r := range f.Replacements { - if r.Source == nil || r.Targets == nil { - return nil, fmt.Errorf("replacements must specify a source and at least one target") - } - value, err := getReplacement(nodes, &f.Replacements[i]) - if err != nil { - return nil, err - } - nodes, err = applyReplacement(nodes, value, r.Targets) - if err != nil { - return nil, err - } - } - return nodes, nil -} - -func getReplacement(nodes []*yaml.RNode, r *types.Replacement) (*yaml.RNode, error) { - source, err := selectSourceNode(nodes, r.Source) - if err != nil { - return nil, err - } - - if r.Source.FieldPath == "" { - r.Source.FieldPath = types.DefaultReplacementFieldPath - } - fieldPath := kyaml_utils.SmarterPathSplitter(r.Source.FieldPath, ".") - - rn, err := source.Pipe(yaml.Lookup(fieldPath...)) - if err != nil { - return nil, fmt.Errorf("error looking up replacement source: %w", err) - } - if rn.IsNilOrEmpty() { - return nil, fmt.Errorf("fieldPath `%s` is missing for replacement source %s", r.Source.FieldPath, r.Source.ResId) - } - - return getRefinedValue(r.Source.Options, rn) -} - -// selectSourceNode finds the node that matches the selector, returning -// an error if multiple or none are found -func selectSourceNode(nodes []*yaml.RNode, selector *types.SourceSelector) (*yaml.RNode, error) { - var matches []*yaml.RNode - for _, n := range nodes { - ids, err := utils.MakeResIds(n) - if err != nil { - return nil, fmt.Errorf("error getting node IDs: %w", err) - } - for _, id := range ids { - if id.IsSelectedBy(selector.ResId) { - if len(matches) > 0 { - return nil, fmt.Errorf( - "multiple matches for selector %s", selector) - } - matches = append(matches, n) - break - } - } - } - if len(matches) == 0 { - return nil, fmt.Errorf("nothing selected by %s", selector) - } - return matches[0], nil -} - -func getRefinedValue(options *types.FieldOptions, rn *yaml.RNode) (*yaml.RNode, error) { - if options == nil || options.Delimiter == "" { - return rn, nil - } - if rn.YNode().Kind != yaml.ScalarNode { - return nil, fmt.Errorf("delimiter option can only be used with scalar nodes") - } - value := strings.Split(yaml.GetValue(rn), options.Delimiter) - if options.Index >= len(value) || options.Index < 0 { - return nil, fmt.Errorf("options.index %d is out of bounds for value %s", options.Index, yaml.GetValue(rn)) - } - n := rn.Copy() - n.YNode().Value = value[options.Index] - return n, nil -} - -func applyReplacement(nodes []*yaml.RNode, value *yaml.RNode, targetSelectors []*types.TargetSelector) ([]*yaml.RNode, error) { - for _, 
selector := range targetSelectors { - if selector.Select == nil { - return nil, errors.New("target must specify resources to select") - } - if len(selector.FieldPaths) == 0 { - selector.FieldPaths = []string{types.DefaultReplacementFieldPath} - } - for _, possibleTarget := range nodes { - ids, err := utils.MakeResIds(possibleTarget) - if err != nil { - return nil, err - } - - // filter targets by label and annotation selectors - selectByAnnoAndLabel, err := selectByAnnoAndLabel(possibleTarget, selector) - if err != nil { - return nil, err - } - if !selectByAnnoAndLabel { - continue - } - - // filter targets by matching resource IDs - for i, id := range ids { - if id.IsSelectedBy(selector.Select.ResId) && !rejectId(selector.Reject, &ids[i]) { - err := copyValueToTarget(possibleTarget, value, selector) - if err != nil { - return nil, err - } - break - } - } - } - } - return nodes, nil -} - -func selectByAnnoAndLabel(n *yaml.RNode, t *types.TargetSelector) (bool, error) { - if matchesSelect, err := matchesAnnoAndLabelSelector(n, t.Select); !matchesSelect || err != nil { - return false, err - } - for _, reject := range t.Reject { - if reject.AnnotationSelector == "" && reject.LabelSelector == "" { - continue - } - if m, err := matchesAnnoAndLabelSelector(n, reject); m || err != nil { - return false, err - } - } - return true, nil -} - -func matchesAnnoAndLabelSelector(n *yaml.RNode, selector *types.Selector) (bool, error) { - r := resource.Resource{RNode: *n} - annoMatch, err := r.MatchesAnnotationSelector(selector.AnnotationSelector) - if err != nil { - return false, err - } - labelMatch, err := r.MatchesLabelSelector(selector.LabelSelector) - if err != nil { - return false, err - } - return annoMatch && labelMatch, nil -} - -func rejectId(rejects []*types.Selector, id *resid.ResId) bool { - for _, r := range rejects { - if !r.ResId.IsEmpty() && id.IsSelectedBy(r.ResId) { - return true - } - } - return false -} - -func copyValueToTarget(target *yaml.RNode, value *yaml.RNode, selector *types.TargetSelector) error { - for _, fp := range selector.FieldPaths { - fieldPath := kyaml_utils.SmarterPathSplitter(fp, ".") - create, err := shouldCreateField(selector.Options, fieldPath) - if err != nil { - return err - } - - var targetFields []*yaml.RNode - if create { - createdField, createErr := target.Pipe(yaml.LookupCreate(value.YNode().Kind, fieldPath...)) - if createErr != nil { - return fmt.Errorf("error creating replacement node: %w", createErr) - } - targetFields = append(targetFields, createdField) - } else { - // may return multiple fields, always wrapped in a sequence node - foundFieldSequence, lookupErr := target.Pipe(&yaml.PathMatcher{Path: fieldPath}) - if lookupErr != nil { - return fmt.Errorf("error finding field in replacement target: %w", lookupErr) - } - targetFields, err = foundFieldSequence.Elements() - if err != nil { - return fmt.Errorf("error fetching elements in replacement target: %w", err) - } - } - - for _, t := range targetFields { - if err := setFieldValue(selector.Options, t, value); err != nil { - return err - } - } - - } - return nil -} - -func setFieldValue(options *types.FieldOptions, targetField *yaml.RNode, value *yaml.RNode) error { - value = value.Copy() - if options != nil && options.Delimiter != "" { - if targetField.YNode().Kind != yaml.ScalarNode { - return fmt.Errorf("delimiter option can only be used with scalar nodes") - } - tv := strings.Split(targetField.YNode().Value, options.Delimiter) - v := yaml.GetValue(value) - // TODO: Add a way to remove an element 
- switch { - case options.Index < 0: // prefix - tv = append([]string{v}, tv...) - case options.Index >= len(tv): // suffix - tv = append(tv, v) - default: // replace an element - tv[options.Index] = v - } - value.YNode().Value = strings.Join(tv, options.Delimiter) - } - - if targetField.YNode().Kind == yaml.ScalarNode { - // For scalar, only copy the value (leave any type intact to auto-convert int->string or string->int) - targetField.YNode().Value = value.YNode().Value - } else { - targetField.SetYNode(value.YNode()) - } - - return nil -} - -func shouldCreateField(options *types.FieldOptions, fieldPath []string) (bool, error) { - if options == nil || !options.Create { - return false, nil - } - // create option is not supported in a wildcard matching - for _, f := range fieldPath { - if f == "*" { - return false, fmt.Errorf("cannot support create option in a multi-value target") - } - } - return true, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/replicacount/doc.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/replicacount/doc.go deleted file mode 100644 index a22d13034..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/replicacount/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package replicacount contains a kio.Filter implementation of the kustomize -// ReplicaCountTransformer. -package replicacount diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/replicacount/replicacount.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/replicacount/replicacount.go deleted file mode 100644 index ea5351f9b..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/replicacount/replicacount.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2022 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package replicacount - -import ( - "strconv" - - "sigs.k8s.io/kustomize/api/filters/fieldspec" - "sigs.k8s.io/kustomize/api/filters/filtersutil" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// Filter updates/sets replicas fields using the fieldSpecs -type Filter struct { - Replica types.Replica `json:"replica,omitempty" yaml:"replica,omitempty"` - FieldSpec types.FieldSpec `json:"fieldSpec,omitempty" yaml:"fieldSpec,omitempty"` - - trackableSetter filtersutil.TrackableSetter -} - -var _ kio.Filter = Filter{} -var _ kio.TrackableFilter = &Filter{} - -// WithMutationTracker registers a callback which will be invoked each time a field is mutated -func (rc *Filter) WithMutationTracker(callback func(key, value, tag string, node *yaml.RNode)) { - rc.trackableSetter.WithMutationTracker(callback) -} - -func (rc Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { - return kio.FilterAll(yaml.FilterFunc(rc.run)).Filter(nodes) -} - -func (rc Filter) run(node *yaml.RNode) (*yaml.RNode, error) { - err := node.PipeE(fieldspec.Filter{ - FieldSpec: rc.FieldSpec, - SetValue: rc.set, - CreateKind: yaml.ScalarNode, // replicas is a ScalarNode - CreateTag: yaml.NodeTagInt, - }) - return node, err -} - -func (rc Filter) set(node *yaml.RNode) error { - return rc.trackableSetter.SetEntry("", strconv.FormatInt(rc.Replica.Count, 10), yaml.NodeTagInt)(node) -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/suffix/doc.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/suffix/doc.go deleted file mode 100644 index 18be62dfd..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/suffix/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2021 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package suffix contains a kio.Filter implementation of the kustomize -// SuffixTransformer. -package suffix diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/suffix/suffix.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/suffix/suffix.go deleted file mode 100644 index babc257be..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/suffix/suffix.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2021 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package suffix - -import ( - "fmt" - - "sigs.k8s.io/kustomize/api/filters/fieldspec" - "sigs.k8s.io/kustomize/api/filters/filtersutil" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// Filter applies resource name suffix's using the fieldSpecs -type Filter struct { - Suffix string `json:"suffix,omitempty" yaml:"suffix,omitempty"` - - FieldSpec types.FieldSpec `json:"fieldSpec,omitempty" yaml:"fieldSpec,omitempty"` - - trackableSetter filtersutil.TrackableSetter -} - -var _ kio.Filter = Filter{} -var _ kio.TrackableFilter = &Filter{} - -// WithMutationTracker registers a callback which will be invoked each time a field is mutated -func (f *Filter) WithMutationTracker(callback func(key, value, tag string, node *yaml.RNode)) { - f.trackableSetter.WithMutationTracker(callback) -} - -func (f Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { - return kio.FilterAll(yaml.FilterFunc(f.run)).Filter(nodes) -} - -func (f Filter) run(node *yaml.RNode) (*yaml.RNode, error) { - err := node.PipeE(fieldspec.Filter{ - FieldSpec: f.FieldSpec, - SetValue: f.evaluateField, - CreateKind: yaml.ScalarNode, // Name is a ScalarNode - CreateTag: yaml.NodeTagString, - }) - return node, err -} - -func (f Filter) evaluateField(node *yaml.RNode) error { - return f.trackableSetter.SetScalar(fmt.Sprintf( - "%s%s", node.YNode().Value, f.Suffix))(node) -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/filters/valueadd/valueadd.go b/src/vendor/sigs.k8s.io/kustomize/api/filters/valueadd/valueadd.go deleted file mode 100644 index f8e6b2f82..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/filters/valueadd/valueadd.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package valueadd - -import ( - "strings" - - "sigs.k8s.io/kustomize/kyaml/filesys" - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// An 'Add' operation aspiring to IETF RFC 6902 JSON. -// -// The filter tries to add a value to a node at a particular field path. -// -// Kinds of target fields: -// -// - Non-existent target field. -// -// The field will be added and the value inserted. -// -// - Existing field, scalar or map. -// -// E.g. 'spec/template/spec/containers/[name:nginx]/image' -// -// This behaves like an IETF RFC 6902 Replace operation would; -// the existing value is replaced without complaint, even though -// this is an Add operation. In contrast, a Replace operation -// must fail (report an error) if the field doesn't exist. -// -// - Existing field, list (array) -// Not supported yet. -// TODO: Honor fields with RFC-6902-style array indices -// TODO: like 'spec/template/spec/containers/2' -// TODO: Modify kyaml/yaml/PathGetter to allow this. -// The value will be inserted into the array at the given position, -// shifting other contents. To instead replace an array entry, use -// an implementation of an IETF RFC 6902 Replace operation. -// -// For the common case of a filepath in the field value, and a desire -// to add the value to the filepath (rather than replace the filepath), -// use a non-zero value of FilePathPosition (see below). -type Filter struct { - // Value is the value to add. - // - // Empty values are disallowed, i.e. this filter isn't intended - // for use in erasing or removing fields. For that, use a filter - // more aligned with the IETF RFC 6902 JSON Remove operation. 
- // - // At the time of writing, Value's value should be a simple string, - // not a JSON document. This particular filter focuses on easing - // injection of a single-sourced cloud project and/or cluster name - // into various fields, especially namespace and various filepath - // specifications. - Value string - - // FieldPath is a JSON-style path to the field intended to hold the value. - FieldPath string - - // FilePathPosition is a filepath field index. - // - // Call the value of this field _i_. - // - // If _i_ is zero, negative or unspecified, this field has no effect. - // - // If _i_ is > 0, then it's assumed that - // - 'Value' is a string that can work as a directory or file name, - // - the field value intended for replacement holds a filepath. - // - // The filepath is split into a string slice, the value is inserted - // at position [i-1], shifting the rest of the path to the right. - // A value of i==1 puts the new value at the start of the path. - // This change never converts an absolute path to a relative path, - // meaning adding a new field at position i==1 will preserve a - // leading slash. E.g. if Value == 'PEACH' - // - // OLD : NEW : FilePathPosition - // -------------------------------------------------------- - // {empty} : PEACH : irrelevant - // / : /PEACH : irrelevant - // pie : PEACH/pie : 1 (or less to prefix) - // /pie : /PEACH/pie : 1 (or less to prefix) - // raw : raw/PEACH : 2 (or more to postfix) - // /raw : /raw/PEACH : 2 (or more to postfix) - // a/nice/warm/pie : a/nice/warm/PEACH/pie : 4 - // /a/nice/warm/pie : /a/nice/warm/PEACH/pie : 4 - // - // For robustness (liberal input, conservative output) FilePathPosition - // values that that are too large to index the split filepath result in a - // postfix rather than an error. So use 1 to prefix, 9999 to postfix. - FilePathPosition int `json:"filePathPosition,omitempty" yaml:"filePathPosition,omitempty"` -} - -var _ kio.Filter = Filter{} - -func (f Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { - _, err := kio.FilterAll(yaml.FilterFunc( - func(node *yaml.RNode) (*yaml.RNode, error) { - var fields []string - // if there is forward slash '/' in the field name, a back slash '\' - // will be used to escape it. - for _, f := range strings.Split(f.FieldPath, "/") { - if len(fields) > 0 && strings.HasSuffix(fields[len(fields)-1], "\\") { - concatField := strings.TrimSuffix(fields[len(fields)-1], "\\") + "/" + f - fields = append(fields[:len(fields)-1], concatField) - } else { - fields = append(fields, f) - } - } - // TODO: support SequenceNode. - // Presumably here one could look for array indices (digits) at - // the end of the field path (as described in IETF RFC 6902 JSON), - // and if found, take it as a signal that this should be a - // SequenceNode instead of a ScalarNode, and insert the value - // into the proper slot, shifting every over. 
- n, err := node.Pipe(yaml.LookupCreate(yaml.ScalarNode, fields...)) - if err != nil { - return node, err - } - // TODO: allow more kinds - if err := yaml.ErrorIfInvalid(n, yaml.ScalarNode); err != nil { - return nil, err - } - newValue := f.Value - if f.FilePathPosition > 0 { - newValue = filesys.InsertPathPart( - n.YNode().Value, f.FilePathPosition-1, newValue) - } - return n.Pipe(yaml.FieldSetter{StringValue: newValue}) - })).Filter(nodes) - return nodes, err -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/hasher/hasher.go b/src/vendor/sigs.k8s.io/kustomize/api/hasher/hasher.go deleted file mode 100644 index aef436d91..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/hasher/hasher.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package hasher - -import ( - "crypto/sha256" - "encoding/json" - "fmt" - "sort" - - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// SortArrayAndComputeHash sorts a string array and -// returns a hash for it -func SortArrayAndComputeHash(s []string) (string, error) { - sort.Strings(s) - data, err := json.Marshal(s) - if err != nil { - return "", err - } - return encode(hex256(string(data))) -} - -// Copied from https://github.com/kubernetes/kubernetes -// /blob/master/pkg/kubectl/util/hash/hash.go -func encode(hex string) (string, error) { - if len(hex) < 10 { - return "", fmt.Errorf( - "input length must be at least 10") - } - enc := []rune(hex[:10]) - for i := range enc { - switch enc[i] { - case '0': - enc[i] = 'g' - case '1': - enc[i] = 'h' - case '3': - enc[i] = 'k' - case 'a': - enc[i] = 'm' - case 'e': - enc[i] = 't' - } - } - return string(enc), nil -} - -// hex256 returns the hex form of the sha256 of the argument. -func hex256(data string) string { - return fmt.Sprintf("%x", sha256.Sum256([]byte(data))) -} - -// Hasher computes the hash of an RNode. -type Hasher struct{} - -// Hash returns a hash of the argument. -func (h *Hasher) Hash(node *yaml.RNode) (r string, err error) { - var encoded string - switch node.GetKind() { - case "ConfigMap": - encoded, err = encodeConfigMap(node) - case "Secret": - encoded, err = encodeSecret(node) - default: - var encodedBytes []byte - encodedBytes, err = json.Marshal(node.YNode()) - encoded = string(encodedBytes) - } - if err != nil { - return "", err - } - return encode(hex256(encoded)) -} - -func getNodeValues( - node *yaml.RNode, paths []string) (map[string]interface{}, error) { - values := make(map[string]interface{}) - for _, p := range paths { - vn, err := node.Pipe(yaml.Lookup(p)) - if err != nil { - return map[string]interface{}{}, err - } - if vn == nil { - values[p] = "" - continue - } - if vn.YNode().Kind != yaml.ScalarNode { - vs, err := vn.MarshalJSON() - if err != nil { - return map[string]interface{}{}, err - } - // data, binaryData and stringData are all maps - var v map[string]interface{} - json.Unmarshal(vs, &v) - values[p] = v - } else { - values[p] = vn.YNode().Value - } - } - return values, nil -} - -// encodeConfigMap encodes a ConfigMap. -// Data, Kind, and Name are taken into account. -// BinaryData is included if it's not empty to avoid useless key in output. 
-func encodeConfigMap(node *yaml.RNode) (string, error) { - // get fields - paths := []string{"metadata/name", "data", "binaryData"} - values, err := getNodeValues(node, paths) - if err != nil { - return "", err - } - m := map[string]interface{}{ - "kind": "ConfigMap", - "name": values["metadata/name"], - "data": values["data"], - } - if _, ok := values["binaryData"].(map[string]interface{}); ok { - m["binaryData"] = values["binaryData"] - } - - // json.Marshal sorts the keys in a stable order in the encoding - data, err := json.Marshal(m) - if err != nil { - return "", err - } - return string(data), nil -} - -// encodeSecret encodes a Secret. -// Data, Kind, Name, and Type are taken into account. -// StringData is included if it's not empty to avoid useless key in output. -func encodeSecret(node *yaml.RNode) (string, error) { - // get fields - paths := []string{"type", "metadata/name", "data", "stringData"} - values, err := getNodeValues(node, paths) - if err != nil { - return "", err - } - m := map[string]interface{}{"kind": "Secret", "type": values["type"], - "name": values["metadata/name"], "data": values["data"]} - if _, ok := values["stringData"].(map[string]interface{}); ok { - m["stringData"] = values["stringData"] - } - - // json.Marshal sorts the keys in a stable order in the encoding - data, err := json.Marshal(m) - if err != nil { - return "", err - } - return string(data), nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/ifc/ifc.go b/src/vendor/sigs.k8s.io/kustomize/api/ifc/ifc.go deleted file mode 100644 index 9fd297478..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/ifc/ifc.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package ifc holds miscellaneous interfaces used by kustomize. -package ifc - -import ( - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// Validator provides functions to validate annotations and labels -type Validator interface { - MakeAnnotationValidator() func(map[string]string) error - MakeAnnotationNameValidator() func([]string) error - MakeLabelValidator() func(map[string]string) error - MakeLabelNameValidator() func([]string) error - ValidateNamespace(string) []string - ErrIfInvalidKey(string) error - IsEnvVarName(k string) error -} - -// KvLoader reads and validates KV pairs. -type KvLoader interface { - Validator() Validator - Load(args types.KvPairSources) (all []types.Pair, err error) -} - -// Loader interface exposes methods to read bytes. -type Loader interface { - // Root returns the root location for this Loader. - Root() string - // New returns Loader located at newRoot. - New(newRoot string) (Loader, error) - // Load returns the bytes read from the location or an error. - Load(location string) ([]byte, error) - // Cleanup cleans the loader - Cleanup() error -} - -// KustHasher returns a hash of the argument -// or an error. -type KustHasher interface { - Hash(*yaml.RNode) (string, error) -} - -// See core.v1.SecretTypeOpaque -const SecretTypeOpaque = "Opaque" diff --git a/src/vendor/sigs.k8s.io/kustomize/api/image/image.go b/src/vendor/sigs.k8s.io/kustomize/api/image/image.go deleted file mode 100644 index 4a88050b4..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/image/image.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package image - -import ( - "regexp" - "strings" -) - -// IsImageMatched returns true if the value of t is identical to the -// image name in the full image name and tag as given by s. -func IsImageMatched(s, t string) bool { - // Tag values are limited to [a-zA-Z0-9_.{}-]. - // Some tools like Bazel rules_k8s allow tag patterns with {} characters. - // More info: https://github.com/bazelbuild/rules_k8s/pull/423 - pattern, _ := regexp.Compile("^" + t + "(:[a-zA-Z0-9_.{}-]*)?(@sha256:[a-zA-Z0-9_.{}-]*)?$") - return pattern.MatchString(s) -} - -// Split separates and returns the name and tag parts -// from the image string using either colon `:` or at `@` separators. -// image reference pattern: [[host[:port]/]component/]component[:tag][@digest] -func Split(imageName string) (name string, tag string, digest string) { - // check if image name contains a domain - // if domain is present, ignore domain and check for `:` - searchName := imageName - slashIndex := strings.Index(imageName, "/") - if slashIndex > 0 { - searchName = imageName[slashIndex:] - } else { - slashIndex = 0 - } - - id := strings.Index(searchName, "@") - ic := strings.Index(searchName, ":") - - // no tag or digest - if ic < 0 && id < 0 { - return imageName, "", "" - } - - // digest only - if id >= 0 && (id < ic || ic < 0) { - id += slashIndex - name = imageName[:id] - digest = strings.TrimPrefix(imageName[id:], "@") - return name, "", digest - } - - // tag and digest - if id >= 0 && ic >= 0 { - id += slashIndex - ic += slashIndex - name = imageName[:ic] - tag = strings.TrimPrefix(imageName[ic:id], ":") - digest = strings.TrimPrefix(imageName[id:], "@") - return name, tag, digest - } - - // tag only - ic += slashIndex - name = imageName[:ic] - tag = strings.TrimPrefix(imageName[ic:], ":") - return name, tag, "" -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/loadconfigfromcrds.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/loadconfigfromcrds.go deleted file mode 100644 index cac47c0de..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/loadconfigfromcrds.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package accumulator - -import ( - "encoding/json" - "strings" - - "github.com/pkg/errors" - "k8s.io/kube-openapi/pkg/validation/spec" - "sigs.k8s.io/kustomize/api/ifc" - "sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/filesys" - "sigs.k8s.io/kustomize/kyaml/resid" - "sigs.k8s.io/yaml" -) - -// OpenAPIDefinition describes single type. -// Normally these definitions are auto-generated using gen-openapi. -// Same as in k8s.io / kube-openapi / pkg / common. 
-type OpenAPIDefinition struct { - Schema spec.Schema - Dependencies []string -} - -type myProperties = map[string]spec.Schema -type nameToApiMap map[string]OpenAPIDefinition - -// LoadConfigFromCRDs parse CRD schemas from paths into a TransformerConfig -func LoadConfigFromCRDs( - ldr ifc.Loader, paths []string) (*builtinconfig.TransformerConfig, error) { - tc := builtinconfig.MakeEmptyConfig() - for _, path := range paths { - content, err := ldr.Load(path) - if err != nil { - return nil, err - } - m, err := makeNameToApiMap(content) - if err != nil { - return nil, errors.Wrapf(err, "unable to parse open API definition from '%s'", path) - } - otherTc, err := makeConfigFromApiMap(m) - if err != nil { - return nil, err - } - tc, err = tc.Merge(otherTc) - if err != nil { - return nil, err - } - } - return tc, nil -} - -func makeNameToApiMap(content []byte) (result nameToApiMap, err error) { - if content[0] == '{' { - err = json.Unmarshal(content, &result) - } else { - err = yaml.Unmarshal(content, &result) - } - return -} - -func makeConfigFromApiMap(m nameToApiMap) (*builtinconfig.TransformerConfig, error) { - result := builtinconfig.MakeEmptyConfig() - for name, api := range m { - if !looksLikeAk8sType(api.Schema.SchemaProps.Properties) { - continue - } - tc := builtinconfig.MakeEmptyConfig() - err := loadCrdIntoConfig( - tc, makeGvkFromTypeName(name), m, name, []string{}) - if err != nil { - return result, err - } - result, err = result.Merge(tc) - if err != nil { - return result, err - } - } - return result, nil -} - -// TODO: Get Group and Version for CRD from the -// openAPI definition once -// "x-kubernetes-group-version-kind" is available in CRD -func makeGvkFromTypeName(n string) resid.Gvk { - names := strings.Split(n, filesys.SelfDir) - kind := names[len(names)-1] - return resid.Gvk{Kind: kind} -} - -func looksLikeAk8sType(properties myProperties) bool { - _, ok := properties["kind"] - if !ok { - return false - } - _, ok = properties["apiVersion"] - if !ok { - return false - } - _, ok = properties["metadata"] - return ok -} - -const ( - // "x-kubernetes-annotation": "" - xAnnotation = "x-kubernetes-annotation" - - // "x-kubernetes-label-selector": "" - xLabelSelector = "x-kubernetes-label-selector" - - // "x-kubernetes-identity": "" - xIdentity = "x-kubernetes-identity" - - // "x-kubernetes-object-ref-api-version": - xVersion = "x-kubernetes-object-ref-api-version" - - // "x-kubernetes-object-ref-kind": - xKind = "x-kubernetes-object-ref-kind" - - // "x-kubernetes-object-ref-name-key": "name" - // default is "name" - xNameKey = "x-kubernetes-object-ref-name-key" -) - -// loadCrdIntoConfig loads a CRD spec into a TransformerConfig -func loadCrdIntoConfig( - theConfig *builtinconfig.TransformerConfig, theGvk resid.Gvk, theMap nameToApiMap, - typeName string, path []string) (err error) { - api, ok := theMap[typeName] - if !ok { - return nil - } - for propName, property := range api.Schema.SchemaProps.Properties { - _, annotate := property.Extensions.GetString(xAnnotation) - if annotate { - err = theConfig.AddAnnotationFieldSpec( - makeFs(theGvk, append(path, propName))) - if err != nil { - return - } - } - _, label := property.Extensions.GetString(xLabelSelector) - if label { - err = theConfig.AddLabelFieldSpec( - makeFs(theGvk, append(path, propName))) - if err != nil { - return - } - } - _, identity := property.Extensions.GetString(xIdentity) - if identity { - err = theConfig.AddPrefixFieldSpec( - makeFs(theGvk, append(path, propName))) - if err != nil { - return - } - } - version, 
ok := property.Extensions.GetString(xVersion) - if ok { - kind, ok := property.Extensions.GetString(xKind) - if ok { - nameKey, ok := property.Extensions.GetString(xNameKey) - if !ok { - nameKey = "name" - } - err = theConfig.AddNamereferenceFieldSpec( - builtinconfig.NameBackReferences{ - Gvk: resid.Gvk{Kind: kind, Version: version}, - Referrers: []types.FieldSpec{ - makeFs(theGvk, append(path, propName, nameKey))}, - }) - if err != nil { - return - } - } - } - if property.Ref.GetURL() != nil { - err = loadCrdIntoConfig( - theConfig, theGvk, theMap, - property.Ref.String(), append(path, propName)) - if err != nil { - return - } - } - } - return nil -} - -func makeFs(in resid.Gvk, path []string) types.FieldSpec { - return types.FieldSpec{ - CreateIfNotPresent: false, - Gvk: in, - Path: strings.Join(path, "/"), - } -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/namereferencetransformer.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/namereferencetransformer.go deleted file mode 100644 index f8f12b4f2..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/namereferencetransformer.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package accumulator - -import ( - "fmt" - "log" - - "sigs.k8s.io/kustomize/api/filters/nameref" - "sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/resource" - "sigs.k8s.io/kustomize/kyaml/resid" -) - -type nameReferenceTransformer struct { - backRefs []builtinconfig.NameBackReferences -} - -const doDebug = false - -var _ resmap.Transformer = &nameReferenceTransformer{} - -type filterMap map[*resource.Resource][]nameref.Filter - -// newNameReferenceTransformer constructs a nameReferenceTransformer -// with a given slice of NameBackReferences. -func newNameReferenceTransformer( - br []builtinconfig.NameBackReferences) resmap.Transformer { - if br == nil { - log.Fatal("backrefs not expected to be nil") - } - return &nameReferenceTransformer{backRefs: br} -} - -// Transform updates name references in resource A that -// refer to resource B, given that B's name may have -// changed. -// -// For example, a HorizontalPodAutoscaler (HPA) -// necessarily refers to a Deployment, the thing that -// an HPA scales. In this case: -// -// - the HPA instance is the Referrer, -// - the Deployment instance is the ReferralTarget. -// -// If the Deployment's name changes, e.g. a prefix is added, -// then the HPA's reference to the Deployment must be fixed. -// -func (t *nameReferenceTransformer) Transform(m resmap.ResMap) error { - fMap := t.determineFilters(m.Resources()) - debug(fMap) - for r, fList := range fMap { - c, err := m.SubsetThatCouldBeReferencedByResource(r) - if err != nil { - return err - } - for _, f := range fList { - f.Referrer = r - f.ReferralCandidates = c - if err := f.Referrer.ApplyFilter(f); err != nil { - return err - } - } - } - return nil -} - -func debug(fMap filterMap) { - if !doDebug { - return - } - fmt.Printf("filterMap has %d entries:\n", len(fMap)) - rCount := 0 - for r, fList := range fMap { - yml, _ := r.AsYAML() - rCount++ - fmt.Printf(` ----- %3d. 
possible referrer ------------- -%s ----------`, rCount, string(yml), - ) - for i, f := range fList { - fmt.Printf(` -%3d/%3d update: %s - from: %s -`, rCount, i+1, f.NameFieldToUpdate.Path, f.ReferralTarget, - ) - } - } -} - -// Produce a map from referrer resources that might need to be fixed -// to filters that might fix them. The keys to this map are potential -// referrers, so won't include resources like ConfigMap or Secret. -// -// In the inner loop over the resources below, say we -// encounter an HPA instance. Then, in scanning the set -// of all known backrefs, we encounter an entry like -// -// - kind: Deployment -// fieldSpecs: -// - kind: HorizontalPodAutoscaler -// path: spec/scaleTargetRef/name -// -// This entry says that an HPA, via its -// 'spec/scaleTargetRef/name' field, may refer to a -// Deployment. -// -// This means that a filter will need to hunt for the right Deployment, -// obtain it's new name, and write that name into the HPA's -// 'spec/scaleTargetRef/name' field. Return a filter that can do that. -func (t *nameReferenceTransformer) determineFilters( - resources []*resource.Resource) (fMap filterMap) { - // We cache the resource OrgId values because they don't change and otherwise are very visible in a memory pprof - resourceOrgIds := make([]resid.ResId, len(resources)) - for i, resource := range resources { - resourceOrgIds[i] = resource.OrgId() - } - - fMap = make(filterMap) - for _, backReference := range t.backRefs { - for _, referrerSpec := range backReference.Referrers { - for i, res := range resources { - if resourceOrgIds[i].IsSelected(&referrerSpec.Gvk) { - // If this is true, the res might be a referrer, and if - // so, the name reference it holds might need an update. - if resHasField(res, referrerSpec.Path) { - // Optimization - the referrer has the field - // that might need updating. - fMap[res] = append(fMap[res], nameref.Filter{ - // Name field to write in the Referrer. - // If the path specified here isn't found in - // the Referrer, nothing happens (no error, - // no field creation). - NameFieldToUpdate: referrerSpec, - // Specification of object class to read from. - // Always read from metadata/name field. - ReferralTarget: backReference.Gvk, - }) - } - } - } - } - } - return fMap -} - -// TODO: check res for field existence here to avoid extra work. -// res.GetFieldValue, which uses yaml.Lookup under the hood, doesn't know -// how to parse fieldspec-style paths that make no distinction -// between maps and sequences. This means it cannot lookup commonly -// used "indeterminate" paths like -// spec/containers/env/valueFrom/configMapKeyRef/name -// ('containers' is a list, not a map). -// However, the fieldspec filter does know how to handle this; -// extract that code and call it here? -func resHasField(res *resource.Resource, path string) bool { - return true - // fld := strings.Join(utils.PathSplitter(path), ".") - // _, e := res.GetFieldValue(fld) - // return e == nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/refvartransformer.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/refvartransformer.go deleted file mode 100644 index a02edc4fb..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/refvartransformer.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. 
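The name reference transformer removed above is driven by NameBackReferences entries like the HPA example in its comments. As a minimal sketch, assuming the builtinconfig and resid types this accumulator package already uses, one such entry could be built as follows (the field path mirrors the comment's example):

    // A back-reference saying an HPA's spec/scaleTargetRef/name field
    // may refer to a Deployment, so it must be rewritten when the
    // Deployment's name changes (e.g. gains a prefix).
    backRef := builtinconfig.NameBackReferences{
        Gvk: resid.Gvk{Kind: "Deployment"},
        Referrers: []types.FieldSpec{
            {Gvk: resid.Gvk{Kind: "HorizontalPodAutoscaler"},
                Path: "spec/scaleTargetRef/name"},
        },
    }
    t := newNameReferenceTransformer(
        []builtinconfig.NameBackReferences{backRef})
    _ = t // t.Transform(resMap) would then fix up the HPA's reference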
-// SPDX-License-Identifier: Apache-2.0 - -package accumulator - -import ( - "sigs.k8s.io/kustomize/api/filters/refvar" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/types" -) - -type refVarTransformer struct { - varMap map[string]interface{} - replacementCounts map[string]int - fieldSpecs []types.FieldSpec -} - -// newRefVarTransformer returns a new refVarTransformer -// that replaces $(VAR) style variables with values. -// The fieldSpecs are the places to look for occurrences of $(VAR). -func newRefVarTransformer( - varMap map[string]interface{}, fs []types.FieldSpec) *refVarTransformer { - return &refVarTransformer{ - varMap: varMap, - fieldSpecs: fs, - } -} - -// UnusedVars returns slice of Var names that were unused -// after a Transform run. -func (rv *refVarTransformer) UnusedVars() []string { - var unused []string - for k := range rv.varMap { - if _, ok := rv.replacementCounts[k]; !ok { - unused = append(unused, k) - } - } - return unused -} - -// Transform replaces $(VAR) style variables with values. -func (rv *refVarTransformer) Transform(m resmap.ResMap) error { - rv.replacementCounts = make(map[string]int) - mf := refvar.MakePrimitiveReplacer(rv.replacementCounts, rv.varMap) - for _, res := range m.Resources() { - for _, fieldSpec := range rv.fieldSpecs { - err := res.ApplyFilter(refvar.Filter{ - MappingFunc: mf, - FieldSpec: fieldSpec, - }) - if err != nil { - return err - } - } - } - return nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/resaccumulator.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/resaccumulator.go deleted file mode 100644 index 2c1fc71b5..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/accumulator/resaccumulator.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package accumulator - -import ( - "fmt" - "log" - "strings" - - "sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/resid" -) - -// ResAccumulator accumulates resources and the rules -// used to customize those resources. It's a ResMap -// plus stuff needed to modify the ResMap. -type ResAccumulator struct { - resMap resmap.ResMap - tConfig *builtinconfig.TransformerConfig - varSet types.VarSet -} - -func MakeEmptyAccumulator() *ResAccumulator { - ra := &ResAccumulator{} - ra.resMap = resmap.New() - ra.tConfig = &builtinconfig.TransformerConfig{} - ra.varSet = types.NewVarSet() - return ra -} - -// ResMap returns a copy of the internal resMap. -func (ra *ResAccumulator) ResMap() resmap.ResMap { - return ra.resMap.ShallowCopy() -} - -// Vars returns a copy of underlying vars. -func (ra *ResAccumulator) Vars() []types.Var { - return ra.varSet.AsSlice() -} - -func (ra *ResAccumulator) AppendAll(resources resmap.ResMap) error { - return ra.resMap.AppendAll(resources) -} - -func (ra *ResAccumulator) AbsorbAll(resources resmap.ResMap) error { - return ra.resMap.AbsorbAll(resources) -} - -func (ra *ResAccumulator) MergeConfig( - tConfig *builtinconfig.TransformerConfig) (err error) { - ra.tConfig, err = ra.tConfig.Merge(tConfig) - return err -} - -func (ra *ResAccumulator) GetTransformerConfig() *builtinconfig.TransformerConfig { - return ra.tConfig -} - -// MergeVars accumulates vars into ResAccumulator. -// A Var is a tuple of name, object reference and field reference. 
-// This func takes a list of vars from the current kustomization file and -// annotates the accumulated resources with the names of the vars that match -// those resources. E.g. if there's a var named "sam" that wants to get -// its data from a ConfigMap named "james", and the resource list contains a -// ConfigMap named "james", then that ConfigMap will be annotated with the -// var name "sam". Later this annotation is used to find the data for "sam" -// by digging into a particular fieldpath of "james". -func (ra *ResAccumulator) MergeVars(incoming []types.Var) error { - for _, v := range incoming { - targetId := resid.NewResIdWithNamespace(v.ObjRef.GVK(), v.ObjRef.Name, v.ObjRef.Namespace) - idMatcher := targetId.GvknEquals - if targetId.Namespace != "" || targetId.IsClusterScoped() { - // Preserve backward compatibility. An empty namespace means - // wildcard search on the namespace hence we still use GvknEquals - idMatcher = targetId.Equals - } - matched := ra.resMap.GetMatchingResourcesByAnyId(idMatcher) - if len(matched) > 1 { - return fmt.Errorf( - "found %d resId matches for var %s "+ - "(unable to disambiguate)", - len(matched), v) - } - if len(matched) == 1 { - matched[0].AppendRefVarName(v) - } - } - return ra.varSet.MergeSlice(incoming) -} - -func (ra *ResAccumulator) MergeAccumulator(other *ResAccumulator) (err error) { - err = ra.AppendAll(other.resMap) - if err != nil { - return err - } - err = ra.MergeConfig(other.tConfig) - if err != nil { - return err - } - return ra.varSet.MergeSet(other.varSet) -} - -func (ra *ResAccumulator) findVarValueFromResources(v types.Var) (interface{}, error) { - for _, res := range ra.resMap.Resources() { - for _, varName := range res.GetRefVarNames() { - if varName == v.Name { - s, err := res.GetFieldValue(v.FieldRef.FieldPath) - if err != nil { - return "", fmt.Errorf( - "field specified in var '%v' "+ - "not found in corresponding resource", v) - } - return s, nil - } - } - } - return "", fmt.Errorf( - "var '%v' cannot be mapped to a field "+ - "in the set of known resources", v) -} - -// makeVarReplacementMap returns a map of Var names to -// their final values. The values are strings intended -// for substitution wherever the $(var.Name) occurs. -func (ra *ResAccumulator) makeVarReplacementMap() (map[string]interface{}, error) { - result := map[string]interface{}{} - for _, v := range ra.Vars() { - s, err := ra.findVarValueFromResources(v) - if err != nil { - return nil, err - } - result[v.Name] = s - } - return result, nil -} - -func (ra *ResAccumulator) Transform(t resmap.Transformer) error { - return t.Transform(ra.resMap) -} - -func (ra *ResAccumulator) ResolveVars() error { - replacementMap, err := ra.makeVarReplacementMap() - if err != nil { - return err - } - if len(replacementMap) == 0 { - return nil - } - t := newRefVarTransformer( - replacementMap, ra.tConfig.VarReference) - err = ra.Transform(t) - if len(t.UnusedVars()) > 0 { - log.Printf( - "well-defined vars that were never replaced: %s\n", - strings.Join(t.UnusedVars(), ",")) - } - return err -} - -func (ra *ResAccumulator) FixBackReferences() (err error) { - if ra.tConfig.NameReference == nil { - return nil - } - return ra.Transform( - newNameReferenceTransformer(ra.tConfig.NameReference)) -} - -// Intersection drops the resources which "other" does not have. 
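MergeVars, shown above, annotates resources that match a var's object reference. For illustration only, a kustomization vars stanza of the kind that produces the "sam"/"james" situation in the comment could look like this (the fieldPath is invented for the example):

    // Hypothetical vars stanza; after parsing, each entry becomes a
    // types.Var handed to MergeVars.
    const varsExample = `
vars:
- name: sam
  objref:
    apiVersion: v1
    kind: ConfigMap
    name: james
  fieldref:
    fieldPath: data.endpoint
`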
-func (ra *ResAccumulator) Intersection(other resmap.ResMap) error { - for _, curId := range ra.resMap.AllIds() { - toDelete := true - for _, otherId := range other.AllIds() { - if otherId == curId { - toDelete = false - break - } - } - if toDelete { - err := ra.resMap.Remove(curId) - if err != nil { - return err - } - } - } - return nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/AnnotationsTransformer.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/AnnotationsTransformer.go deleted file mode 100644 index 7064fa80c..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/AnnotationsTransformer.go +++ /dev/null @@ -1,38 +0,0 @@ -// Code generated by pluginator on AnnotationsTransformer; DO NOT EDIT. -// pluginator {unknown 1970-01-01T00:00:00Z } - -package builtins - -import ( - "sigs.k8s.io/kustomize/api/filters/annotations" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/yaml" -) - -// Add the given annotations to the given field specifications. -type AnnotationsTransformerPlugin struct { - Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"` - FieldSpecs []types.FieldSpec `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"` -} - -func (p *AnnotationsTransformerPlugin) Config( - _ *resmap.PluginHelpers, c []byte) (err error) { - p.Annotations = nil - p.FieldSpecs = nil - return yaml.Unmarshal(c, p) -} - -func (p *AnnotationsTransformerPlugin) Transform(m resmap.ResMap) error { - if len(p.Annotations) == 0 { - return nil - } - return m.ApplyFilter(annotations.Filter{ - Annotations: p.Annotations, - FsSlice: p.FieldSpecs, - }) -} - -func NewAnnotationsTransformerPlugin() resmap.TransformerPlugin { - return &AnnotationsTransformerPlugin{} -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ConfigMapGenerator.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ConfigMapGenerator.go deleted file mode 100644 index d853a1cfd..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ConfigMapGenerator.go +++ /dev/null @@ -1,39 +0,0 @@ -// Code generated by pluginator on ConfigMapGenerator; DO NOT EDIT. 
-// pluginator {unknown 1970-01-01T00:00:00Z } - -package builtins - -import ( - "sigs.k8s.io/kustomize/api/kv" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/yaml" -) - -type ConfigMapGeneratorPlugin struct { - h *resmap.PluginHelpers - types.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - types.ConfigMapArgs -} - -func (p *ConfigMapGeneratorPlugin) Config(h *resmap.PluginHelpers, config []byte) (err error) { - p.ConfigMapArgs = types.ConfigMapArgs{} - err = yaml.Unmarshal(config, p) - if p.ConfigMapArgs.Name == "" { - p.ConfigMapArgs.Name = p.Name - } - if p.ConfigMapArgs.Namespace == "" { - p.ConfigMapArgs.Namespace = p.Namespace - } - p.h = h - return -} - -func (p *ConfigMapGeneratorPlugin) Generate() (resmap.ResMap, error) { - return p.h.ResmapFactory().FromConfigMapArgs( - kv.NewLoader(p.h.Loader(), p.h.Validator()), p.ConfigMapArgs) -} - -func NewConfigMapGeneratorPlugin() resmap.GeneratorPlugin { - return &ConfigMapGeneratorPlugin{} -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/HashTransformer.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/HashTransformer.go deleted file mode 100644 index 54586beeb..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/HashTransformer.go +++ /dev/null @@ -1,40 +0,0 @@ -// Code generated by pluginator on HashTransformer; DO NOT EDIT. -// pluginator {unknown 1970-01-01T00:00:00Z } - -package builtins - -import ( - "fmt" - - "sigs.k8s.io/kustomize/api/ifc" - "sigs.k8s.io/kustomize/api/resmap" -) - -type HashTransformerPlugin struct { - hasher ifc.KustHasher -} - -func (p *HashTransformerPlugin) Config( - h *resmap.PluginHelpers, _ []byte) (err error) { - p.hasher = h.ResmapFactory().RF().Hasher() - return nil -} - -// Transform appends hash to generated resources. -func (p *HashTransformerPlugin) Transform(m resmap.ResMap) error { - for _, res := range m.Resources() { - if res.NeedHashSuffix() { - h, err := res.Hash(p.hasher) - if err != nil { - return err - } - res.StorePreviousId() - res.SetName(fmt.Sprintf("%s-%s", res.GetName(), h)) - } - } - return nil -} - -func NewHashTransformerPlugin() resmap.TransformerPlugin { - return &HashTransformerPlugin{} -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/HelmChartInflationGenerator.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/HelmChartInflationGenerator.go deleted file mode 100644 index 2a654ad1f..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/HelmChartInflationGenerator.go +++ /dev/null @@ -1,339 +0,0 @@ -// Code generated by pluginator on HelmChartInflationGenerator; DO NOT EDIT. -// pluginator {unknown 1970-01-01T00:00:00Z } - -package builtins - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "regexp" - "strings" - - "github.com/imdario/mergo" - "github.com/pkg/errors" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/yaml" -) - -// HelmChartInflationGeneratorPlugin is a plugin to generate resources -// from a remote or local helm chart. 
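For orientation, the generator being removed here is configured from a kustomization helmCharts entry whose fields mirror the types.HelmChart and types.HelmGlobals fields embedded in the struct below. An illustrative, not authoritative, entry with chart name, repo and values invented:

    // Hypothetical helmCharts entry; the generator pulls the chart if it
    // is not already under chartHome, then runs `helm template` on it.
    const helmChartsExample = `
helmCharts:
- name: mychart
  repo: https://charts.example.com
  version: 1.2.3
  releaseName: my-release
  valuesInline:
    replicas: 3
  includeCRDs: true
`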
-type HelmChartInflationGeneratorPlugin struct { - h *resmap.PluginHelpers - types.HelmGlobals - types.HelmChart - tmpDir string -} - -var KustomizePlugin HelmChartInflationGeneratorPlugin - -const ( - valuesMergeOptionMerge = "merge" - valuesMergeOptionOverride = "override" - valuesMergeOptionReplace = "replace" -) - -var legalMergeOptions = []string{ - valuesMergeOptionMerge, - valuesMergeOptionOverride, - valuesMergeOptionReplace, -} - -// Config uses the input plugin configurations `config` to setup the generator -// options -func (p *HelmChartInflationGeneratorPlugin) Config( - h *resmap.PluginHelpers, config []byte) (err error) { - if h.GeneralConfig() == nil { - return fmt.Errorf("unable to access general config") - } - if !h.GeneralConfig().HelmConfig.Enabled { - return fmt.Errorf("must specify --enable-helm") - } - if h.GeneralConfig().HelmConfig.Command == "" { - return fmt.Errorf("must specify --helm-command") - } - p.h = h - if err = yaml.Unmarshal(config, p); err != nil { - return - } - return p.validateArgs() -} - -// This uses the real file system since tmpDir may be used -// by the helm subprocess. Cannot use a chroot jail or fake -// filesystem since we allow the user to use previously -// downloaded charts. This is safe since this plugin is -// owned by kustomize. -func (p *HelmChartInflationGeneratorPlugin) establishTmpDir() (err error) { - if p.tmpDir != "" { - // already done. - return nil - } - p.tmpDir, err = ioutil.TempDir("", "kustomize-helm-") - return err -} - -func (p *HelmChartInflationGeneratorPlugin) validateArgs() (err error) { - if p.Name == "" { - return fmt.Errorf("chart name cannot be empty") - } - - // ChartHome might be consulted by the plugin (to read - // values files below it), so it must be located under - // the loader root (unless root restrictions are - // disabled, in which case this can be an absolute path). - if p.ChartHome == "" { - p.ChartHome = "charts" - } - - // The ValuesFile may be consulted by the plugin, so it must - // be under the loader root (unless root restrictions are - // disabled). - if p.ValuesFile == "" { - p.ValuesFile = filepath.Join(p.ChartHome, p.Name, "values.yaml") - } - - if err = p.errIfIllegalValuesMerge(); err != nil { - return err - } - - // ConfigHome is not loaded by the plugin, and can be located anywhere. - if p.ConfigHome == "" { - if err = p.establishTmpDir(); err != nil { - return errors.Wrap( - err, "unable to create tmp dir for HELM_CONFIG_HOME") - } - p.ConfigHome = filepath.Join(p.tmpDir, "helm") - } - return nil -} - -func (p *HelmChartInflationGeneratorPlugin) errIfIllegalValuesMerge() error { - if p.ValuesMerge == "" { - // Use the default. - p.ValuesMerge = valuesMergeOptionOverride - return nil - } - for _, opt := range legalMergeOptions { - if p.ValuesMerge == opt { - return nil - } - } - return fmt.Errorf("valuesMerge must be one of %v", legalMergeOptions) -} - -func (p *HelmChartInflationGeneratorPlugin) absChartHome() string { - if filepath.IsAbs(p.ChartHome) { - return p.ChartHome - } - return filepath.Join(p.h.Loader().Root(), p.ChartHome) -} - -func (p *HelmChartInflationGeneratorPlugin) runHelmCommand( - args []string) ([]byte, error) { - stdout := new(bytes.Buffer) - stderr := new(bytes.Buffer) - cmd := exec.Command(p.h.GeneralConfig().HelmConfig.Command, args...) 
- cmd.Stdout = stdout - cmd.Stderr = stderr - env := []string{ - fmt.Sprintf("HELM_CONFIG_HOME=%s", p.ConfigHome), - fmt.Sprintf("HELM_CACHE_HOME=%s/.cache", p.ConfigHome), - fmt.Sprintf("HELM_DATA_HOME=%s/.data", p.ConfigHome)} - cmd.Env = append(os.Environ(), env...) - err := cmd.Run() - if err != nil { - helm := p.h.GeneralConfig().HelmConfig.Command - err = errors.Wrap( - fmt.Errorf( - "unable to run: '%s %s' with env=%s (is '%s' installed?)", - helm, strings.Join(args, " "), env, helm), - stderr.String(), - ) - } - return stdout.Bytes(), err -} - -// createNewMergedValuesFile replaces/merges original values file with ValuesInline. -func (p *HelmChartInflationGeneratorPlugin) createNewMergedValuesFile() ( - path string, err error) { - if p.ValuesMerge == valuesMergeOptionMerge || - p.ValuesMerge == valuesMergeOptionOverride { - if err = p.replaceValuesInline(); err != nil { - return "", err - } - } - var b []byte - b, err = yaml.Marshal(p.ValuesInline) - if err != nil { - return "", err - } - return p.writeValuesBytes(b) -} - -func (p *HelmChartInflationGeneratorPlugin) replaceValuesInline() error { - pValues, err := p.h.Loader().Load(p.ValuesFile) - if err != nil { - return err - } - chValues := make(map[string]interface{}) - if err = yaml.Unmarshal(pValues, &chValues); err != nil { - return err - } - switch p.ValuesMerge { - case valuesMergeOptionOverride: - err = mergo.Merge( - &chValues, p.ValuesInline, mergo.WithOverride) - case valuesMergeOptionMerge: - err = mergo.Merge(&chValues, p.ValuesInline) - } - p.ValuesInline = chValues - return err -} - -// copyValuesFile to avoid branching. TODO: get rid of this. -func (p *HelmChartInflationGeneratorPlugin) copyValuesFile() (string, error) { - b, err := p.h.Loader().Load(p.ValuesFile) - if err != nil { - return "", err - } - return p.writeValuesBytes(b) -} - -// Write a absolute path file in the tmp file system. 
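createNewMergedValuesFile and replaceValuesInline above combine the chart's values file with valuesInline using mergo. A small self-contained sketch of the two merge modes on plain maps (keys and values are invented):

    package main

    import (
        "fmt"

        "github.com/imdario/mergo"
    )

    func main() {
        chartValues := map[string]interface{}{"replicas": 1, "image": "nginx"}
        inline := map[string]interface{}{"replicas": 3}

        // valuesMerge: override, i.e. inline values win over the chart's values.
        if err := mergo.Merge(&chartValues, inline, mergo.WithOverride); err != nil {
            panic(err)
        }
        fmt.Println(chartValues["replicas"]) // 3

        // valuesMerge: merge would call mergo.Merge without WithOverride,
        // keeping existing chart values and only adding missing keys.
    }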
-func (p *HelmChartInflationGeneratorPlugin) writeValuesBytes( - b []byte) (string, error) { - if err := p.establishTmpDir(); err != nil { - return "", fmt.Errorf("cannot create tmp dir to write helm values") - } - path := filepath.Join(p.tmpDir, p.Name+"-kustomize-values.yaml") - return path, ioutil.WriteFile(path, b, 0644) -} - -func (p *HelmChartInflationGeneratorPlugin) cleanup() { - if p.tmpDir != "" { - os.RemoveAll(p.tmpDir) - } -} - -// Generate implements generator -func (p *HelmChartInflationGeneratorPlugin) Generate() (rm resmap.ResMap, err error) { - defer p.cleanup() - if err = p.checkHelmVersion(); err != nil { - return nil, err - } - if path, exists := p.chartExistsLocally(); !exists { - if p.Repo == "" { - return nil, fmt.Errorf( - "no repo specified for pull, no chart found at '%s'", path) - } - if _, err := p.runHelmCommand(p.pullCommand()); err != nil { - return nil, err - } - } - if len(p.ValuesInline) > 0 { - p.ValuesFile, err = p.createNewMergedValuesFile() - } else { - p.ValuesFile, err = p.copyValuesFile() - } - if err != nil { - return nil, err - } - var stdout []byte - stdout, err = p.runHelmCommand(p.templateCommand()) - if err != nil { - return nil, err - } - - rm, err = p.h.ResmapFactory().NewResMapFromBytes(stdout) - if err == nil { - return rm, nil - } - // try to remove the contents before first "---" because - // helm may produce messages to stdout before it - stdoutStr := string(stdout) - if idx := strings.Index(stdoutStr, "---"); idx != -1 { - return p.h.ResmapFactory().NewResMapFromBytes([]byte(stdoutStr[idx:])) - } - return nil, err -} - -func (p *HelmChartInflationGeneratorPlugin) templateCommand() []string { - args := []string{"template"} - if p.ReleaseName != "" { - args = append(args, p.ReleaseName) - } - if p.Namespace != "" { - args = append(args, "--namespace", p.Namespace) - } - args = append(args, filepath.Join(p.absChartHome(), p.Name)) - if p.ValuesFile != "" { - args = append(args, "--values", p.ValuesFile) - } - if p.ReleaseName == "" { - // AFAICT, this doesn't work as intended due to a bug in helm. - // See https://github.com/helm/helm/issues/6019 - // I've tried placing the flag before and after the name argument. - args = append(args, "--generate-name") - } - if p.IncludeCRDs { - args = append(args, "--include-crds") - } - return args -} - -func (p *HelmChartInflationGeneratorPlugin) pullCommand() []string { - args := []string{ - "pull", - "--untar", - "--untardir", p.absChartHome(), - "--repo", p.Repo, - p.Name} - if p.Version != "" { - args = append(args, "--version", p.Version) - } - return args -} - -// chartExistsLocally will return true if the chart does exist in -// local chart home. 
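To make the command assembly above concrete: assuming a chart named "mychart" under chartHome "/work/charts", a release name "my-release", and a values file written to the generator's tmp dir (all paths invented), the arguments built by templateCommand and pullCommand come out roughly as:

    // Illustrative argument slices mirroring templateCommand/pullCommand.
    templateArgs := []string{
        "template", "my-release",
        "/work/charts/mychart",
        "--values", "/tmp/kustomize-helm-123/mychart-kustomize-values.yaml",
    }
    pullArgs := []string{
        "pull", "--untar", "--untardir", "/work/charts",
        "--repo", "https://charts.example.com", "mychart",
        "--version", "1.2.3",
    }
    _, _ = templateArgs, pullArgs
    // Both run through runHelmCommand with HELM_CONFIG_HOME, HELM_CACHE_HOME
    // and HELM_DATA_HOME pointed into the generator's ConfigHome.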
-func (p *HelmChartInflationGeneratorPlugin) chartExistsLocally() (string, bool) { - path := filepath.Join(p.absChartHome(), p.Name) - s, err := os.Stat(path) - if err != nil { - return "", false - } - return path, s.IsDir() -} - -// checkHelmVersion will return an error if the helm version is not V3 -func (p *HelmChartInflationGeneratorPlugin) checkHelmVersion() error { - stdout, err := p.runHelmCommand([]string{"version", "-c", "--short"}) - if err != nil { - return err - } - r, err := regexp.Compile(`v?\d+(\.\d+)+`) - if err != nil { - return err - } - v := r.FindString(string(stdout)) - if v == "" { - return fmt.Errorf("cannot find version string in %s", string(stdout)) - } - if v[0] == 'v' { - v = v[1:] - } - majorVersion := strings.Split(v, ".")[0] - if majorVersion != "3" { - return fmt.Errorf("this plugin requires helm V3 but got v%s", v) - } - return nil -} - -func NewHelmChartInflationGeneratorPlugin() resmap.GeneratorPlugin { - return &HelmChartInflationGeneratorPlugin{} -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/IAMPolicyGenerator.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/IAMPolicyGenerator.go deleted file mode 100644 index 82a2dd603..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/IAMPolicyGenerator.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by pluginator on IAMPolicyGenerator; DO NOT EDIT. -// pluginator {unknown 1970-01-01T00:00:00Z } - -package builtins - -import ( - "sigs.k8s.io/kustomize/api/filters/iampolicygenerator" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/yaml" -) - -type IAMPolicyGeneratorPlugin struct { - types.IAMPolicyGeneratorArgs -} - -func (p *IAMPolicyGeneratorPlugin) Config(h *resmap.PluginHelpers, config []byte) (err error) { - p.IAMPolicyGeneratorArgs = types.IAMPolicyGeneratorArgs{} - err = yaml.Unmarshal(config, p) - return -} - -func (p *IAMPolicyGeneratorPlugin) Generate() (resmap.ResMap, error) { - r := resmap.New() - err := r.ApplyFilter(iampolicygenerator.Filter{ - IAMPolicyGenerator: p.IAMPolicyGeneratorArgs, - }) - return r, err -} - -func NewIAMPolicyGeneratorPlugin() resmap.GeneratorPlugin { - return &IAMPolicyGeneratorPlugin{} -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ImageTagTransformer.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ImageTagTransformer.go deleted file mode 100644 index 52a44d385..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ImageTagTransformer.go +++ /dev/null @@ -1,41 +0,0 @@ -// Code generated by pluginator on ImageTagTransformer; DO NOT EDIT. -// pluginator {unknown 1970-01-01T00:00:00Z } - -package builtins - -import ( - "sigs.k8s.io/kustomize/api/filters/imagetag" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/yaml" -) - -// Find matching image declarations and replace -// the name, tag and/or digest. 
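The image tag transformer described in the comment above (its struct follows) is configured with a types.Image plus optional fieldSpecs. An illustrative config of the shape its Config method unmarshals, with image names and tag invented:

    // Hypothetical transformer config: retag busybox wherever the
    // default image fieldSpecs point.
    const imageTagConfig = `
imageTag:
  name: busybox
  newName: registry.example.com/library/busybox
  newTag: "1.36"
`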
-type ImageTagTransformerPlugin struct { - ImageTag types.Image `json:"imageTag,omitempty" yaml:"imageTag,omitempty"` - FieldSpecs []types.FieldSpec `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"` -} - -func (p *ImageTagTransformerPlugin) Config( - _ *resmap.PluginHelpers, c []byte) (err error) { - p.ImageTag = types.Image{} - p.FieldSpecs = nil - return yaml.Unmarshal(c, p) -} - -func (p *ImageTagTransformerPlugin) Transform(m resmap.ResMap) error { - if err := m.ApplyFilter(imagetag.LegacyFilter{ - ImageTag: p.ImageTag, - }); err != nil { - return err - } - return m.ApplyFilter(imagetag.Filter{ - ImageTag: p.ImageTag, - FsSlice: p.FieldSpecs, - }) -} - -func NewImageTagTransformerPlugin() resmap.TransformerPlugin { - return &ImageTagTransformerPlugin{} -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/LabelTransformer.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/LabelTransformer.go deleted file mode 100644 index af601da5b..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/LabelTransformer.go +++ /dev/null @@ -1,38 +0,0 @@ -// Code generated by pluginator on LabelTransformer; DO NOT EDIT. -// pluginator {unknown 1970-01-01T00:00:00Z } - -package builtins - -import ( - "sigs.k8s.io/kustomize/api/filters/labels" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/yaml" -) - -// Add the given labels to the given field specifications. -type LabelTransformerPlugin struct { - Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` - FieldSpecs []types.FieldSpec `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"` -} - -func (p *LabelTransformerPlugin) Config( - _ *resmap.PluginHelpers, c []byte) (err error) { - p.Labels = nil - p.FieldSpecs = nil - return yaml.Unmarshal(c, p) -} - -func (p *LabelTransformerPlugin) Transform(m resmap.ResMap) error { - if len(p.Labels) == 0 { - return nil - } - return m.ApplyFilter(labels.Filter{ - Labels: p.Labels, - FsSlice: p.FieldSpecs, - }) -} - -func NewLabelTransformerPlugin() resmap.TransformerPlugin { - return &LabelTransformerPlugin{} -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/LegacyOrderTransformer.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/LegacyOrderTransformer.go deleted file mode 100644 index 322cea93a..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/LegacyOrderTransformer.go +++ /dev/null @@ -1,46 +0,0 @@ -// Code generated by pluginator on LegacyOrderTransformer; DO NOT EDIT. -// pluginator {unknown 1970-01-01T00:00:00Z } - -package builtins - -import ( - "sort" - - "github.com/pkg/errors" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/resource" -) - -// Sort the resources using an ordering defined in the Gvk class. -// This puts cluster-wide basic resources with no -// dependencies (like Namespace, StorageClass, etc.) -// first, and resources with a high number of dependencies -// (like ValidatingWebhookConfiguration) last. -type LegacyOrderTransformerPlugin struct{} - -// Nothing needed for configuration. 
-func (p *LegacyOrderTransformerPlugin) Config( - _ *resmap.PluginHelpers, _ []byte) (err error) { - return nil -} - -func (p *LegacyOrderTransformerPlugin) Transform(m resmap.ResMap) (err error) { - resources := make([]*resource.Resource, m.Size()) - ids := m.AllIds() - sort.Sort(resmap.IdSlice(ids)) - for i, id := range ids { - resources[i], err = m.GetByCurrentId(id) - if err != nil { - return errors.Wrap(err, "expected match for sorting") - } - } - m.Clear() - for _, r := range resources { - m.Append(r) - } - return nil -} - -func NewLegacyOrderTransformerPlugin() resmap.TransformerPlugin { - return &LegacyOrderTransformerPlugin{} -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/NamespaceTransformer.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/NamespaceTransformer.go deleted file mode 100644 index 71b162a79..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/NamespaceTransformer.go +++ /dev/null @@ -1,74 +0,0 @@ -// Code generated by pluginator on NamespaceTransformer; DO NOT EDIT. -// pluginator {unknown 1970-01-01T00:00:00Z } - -package builtins - -import ( - "fmt" - - "sigs.k8s.io/kustomize/api/filters/namespace" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/yaml" -) - -// Change or set the namespace of non-cluster level resources. -type NamespaceTransformerPlugin struct { - types.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - FieldSpecs []types.FieldSpec `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"` - UnsetOnly bool `json:"unsetOnly" yaml:"unsetOnly"` - SetRoleBindingSubjects namespace.RoleBindingSubjectMode `json:"setRoleBindingSubjects" yaml:"setRoleBindingSubjects"` -} - -func (p *NamespaceTransformerPlugin) Config( - _ *resmap.PluginHelpers, c []byte) (err error) { - p.Namespace = "" - p.FieldSpecs = nil - if err := yaml.Unmarshal(c, p); err != nil { - return errors.WrapPrefixf(err, "unmarshalling NamespaceTransformer config") - } - switch p.SetRoleBindingSubjects { - case namespace.AllServiceAccountSubjects, namespace.DefaultSubjectsOnly, namespace.NoSubjects: - // valid - case namespace.SubjectModeUnspecified: - p.SetRoleBindingSubjects = namespace.DefaultSubjectsOnly - default: - return errors.Errorf("invalid value %q for setRoleBindingSubjects: "+ - "must be one of %q, %q or %q", p.SetRoleBindingSubjects, - namespace.DefaultSubjectsOnly, namespace.NoSubjects, namespace.AllServiceAccountSubjects) - } - - return nil -} - -func (p *NamespaceTransformerPlugin) Transform(m resmap.ResMap) error { - if len(p.Namespace) == 0 { - return nil - } - for _, r := range m.Resources() { - if r.IsNilOrEmpty() { - // Don't mutate empty objects? 
- continue - } - r.StorePreviousId() - if err := r.ApplyFilter(namespace.Filter{ - Namespace: p.Namespace, - FsSlice: p.FieldSpecs, - SetRoleBindingSubjects: p.SetRoleBindingSubjects, - UnsetOnly: p.UnsetOnly, - }); err != nil { - return err - } - matches := m.GetMatchingResourcesByCurrentId(r.CurId().Equals) - if len(matches) != 1 { - return fmt.Errorf( - "namespace transformation produces ID conflict: %+v", matches) - } - } - return nil -} - -func NewNamespaceTransformerPlugin() resmap.TransformerPlugin { - return &NamespaceTransformerPlugin{} -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchJson6902Transformer.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchJson6902Transformer.go deleted file mode 100644 index cb02c4241..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchJson6902Transformer.go +++ /dev/null @@ -1,105 +0,0 @@ -// Code generated by pluginator on PatchJson6902Transformer; DO NOT EDIT. -// pluginator {unknown 1970-01-01T00:00:00Z } - -package builtins - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" - "github.com/pkg/errors" - "sigs.k8s.io/kustomize/api/filters/patchjson6902" - "sigs.k8s.io/kustomize/api/ifc" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/kio/kioutil" - "sigs.k8s.io/yaml" -) - -type PatchJson6902TransformerPlugin struct { - ldr ifc.Loader - decodedPatch jsonpatch.Patch - Target *types.Selector `json:"target,omitempty" yaml:"target,omitempty"` - Path string `json:"path,omitempty" yaml:"path,omitempty"` - JsonOp string `json:"jsonOp,omitempty" yaml:"jsonOp,omitempty"` -} - -func (p *PatchJson6902TransformerPlugin) Config( - h *resmap.PluginHelpers, c []byte) (err error) { - p.ldr = h.Loader() - err = yaml.Unmarshal(c, p) - if err != nil { - return err - } - if p.Target.Name == "" { - return fmt.Errorf("must specify the target name") - } - if p.Path == "" && p.JsonOp == "" { - return fmt.Errorf("empty file path and empty jsonOp") - } - if p.Path != "" { - if p.JsonOp != "" { - return fmt.Errorf("must specify a file path or jsonOp, not both") - } - rawOp, err := p.ldr.Load(p.Path) - if err != nil { - return err - } - p.JsonOp = string(rawOp) - if p.JsonOp == "" { - return fmt.Errorf("patch file '%s' empty seems to be empty", p.Path) - } - } - if p.JsonOp[0] != '[' { - // if it doesn't seem to be JSON, imagine - // it is YAML, and convert to JSON. 
- op, err := yaml.YAMLToJSON([]byte(p.JsonOp)) - if err != nil { - return err - } - p.JsonOp = string(op) - } - p.decodedPatch, err = jsonpatch.DecodePatch([]byte(p.JsonOp)) - if err != nil { - return errors.Wrapf(err, "decoding %s", p.JsonOp) - } - if len(p.decodedPatch) == 0 { - return fmt.Errorf( - "patch appears to be empty; file=%s, JsonOp=%s", p.Path, p.JsonOp) - } - return err -} - -func (p *PatchJson6902TransformerPlugin) Transform(m resmap.ResMap) error { - if p.Target == nil { - return fmt.Errorf("must specify a target for patch %s", p.JsonOp) - } - resources, err := m.Select(*p.Target) - if err != nil { - return err - } - for _, res := range resources { - internalAnnotations := kioutil.GetInternalAnnotations(&res.RNode) - - err = res.ApplyFilter(patchjson6902.Filter{ - Patch: p.JsonOp, - }) - if err != nil { - return err - } - - annotations := res.GetAnnotations() - for key, value := range internalAnnotations { - annotations[key] = value - } - err = res.SetAnnotations(annotations) - if err != nil { - return err - } - } - return nil -} - -func NewPatchJson6902TransformerPlugin() resmap.TransformerPlugin { - return &PatchJson6902TransformerPlugin{} -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchStrategicMergeTransformer.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchStrategicMergeTransformer.go deleted file mode 100644 index f93d1266d..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchStrategicMergeTransformer.go +++ /dev/null @@ -1,89 +0,0 @@ -// Code generated by pluginator on PatchStrategicMergeTransformer; DO NOT EDIT. -// pluginator {unknown 1970-01-01T00:00:00Z } - -package builtins - -import ( - "fmt" - - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/resource" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/yaml" -) - -type PatchStrategicMergeTransformerPlugin struct { - loadedPatches []*resource.Resource - Paths []types.PatchStrategicMerge `json:"paths,omitempty" yaml:"paths,omitempty"` - Patches string `json:"patches,omitempty" yaml:"patches,omitempty"` -} - -func (p *PatchStrategicMergeTransformerPlugin) Config( - h *resmap.PluginHelpers, c []byte) (err error) { - err = yaml.Unmarshal(c, p) - if err != nil { - return err - } - if len(p.Paths) == 0 && p.Patches == "" { - return fmt.Errorf("empty file path and empty patch content") - } - if len(p.Paths) != 0 { - patches, err := loadFromPaths(h, p.Paths) - if err != nil { - return err - } - p.loadedPatches = append(p.loadedPatches, patches...) - } - if p.Patches != "" { - patches, err := h.ResmapFactory().RF().SliceFromBytes([]byte(p.Patches)) - if err != nil { - return err - } - p.loadedPatches = append(p.loadedPatches, patches...) - } - if len(p.loadedPatches) == 0 { - return fmt.Errorf( - "patch appears to be empty; files=%v, Patch=%s", p.Paths, p.Patches) - } - return nil -} - -func loadFromPaths( - h *resmap.PluginHelpers, - paths []types.PatchStrategicMerge) ( - result []*resource.Resource, err error) { - var patches []*resource.Resource - for _, path := range paths { - // For legacy reasons, attempt to treat the path string as - // actual patch content. - patches, err = h.ResmapFactory().RF().SliceFromBytes([]byte(path)) - if err != nil { - // Failing that, treat it as a file path. - patches, err = h.ResmapFactory().RF().SliceFromPatches( - h.Loader(), []types.PatchStrategicMerge{path}) - if err != nil { - return - } - } - result = append(result, patches...) 
- } - return -} - -func (p *PatchStrategicMergeTransformerPlugin) Transform(m resmap.ResMap) error { - for _, patch := range p.loadedPatches { - target, err := m.GetById(patch.OrgId()) - if err != nil { - return err - } - if err = m.ApplySmPatch( - resource.MakeIdSet([]*resource.Resource{target}), patch); err != nil { - return err - } - } - return nil -} - -func NewPatchStrategicMergeTransformerPlugin() resmap.TransformerPlugin { - return &PatchStrategicMergeTransformerPlugin{} -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchTransformer.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchTransformer.go deleted file mode 100644 index dc51748c8..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PatchTransformer.go +++ /dev/null @@ -1,153 +0,0 @@ -// Code generated by pluginator on PatchTransformer; DO NOT EDIT. -// pluginator {unknown 1970-01-01T00:00:00Z } - -package builtins - -import ( - "fmt" - "strings" - - jsonpatch "github.com/evanphx/json-patch" - "sigs.k8s.io/kustomize/api/filters/patchjson6902" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/resource" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/kio/kioutil" - "sigs.k8s.io/yaml" -) - -type PatchTransformerPlugin struct { - loadedPatch *resource.Resource - decodedPatch jsonpatch.Patch - Path string `json:"path,omitempty" yaml:"path,omitempty"` - Patch string `json:"patch,omitempty" yaml:"patch,omitempty"` - Target *types.Selector `json:"target,omitempty" yaml:"target,omitempty"` - Options map[string]bool `json:"options,omitempty" yaml:"options,omitempty"` -} - -func (p *PatchTransformerPlugin) Config( - h *resmap.PluginHelpers, c []byte) error { - err := yaml.Unmarshal(c, p) - if err != nil { - return err - } - p.Patch = strings.TrimSpace(p.Patch) - if p.Patch == "" && p.Path == "" { - return fmt.Errorf( - "must specify one of patch and path in\n%s", string(c)) - } - if p.Patch != "" && p.Path != "" { - return fmt.Errorf( - "patch and path can't be set at the same time\n%s", string(c)) - } - if p.Path != "" { - loaded, loadErr := h.Loader().Load(p.Path) - if loadErr != nil { - return loadErr - } - p.Patch = string(loaded) - } - - patchSM, errSM := h.ResmapFactory().RF().FromBytes([]byte(p.Patch)) - patchJson, errJson := jsonPatchFromBytes([]byte(p.Patch)) - if (errSM == nil && errJson == nil) || - (patchSM != nil && patchJson != nil) { - return fmt.Errorf( - "illegally qualifies as both an SM and JSON patch: [%v]", - p.Patch) - } - if errSM != nil && errJson != nil { - return fmt.Errorf( - "unable to parse SM or JSON patch from [%v]", p.Patch) - } - if errSM == nil { - p.loadedPatch = patchSM - if p.Options["allowNameChange"] { - p.loadedPatch.AllowNameChange() - } - if p.Options["allowKindChange"] { - p.loadedPatch.AllowKindChange() - } - } else { - p.decodedPatch = patchJson - } - return nil -} - -func (p *PatchTransformerPlugin) Transform(m resmap.ResMap) error { - if p.loadedPatch == nil { - return p.transformJson6902(m, p.decodedPatch) - } - // The patch was a strategic merge patch - return p.transformStrategicMerge(m, p.loadedPatch) -} - -// transformStrategicMerge applies the provided strategic merge patch -// to all the resources in the ResMap that match either the Target or -// the identifier of the patch. 
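The JSON 6902 handling above (and jsonPatchFromBytes just below) accepts operations either as raw JSON or as YAML that is first converted with yaml.YAMLToJSON. A self-contained sketch of that round trip, using an arbitrary example operation:

    package main

    import (
        "fmt"

        jsonpatch "github.com/evanphx/json-patch"
        "sigs.k8s.io/yaml"
    )

    func main() {
        // YAML form of a JSON 6902 operation, as a user might write it.
        yamlOps := []byte("- op: replace\n  path: /spec/replicas\n  value: 3\n")

        // Convert to JSON, as the Config methods above do when the input
        // does not start with '[', then decode the patch.
        jsonOps, err := yaml.YAMLToJSON(yamlOps)
        if err != nil {
            panic(err)
        }
        patch, err := jsonpatch.DecodePatch(jsonOps)
        if err != nil {
            panic(err)
        }
        fmt.Println(len(patch)) // 1 operation, ready to apply to a resource
    }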
-func (p *PatchTransformerPlugin) transformStrategicMerge(m resmap.ResMap, patch *resource.Resource) error { - if p.Target == nil { - target, err := m.GetById(patch.OrgId()) - if err != nil { - return err - } - return target.ApplySmPatch(patch) - } - selected, err := m.Select(*p.Target) - if err != nil { - return err - } - return m.ApplySmPatch(resource.MakeIdSet(selected), patch) -} - -// transformJson6902 applies the provided json6902 patch -// to all the resources in the ResMap that match the Target. -func (p *PatchTransformerPlugin) transformJson6902(m resmap.ResMap, patch jsonpatch.Patch) error { - if p.Target == nil { - return fmt.Errorf("must specify a target for patch %s", p.Patch) - } - resources, err := m.Select(*p.Target) - if err != nil { - return err - } - for _, res := range resources { - res.StorePreviousId() - internalAnnotations := kioutil.GetInternalAnnotations(&res.RNode) - err = res.ApplyFilter(patchjson6902.Filter{ - Patch: p.Patch, - }) - if err != nil { - return err - } - - annotations := res.GetAnnotations() - for key, value := range internalAnnotations { - annotations[key] = value - } - err = res.SetAnnotations(annotations) - } - return nil -} - -// jsonPatchFromBytes loads a Json 6902 patch from -// a bytes input -func jsonPatchFromBytes( - in []byte) (jsonpatch.Patch, error) { - ops := string(in) - if ops == "" { - return nil, fmt.Errorf("empty json patch operations") - } - - if ops[0] != '[' { - jsonOps, err := yaml.YAMLToJSON(in) - if err != nil { - return nil, err - } - ops = string(jsonOps) - } - return jsonpatch.DecodePatch([]byte(ops)) -} - -func NewPatchTransformerPlugin() resmap.TransformerPlugin { - return &PatchTransformerPlugin{} -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PrefixTransformer.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PrefixTransformer.go deleted file mode 100644 index 03040ce0f..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/PrefixTransformer.go +++ /dev/null @@ -1,96 +0,0 @@ -// Code generated by pluginator on PrefixTransformer; DO NOT EDIT. -// pluginator {unknown 1970-01-01T00:00:00Z } - -package builtins - -import ( - "errors" - - "sigs.k8s.io/kustomize/api/filters/prefix" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/resid" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// Add the given prefix to the field -type PrefixTransformerPlugin struct { - Prefix string `json:"prefix,omitempty" yaml:"prefix,omitempty"` - FieldSpecs types.FsSlice `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"` -} - -// TODO: Make this gvk skip list part of the config. -var prefixFieldSpecsToSkip = types.FsSlice{ - {Gvk: resid.Gvk{Kind: "CustomResourceDefinition"}}, - {Gvk: resid.Gvk{Group: "apiregistration.k8s.io", Kind: "APIService"}}, - {Gvk: resid.Gvk{Kind: "Namespace"}}, -} - -func (p *PrefixTransformerPlugin) Config( - _ *resmap.PluginHelpers, c []byte) (err error) { - p.Prefix = "" - p.FieldSpecs = nil - err = yaml.Unmarshal(c, p) - if err != nil { - return - } - if p.FieldSpecs == nil { - return errors.New("fieldSpecs is not expected to be nil") - } - return -} - -func (p *PrefixTransformerPlugin) Transform(m resmap.ResMap) error { - // Even if the Prefix is empty we want to proceed with the - // transformation. This allows to add contextual information - // to the resources (AddNamePrefix). - for _, r := range m.Resources() { - // TODO: move this test into the filter (i.e. 
make a better filter) - if p.shouldSkip(r.OrgId()) { - continue - } - id := r.OrgId() - // current default configuration contains - // only one entry: "metadata/name" with no GVK - for _, fs := range p.FieldSpecs { - // TODO: this is redundant to filter (but needed for now) - if !id.IsSelected(&fs.Gvk) { - continue - } - // TODO: move this test into the filter. - if fs.Path == "metadata/name" { - // "metadata/name" is the only field. - // this will add a prefix to the resource - // even if it is empty - - r.AddNamePrefix(p.Prefix) - if p.Prefix != "" { - // TODO: There are multiple transformers that can change a resource's name, and each makes a call to - // StorePreviousID(). We should make it so that we only call StorePreviousID once per kustomization layer - // to avoid storing intermediate names between transformations, to prevent intermediate name conflicts. - r.StorePreviousId() - } - } - if err := r.ApplyFilter(prefix.Filter{ - Prefix: p.Prefix, - FieldSpec: fs, - }); err != nil { - return err - } - } - } - return nil -} - -func (p *PrefixTransformerPlugin) shouldSkip(id resid.ResId) bool { - for _, path := range prefixFieldSpecsToSkip { - if id.IsSelected(&path.Gvk) { - return true - } - } - return false -} - -func NewPrefixTransformerPlugin() resmap.TransformerPlugin { - return &PrefixTransformerPlugin{} -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ReplacementTransformer.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ReplacementTransformer.go deleted file mode 100644 index 7accb64bf..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ReplacementTransformer.go +++ /dev/null @@ -1,78 +0,0 @@ -// Code generated by pluginator on ReplacementTransformer; DO NOT EDIT. -// pluginator {unknown 1970-01-01T00:00:00Z } - -package builtins - -import ( - "fmt" - "reflect" - - "sigs.k8s.io/kustomize/api/filters/replacement" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/yaml" -) - -// Replace values in targets with values from a source -type ReplacementTransformerPlugin struct { - ReplacementList []types.ReplacementField `json:"replacements,omitempty" yaml:"replacements,omitempty"` - Replacements []types.Replacement `json:"omitempty" yaml:"omitempty"` -} - -func (p *ReplacementTransformerPlugin) Config( - h *resmap.PluginHelpers, c []byte) (err error) { - p.ReplacementList = []types.ReplacementField{} - if err := yaml.Unmarshal(c, p); err != nil { - return err - } - - for _, r := range p.ReplacementList { - if r.Path != "" && (r.Source != nil || len(r.Targets) != 0) { - return fmt.Errorf("cannot specify both path and inline replacement") - } - if r.Path != "" { - // load the replacement from the path - content, err := h.Loader().Load(r.Path) - if err != nil { - return err - } - // find if the path contains a a list of replacements or a single replacement - var replacement interface{} - err = yaml.Unmarshal(content, &replacement) - if err != nil { - return err - } - items := reflect.ValueOf(replacement) - switch items.Kind() { - case reflect.Slice: - repl := []types.Replacement{} - if err := yaml.Unmarshal(content, &repl); err != nil { - return err - } - p.Replacements = append(p.Replacements, repl...) 
- case reflect.Map: - repl := types.Replacement{} - if err := yaml.Unmarshal(content, &repl); err != nil { - return err - } - p.Replacements = append(p.Replacements, repl) - default: - return fmt.Errorf("unsupported replacement type encountered within replacement path: %v", items.Kind()) - } - } else { - // replacement information is already loaded - p.Replacements = append(p.Replacements, r.Replacement) - } - } - return nil -} - -func (p *ReplacementTransformerPlugin) Transform(m resmap.ResMap) (err error) { - return m.ApplyFilter(replacement.Filter{ - Replacements: p.Replacements, - }) -} - -func NewReplacementTransformerPlugin() resmap.TransformerPlugin { - return &ReplacementTransformerPlugin{} -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ReplicaCountTransformer.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ReplicaCountTransformer.go deleted file mode 100644 index 120fab340..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ReplicaCountTransformer.go +++ /dev/null @@ -1,73 +0,0 @@ -// Code generated by pluginator on ReplicaCountTransformer; DO NOT EDIT. -// pluginator {unknown 1970-01-01T00:00:00Z } - -package builtins - -import ( - "fmt" - - "sigs.k8s.io/kustomize/api/filters/replicacount" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/resid" - "sigs.k8s.io/yaml" -) - -// Find matching replicas declarations and replace the count. -// Eases the kustomization configuration of replica changes. -type ReplicaCountTransformerPlugin struct { - Replica types.Replica `json:"replica,omitempty" yaml:"replica,omitempty"` - FieldSpecs []types.FieldSpec `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"` -} - -func (p *ReplicaCountTransformerPlugin) Config( - _ *resmap.PluginHelpers, c []byte) (err error) { - p.Replica = types.Replica{} - p.FieldSpecs = nil - return yaml.Unmarshal(c, p) -} - -func (p *ReplicaCountTransformerPlugin) Transform(m resmap.ResMap) error { - found := false - for _, fs := range p.FieldSpecs { - matcher := p.createMatcher(fs) - resList := m.GetMatchingResourcesByAnyId(matcher) - if len(resList) > 0 { - found = true - for _, r := range resList { - // There are redundant checks in the filter - // that we'll live with until resolution of - // https://github.com/kubernetes-sigs/kustomize/issues/2506 - err := r.ApplyFilter(replicacount.Filter{ - Replica: p.Replica, - FieldSpec: fs, - }) - if err != nil { - return err - } - } - } - } - - if !found { - gvks := make([]string, len(p.FieldSpecs)) - for i, replicaSpec := range p.FieldSpecs { - gvks[i] = replicaSpec.Gvk.String() - } - return fmt.Errorf("resource with name %s does not match a config with the following GVK %v", - p.Replica.Name, gvks) - } - - return nil -} - -// Match Replica.Name and FieldSpec -func (p *ReplicaCountTransformerPlugin) createMatcher(fs types.FieldSpec) resmap.IdMatcher { - return func(r resid.ResId) bool { - return r.Name == p.Replica.Name && r.Gvk.IsSelected(&fs.Gvk) - } -} - -func NewReplicaCountTransformerPlugin() resmap.TransformerPlugin { - return &ReplicaCountTransformerPlugin{} -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SecretGenerator.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SecretGenerator.go deleted file mode 100644 index 5e8581eb9..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SecretGenerator.go +++ /dev/null @@ -1,39 +0,0 @@ -// Code generated by pluginator on SecretGenerator; DO NOT EDIT. 
-// pluginator {unknown 1970-01-01T00:00:00Z } - -package builtins - -import ( - "sigs.k8s.io/kustomize/api/kv" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/yaml" -) - -type SecretGeneratorPlugin struct { - h *resmap.PluginHelpers - types.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - types.SecretArgs -} - -func (p *SecretGeneratorPlugin) Config(h *resmap.PluginHelpers, config []byte) (err error) { - p.SecretArgs = types.SecretArgs{} - err = yaml.Unmarshal(config, p) - if p.SecretArgs.Name == "" { - p.SecretArgs.Name = p.Name - } - if p.SecretArgs.Namespace == "" { - p.SecretArgs.Namespace = p.Namespace - } - p.h = h - return -} - -func (p *SecretGeneratorPlugin) Generate() (resmap.ResMap, error) { - return p.h.ResmapFactory().FromSecretArgs( - kv.NewLoader(p.h.Loader(), p.h.Validator()), p.SecretArgs) -} - -func NewSecretGeneratorPlugin() resmap.GeneratorPlugin { - return &SecretGeneratorPlugin{} -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SuffixTransformer.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SuffixTransformer.go deleted file mode 100644 index 2b389d8e5..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/SuffixTransformer.go +++ /dev/null @@ -1,96 +0,0 @@ -// Code generated by pluginator on SuffixTransformer; DO NOT EDIT. -// pluginator {unknown 1970-01-01T00:00:00Z } - -package builtins - -import ( - "errors" - - "sigs.k8s.io/kustomize/api/filters/suffix" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/resid" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// Add the given suffix to the field -type SuffixTransformerPlugin struct { - Suffix string `json:"suffix,omitempty" yaml:"suffix,omitempty"` - FieldSpecs types.FsSlice `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"` -} - -// TODO: Make this gvk skip list part of the config. -var suffixFieldSpecsToSkip = types.FsSlice{ - {Gvk: resid.Gvk{Kind: "CustomResourceDefinition"}}, - {Gvk: resid.Gvk{Group: "apiregistration.k8s.io", Kind: "APIService"}}, - {Gvk: resid.Gvk{Kind: "Namespace"}}, -} - -func (p *SuffixTransformerPlugin) Config( - _ *resmap.PluginHelpers, c []byte) (err error) { - p.Suffix = "" - p.FieldSpecs = nil - err = yaml.Unmarshal(c, p) - if err != nil { - return - } - if p.FieldSpecs == nil { - return errors.New("fieldSpecs is not expected to be nil") - } - return -} - -func (p *SuffixTransformerPlugin) Transform(m resmap.ResMap) error { - // Even if the Suffix is empty we want to proceed with the - // transformation. This allows to add contextual information - // to the resources (AddNameSuffix). - for _, r := range m.Resources() { - // TODO: move this test into the filter (i.e. make a better filter) - if p.shouldSkip(r.OrgId()) { - continue - } - id := r.OrgId() - // current default configuration contains - // only one entry: "metadata/name" with no GVK - for _, fs := range p.FieldSpecs { - // TODO: this is redundant to filter (but needed for now) - if !id.IsSelected(&fs.Gvk) { - continue - } - // TODO: move this test into the filter. - if fs.Path == "metadata/name" { - // "metadata/name" is the only field. - // this will add a suffix to the resource - // even if it is empty - - r.AddNameSuffix(p.Suffix) - if p.Suffix != "" { - // TODO: There are multiple transformers that can change a resource's name, and each makes a call to - // StorePreviousID(). 
We should make it so that we only call StorePreviousID once per kustomization layer - // to avoid storing intermediate names between transformations, to prevent intermediate name conflicts. - r.StorePreviousId() - } - } - if err := r.ApplyFilter(suffix.Filter{ - Suffix: p.Suffix, - FieldSpec: fs, - }); err != nil { - return err - } - } - } - return nil -} - -func (p *SuffixTransformerPlugin) shouldSkip(id resid.ResId) bool { - for _, path := range suffixFieldSpecsToSkip { - if id.IsSelected(&path.Gvk) { - return true - } - } - return false -} - -func NewSuffixTransformerPlugin() resmap.TransformerPlugin { - return &SuffixTransformerPlugin{} -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ValueAddTransformer.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ValueAddTransformer.go deleted file mode 100644 index c2c460ab9..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/ValueAddTransformer.go +++ /dev/null @@ -1,141 +0,0 @@ -// Code generated by pluginator on ValueAddTransformer; DO NOT EDIT. -// pluginator {unknown 1970-01-01T00:00:00Z } - -package builtins - -import ( - "fmt" - "path/filepath" - "strings" - - "sigs.k8s.io/kustomize/api/filters/namespace" - "sigs.k8s.io/kustomize/api/filters/valueadd" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/resource" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/yaml" -) - -// An 'Add' transformer inspired by the IETF RFC 6902 JSON spec Add operation. -type ValueAddTransformerPlugin struct { - // Value is the value to add. - // Defaults to base name of encompassing kustomization root. - Value string `json:"value,omitempty" yaml:"value,omitempty"` - - // Targets is a slice of targets that should have the value added. - Targets []Target `json:"targets,omitempty" yaml:"targets,omitempty"` - - // TargetFilePath is a file path. If specified, the file will be parsed into - // a slice of Target, and appended to anything that was specified in the - // Targets field. This is just a means to share common target specifications. - TargetFilePath string `json:"targetFilePath,omitempty" yaml:"targetFilePath,omitempty"` -} - -// Target describes where to put the value. -type Target struct { - // Selector selects the resources to modify. - Selector *types.Selector `json:"selector,omitempty" yaml:"selector,omitempty"` - - // NotSelector selects the resources to exclude - // from those included by overly broad selectors. - // TODO: implement this? - // NotSelector *types.Selector `json:"notSelector,omitempty" yaml:"notSelector,omitempty"` - - // FieldPath is a JSON-style path to the field intended to hold the value. - FieldPath string `json:"fieldPath,omitempty" yaml:"fieldPath,omitempty"` - - // FilePathPosition is passed to the filter directly. Look there for doc. - FilePathPosition int `json:"filePathPosition,omitempty" yaml:"filePathPosition,omitempty"` -} - -func (p *ValueAddTransformerPlugin) Config(h *resmap.PluginHelpers, c []byte) error { - err := yaml.Unmarshal(c, p) - if err != nil { - return err - } - p.Value = strings.TrimSpace(p.Value) - if p.Value == "" { - p.Value = filepath.Base(h.Loader().Root()) - } - if p.TargetFilePath != "" { - bytes, err := h.Loader().Load(p.TargetFilePath) - if err != nil { - return err - } - var targets struct { - Targets []Target `json:"targets,omitempty" yaml:"targets,omitempty"` - } - err = yaml.Unmarshal(bytes, &targets) - if err != nil { - return err - } - p.Targets = append(p.Targets, targets.Targets...) 
- } - if len(p.Targets) == 0 { - return fmt.Errorf("must specify at least one target") - } - for _, target := range p.Targets { - if err = validateSelector(target.Selector); err != nil { - return err - } - // TODO: call validateSelector(target.NotSelector) if field added. - if err = validateJsonFieldPath(target.FieldPath); err != nil { - return err - } - if target.FilePathPosition < 0 { - return fmt.Errorf( - "value of FilePathPosition (%d) cannot be negative", - target.FilePathPosition) - } - } - return nil -} - -// TODO: implement -func validateSelector(_ *types.Selector) error { - return nil -} - -// TODO: Enforce RFC 6902? -func validateJsonFieldPath(p string) error { - if len(p) == 0 { - return fmt.Errorf("fieldPath cannot be empty") - } - return nil -} - -func (p *ValueAddTransformerPlugin) Transform(m resmap.ResMap) (err error) { - for _, t := range p.Targets { - var resources []*resource.Resource - if t.Selector == nil { - resources = m.Resources() - } else { - resources, err = m.Select(*t.Selector) - if err != nil { - return err - } - } - // TODO: consider t.NotSelector if implemented - for _, res := range resources { - if t.FieldPath == types.MetadataNamespacePath { - err = res.ApplyFilter(namespace.Filter{ - Namespace: p.Value, - }) - } else { - err = res.ApplyFilter(valueadd.Filter{ - Value: p.Value, - FieldPath: t.FieldPath, - FilePathPosition: t.FilePathPosition, - }) - } - if err != nil { - return err - } - } - } - return nil -} - -func NewValueAddTransformerPlugin() resmap.TransformerPlugin { - return &ValueAddTransformerPlugin{} -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/doc.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/doc.go deleted file mode 100644 index 37a8dc6e7..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/builtins/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package builtins holds code generated from the builtin plugins. -// The "builtin" plugins are written as normal plugins and can -// be used as such, but they are also used to generate the code -// in this package so they can be statically linked to client code. -package builtins diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/generators/configmap.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/generators/configmap.go deleted file mode 100644 index 47498aaa5..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/generators/configmap.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package generators - -import ( - "sigs.k8s.io/kustomize/api/ifc" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// MakeConfigMap makes a configmap. -// -// ConfigMap: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#configmap-v1-core -// -// ConfigMaps and Secrets are similar. -// -// Both objects have a `data` field, which contains a map from keys to -// values that must be UTF-8 valid strings. Such data might be simple text, -// or whoever made the data may have done so by performing a base64 encoding -// on binary data. Regardless, k8s has no means to know this, so it treats -// the data field as a string. -// -// The ConfigMap has an additional field `binaryData`, also a map, but its -// values are _intended_ to be interpreted as a base64 encoding of []byte, -// by whatever makes use of the ConfigMap. 
-// -// In a ConfigMap, any key used in `data` cannot also be used in `binaryData` -// and vice-versa. A key must be unique across both maps. -func MakeConfigMap( - ldr ifc.KvLoader, args *types.ConfigMapArgs) (rn *yaml.RNode, err error) { - rn, err = makeBaseNode("ConfigMap", args.Name, args.Namespace) - if err != nil { - return nil, err - } - m, err := makeValidatedDataMap(ldr, args.Name, args.KvPairSources) - if err != nil { - return nil, err - } - if err = rn.LoadMapIntoConfigMapData(m); err != nil { - return nil, err - } - err = copyLabelsAndAnnotations(rn, args.Options) - if err != nil { - return nil, err - } - err = setImmutable(rn, args.Options) - if err != nil { - return nil, err - } - return rn, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/generators/secret.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/generators/secret.go deleted file mode 100644 index 9afaff156..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/generators/secret.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package generators - -import ( - "sigs.k8s.io/kustomize/api/ifc" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// MakeSecret makes a kubernetes Secret. -// -// Secret: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#secret-v1-core -// -// ConfigMaps and Secrets are similar. -// -// Like a ConfigMap, a Secret has a `data` field, but unlike a ConfigMap it has -// no `binaryData` field. -// -// All of a Secret's data is assumed to be opaque in nature, and assumed to be -// base64 encoded from its original representation, regardless of whether the -// original data was UTF-8 text or binary. -// -// This encoding provides no secrecy. It's just a neutral, common means to -// represent opaque text and binary data. Beneath the base64 encoding -// is presumably further encoding under control of the Secret's consumer. -// -// A Secret has string field `type` which holds an identifier, used by the -// client, to choose the algorithm to interpret the `data` field. Kubernetes -// cannot make use of this data; it's up to a controller or some pod's service -// to interpret the value, using `type` as a clue as to how to do this. -func MakeSecret( - ldr ifc.KvLoader, args *types.SecretArgs) (rn *yaml.RNode, err error) { - rn, err = makeBaseNode("Secret", args.Name, args.Namespace) - if err != nil { - return nil, err - } - t := "Opaque" - if args.Type != "" { - t = args.Type - } - if _, err := rn.Pipe( - yaml.FieldSetter{ - Name: "type", - Value: yaml.NewStringRNode(t)}); err != nil { - return nil, err - } - m, err := makeValidatedDataMap(ldr, args.Name, args.KvPairSources) - if err != nil { - return nil, err - } - if err = rn.LoadMapIntoSecretData(m); err != nil { - return nil, err - } - copyLabelsAndAnnotations(rn, args.Options) - setImmutable(rn, args.Options) - return rn, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/generators/utils.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/generators/utils.go deleted file mode 100644 index d6ea5fbf0..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/generators/utils.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package generators - -import ( - "fmt" - - "github.com/go-errors/errors" - "sigs.k8s.io/kustomize/api/ifc" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -func makeBaseNode(kind, name, namespace string) (*yaml.RNode, error) { - rn, err := yaml.Parse(fmt.Sprintf(` -apiVersion: v1 -kind: %s -`, kind)) - if err != nil { - return nil, err - } - if name == "" { - return nil, errors.Errorf("a configmap must have a name") - } - if _, err := rn.Pipe(yaml.SetK8sName(name)); err != nil { - return nil, err - } - if namespace != "" { - if _, err := rn.Pipe(yaml.SetK8sNamespace(namespace)); err != nil { - return nil, err - } - } - return rn, nil -} - -func makeValidatedDataMap( - ldr ifc.KvLoader, name string, sources types.KvPairSources) (map[string]string, error) { - pairs, err := ldr.Load(sources) - if err != nil { - return nil, errors.WrapPrefix(err, "loading KV pairs", 0) - } - knownKeys := make(map[string]string) - for _, p := range pairs { - // legal key: alphanumeric characters, '-', '_' or '.' - if err := ldr.Validator().ErrIfInvalidKey(p.Key); err != nil { - return nil, err - } - if _, ok := knownKeys[p.Key]; ok { - return nil, errors.Errorf( - "configmap %s illegally repeats the key `%s`", name, p.Key) - } - knownKeys[p.Key] = p.Value - } - return knownKeys, nil -} - -// copyLabelsAndAnnotations copies labels and annotations from -// GeneratorOptions into the given object. -func copyLabelsAndAnnotations( - rn *yaml.RNode, opts *types.GeneratorOptions) error { - if opts == nil { - return nil - } - for _, k := range yaml.SortedMapKeys(opts.Labels) { - v := opts.Labels[k] - if _, err := rn.Pipe(yaml.SetLabel(k, v)); err != nil { - return err - } - } - for _, k := range yaml.SortedMapKeys(opts.Annotations) { - v := opts.Annotations[k] - if _, err := rn.Pipe(yaml.SetAnnotation(k, v)); err != nil { - return err - } - } - return nil -} - -func setImmutable( - rn *yaml.RNode, opts *types.GeneratorOptions) error { - if opts == nil { - return nil - } - if opts.Immutable { - n := &yaml.Node{ - Kind: yaml.ScalarNode, - Value: "true", - Tag: yaml.NodeTagBool, - } - if _, err := rn.Pipe(yaml.FieldSetter{Name: "immutable", Value: yaml.NewRNode(n)}); err != nil { - return err - } - } - - return nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/git/cloner.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/git/cloner.go deleted file mode 100644 index 78f4f32cb..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/git/cloner.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package git - -import ( - "sigs.k8s.io/kustomize/kyaml/filesys" -) - -// Cloner is a function that can clone a git repo. -type Cloner func(repoSpec *RepoSpec) error - -// ClonerUsingGitExec uses a local git install, as opposed -// to say, some remote API, to obtain a local clone of -// a remote repo. 
-func ClonerUsingGitExec(repoSpec *RepoSpec) error { - r, err := newCmdRunner(repoSpec.Timeout) - if err != nil { - return err - } - repoSpec.Dir = r.dir - if err = r.run("init"); err != nil { - return err - } - if err = r.run( - "remote", "add", "origin", repoSpec.CloneSpec()); err != nil { - return err - } - ref := "HEAD" - if repoSpec.Ref != "" { - ref = repoSpec.Ref - } - if err = r.run("fetch", "--depth=1", "origin", ref); err != nil { - return err - } - if err = r.run("checkout", "FETCH_HEAD"); err != nil { - return err - } - if repoSpec.Submodules { - return r.run("submodule", "update", "--init", "--recursive") - } - return nil -} - -// DoNothingCloner returns a cloner that only sets -// cloneDir field in the repoSpec. It's assumed that -// the cloneDir is associated with some fake filesystem -// used in a test. -func DoNothingCloner(dir filesys.ConfirmedDir) Cloner { - return func(rs *RepoSpec) error { - rs.Dir = dir - return nil - } -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/git/gitrunner.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/git/gitrunner.go deleted file mode 100644 index fcc7130cd..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/git/gitrunner.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package git - -import ( - "os/exec" - "time" - - "github.com/pkg/errors" - "sigs.k8s.io/kustomize/api/internal/utils" - "sigs.k8s.io/kustomize/kyaml/filesys" -) - -// gitRunner runs the external git binary. -type gitRunner struct { - gitProgram string - duration time.Duration - dir filesys.ConfirmedDir -} - -// newCmdRunner returns a gitRunner if it can find the binary. -// It also creats a temp directory for cloning repos. -func newCmdRunner(timeout time.Duration) (*gitRunner, error) { - gitProgram, err := exec.LookPath("git") - if err != nil { - return nil, errors.Wrap(err, "no 'git' program on path") - } - dir, err := filesys.NewTmpConfirmedDir() - if err != nil { - return nil, err - } - return &gitRunner{ - gitProgram: gitProgram, - duration: timeout, - dir: dir, - }, nil -} - -// run a command with a timeout. -func (r gitRunner) run(args ...string) error { - //nolint: gosec - cmd := exec.Command(r.gitProgram, args...) - cmd.Dir = r.dir.String() - return utils.TimedCall( - cmd.String(), - r.duration, - func() error { - _, err := cmd.CombinedOutput() - if err != nil { - return errors.Wrapf(err, "git cmd = '%s'", cmd.String()) - } - return err - }) -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/git/repospec.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/git/repospec.go deleted file mode 100644 index 996f0cf23..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/git/repospec.go +++ /dev/null @@ -1,267 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package git - -import ( - "fmt" - "net/url" - "path/filepath" - "strconv" - "strings" - "time" - - "sigs.k8s.io/kustomize/kyaml/filesys" -) - -// Used as a temporary non-empty occupant of the cloneDir -// field, as something distinguishable from the empty string -// in various outputs (especially tests). Not using an -// actual directory name here, as that's a temporary directory -// with a unique name that isn't created until clone time. -const notCloned = filesys.ConfirmedDir("/notCloned") - -// RepoSpec specifies a git repository and a branch and path therein. -type RepoSpec struct { - // Raw, original spec, used to look for cycles. 
- // TODO(monopole): Drop raw, use processed fields instead. - raw string - - // Host, e.g. github.com - Host string - - // orgRepo name (organization/repoName), - // e.g. kubernetes-sigs/kustomize - OrgRepo string - - // Dir where the orgRepo is cloned to. - Dir filesys.ConfirmedDir - - // Relative path in the repository, and in the cloneDir, - // to a Kustomization. - Path string - - // Branch or tag reference. - Ref string - - // e.g. .git or empty in case of _git is present - GitSuffix string - - // Submodules indicates whether or not to clone git submodules. - Submodules bool - - // Timeout is the maximum duration allowed for execing git commands. - Timeout time.Duration -} - -// CloneSpec returns a string suitable for "git clone {spec}". -func (x *RepoSpec) CloneSpec() string { - if isAzureHost(x.Host) || isAWSHost(x.Host) { - return x.Host + x.OrgRepo - } - return x.Host + x.OrgRepo + x.GitSuffix -} - -func (x *RepoSpec) CloneDir() filesys.ConfirmedDir { - return x.Dir -} - -func (x *RepoSpec) Raw() string { - return x.raw -} - -func (x *RepoSpec) AbsPath() string { - return x.Dir.Join(x.Path) -} - -func (x *RepoSpec) Cleaner(fSys filesys.FileSystem) func() error { - return func() error { return fSys.RemoveAll(x.Dir.String()) } -} - -// NewRepoSpecFromURL parses git-like urls. -// From strings like git@github.com:someOrg/someRepo.git or -// https://github.com/someOrg/someRepo?ref=someHash, extract -// the parts. -func NewRepoSpecFromURL(n string) (*RepoSpec, error) { - if filepath.IsAbs(n) { - return nil, fmt.Errorf("uri looks like abs path: %s", n) - } - host, orgRepo, path, gitRef, gitSubmodules, suffix, gitTimeout := parseGitURL(n) - if orgRepo == "" { - return nil, fmt.Errorf("url lacks orgRepo: %s", n) - } - if host == "" { - return nil, fmt.Errorf("url lacks host: %s", n) - } - return &RepoSpec{ - raw: n, Host: host, OrgRepo: orgRepo, - Dir: notCloned, Path: path, Ref: gitRef, GitSuffix: suffix, - Submodules: gitSubmodules, Timeout: gitTimeout}, nil -} - -const ( - refQuery = "?ref=" - gitSuffix = ".git" - gitDelimiter = "_git/" -) - -// From strings like git@github.com:someOrg/someRepo.git or -// https://github.com/someOrg/someRepo?ref=someHash, extract -// the parts. -func parseGitURL(n string) ( - host string, orgRepo string, path string, gitRef string, gitSubmodules bool, gitSuff string, gitTimeout time.Duration) { - if strings.Contains(n, gitDelimiter) { - index := strings.Index(n, gitDelimiter) - // Adding _git/ to host - host = normalizeGitHostSpec(n[:index+len(gitDelimiter)]) - orgRepo = strings.Split(strings.Split(n[index+len(gitDelimiter):], "/")[0], "?")[0] - path, gitRef, gitTimeout, gitSubmodules = peelQuery(n[index+len(gitDelimiter)+len(orgRepo):]) - return - } - host, n = parseHostSpec(n) - gitSuff = gitSuffix - if strings.Contains(n, gitSuffix) { - index := strings.Index(n, gitSuffix) - orgRepo = n[0:index] - n = n[index+len(gitSuffix):] - if len(n) > 0 && n[0] == '/' { - n = n[1:] - } - path, gitRef, gitTimeout, gitSubmodules = peelQuery(n) - return - } - - i := strings.Index(n, "/") - if i < 1 { - path, gitRef, gitTimeout, gitSubmodules = peelQuery(n) - return - } - j := strings.Index(n[i+1:], "/") - if j >= 0 { - j += i + 1 - orgRepo = n[:j] - path, gitRef, gitTimeout, gitSubmodules = peelQuery(n[j+1:]) - return - } - path = "" - orgRepo, gitRef, gitTimeout, gitSubmodules = peelQuery(n) - return host, orgRepo, path, gitRef, gitSubmodules, gitSuff, gitTimeout -} - -// Clone git submodules by default. 
-const defaultSubmodules = true - -// Arbitrary, but non-infinite, timeout for running commands. -const defaultTimeout = 27 * time.Second - -func peelQuery(arg string) (string, string, time.Duration, bool) { - // Parse the given arg into a URL. In the event of a parse failure, return - // our defaults. - parsed, err := url.Parse(arg) - if err != nil { - return arg, "", defaultTimeout, defaultSubmodules - } - values := parsed.Query() - - // ref is the desired git ref to target. Can be specified by in a git URL - // with ?ref= or ?version=, although ref takes precedence. - ref := values.Get("version") - if queryValue := values.Get("ref"); queryValue != "" { - ref = queryValue - } - - // depth is the desired git exec timeout. Can be specified by in a git URL - // with ?timeout=. - duration := defaultTimeout - if queryValue := values.Get("timeout"); queryValue != "" { - // Attempt to first parse as a number of integer seconds (like "61"), - // and then attempt to parse as a suffixed duration (like "61s"). - if intValue, err := strconv.Atoi(queryValue); err == nil && intValue > 0 { - duration = time.Duration(intValue) * time.Second - } else if durationValue, err := time.ParseDuration(queryValue); err == nil && durationValue > 0 { - duration = durationValue - } - } - - // submodules indicates if git submodule cloning is desired. Can be - // specified by in a git URL with ?submodules=. - submodules := defaultSubmodules - if queryValue := values.Get("submodules"); queryValue != "" { - if boolValue, err := strconv.ParseBool(queryValue); err == nil { - submodules = boolValue - } - } - - return parsed.Path, ref, duration, submodules -} - -func parseHostSpec(n string) (string, string) { - var host string - // Start accumulating the host part. - for _, p := range []string{ - // Order matters here. - "git::", "gh:", "ssh://", "https://", "http://", - "git@", "github.com:", "github.com/"} { - if len(p) < len(n) && strings.ToLower(n[:len(p)]) == p { - n = n[len(p):] - host += p - } - } - if host == "git@" { - i := strings.Index(n, "/") - if i > -1 { - host += n[:i+1] - n = n[i+1:] - } else { - i = strings.Index(n, ":") - if i > -1 { - host += n[:i+1] - n = n[i+1:] - } - } - return host, n - } - - // If host is a http(s) or ssh URL, grab the domain part. 
- for _, p := range []string{ - "ssh://", "https://", "http://"} { - if strings.HasSuffix(host, p) { - i := strings.Index(n, "/") - if i > -1 { - host += n[0 : i+1] - n = n[i+1:] - } - break - } - } - - return normalizeGitHostSpec(host), n -} - -func normalizeGitHostSpec(host string) string { - s := strings.ToLower(host) - if strings.Contains(s, "github.com") { - if strings.Contains(s, "git@") || strings.Contains(s, "ssh:") { - host = "git@github.com:" - } else { - host = "https://github.com/" - } - } - if strings.HasPrefix(s, "git::") { - host = strings.TrimPrefix(s, "git::") - } - return host -} - -// The format of Azure repo URL is documented -// https://docs.microsoft.com/en-us/azure/devops/repos/git/clone?view=vsts&tabs=visual-studio#clone_url -func isAzureHost(host string) bool { - return strings.Contains(host, "dev.azure.com") || - strings.Contains(host, "visualstudio.com") -} - -// The format of AWS repo URL is documented -// https://docs.aws.amazon.com/codecommit/latest/userguide/regions.html -func isAWSHost(host string) bool { - return strings.Contains(host, "amazonaws.com") -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/kusterr/yamlformaterror.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/kusterr/yamlformaterror.go deleted file mode 100644 index aa76d1dd7..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/kusterr/yamlformaterror.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package error has contextual error types. -package kusterr - -import ( - "fmt" - "strings" -) - -// YamlFormatError represents error with yaml file name where json/yaml format error happens. -type YamlFormatError struct { - Path string - ErrorMsg string -} - -func (e YamlFormatError) Error() string { - return fmt.Sprintf("YAML file [%s] encounters a format error.\n%s\n", e.Path, e.ErrorMsg) -} - -// MalformedYamlError represents an error that occurred while trying to decode a given YAML. -type MalformedYamlError struct { - Path string - ErrorMsg string -} - -func (e MalformedYamlError) Error() string { - return fmt.Sprintf("%s in File: %s", e.ErrorMsg, e.Path) -} - -// Handler handles YamlFormatError -func Handler(e error, path string) error { - if isYAMLSyntaxError(e) { - return YamlFormatError{ - Path: path, - ErrorMsg: e.Error(), - } - } - if IsMalformedYAMLError(e) { - return MalformedYamlError{ - Path: path, - ErrorMsg: e.Error(), - } - } - return e -} - -func isYAMLSyntaxError(e error) bool { - return strings.Contains(e.Error(), "error converting YAML to JSON") || strings.Contains(e.Error(), "error unmarshaling JSON") -} - -func IsMalformedYAMLError(e error) bool { - return strings.Contains(e.Error(), "MalformedYAMLError") -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/doc.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/doc.go deleted file mode 100644 index f41f79b0c..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package builtinconfig provides legacy methods for -// configuring builtin plugins from a common config file. -// As a user, its best to configure plugins individually -// with plugin config files specified in the `transformers:` -// or `generators:` field, than to use this legacy -// configuration technique. 
-package builtinconfig diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/loaddefaultconfig.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/loaddefaultconfig.go deleted file mode 100644 index bf5e3f8a3..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/loaddefaultconfig.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package builtinconfig - -import ( - "sigs.k8s.io/kustomize/api/ifc" - "sigs.k8s.io/yaml" -) - -// loadDefaultConfig returns a TranformerConfig -// object from a list of files. -func loadDefaultConfig( - ldr ifc.Loader, paths []string) (*TransformerConfig, error) { - result := &TransformerConfig{} - for _, path := range paths { - data, err := ldr.Load(path) - if err != nil { - return nil, err - } - t, err := makeTransformerConfigFromBytes(data) - if err != nil { - return nil, err - } - result, err = result.Merge(t) - if err != nil { - return nil, err - } - } - return result, nil -} - -// makeTransformerConfigFromBytes returns a TransformerConfig object from bytes -func makeTransformerConfigFromBytes(data []byte) (*TransformerConfig, error) { - var t TransformerConfig - err := yaml.Unmarshal(data, &t) - if err != nil { - return nil, err - } - t.sortFields() - return &t, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/namebackreferences.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/namebackreferences.go deleted file mode 100644 index 354f70e51..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/namebackreferences.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package builtinconfig - -import ( - "strings" - - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/resid" -) - -// NameBackReferences is an association between a gvk.GVK (a ReferralTarget) -// and a list of Referrers that could refer to it. -// -// It is used to handle name changes, and can be thought of as a -// a contact list. If you change your own contact info (name, -// phone number, etc.), you must tell your contacts or they won't -// know about the change. -// -// For example, ConfigMaps can be used by Pods and everything that -// contains a Pod; Deployment, Job, StatefulSet, etc. -// The ConfigMap is the ReferralTarget, the others are Referrers. -// -// If the the name of a ConfigMap instance changed from 'alice' to 'bob', -// one must -// - visit all objects that could refer to the ConfigMap (the Referrers) -// - see if they mention 'alice', -// - if so, change the Referrer's name reference to 'bob'. -// -// The NameBackReferences instance to aid in this could look like -// { -// kind: ConfigMap -// version: v1 -// fieldSpecs: -// - kind: Pod -// version: v1 -// path: spec/volumes/configMap/name -// - kind: Deployment -// path: spec/template/spec/volumes/configMap/name -// - kind: Job -// path: spec/template/spec/volumes/configMap/name -// (etc.) -// } -type NameBackReferences struct { - resid.Gvk `json:",inline,omitempty" yaml:",inline,omitempty"` - // TODO: rename json 'fieldSpecs' to 'referrers' for clarity. - // This will, however, break anyone using a custom config. 
- Referrers types.FsSlice `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"` -} - -func (n NameBackReferences) String() string { - var r []string - for _, f := range n.Referrers { - r = append(r, f.String()) - } - return n.Gvk.String() + ": (\n" + - strings.Join(r, "\n") + "\n)" -} - -type nbrSlice []NameBackReferences - -func (s nbrSlice) Len() int { return len(s) } -func (s nbrSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nbrSlice) Less(i, j int) bool { - return s[i].Gvk.IsLessThan(s[j].Gvk) -} - -func (s nbrSlice) mergeAll(o nbrSlice) (result nbrSlice, err error) { - result = s - for _, r := range o { - result, err = result.mergeOne(r) - if err != nil { - return nil, err - } - } - return result, nil -} - -func (s nbrSlice) mergeOne(other NameBackReferences) (nbrSlice, error) { - var result nbrSlice - var err error - found := false - for _, c := range s { - if c.Gvk.Equals(other.Gvk) { - c.Referrers, err = c.Referrers.MergeAll(other.Referrers) - if err != nil { - return nil, err - } - found = true - } - result = append(result, c) - } - - if !found { - result = append(result, other) - } - return result, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/transformerconfig.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/transformerconfig.go deleted file mode 100644 index a28627a13..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig/transformerconfig.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package builtinconfig - -import ( - "log" - "sort" - - "sigs.k8s.io/kustomize/api/ifc" - "sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts" - "sigs.k8s.io/kustomize/api/types" -) - -// TransformerConfig holds the data needed to perform transformations. -type TransformerConfig struct { - NamePrefix types.FsSlice `json:"namePrefix,omitempty" yaml:"namePrefix,omitempty"` - NameSuffix types.FsSlice `json:"nameSuffix,omitempty" yaml:"nameSuffix,omitempty"` - NameSpace types.FsSlice `json:"namespace,omitempty" yaml:"namespace,omitempty"` - CommonLabels types.FsSlice `json:"commonLabels,omitempty" yaml:"commonLabels,omitempty"` - CommonAnnotations types.FsSlice `json:"commonAnnotations,omitempty" yaml:"commonAnnotations,omitempty"` - NameReference nbrSlice `json:"nameReference,omitempty" yaml:"nameReference,omitempty"` - VarReference types.FsSlice `json:"varReference,omitempty" yaml:"varReference,omitempty"` - Images types.FsSlice `json:"images,omitempty" yaml:"images,omitempty"` - Replicas types.FsSlice `json:"replicas,omitempty" yaml:"replicas,omitempty"` -} - -// MakeEmptyConfig returns an empty TransformerConfig object -func MakeEmptyConfig() *TransformerConfig { - return &TransformerConfig{} -} - -// MakeDefaultConfig returns a default TransformerConfig. -func MakeDefaultConfig() *TransformerConfig { - c, err := makeTransformerConfigFromBytes( - builtinpluginconsts.GetDefaultFieldSpecs()) - if err != nil { - log.Fatalf("Unable to make default transformconfig: %v", err) - } - return c -} - -// MakeTransformerConfig returns a merger of custom config, -// if any, with default config. -func MakeTransformerConfig( - ldr ifc.Loader, paths []string) (*TransformerConfig, error) { - t1 := MakeDefaultConfig() - if len(paths) == 0 { - return t1, nil - } - t2, err := loadDefaultConfig(ldr, paths) - if err != nil { - return nil, err - } - return t1.Merge(t2) -} - -// sortFields provides determinism in logging, tests, etc. 
-func (t *TransformerConfig) sortFields() { - sort.Sort(t.NamePrefix) - sort.Sort(t.NameSpace) - sort.Sort(t.CommonLabels) - sort.Sort(t.CommonAnnotations) - sort.Sort(t.NameReference) - sort.Sort(t.VarReference) - sort.Sort(t.Images) - sort.Sort(t.Replicas) -} - -// AddPrefixFieldSpec adds a FieldSpec to NamePrefix -func (t *TransformerConfig) AddPrefixFieldSpec(fs types.FieldSpec) (err error) { - t.NamePrefix, err = t.NamePrefix.MergeOne(fs) - return err -} - -// AddSuffixFieldSpec adds a FieldSpec to NameSuffix -func (t *TransformerConfig) AddSuffixFieldSpec(fs types.FieldSpec) (err error) { - t.NameSuffix, err = t.NameSuffix.MergeOne(fs) - return err -} - -// AddLabelFieldSpec adds a FieldSpec to CommonLabels -func (t *TransformerConfig) AddLabelFieldSpec(fs types.FieldSpec) (err error) { - t.CommonLabels, err = t.CommonLabels.MergeOne(fs) - return err -} - -// AddAnnotationFieldSpec adds a FieldSpec to CommonAnnotations -func (t *TransformerConfig) AddAnnotationFieldSpec(fs types.FieldSpec) (err error) { - t.CommonAnnotations, err = t.CommonAnnotations.MergeOne(fs) - return err -} - -// AddNamereferenceFieldSpec adds a NameBackReferences to NameReference -func (t *TransformerConfig) AddNamereferenceFieldSpec( - nbrs NameBackReferences) (err error) { - t.NameReference, err = t.NameReference.mergeOne(nbrs) - return err -} - -// Merge merges two TransformerConfigs objects into -// a new TransformerConfig object -func (t *TransformerConfig) Merge(input *TransformerConfig) ( - merged *TransformerConfig, err error) { - if input == nil { - return t, nil - } - merged = &TransformerConfig{} - merged.NamePrefix, err = t.NamePrefix.MergeAll(input.NamePrefix) - if err != nil { - return nil, err - } - merged.NameSuffix, err = t.NameSuffix.MergeAll(input.NameSuffix) - if err != nil { - return nil, err - } - merged.NameSpace, err = t.NameSpace.MergeAll(input.NameSpace) - if err != nil { - return nil, err - } - merged.CommonAnnotations, err = t.CommonAnnotations.MergeAll( - input.CommonAnnotations) - if err != nil { - return nil, err - } - merged.CommonLabels, err = t.CommonLabels.MergeAll(input.CommonLabels) - if err != nil { - return nil, err - } - merged.VarReference, err = t.VarReference.MergeAll(input.VarReference) - if err != nil { - return nil, err - } - merged.NameReference, err = t.NameReference.mergeAll(input.NameReference) - if err != nil { - return nil, err - } - merged.Images, err = t.Images.MergeAll(input.Images) - if err != nil { - return nil, err - } - merged.Replicas, err = t.Replicas.MergeAll(input.Replicas) - if err != nil { - return nil, err - } - merged.sortFields() - return merged, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers/builtinplugintype_string.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers/builtinplugintype_string.go deleted file mode 100644 index 1b347cbc8..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers/builtinplugintype_string.go +++ /dev/null @@ -1,42 +0,0 @@ -// Code generated by "stringer -type=BuiltinPluginType"; DO NOT EDIT. - -package builtinhelpers - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[Unknown-0] - _ = x[AnnotationsTransformer-1] - _ = x[ConfigMapGenerator-2] - _ = x[IAMPolicyGenerator-3] - _ = x[HashTransformer-4] - _ = x[ImageTagTransformer-5] - _ = x[LabelTransformer-6] - _ = x[LegacyOrderTransformer-7] - _ = x[NamespaceTransformer-8] - _ = x[PatchJson6902Transformer-9] - _ = x[PatchStrategicMergeTransformer-10] - _ = x[PatchTransformer-11] - _ = x[PrefixSuffixTransformer-12] - _ = x[PrefixTransformer-13] - _ = x[SuffixTransformer-14] - _ = x[ReplicaCountTransformer-15] - _ = x[SecretGenerator-16] - _ = x[ValueAddTransformer-17] - _ = x[HelmChartInflationGenerator-18] - _ = x[ReplacementTransformer-19] -} - -const _BuiltinPluginType_name = "UnknownAnnotationsTransformerConfigMapGeneratorIAMPolicyGeneratorHashTransformerImageTagTransformerLabelTransformerLegacyOrderTransformerNamespaceTransformerPatchJson6902TransformerPatchStrategicMergeTransformerPatchTransformerPrefixSuffixTransformerPrefixTransformerSuffixTransformerReplicaCountTransformerSecretGeneratorValueAddTransformerHelmChartInflationGeneratorReplacementTransformer" - -var _BuiltinPluginType_index = [...]uint16{0, 7, 29, 47, 65, 80, 99, 115, 137, 157, 181, 211, 227, 250, 267, 284, 307, 322, 341, 368, 390} - -func (i BuiltinPluginType) String() string { - if i < 0 || i >= BuiltinPluginType(len(_BuiltinPluginType_index)-1) { - return "BuiltinPluginType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _BuiltinPluginType_name[_BuiltinPluginType_index[i]:_BuiltinPluginType_index[i+1]] -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers/builtins.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers/builtins.go deleted file mode 100644 index b1c987a52..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers/builtins.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package builtinhelpers - -import ( - "sigs.k8s.io/kustomize/api/internal/builtins" - "sigs.k8s.io/kustomize/api/resmap" -) - -//go:generate stringer -type=BuiltinPluginType -type BuiltinPluginType int - -const ( - Unknown BuiltinPluginType = iota - AnnotationsTransformer - ConfigMapGenerator - IAMPolicyGenerator - HashTransformer - ImageTagTransformer - LabelTransformer - LegacyOrderTransformer - NamespaceTransformer - PatchJson6902Transformer - PatchStrategicMergeTransformer - PatchTransformer - PrefixSuffixTransformer - PrefixTransformer - SuffixTransformer - ReplicaCountTransformer - SecretGenerator - ValueAddTransformer - HelmChartInflationGenerator - ReplacementTransformer -) - -var stringToBuiltinPluginTypeMap map[string]BuiltinPluginType - -func init() { //nolint:gochecknoinits - stringToBuiltinPluginTypeMap = makeStringToBuiltinPluginTypeMap() -} - -func makeStringToBuiltinPluginTypeMap() (result map[string]BuiltinPluginType) { - result = make(map[string]BuiltinPluginType, 23) - for k := range GeneratorFactories { - result[k.String()] = k - } - for k := range TransformerFactories { - result[k.String()] = k - } - return -} - -func GetBuiltinPluginType(n string) BuiltinPluginType { - result, ok := stringToBuiltinPluginTypeMap[n] - if ok { - return result - } - return Unknown -} - -var GeneratorFactories = map[BuiltinPluginType]func() resmap.GeneratorPlugin{ - ConfigMapGenerator: builtins.NewConfigMapGeneratorPlugin, - IAMPolicyGenerator: builtins.NewIAMPolicyGeneratorPlugin, - SecretGenerator: builtins.NewSecretGeneratorPlugin, - HelmChartInflationGenerator: builtins.NewHelmChartInflationGeneratorPlugin, -} - -type MultiTransformer struct { - transformers []resmap.TransformerPlugin -} - -func (t *MultiTransformer) Transform(m resmap.ResMap) error { - for _, transformer := range t.transformers { - if err := transformer.Transform(m); err != nil { - return err - } - } - return nil -} - -func (t *MultiTransformer) Config(h *resmap.PluginHelpers, b []byte) error { - for _, transformer := range t.transformers { - if err := transformer.Config(h, b); err != nil { - return err - } - } - return nil -} - -func NewMultiTransformer() resmap.TransformerPlugin { - return &MultiTransformer{[]resmap.TransformerPlugin{ - builtins.NewPrefixTransformerPlugin(), - builtins.NewSuffixTransformerPlugin(), - }} -} - -var TransformerFactories = map[BuiltinPluginType]func() resmap.TransformerPlugin{ - AnnotationsTransformer: builtins.NewAnnotationsTransformerPlugin, - HashTransformer: builtins.NewHashTransformerPlugin, - ImageTagTransformer: builtins.NewImageTagTransformerPlugin, - LabelTransformer: builtins.NewLabelTransformerPlugin, - LegacyOrderTransformer: builtins.NewLegacyOrderTransformerPlugin, - NamespaceTransformer: builtins.NewNamespaceTransformerPlugin, - PatchJson6902Transformer: builtins.NewPatchJson6902TransformerPlugin, - PatchStrategicMergeTransformer: builtins.NewPatchStrategicMergeTransformerPlugin, - PatchTransformer: builtins.NewPatchTransformerPlugin, - PrefixSuffixTransformer: NewMultiTransformer, - PrefixTransformer: builtins.NewPrefixTransformerPlugin, - SuffixTransformer: builtins.NewSuffixTransformerPlugin, - ReplacementTransformer: builtins.NewReplacementTransformerPlugin, - ReplicaCountTransformer: builtins.NewReplicaCountTransformerPlugin, - ValueAddTransformer: builtins.NewValueAddTransformerPlugin, -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/execplugin/execplugin.go 
b/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/execplugin/execplugin.go deleted file mode 100644 index 9380558cb..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/execplugin/execplugin.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package execplugin - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "os/exec" - "runtime" - "strings" - - "github.com/google/shlex" - - "github.com/pkg/errors" - "sigs.k8s.io/kustomize/api/internal/plugins/utils" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/yaml" -) - -const ( - tmpConfigFilePrefix = "kust-plugin-config-" -) - -// ExecPlugin record the name and args of an executable -// It triggers the executable generator and transformer -type ExecPlugin struct { - // absolute path of the executable - path string - - // Optional command line arguments to the executable - // pulled from specially named fields in cfg. - // This is for executables that don't want to parse YAML. - args []string - - // Plugin configuration data. - cfg []byte - - // PluginHelpers - h *resmap.PluginHelpers -} - -func NewExecPlugin(p string) *ExecPlugin { - return &ExecPlugin{path: p} -} - -func (p *ExecPlugin) ErrIfNotExecutable() error { - f, err := os.Stat(p.path) - if err != nil { - return err - } - // In Windows, it is not possible to determine whether a - // file is executable through file mode. - // TODO: provide for setting the executable FileMode bit on Windows - // The (fs *fileStat) Mode() (m FileMode) {} function in - // https://golang.org/src/os/types_windows.go - // lacks the ability to set the FileMode executable bit in response - // to file data on Windows. - if f.Mode()&0111 == 0000 && runtime.GOOS != "windows" { - return fmt.Errorf("unexecutable plugin at: %s", p.path) - } - return nil -} - -func (p *ExecPlugin) Path() string { - return p.path -} - -func (p *ExecPlugin) Args() []string { - return p.args -} - -func (p *ExecPlugin) Cfg() []byte { - return p.cfg -} - -func (p *ExecPlugin) Config(h *resmap.PluginHelpers, config []byte) error { - p.h = h - p.cfg = config - return p.processOptionalArgsFields() -} - -type argsConfig struct { - ArgsOneLiner string `json:"argsOneLiner,omitempty" yaml:"argsOneLiner,omitempty"` - ArgsFromFile string `json:"argsFromFile,omitempty" yaml:"argsFromFile,omitempty"` -} - -func (p *ExecPlugin) processOptionalArgsFields() error { - var c argsConfig - err := yaml.Unmarshal(p.cfg, &c) - if err != nil { - return err - } - if c.ArgsOneLiner != "" { - p.args, _ = shlex.Split(c.ArgsOneLiner) - } - if c.ArgsFromFile != "" { - content, err := p.h.Loader().Load(c.ArgsFromFile) - if err != nil { - return err - } - for _, x := range strings.Split(string(content), "\n") { - x := strings.TrimLeft(x, " ") - if x != "" { - p.args = append(p.args, x) - } - } - } - return nil -} - -func (p *ExecPlugin) Generate() (resmap.ResMap, error) { - output, err := p.invokePlugin(nil) - if err != nil { - return nil, err - } - rm, err := p.h.ResmapFactory().NewResMapFromBytes(output) - if err != nil { - return nil, err - } - return utils.UpdateResourceOptions(rm) -} - -func (p *ExecPlugin) Transform(rm resmap.ResMap) error { - // add ResIds as annotations to all objects so that we can add them back - inputRM, err := utils.GetResMapWithIDAnnotation(rm) - if err != nil { - return err - } - - // encode the ResMap so it can be fed to the plugin - resources, err := inputRM.AsYaml() - if err != nil { - return err - } - - // invoke the plugin with resources 
as the input - output, err := p.invokePlugin(resources) - if err != nil { - return fmt.Errorf("%v %s", err, string(output)) - } - - // update the original ResMap based on the output - return utils.UpdateResMapValues(p.path, p.h, output, rm) -} - -// invokePlugin writes plugin config to a temp file, then -// passes the full temp file path as the first arg to a process -// running the plugin binary. Process output is returned. -func (p *ExecPlugin) invokePlugin(input []byte) ([]byte, error) { - f, err := ioutil.TempFile("", tmpConfigFilePrefix) - if err != nil { - return nil, errors.Wrap( - err, "creating tmp plugin config file") - } - _, err = f.Write(p.cfg) - if err != nil { - return nil, errors.Wrap( - err, "writing plugin config to "+f.Name()) - } - err = f.Close() - if err != nil { - return nil, errors.Wrap( - err, "closing plugin config file "+f.Name()) - } - //nolint:gosec - cmd := exec.Command( - p.path, append([]string{f.Name()}, p.args...)...) - cmd.Env = p.getEnv() - cmd.Stdin = bytes.NewReader(input) - cmd.Stderr = os.Stderr - if _, err := os.Stat(p.h.Loader().Root()); err == nil { - cmd.Dir = p.h.Loader().Root() - } - result, err := cmd.Output() - if err != nil { - return nil, errors.Wrapf( - err, "failure in plugin configured via %s; %v", - f.Name(), err.Error()) - } - return result, os.Remove(f.Name()) -} - -func (p *ExecPlugin) getEnv() []string { - env := os.Environ() - env = append(env, - "KUSTOMIZE_PLUGIN_CONFIG_STRING="+string(p.cfg), - "KUSTOMIZE_PLUGIN_CONFIG_ROOT="+p.h.Loader().Root()) - return env -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/fnplugin/fnplugin.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/fnplugin/fnplugin.go deleted file mode 100644 index 84bc0ac05..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/fnplugin/fnplugin.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package fnplugin - -import ( - "bytes" - "fmt" - - "github.com/pkg/errors" - - "sigs.k8s.io/kustomize/api/internal/plugins/utils" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/resource" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil" - "sigs.k8s.io/kustomize/kyaml/runfn" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// FnPlugin is the struct to hold function information -type FnPlugin struct { - // Function runner - runFns runfn.RunFns - - // Plugin configuration data. - cfg []byte - - // Plugin name cache for error output - pluginName string - - // PluginHelpers - h *resmap.PluginHelpers -} - -func bytesToRNode(yml []byte) (*yaml.RNode, error) { - rnode, err := yaml.Parse(string(yml)) - if err != nil { - return nil, err - } - return rnode, nil -} - -func resourceToRNode(res *resource.Resource) (*yaml.RNode, error) { - yml, err := res.AsYAML() - if err != nil { - return nil, err - } - - return bytesToRNode(yml) -} - -// GetFunctionSpec return function spec is there is. 
Otherwise return nil -func GetFunctionSpec(res *resource.Resource) *runtimeutil.FunctionSpec { - rnode, err := resourceToRNode(res) - if err != nil { - return nil - } - - return runtimeutil.GetFunctionSpec(rnode) -} - -func toStorageMounts(mounts []string) []runtimeutil.StorageMount { - var sms []runtimeutil.StorageMount - for _, mount := range mounts { - sms = append(sms, runtimeutil.StringToStorageMount(mount)) - } - return sms -} - -// NewFnPlugin creates a FnPlugin struct -func NewFnPlugin(o *types.FnPluginLoadingOptions) *FnPlugin { - return &FnPlugin{ - runFns: runfn.RunFns{ - Functions: []*yaml.RNode{}, - Network: o.Network, - EnableStarlark: o.EnableStar, - EnableExec: o.EnableExec, - StorageMounts: toStorageMounts(o.Mounts), - Env: o.Env, - AsCurrentUser: o.AsCurrentUser, - WorkingDir: o.WorkingDir, - }, - } -} - -// Cfg returns function config -func (p *FnPlugin) Cfg() []byte { - return p.cfg -} - -// Config is called by kustomize to pass-in config information -func (p *FnPlugin) Config(h *resmap.PluginHelpers, config []byte) error { - p.h = h - p.cfg = config - - fn, err := bytesToRNode(p.cfg) - if err != nil { - return err - } - - meta, err := fn.GetMeta() - if err != nil { - return err - } - - p.pluginName = fmt.Sprintf("api: %s, kind: %s, name: %s", - meta.APIVersion, meta.Kind, meta.Name) - - return nil -} - -// Generate is called when run as generator -func (p *FnPlugin) Generate() (resmap.ResMap, error) { - output, err := p.invokePlugin(nil) - if err != nil { - return nil, err - } - rm, err := p.h.ResmapFactory().NewResMapFromBytes(output) - if err != nil { - return nil, err - } - return utils.UpdateResourceOptions(rm) -} - -// Transform is called when run as transformer -func (p *FnPlugin) Transform(rm resmap.ResMap) error { - // add ResIds as annotations to all objects so that we can add them back - inputRM, err := utils.GetResMapWithIDAnnotation(rm) - if err != nil { - return err - } - - // encode the ResMap so it can be fed to the plugin - resources, err := inputRM.AsYaml() - if err != nil { - return err - } - - // invoke the plugin with resources as the input - output, err := p.invokePlugin(resources) - if err != nil { - return fmt.Errorf("%v %s", err, string(output)) - } - - // update the original ResMap based on the output - return utils.UpdateResMapValues(p.pluginName, p.h, output, rm) -} - -func injectAnnotation(input *yaml.RNode, k, v string) error { - err := input.PipeE(yaml.SetAnnotation(k, v)) - if err != nil { - return err - } - return nil -} - -// invokePlugin uses Function runner to run function as plugin -func (p *FnPlugin) invokePlugin(input []byte) ([]byte, error) { - // get function config rnode - functionConfig, err := bytesToRNode(p.cfg) - if err != nil { - return nil, err - } - - // This annotation will let kustomize ingnore this item in output - err = injectAnnotation(functionConfig, "config.kubernetes.io/local-config", "true") - if err != nil { - return nil, err - } - // we need to add config as input for generators. Some of them don't work with FunctionConfig - // and in addition kio.Pipeline won't create anything if there are no objects - // see https://github.com/kubernetes-sigs/kustomize/blob/master/kyaml/kio/kio.go#L93 - // Since we added `local-config` annotation so it will be ignored in generator output - // TODO(donnyxia): This is actually not used by generator and only used to bypass a kio limitation. - // Need better solution. 
- if input == nil { - yml, err := functionConfig.String() - if err != nil { - return nil, err - } - input = []byte(yml) - } - - // Configure and Execute Fn. We don't need to convert resources to ResourceList here - // because function runtime will do that. See kyaml/fn/runtime/runtimeutil/runtimeutil.go - var ouputBuffer bytes.Buffer - p.runFns.Input = bytes.NewReader(input) - p.runFns.Functions = append(p.runFns.Functions, functionConfig) - p.runFns.Output = &ouputBuffer - - err = p.runFns.Execute() - if err != nil { - return nil, errors.Wrap( - err, "couldn't execute function") - } - - return ouputBuffer.Bytes(), nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/loader.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/loader.go deleted file mode 100644 index 82cb93965..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/loader.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package loader - -import ( - "fmt" - "log" - "os" - "path/filepath" - "plugin" - "reflect" - "strings" - - "github.com/pkg/errors" - "sigs.k8s.io/kustomize/api/ifc" - "sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers" - "sigs.k8s.io/kustomize/api/internal/plugins/execplugin" - "sigs.k8s.io/kustomize/api/internal/plugins/fnplugin" - "sigs.k8s.io/kustomize/api/internal/plugins/utils" - "sigs.k8s.io/kustomize/api/konfig" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/resource" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/filesys" - "sigs.k8s.io/kustomize/kyaml/resid" -) - -// Loader loads plugins using a file loader (a different loader). -type Loader struct { - pc *types.PluginConfig - rf *resmap.Factory - fs filesys.FileSystem - - // absolutePluginHome caches the location of a valid plugin root directory. - // It should only be set once the directory's existence has been confirmed. - absolutePluginHome string -} - -func NewLoader( - pc *types.PluginConfig, rf *resmap.Factory, fs filesys.FileSystem) *Loader { - return &Loader{pc: pc, rf: rf, fs: fs} -} - -// Config provides the global (not plugin specific) PluginConfig data. 
-func (l *Loader) Config() *types.PluginConfig { - return l.pc -} - -// SetWorkDir sets the working directory for this loader's plugins -func (l *Loader) SetWorkDir(wd string) { - l.pc.FnpLoadingOptions.WorkingDir = wd -} - -func (l *Loader) LoadGenerators( - ldr ifc.Loader, v ifc.Validator, rm resmap.ResMap) ( - result []*resmap.GeneratorWithProperties, err error) { - for _, res := range rm.Resources() { - g, err := l.LoadGenerator(ldr, v, res) - if err != nil { - return nil, err - } - generatorOrigin, err := resource.OriginFromCustomPlugin(res) - if err != nil { - return nil, err - } - result = append(result, &resmap.GeneratorWithProperties{Generator: g, Origin: generatorOrigin}) - } - return result, nil -} - -func (l *Loader) LoadGenerator( - ldr ifc.Loader, v ifc.Validator, res *resource.Resource) (resmap.Generator, error) { - c, err := l.loadAndConfigurePlugin(ldr, v, res) - if err != nil { - return nil, err - } - g, ok := c.(resmap.Generator) - if !ok { - return nil, fmt.Errorf("plugin %s not a generator", res.OrgId()) - } - return g, nil -} - -func (l *Loader) LoadTransformers( - ldr ifc.Loader, v ifc.Validator, rm resmap.ResMap) ([]*resmap.TransformerWithProperties, error) { - var result []*resmap.TransformerWithProperties - for _, res := range rm.Resources() { - t, err := l.LoadTransformer(ldr, v, res) - if err != nil { - return nil, err - } - transformerOrigin, err := resource.OriginFromCustomPlugin(res) - if err != nil { - return nil, err - } - result = append(result, &resmap.TransformerWithProperties{Transformer: t, Origin: transformerOrigin}) - } - return result, nil -} - -func (l *Loader) LoadTransformer( - ldr ifc.Loader, v ifc.Validator, res *resource.Resource) (*resmap.TransformerWithProperties, error) { - c, err := l.loadAndConfigurePlugin(ldr, v, res) - if err != nil { - return nil, err - } - t, ok := c.(resmap.Transformer) - if !ok { - return nil, fmt.Errorf("plugin %s not a transformer", res.OrgId()) - } - return &resmap.TransformerWithProperties{Transformer: t}, nil -} - -func relativePluginPath(id resid.ResId) string { - return filepath.Join( - id.Group, - id.Version, - strings.ToLower(id.Kind)) -} - -func (l *Loader) AbsolutePluginPath(id resid.ResId) (string, error) { - pluginHome, err := l.absPluginHome() - if err != nil { - return "", err - } - return filepath.Join(pluginHome, relativePluginPath(id), id.Kind), nil -} - -// absPluginHome is the home of kustomize Exec and Go plugins. -// Kustomize plugin configuration files are k8s-style objects -// containing the fields 'apiVersion' and 'kind', e.g. -// apiVersion: apps/v1 -// kind: Deployment -// kustomize reads plugin configuration data from a file path -// specified in the 'generators:' or 'transformers:' field of a -// kustomization file. For Exec and Go plugins, kustomize -// uses this data to both locate the plugin and configure it. -// Each Exec or Go plugin (its code, its tests, its supporting data -// files, etc.) must be housed in its own directory at -// ${absPluginHome}/${pluginApiVersion}/LOWERCASE(${pluginKind}) -// where -// - ${absPluginHome} is an absolute path, defined below. -// - ${pluginApiVersion} is taken from the plugin config file. -// - ${pluginKind} is taken from the plugin config file. -func (l *Loader) absPluginHome() (string, error) { - // External plugins are disabled--return the dummy plugin root. - if l.pc.PluginRestrictions != types.PluginRestrictionsNone { - return konfig.NoPluginHomeSentinal, nil - } - // We've already determined plugin home--use the cached value. 
- if l.absolutePluginHome != "" { - return l.absolutePluginHome, nil - } - - // Check default locations for a valid plugin root, and cache it if found. - dir, err := konfig.DefaultAbsPluginHome(l.fs) - if err != nil { - return "", err - } - l.absolutePluginHome = dir - return l.absolutePluginHome, nil -} - -func isBuiltinPlugin(res *resource.Resource) bool { - // TODO: the special string should appear in Group, not Version. - return res.GetGvk().Group == "" && - res.GetGvk().Version == konfig.BuiltinPluginApiVersion -} - -func (l *Loader) loadAndConfigurePlugin( - ldr ifc.Loader, - v ifc.Validator, - res *resource.Resource) (c resmap.Configurable, err error) { - if isBuiltinPlugin(res) { - switch l.pc.BpLoadingOptions { - case types.BploLoadFromFileSys: - c, err = l.loadPlugin(res) - case types.BploUseStaticallyLinked: - // Instead of looking for and loading a .so file, - // instantiate the plugin from a generated factory - // function (see "pluginator"). Being able to do this - // is what makes a plugin "builtin". - c, err = l.makeBuiltinPlugin(res.GetGvk()) - default: - err = fmt.Errorf( - "unknown plugin loader behavior specified: %v", - l.pc.BpLoadingOptions) - } - } else { - switch l.pc.PluginRestrictions { - case types.PluginRestrictionsNone: - c, err = l.loadPlugin(res) - case types.PluginRestrictionsBuiltinsOnly: - err = types.NewErrOnlyBuiltinPluginsAllowed(res.OrgId().Kind) - default: - err = fmt.Errorf( - "unknown plugin restriction specified: %v", - l.pc.PluginRestrictions) - } - } - if err != nil { - return nil, err - } - yaml, err := res.AsYAML() - if err != nil { - return nil, errors.Wrapf(err, "marshalling yaml from res %s", res.OrgId()) - } - err = c.Config(resmap.NewPluginHelpers(ldr, v, l.rf, l.pc), yaml) - if err != nil { - return nil, errors.Wrapf( - err, "plugin %s fails configuration", res.OrgId()) - } - return c, nil -} - -func (l *Loader) makeBuiltinPlugin(r resid.Gvk) (resmap.Configurable, error) { - bpt := builtinhelpers.GetBuiltinPluginType(r.Kind) - if f, ok := builtinhelpers.GeneratorFactories[bpt]; ok { - return f(), nil - } - if f, ok := builtinhelpers.TransformerFactories[bpt]; ok { - return f(), nil - } - return nil, errors.Errorf("unable to load builtin %s", r) -} - -func (l *Loader) loadPlugin(res *resource.Resource) (resmap.Configurable, error) { - spec := fnplugin.GetFunctionSpec(res) - if spec != nil { - // validation check that function mounts are under the current kustomization directory - for _, mount := range spec.Container.StorageMounts { - if filepath.IsAbs(mount.Src) { - return nil, errors.New(fmt.Sprintf("plugin %s with mount path '%s' is not permitted; "+ - "mount paths must be relative to the current kustomization directory", res.OrgId(), mount.Src)) - } - if strings.HasPrefix(filepath.Clean(mount.Src), "../") { - return nil, errors.New(fmt.Sprintf("plugin %s with mount path '%s' is not permitted; "+ - "mount paths must be under the current kustomization directory", res.OrgId(), mount.Src)) - } - } - return fnplugin.NewFnPlugin(&l.pc.FnpLoadingOptions), nil - } - return l.loadExecOrGoPlugin(res.OrgId()) -} - -func (l *Loader) loadExecOrGoPlugin(resId resid.ResId) (resmap.Configurable, error) { - absPluginPath, err := l.AbsolutePluginPath(resId) - if err != nil { - return nil, err - } - // First try to load the plugin as an executable. 
- p := execplugin.NewExecPlugin(absPluginPath) - if err = p.ErrIfNotExecutable(); err == nil { - return p, nil - } - if !os.IsNotExist(err) { - // The file exists, but something else is wrong, - // likely it's not executable. - // Assume the user forgot to set the exec bit, - // and return an error, rather than adding ".so" - // to the name and attempting to load it as a Go - // plugin, which will likely fail and result - // in an obscure message. - return nil, err - } - // Failing the above, try loading it as a Go plugin. - c, err := l.loadGoPlugin(resId, absPluginPath+".so") - if err != nil { - return nil, err - } - return c, nil -} - -// registry is a means to avoid trying to load the same .so file -// into memory more than once, which results in an error. -// Each test makes its own loader, and tries to load its own plugins, -// but the loaded .so files are in shared memory, so one will get -// "this plugin already loaded" errors if the registry is maintained -// as a Loader instance variable. So make it a package variable. -var registry = make(map[string]resmap.Configurable) - -func (l *Loader) loadGoPlugin(id resid.ResId, absPath string) (resmap.Configurable, error) { - regId := relativePluginPath(id) - if c, ok := registry[regId]; ok { - return copyPlugin(c), nil - } - if !utils.FileExists(absPath) { - return nil, fmt.Errorf( - "expected file with Go object code at: %s", absPath) - } - log.Printf("Attempting plugin load from '%s'", absPath) - p, err := plugin.Open(absPath) - if err != nil { - return nil, errors.Wrapf(err, "plugin %s fails to load", absPath) - } - symbol, err := p.Lookup(konfig.PluginSymbol) - if err != nil { - return nil, errors.Wrapf( - err, "plugin %s doesn't have symbol %s", - regId, konfig.PluginSymbol) - } - c, ok := symbol.(resmap.Configurable) - if !ok { - return nil, fmt.Errorf("plugin '%s' not configurable", regId) - } - registry[regId] = c - return copyPlugin(c), nil -} - -func copyPlugin(c resmap.Configurable) resmap.Configurable { - indirect := reflect.Indirect(reflect.ValueOf(c)) - newIndirect := reflect.New(indirect.Type()) - newIndirect.Elem().Set(reflect.ValueOf(indirect.Interface())) - newNamed := newIndirect.Interface() - return newNamed.(resmap.Configurable) -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/utils/utils.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/utils/utils.go deleted file mode 100644 index 8182f203e..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/plugins/utils/utils.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package utils - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" - "runtime" - "strconv" - "time" - - "sigs.k8s.io/kustomize/api/konfig" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/resource" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/filesys" - "sigs.k8s.io/yaml" -) - -const ( - idAnnotation = "kustomize.config.k8s.io/id" - HashAnnotation = "kustomize.config.k8s.io/needs-hash" - BehaviorAnnotation = "kustomize.config.k8s.io/behavior" -) - -func GoBin() string { - return filepath.Join(runtime.GOROOT(), "bin", "go") -} - -// DeterminePluginSrcRoot guesses where the user -// has her ${g}/${v}/$lower(${k})/${k}.go files. 
-func DeterminePluginSrcRoot(fSys filesys.FileSystem) (string, error) { - return konfig.FirstDirThatExistsElseError( - "plugin src root", fSys, []konfig.NotedFunc{ - { - Note: "relative to unit test", - F: func() string { - return filepath.Clean( - filepath.Join( - os.Getenv("PWD"), - "..", "..", - konfig.RelPluginHome)) - }, - }, - { - Note: "relative to unit test (internal pkg)", - F: func() string { - return filepath.Clean( - filepath.Join( - os.Getenv("PWD"), - "..", "..", "..", "..", - konfig.RelPluginHome)) - }, - }, - { - Note: "relative to api package", - F: func() string { - return filepath.Clean( - filepath.Join( - os.Getenv("PWD"), - "..", "..", "..", - konfig.RelPluginHome)) - }, - }, - { - Note: "old style $GOPATH", - F: func() string { - return filepath.Join( - os.Getenv("GOPATH"), - "src", konfig.DomainName, - konfig.ProgramName, konfig.RelPluginHome) - }, - }, - { - Note: "HOME with literal 'gopath'", - F: func() string { - return filepath.Join( - konfig.HomeDir(), "gopath", - "src", konfig.DomainName, - konfig.ProgramName, konfig.RelPluginHome) - }, - }, - { - Note: "home directory", - F: func() string { - return filepath.Join( - konfig.HomeDir(), konfig.DomainName, - konfig.ProgramName, konfig.RelPluginHome) - }, - }, - }) -} - -// FileYoungerThan returns true if the file both exists and has an -// age is <= the Duration argument. -func FileYoungerThan(path string, d time.Duration) bool { - fi, err := os.Stat(path) - if err != nil { - if os.IsNotExist(err) { - return false - } - } - return time.Since(fi.ModTime()) <= d -} - -// FileModifiedAfter returns true if the file both exists and was -// modified after the given time.. -func FileModifiedAfter(path string, t time.Time) bool { - fi, err := os.Stat(path) - if err != nil { - if os.IsNotExist(err) { - return false - } - } - return fi.ModTime().After(t) -} - -func FileExists(path string) bool { - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - return false - } - } - return true -} - -// GetResMapWithIDAnnotation returns a new copy of the given ResMap with the ResIds annotated in each Resource -func GetResMapWithIDAnnotation(rm resmap.ResMap) (resmap.ResMap, error) { - inputRM := rm.DeepCopy() - for _, r := range inputRM.Resources() { - idString, err := yaml.Marshal(r.CurId()) - if err != nil { - return nil, err - } - annotations := r.GetAnnotations() - annotations[idAnnotation] = string(idString) - if err = r.SetAnnotations(annotations); err != nil { - return nil, err - } - } - return inputRM, nil -} - -// UpdateResMapValues updates the Resource value in the given ResMap -// with the emitted Resource values in output. 
-func UpdateResMapValues(pluginName string, h *resmap.PluginHelpers, output []byte, rm resmap.ResMap) error { - mapFactory := h.ResmapFactory() - resFactory := mapFactory.RF() - resources, err := resFactory.SliceFromBytes(output) - if err != nil { - return err - } - // Don't use resources here, or error message will be unfriendly to plugin builders - newMap, err := mapFactory.NewResMapFromBytes([]byte{}) - if err != nil { - return err - } - - for _, r := range resources { - // stale--not manipulated by plugin transformers - if err = removeIDAnnotation(r); err != nil { - return err - } - - // Add to the new map, checking for duplicates - if err := newMap.Append(r); err != nil { - prettyID, err := json.Marshal(r.CurId()) - if err != nil { - prettyID = []byte(r.CurId().String()) - } - return fmt.Errorf("plugin %s generated duplicate resource: %s", pluginName, prettyID) - } - - // Add to or update the old map - oldIdx, err := rm.GetIndexOfCurrentId(r.CurId()) - if err != nil { - return err - } - if oldIdx != -1 { - rm.GetByIndex(oldIdx).ResetRNode(r) - } else { - if err := rm.Append(r); err != nil { - return err - } - } - } - - // Remove items the transformer deleted from the old map - for _, id := range rm.AllIds() { - newIdx, _ := newMap.GetIndexOfCurrentId(id) - if newIdx == -1 { - if err = rm.Remove(id); err != nil { - return err - } - } - } - - return nil -} - -func removeIDAnnotation(r *resource.Resource) error { - // remove the annotation set by Kustomize to track the resource - annotations := r.GetAnnotations() - delete(annotations, idAnnotation) - return r.SetAnnotations(annotations) -} - -// UpdateResourceOptions updates the generator options for each resource in the -// given ResMap based on plugin provided annotations. -func UpdateResourceOptions(rm resmap.ResMap) (resmap.ResMap, error) { - for _, r := range rm.Resources() { - // Disable name hashing by default and require plugin to explicitly - // request it for each resource. - annotations := r.GetAnnotations() - behavior := annotations[BehaviorAnnotation] - var needsHash bool - if val, ok := annotations[HashAnnotation]; ok { - b, err := strconv.ParseBool(val) - if err != nil { - return nil, fmt.Errorf( - "the annotation %q contains an invalid value (%q)", - HashAnnotation, val) - } - needsHash = b - } - delete(annotations, HashAnnotation) - delete(annotations, BehaviorAnnotation) - if err := r.SetAnnotations(annotations); err != nil { - return nil, err - } - if needsHash { - r.EnableHashSuffix() - } - r.SetBehavior(types.NewGenerationBehavior(behavior)) - } - return rm, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/target/errmissingkustomization.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/target/errmissingkustomization.go deleted file mode 100644 index 45e489593..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/target/errmissingkustomization.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package target - -import ( - "fmt" - "strings" - - "github.com/pkg/errors" - "sigs.k8s.io/kustomize/api/konfig" -) - -type errMissingKustomization struct { - path string -} - -func (e *errMissingKustomization) Error() string { - return fmt.Sprintf( - "unable to find one of %v in directory '%s'", - commaOr(quoted(konfig.RecognizedKustomizationFileNames())), - e.path) -} - -func IsMissingKustomizationFileError(err error) bool { - _, ok := err.(*errMissingKustomization) - if ok { - return true - } - _, ok = errors.Cause(err).(*errMissingKustomization) - return ok -} - -func NewErrMissingKustomization(p string) *errMissingKustomization { - return &errMissingKustomization{path: p} -} - -func quoted(l []string) []string { - r := make([]string, len(l)) - for i, v := range l { - r[i] = "'" + v + "'" - } - return r -} - -func commaOr(q []string) string { - return strings.Join(q[:len(q)-1], ", ") + " or " + q[len(q)-1] -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget.go deleted file mode 100644 index 4bbf7e98a..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget.go +++ /dev/null @@ -1,565 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package target - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/pkg/errors" - - "sigs.k8s.io/kustomize/api/ifc" - "sigs.k8s.io/kustomize/api/internal/accumulator" - "sigs.k8s.io/kustomize/api/internal/builtins" - "sigs.k8s.io/kustomize/api/internal/kusterr" - "sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig" - "sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers" - "sigs.k8s.io/kustomize/api/internal/plugins/loader" - "sigs.k8s.io/kustomize/api/internal/utils" - "sigs.k8s.io/kustomize/api/konfig" - load "sigs.k8s.io/kustomize/api/loader" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/resource" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/openapi" - "sigs.k8s.io/yaml" -) - -// KustTarget encapsulates the entirety of a kustomization build. -type KustTarget struct { - kustomization *types.Kustomization - kustFileName string - ldr ifc.Loader - validator ifc.Validator - rFactory *resmap.Factory - pLdr *loader.Loader - origin *resource.Origin -} - -// NewKustTarget returns a new instance of KustTarget. -func NewKustTarget( - ldr ifc.Loader, - validator ifc.Validator, - rFactory *resmap.Factory, - pLdr *loader.Loader) *KustTarget { - pLdrCopy := *pLdr - pLdrCopy.SetWorkDir(ldr.Root()) - return &KustTarget{ - ldr: ldr, - validator: validator, - rFactory: rFactory, - pLdr: &pLdrCopy, - } -} - -// Load attempts to load the target's kustomization file. -func (kt *KustTarget) Load() error { - content, kustFileName, err := loadKustFile(kt.ldr) - if err != nil { - return err - } - content, err = types.FixKustomizationPreUnmarshalling(content) - if err != nil { - return err - } - var k types.Kustomization - err = k.Unmarshal(content) - if err != nil { - return err - } - k.FixKustomizationPostUnmarshalling() - errs := k.EnforceFields() - if len(errs) > 0 { - return fmt.Errorf( - "Failed to read kustomization file under %s:\n"+ - strings.Join(errs, "\n"), kt.ldr.Root()) - } - kt.kustomization = &k - kt.kustFileName = kustFileName - return nil -} - -// Kustomization returns a copy of the immutable, internal kustomization object. 
-func (kt *KustTarget) Kustomization() types.Kustomization { - var result types.Kustomization - b, _ := json.Marshal(*kt.kustomization) - json.Unmarshal(b, &result) - return result -} - -func loadKustFile(ldr ifc.Loader) ([]byte, string, error) { - var content []byte - match := 0 - var kustFileName string - for _, kf := range konfig.RecognizedKustomizationFileNames() { - c, err := ldr.Load(kf) - if err == nil { - match += 1 - content = c - kustFileName = kf - } - } - switch match { - case 0: - return nil, "", NewErrMissingKustomization(ldr.Root()) - case 1: - return content, kustFileName, nil - default: - return nil, "", fmt.Errorf( - "Found multiple kustomization files under: %s\n", ldr.Root()) - } -} - -// MakeCustomizedResMap creates a fully customized ResMap -// per the instructions contained in its kustomization instance. -func (kt *KustTarget) MakeCustomizedResMap() (resmap.ResMap, error) { - return kt.makeCustomizedResMap() -} - -func (kt *KustTarget) makeCustomizedResMap() (resmap.ResMap, error) { - var origin *resource.Origin - if len(kt.kustomization.BuildMetadata) != 0 { - origin = &resource.Origin{} - } - kt.origin = origin - ra, err := kt.AccumulateTarget() - if err != nil { - return nil, err - } - - // The following steps must be done last, not as part of - // the recursion implicit in AccumulateTarget. - - err = kt.addHashesToNames(ra) - if err != nil { - return nil, err - } - - // Given that names have changed (prefixs/suffixes added), - // fix all the back references to those names. - err = ra.FixBackReferences() - if err != nil { - return nil, err - } - - // With all the back references fixed, it's OK to resolve Vars. - err = ra.ResolveVars() - if err != nil { - return nil, err - } - - return ra.ResMap(), nil -} - -func (kt *KustTarget) addHashesToNames( - ra *accumulator.ResAccumulator) error { - p := builtins.NewHashTransformerPlugin() - err := kt.configureBuiltinPlugin(p, nil, builtinhelpers.HashTransformer) - if err != nil { - return err - } - return ra.Transform(p) -} - -// AccumulateTarget returns a new ResAccumulator, -// holding customized resources and the data/rules used -// to do so. The name back references and vars are -// not yet fixed. -// The origin parameter is used through the recursive calls -// to annotate each resource with information about where -// the resource came from, e.g. the file and/or the repository -// it originated from. -// As an entrypoint, one can pass an empty resource.Origin object to -// AccumulateTarget. As AccumulateTarget moves recursively -// through kustomization directories, it updates `origin.path` -// accordingly. When a remote base is found, it updates `origin.repo` -// and `origin.ref` accordingly. -func (kt *KustTarget) AccumulateTarget() ( - ra *accumulator.ResAccumulator, err error) { - return kt.accumulateTarget(accumulator.MakeEmptyAccumulator()) -} - -// ra should be empty when this KustTarget is a Kustomization, or the ra of the parent if this KustTarget is a Component -// (or empty if the Component does not have a parent). 
-func (kt *KustTarget) accumulateTarget(ra *accumulator.ResAccumulator) ( - resRa *accumulator.ResAccumulator, err error) { - ra, err = kt.accumulateResources(ra, kt.kustomization.Resources) - if err != nil { - return nil, errors.Wrap(err, "accumulating resources") - } - ra, err = kt.accumulateComponents(ra, kt.kustomization.Components) - if err != nil { - return nil, errors.Wrap(err, "accumulating components") - } - tConfig, err := builtinconfig.MakeTransformerConfig( - kt.ldr, kt.kustomization.Configurations) - if err != nil { - return nil, err - } - err = ra.MergeConfig(tConfig) - if err != nil { - return nil, errors.Wrapf( - err, "merging config %v", tConfig) - } - crdTc, err := accumulator.LoadConfigFromCRDs(kt.ldr, kt.kustomization.Crds) - if err != nil { - return nil, errors.Wrapf( - err, "loading CRDs %v", kt.kustomization.Crds) - } - err = ra.MergeConfig(crdTc) - if err != nil { - return nil, errors.Wrapf( - err, "merging CRDs %v", crdTc) - } - err = kt.runGenerators(ra) - if err != nil { - return nil, err - } - err = kt.runTransformers(ra) - if err != nil { - return nil, err - } - err = kt.runValidators(ra) - if err != nil { - return nil, err - } - err = ra.MergeVars(kt.kustomization.Vars) - if err != nil { - return nil, errors.Wrapf( - err, "merging vars %v", kt.kustomization.Vars) - } - err = kt.IgnoreLocal(ra) - if err != nil { - return nil, err - } - return ra, nil -} - -// IgnoreLocal drops the local resource by checking the annotation "config.kubernetes.io/local-config". -func (kt *KustTarget) IgnoreLocal(ra *accumulator.ResAccumulator) error { - rf := kt.rFactory.RF() - if rf.IncludeLocalConfigs { - return nil - } - remainRes, err := rf.DropLocalNodes(ra.ResMap().ToRNodeSlice()) - if err != nil { - return err - } - return ra.Intersection(kt.rFactory.FromResourceSlice(remainRes)) -} - -func (kt *KustTarget) runGenerators( - ra *accumulator.ResAccumulator) error { - var generators []*resmap.GeneratorWithProperties - gs, err := kt.configureBuiltinGenerators() - if err != nil { - return err - } - generators = append(generators, gs...) - - gs, err = kt.configureExternalGenerators() - if err != nil { - return errors.Wrap(err, "loading generator plugins") - } - generators = append(generators, gs...) 
- for i, g := range generators { - resMap, err := g.Generate() - if err != nil { - return err - } - if resMap != nil { - err = resMap.AddOriginAnnotation(generators[i].Origin) - if err != nil { - return errors.Wrapf(err, "adding origin annotations for generator %v", g) - } - } - err = ra.AbsorbAll(resMap) - if err != nil { - return errors.Wrapf(err, "merging from generator %v", g) - } - } - return nil -} - -func (kt *KustTarget) configureExternalGenerators() ( - []*resmap.GeneratorWithProperties, error) { - ra := accumulator.MakeEmptyAccumulator() - var generatorPaths []string - for _, p := range kt.kustomization.Generators { - // handle inline generators - rm, err := kt.rFactory.NewResMapFromBytes([]byte(p)) - if err != nil { - // not an inline config - generatorPaths = append(generatorPaths, p) - continue - } - // inline config, track the origin - if kt.origin != nil { - resources := rm.Resources() - for _, r := range resources { - r.SetOrigin(kt.origin.Append(kt.kustFileName)) - rm.Replace(r) - } - } - if err = ra.AppendAll(rm); err != nil { - return nil, errors.Wrapf(err, "configuring external generator") - } - } - ra, err := kt.accumulateResources(ra, generatorPaths) - if err != nil { - return nil, err - } - return kt.pLdr.LoadGenerators(kt.ldr, kt.validator, ra.ResMap()) -} - -func (kt *KustTarget) runTransformers(ra *accumulator.ResAccumulator) error { - var r []*resmap.TransformerWithProperties - tConfig := ra.GetTransformerConfig() - lts, err := kt.configureBuiltinTransformers(tConfig) - if err != nil { - return err - } - r = append(r, lts...) - lts, err = kt.configureExternalTransformers(kt.kustomization.Transformers) - if err != nil { - return err - } - r = append(r, lts...) - return ra.Transform(newMultiTransformer(r)) -} - -func (kt *KustTarget) configureExternalTransformers(transformers []string) ([]*resmap.TransformerWithProperties, error) { - ra := accumulator.MakeEmptyAccumulator() - var transformerPaths []string - for _, p := range transformers { - // handle inline transformers - rm, err := kt.rFactory.NewResMapFromBytes([]byte(p)) - if err != nil { - // not an inline config - transformerPaths = append(transformerPaths, p) - continue - } - // inline config, track the origin - if kt.origin != nil { - resources := rm.Resources() - for _, r := range resources { - r.SetOrigin(kt.origin.Append(kt.kustFileName)) - rm.Replace(r) - } - } - - if err = ra.AppendAll(rm); err != nil { - return nil, errors.Wrapf(err, "configuring external transformer") - } - } - ra, err := kt.accumulateResources(ra, transformerPaths) - if err != nil { - return nil, err - } - return kt.pLdr.LoadTransformers(kt.ldr, kt.validator, ra.ResMap()) -} - -func (kt *KustTarget) runValidators(ra *accumulator.ResAccumulator) error { - validators, err := kt.configureExternalTransformers(kt.kustomization.Validators) - if err != nil { - return err - } - for _, v := range validators { - // Validators shouldn't modify the resource map - orignal := ra.ResMap().DeepCopy() - err = v.Transform(ra.ResMap()) - if err != nil { - return err - } - newMap := ra.ResMap().DeepCopy() - if err = kt.removeValidatedByLabel(newMap); err != nil { - return err - } - if err = orignal.ErrorIfNotEqualSets(newMap); err != nil { - return fmt.Errorf("validator shouldn't modify the resource map: %v", err) - } - } - return nil -} - -func (kt *KustTarget) removeValidatedByLabel(rm resmap.ResMap) error { - resources := rm.Resources() - for _, r := range resources { - labels := r.GetLabels() - if _, found := labels[konfig.ValidatedByLabelKey]; 
!found { - continue - } - delete(labels, konfig.ValidatedByLabelKey) - if err := r.SetLabels(labels); err != nil { - return err - } - } - return nil -} - -// accumulateResources fills the given resourceAccumulator -// with resources read from the given list of paths. -func (kt *KustTarget) accumulateResources( - ra *accumulator.ResAccumulator, paths []string) (*accumulator.ResAccumulator, error) { - for _, path := range paths { - // try loading resource as file then as base (directory or git repository) - if errF := kt.accumulateFile(ra, path); errF != nil { - // not much we can do if the error is an HTTP error so we bail out - if errors.Is(errF, load.ErrHTTP) { - return nil, errF - } - ldr, err := kt.ldr.New(path) - if err != nil { - if kusterr.IsMalformedYAMLError(errF) { // Some error occurred while tyring to decode YAML file - return nil, errF - } - return nil, errors.Wrapf( - err, "accumulation err='%s'", errF.Error()) - } - // store the origin, we'll need it later - origin := kt.origin.Copy() - if kt.origin != nil { - kt.origin = kt.origin.Append(path) - ra, err = kt.accumulateDirectory(ra, ldr, false) - // after we are done recursing through the directory, reset the origin - kt.origin = &origin - } else { - ra, err = kt.accumulateDirectory(ra, ldr, false) - } - if err != nil { - if kusterr.IsMalformedYAMLError(errF) { // Some error occurred while tyring to decode YAML file - return nil, errF - } - return nil, errors.Wrapf( - err, "accumulation err='%s'", errF.Error()) - } - } - } - return ra, nil -} - -// accumulateResources fills the given resourceAccumulator -// with resources read from the given list of paths. -func (kt *KustTarget) accumulateComponents( - ra *accumulator.ResAccumulator, paths []string) (*accumulator.ResAccumulator, error) { - for _, path := range paths { - // Components always refer to directories - ldr, errL := kt.ldr.New(path) - if errL != nil { - return nil, fmt.Errorf("loader.New %q", errL) - } - var errD error - // store the origin, we'll need it later - origin := kt.origin.Copy() - if kt.origin != nil { - kt.origin = kt.origin.Append(path) - ra, errD = kt.accumulateDirectory(ra, ldr, true) - // after we are done recursing through the directory, reset the origin - kt.origin = &origin - } else { - ra, errD = kt.accumulateDirectory(ra, ldr, true) - } - if errD != nil { - return nil, fmt.Errorf("accumulateDirectory: %q", errD) - } - } - return ra, nil -} - -func (kt *KustTarget) accumulateDirectory( - ra *accumulator.ResAccumulator, ldr ifc.Loader, isComponent bool) (*accumulator.ResAccumulator, error) { - defer ldr.Cleanup() - subKt := NewKustTarget(ldr, kt.validator, kt.rFactory, kt.pLdr) - err := subKt.Load() - if err != nil { - return nil, errors.Wrapf( - err, "couldn't make target for path '%s'", ldr.Root()) - } - subKt.kustomization.BuildMetadata = kt.kustomization.BuildMetadata - subKt.origin = kt.origin - var bytes []byte - if openApiPath, exists := subKt.Kustomization().OpenAPI["path"]; exists { - bytes, err = ldr.Load(openApiPath) - if err != nil { - return nil, err - } - } - err = openapi.SetSchema(subKt.Kustomization().OpenAPI, bytes, false) - if err != nil { - return nil, err - } - if isComponent && subKt.kustomization.Kind != types.ComponentKind { - return nil, fmt.Errorf( - "expected kind '%s' for path '%s' but got '%s'", types.ComponentKind, ldr.Root(), subKt.kustomization.Kind) - } else if !isComponent && subKt.kustomization.Kind == types.ComponentKind { - return nil, fmt.Errorf( - "expected kind != '%s' for path '%s'", types.ComponentKind, 
ldr.Root()) - } - - var subRa *accumulator.ResAccumulator - if isComponent { - // Components don't create a new accumulator: the kustomization directives are added to the current accumulator - subRa, err = subKt.accumulateTarget(ra) - ra = accumulator.MakeEmptyAccumulator() - } else { - // Child Kustomizations create a new accumulator which resolves their kustomization directives, which will later - // be merged into the current accumulator. - subRa, err = subKt.AccumulateTarget() - } - if err != nil { - return nil, errors.Wrapf( - err, "recursed accumulation of path '%s'", ldr.Root()) - } - err = ra.MergeAccumulator(subRa) - if err != nil { - return nil, errors.Wrapf( - err, "recursed merging from path '%s'", ldr.Root()) - } - return ra, nil -} - -func (kt *KustTarget) accumulateFile( - ra *accumulator.ResAccumulator, path string) error { - resources, err := kt.rFactory.FromFile(kt.ldr, path) - if err != nil { - return errors.Wrapf(err, "accumulating resources from '%s'", path) - } - if kt.origin != nil { - originAnno, err := kt.origin.Append(path).String() - if err != nil { - return errors.Wrapf(err, "cannot add path annotation for '%s'", path) - } - err = resources.AnnotateAll(utils.OriginAnnotationKey, originAnno) - if err != nil || originAnno == "" { - return errors.Wrapf(err, "cannot add path annotation for '%s'", path) - } - } - err = ra.AppendAll(resources) - if err != nil { - return errors.Wrapf(err, "merging resources from '%s'", path) - } - return nil -} - -func (kt *KustTarget) configureBuiltinPlugin( - p resmap.Configurable, c interface{}, bpt builtinhelpers.BuiltinPluginType) (err error) { - var y []byte - if c != nil { - y, err = yaml.Marshal(c) - if err != nil { - return errors.Wrapf( - err, "builtin %s marshal", bpt) - } - } - err = p.Config( - resmap.NewPluginHelpers( - kt.ldr, kt.validator, kt.rFactory, kt.pLdr.Config()), - y) - if err != nil { - return errors.Wrapf( - err, "trouble configuring builtin %s with config: `\n%s`", bpt, string(y)) - } - return nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget_configplugin.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget_configplugin.go deleted file mode 100644 index 0991c5fac..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/target/kusttarget_configplugin.go +++ /dev/null @@ -1,446 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package target - -import ( - "fmt" - "path/filepath" - - "github.com/pkg/errors" - "sigs.k8s.io/kustomize/api/internal/plugins/builtinconfig" - "sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/resource" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// Functions dedicated to configuring the builtin -// transformer and generator plugins using config data -// read from a kustomization file and from the -// config.TransformerConfig, whose data may be a -// mix of hardcoded values and data read from file. -// -// Non-builtin plugins will get their configuration -// from their own dedicated structs and YAML files. -// -// There are some loops in the functions below because -// the kustomization file would, say, allow someone to -// request multiple secrets be made, or run multiple -// image tag transforms. In these cases, we'll need -// N plugin instances with differing configurations. 
- -func (kt *KustTarget) configureBuiltinGenerators() ( - result []*resmap.GeneratorWithProperties, err error) { - for _, bpt := range []builtinhelpers.BuiltinPluginType{ - builtinhelpers.ConfigMapGenerator, - builtinhelpers.SecretGenerator, - builtinhelpers.HelmChartInflationGenerator, - } { - r, err := generatorConfigurators[bpt]( - kt, bpt, builtinhelpers.GeneratorFactories[bpt]) - if err != nil { - return nil, err - } - - var generatorOrigin *resource.Origin - if kt.origin != nil { - generatorOrigin = &resource.Origin{ - Repo: kt.origin.Repo, - Ref: kt.origin.Ref, - ConfiguredIn: filepath.Join(kt.origin.Path, kt.kustFileName), - ConfiguredBy: yaml.ResourceIdentifier{ - TypeMeta: yaml.TypeMeta{ - APIVersion: "builtin", - Kind: bpt.String(), - }, - }, - } - } - - for i := range r { - result = append(result, &resmap.GeneratorWithProperties{Generator: r[i], Origin: generatorOrigin}) - } - } - return result, nil -} - -func (kt *KustTarget) configureBuiltinTransformers( - tc *builtinconfig.TransformerConfig) ( - result []*resmap.TransformerWithProperties, err error) { - for _, bpt := range []builtinhelpers.BuiltinPluginType{ - builtinhelpers.PatchStrategicMergeTransformer, - builtinhelpers.PatchTransformer, - builtinhelpers.NamespaceTransformer, - builtinhelpers.PrefixTransformer, - builtinhelpers.SuffixTransformer, - builtinhelpers.LabelTransformer, - builtinhelpers.AnnotationsTransformer, - builtinhelpers.PatchJson6902Transformer, - builtinhelpers.ReplicaCountTransformer, - builtinhelpers.ImageTagTransformer, - builtinhelpers.ReplacementTransformer, - } { - r, err := transformerConfigurators[bpt]( - kt, bpt, builtinhelpers.TransformerFactories[bpt], tc) - if err != nil { - return nil, err - } - var transformerOrigin *resource.Origin - if kt.origin != nil { - transformerOrigin = &resource.Origin{ - Repo: kt.origin.Repo, - Ref: kt.origin.Ref, - ConfiguredIn: filepath.Join(kt.origin.Path, kt.kustFileName), - ConfiguredBy: yaml.ResourceIdentifier{ - TypeMeta: yaml.TypeMeta{ - APIVersion: "builtin", - Kind: bpt.String(), - }, - }, - } - } - for i := range r { - result = append(result, &resmap.TransformerWithProperties{Transformer: r[i], Origin: transformerOrigin}) - } - } - return result, nil -} - -type gFactory func() resmap.GeneratorPlugin - -var generatorConfigurators = map[builtinhelpers.BuiltinPluginType]func( - kt *KustTarget, - bpt builtinhelpers.BuiltinPluginType, - factory gFactory) (result []resmap.Generator, err error){ - builtinhelpers.SecretGenerator: func(kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f gFactory) ( - result []resmap.Generator, err error) { - var c struct { - types.SecretArgs - } - for _, args := range kt.kustomization.SecretGenerator { - c.SecretArgs = args - c.SecretArgs.Options = types.MergeGlobalOptionsIntoLocal( - c.SecretArgs.Options, kt.kustomization.GeneratorOptions) - p := f() - err := kt.configureBuiltinPlugin(p, c, bpt) - if err != nil { - return nil, err - } - result = append(result, p) - } - return - }, - - builtinhelpers.ConfigMapGenerator: func(kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f gFactory) ( - result []resmap.Generator, err error) { - var c struct { - types.ConfigMapArgs - } - for _, args := range kt.kustomization.ConfigMapGenerator { - c.ConfigMapArgs = args - c.ConfigMapArgs.Options = types.MergeGlobalOptionsIntoLocal( - c.ConfigMapArgs.Options, kt.kustomization.GeneratorOptions) - p := f() - err := kt.configureBuiltinPlugin(p, c, bpt) - if err != nil { - return nil, err - } - result = append(result, p) - } - return - }, 
- - builtinhelpers.HelmChartInflationGenerator: func( - kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f gFactory) ( - result []resmap.Generator, err error) { - var c struct { - types.HelmGlobals - types.HelmChart - } - var globals types.HelmGlobals - if kt.kustomization.HelmGlobals != nil { - globals = *kt.kustomization.HelmGlobals - } - for _, chart := range kt.kustomization.HelmCharts { - c.HelmGlobals = globals - c.HelmChart = chart - p := f() - if err = kt.configureBuiltinPlugin(p, c, bpt); err != nil { - return nil, err - } - result = append(result, p) - } - return - }, -} - -type tFactory func() resmap.TransformerPlugin - -var transformerConfigurators = map[builtinhelpers.BuiltinPluginType]func( - kt *KustTarget, - bpt builtinhelpers.BuiltinPluginType, - f tFactory, - tc *builtinconfig.TransformerConfig) (result []resmap.Transformer, err error){ - builtinhelpers.NamespaceTransformer: func( - kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, tc *builtinconfig.TransformerConfig) ( - result []resmap.Transformer, err error) { - if kt.kustomization.Namespace == "" { - return - } - var c struct { - types.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"` - FieldSpecs []types.FieldSpec - } - c.Namespace = kt.kustomization.Namespace - c.FieldSpecs = tc.NameSpace - p := f() - err = kt.configureBuiltinPlugin(p, c, bpt) - if err != nil { - return nil, err - } - result = append(result, p) - return - }, - - builtinhelpers.PatchJson6902Transformer: func( - kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, _ *builtinconfig.TransformerConfig) ( - result []resmap.Transformer, err error) { - var c struct { - Target *types.Selector `json:"target,omitempty" yaml:"target,omitempty"` - Path string `json:"path,omitempty" yaml:"path,omitempty"` - JsonOp string `json:"jsonOp,omitempty" yaml:"jsonOp,omitempty"` - } - for _, args := range kt.kustomization.PatchesJson6902 { - c.Target = args.Target - c.Path = args.Path - c.JsonOp = args.Patch - p := f() - err = kt.configureBuiltinPlugin(p, c, bpt) - if err != nil { - return nil, err - } - result = append(result, p) - } - return - }, - builtinhelpers.PatchStrategicMergeTransformer: func( - kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, _ *builtinconfig.TransformerConfig) ( - result []resmap.Transformer, err error) { - if len(kt.kustomization.PatchesStrategicMerge) == 0 { - return - } - var c struct { - Paths []types.PatchStrategicMerge `json:"paths,omitempty" yaml:"paths,omitempty"` - } - c.Paths = kt.kustomization.PatchesStrategicMerge - p := f() - err = kt.configureBuiltinPlugin(p, c, bpt) - if err != nil { - return nil, err - } - result = append(result, p) - return - }, - builtinhelpers.PatchTransformer: func( - kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, _ *builtinconfig.TransformerConfig) ( - result []resmap.Transformer, err error) { - if len(kt.kustomization.Patches) == 0 { - return - } - var c struct { - Path string `json:"path,omitempty" yaml:"path,omitempty"` - Patch string `json:"patch,omitempty" yaml:"patch,omitempty"` - Target *types.Selector `json:"target,omitempty" yaml:"target,omitempty"` - Options map[string]bool `json:"options,omitempty" yaml:"options,omitempty"` - } - for _, pc := range kt.kustomization.Patches { - c.Target = pc.Target - c.Patch = pc.Patch - c.Path = pc.Path - c.Options = pc.Options - p := f() - err = kt.configureBuiltinPlugin(p, c, bpt) - if err != nil { - return nil, err - } - result = append(result, p) - } - return - }, - 
builtinhelpers.LabelTransformer: func( - kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, tc *builtinconfig.TransformerConfig) ( - result []resmap.Transformer, err error) { - if len(kt.kustomization.Labels) == 0 && len(kt.kustomization.CommonLabels) == 0 { - return - } - for _, label := range kt.kustomization.Labels { - var c struct { - Labels map[string]string - FieldSpecs []types.FieldSpec - } - c.Labels = label.Pairs - fss := types.FsSlice(label.FieldSpecs) - // merge the custom fieldSpecs with the default - if label.IncludeSelectors { - fss, err = fss.MergeAll(tc.CommonLabels) - } else { - // merge spec/template/metadata fieldSpec if includeTemplate flag is true - if label.IncludeTemplates { - fss, err = fss.MergeOne(types.FieldSpec{Path: "spec/template/metadata/labels", CreateIfNotPresent: false}) - if err != nil { - return nil, errors.Wrap(err, "failed to merge template fieldSpec") - } - } - // only add to metadata by default - fss, err = fss.MergeOne(types.FieldSpec{Path: "metadata/labels", CreateIfNotPresent: true}) - } - if err != nil { - return nil, err - } - c.FieldSpecs = fss - p := f() - err = kt.configureBuiltinPlugin(p, c, bpt) - if err != nil { - return nil, err - } - result = append(result, p) - } - var c struct { - Labels map[string]string - FieldSpecs []types.FieldSpec - } - c.Labels = kt.kustomization.CommonLabels - c.FieldSpecs = tc.CommonLabels - p := f() - err = kt.configureBuiltinPlugin(p, c, bpt) - if err != nil { - return nil, err - } - result = append(result, p) - return - }, - builtinhelpers.AnnotationsTransformer: func( - kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, tc *builtinconfig.TransformerConfig) ( - result []resmap.Transformer, err error) { - if len(kt.kustomization.CommonAnnotations) == 0 { - return - } - var c struct { - Annotations map[string]string - FieldSpecs []types.FieldSpec - } - c.Annotations = kt.kustomization.CommonAnnotations - c.FieldSpecs = tc.CommonAnnotations - p := f() - err = kt.configureBuiltinPlugin(p, c, bpt) - if err != nil { - return nil, err - } - result = append(result, p) - return - }, - builtinhelpers.PrefixTransformer: func( - kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, tc *builtinconfig.TransformerConfig) ( - result []resmap.Transformer, err error) { - if kt.kustomization.NamePrefix == "" { - return - } - var c struct { - Prefix string `json:"prefix,omitempty" yaml:"prefix,omitempty"` - FieldSpecs []types.FieldSpec `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"` - } - c.Prefix = kt.kustomization.NamePrefix - c.FieldSpecs = tc.NamePrefix - p := f() - err = kt.configureBuiltinPlugin(p, c, bpt) - if err != nil { - return nil, err - } - result = append(result, p) - return - }, - builtinhelpers.SuffixTransformer: func( - kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, tc *builtinconfig.TransformerConfig) ( - result []resmap.Transformer, err error) { - if kt.kustomization.NameSuffix == "" { - return - } - var c struct { - Suffix string `json:"suffix,omitempty" yaml:"suffix,omitempty"` - FieldSpecs []types.FieldSpec `json:"fieldSpecs,omitempty" yaml:"fieldSpecs,omitempty"` - } - c.Suffix = kt.kustomization.NameSuffix - c.FieldSpecs = tc.NameSuffix - p := f() - err = kt.configureBuiltinPlugin(p, c, bpt) - if err != nil { - return nil, err - } - result = append(result, p) - return - }, - builtinhelpers.ImageTagTransformer: func( - kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, tc *builtinconfig.TransformerConfig) ( - result 
[]resmap.Transformer, err error) { - var c struct { - ImageTag types.Image - FieldSpecs []types.FieldSpec - } - for _, args := range kt.kustomization.Images { - c.ImageTag = args - c.FieldSpecs = tc.Images - p := f() - err = kt.configureBuiltinPlugin(p, c, bpt) - if err != nil { - return nil, err - } - result = append(result, p) - } - return - }, - builtinhelpers.ReplacementTransformer: func( - kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, _ *builtinconfig.TransformerConfig) ( - result []resmap.Transformer, err error) { - if len(kt.kustomization.Replacements) == 0 { - return - } - var c struct { - Replacements []types.ReplacementField - } - c.Replacements = kt.kustomization.Replacements - p := f() - err = kt.configureBuiltinPlugin(p, c, bpt) - if err != nil { - return nil, err - } - result = append(result, p) - return result, nil - }, - builtinhelpers.ReplicaCountTransformer: func( - kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, tc *builtinconfig.TransformerConfig) ( - result []resmap.Transformer, err error) { - var c struct { - Replica types.Replica - FieldSpecs []types.FieldSpec - } - for _, args := range kt.kustomization.Replicas { - c.Replica = args - c.FieldSpecs = tc.Replicas - p := f() - err = kt.configureBuiltinPlugin(p, c, bpt) - if err != nil { - return nil, err - } - result = append(result, p) - } - return - }, - // No kustomization file keyword for this yet. - builtinhelpers.ValueAddTransformer: func( - kt *KustTarget, bpt builtinhelpers.BuiltinPluginType, f tFactory, tc *builtinconfig.TransformerConfig) ( - result []resmap.Transformer, err error) { - return nil, fmt.Errorf("valueadd keyword not yet defined") - }, -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/target/multitransformer.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/target/multitransformer.go deleted file mode 100644 index 3bc0a8715..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/target/multitransformer.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package target - -import ( - "sigs.k8s.io/kustomize/api/resmap" -) - -// multiTransformer contains a list of transformers. -type multiTransformer struct { - transformers []*resmap.TransformerWithProperties -} - -var _ resmap.Transformer = &multiTransformer{} - -// newMultiTransformer constructs a multiTransformer. -func newMultiTransformer(t []*resmap.TransformerWithProperties) resmap.Transformer { - r := &multiTransformer{ - transformers: make([]*resmap.TransformerWithProperties, len(t)), - } - copy(r.transformers, t) - return r -} - -// Transform applies the member transformers in order to the resources, -// optionally detecting and erroring on commutation conflict. -func (o *multiTransformer) Transform(m resmap.ResMap) error { - for _, t := range o.transformers { - if err := t.Transform(m); err != nil { - return err - } - if t.Origin != nil { - if err := m.AddTransformerAnnotation(t.Origin); err != nil { - return err - } - } - m.DropEmpties() - } - return nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/utils/annotations.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/utils/annotations.go deleted file mode 100644 index 8a2d5bb4b..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/utils/annotations.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2022 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package utils - -import "sigs.k8s.io/kustomize/api/konfig" - -const ( - // build annotations - BuildAnnotationPreviousKinds = konfig.ConfigAnnoDomain + "/previousKinds" - BuildAnnotationPreviousNames = konfig.ConfigAnnoDomain + "/previousNames" - BuildAnnotationPrefixes = konfig.ConfigAnnoDomain + "/prefixes" - BuildAnnotationSuffixes = konfig.ConfigAnnoDomain + "/suffixes" - BuildAnnotationPreviousNamespaces = konfig.ConfigAnnoDomain + "/previousNamespaces" - BuildAnnotationsRefBy = konfig.ConfigAnnoDomain + "/refBy" - BuildAnnotationsGenBehavior = konfig.ConfigAnnoDomain + "/generatorBehavior" - BuildAnnotationsGenAddHashSuffix = konfig.ConfigAnnoDomain + "/needsHashSuffix" - - // the following are only for patches, to specify whether they can change names - // and kinds of their targets - BuildAnnotationAllowNameChange = konfig.ConfigAnnoDomain + "/allowNameChange" - BuildAnnotationAllowKindChange = konfig.ConfigAnnoDomain + "/allowKindChange" - - // for keeping track of origin and transformer data - OriginAnnotationKey = "config.kubernetes.io/origin" - TransformerAnnotationKey = "alpha.config.kubernetes.io/transformations" - - Enabled = "enabled" -) diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/utils/errtimeout.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/utils/errtimeout.go deleted file mode 100644 index 24b8abe66..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/utils/errtimeout.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package utils - -import ( - "fmt" - "time" - - "github.com/pkg/errors" -) - -type errTimeOut struct { - duration time.Duration - cmd string -} - -func NewErrTimeOut(d time.Duration, c string) errTimeOut { - return errTimeOut{duration: d, cmd: c} -} - -func (e errTimeOut) Error() string { - return fmt.Sprintf("hit %s timeout running '%s'", e.duration, e.cmd) -} - -func IsErrTimeout(err error) bool { - if err == nil { - return false - } - _, ok := err.(errTimeOut) - if ok { - return true - } - _, ok = errors.Cause(err).(errTimeOut) - return ok -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/utils/makeResIds.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/utils/makeResIds.go deleted file mode 100644 index 61fbe1314..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/utils/makeResIds.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2022 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package utils - -import ( - "fmt" - "strings" - - "sigs.k8s.io/kustomize/kyaml/resid" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// MakeResIds returns all of an RNode's current and previous Ids -func MakeResIds(n *yaml.RNode) ([]resid.ResId, error) { - var result []resid.ResId - apiVersion := n.Field(yaml.APIVersionField) - var group, version string - if apiVersion != nil { - group, version = resid.ParseGroupVersion(yaml.GetValue(apiVersion.Value)) - } - result = append(result, resid.NewResIdWithNamespace( - resid.Gvk{Group: group, Version: version, Kind: n.GetKind()}, n.GetName(), n.GetNamespace()), - ) - prevIds, err := PrevIds(n) - if err != nil { - return nil, err - } - result = append(result, prevIds...) 
- return result, nil -} - -// PrevIds returns all of an RNode's previous Ids -func PrevIds(n *yaml.RNode) ([]resid.ResId, error) { - var ids []resid.ResId - // TODO: merge previous names and namespaces into one list of - // pairs on one annotation so there is no chance of error - annotations := n.GetAnnotations() - if _, ok := annotations[BuildAnnotationPreviousNames]; !ok { - return nil, nil - } - names := strings.Split(annotations[BuildAnnotationPreviousNames], ",") - ns := strings.Split(annotations[BuildAnnotationPreviousNamespaces], ",") - kinds := strings.Split(annotations[BuildAnnotationPreviousKinds], ",") - // This should never happen - if len(names) != len(ns) || len(names) != len(kinds) { - return nil, fmt.Errorf( - "number of previous names, " + - "number of previous namespaces, " + - "number of previous kinds not equal") - } - for i := range names { - meta, err := n.GetMeta() - if err != nil { - return nil, err - } - group, version := resid.ParseGroupVersion(meta.APIVersion) - gvk := resid.Gvk{ - Group: group, - Version: version, - Kind: kinds[i], - } - ids = append(ids, resid.NewResIdWithNamespace( - gvk, names[i], ns[i])) - } - return ids, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/utils/stringslice.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/utils/stringslice.go deleted file mode 100644 index 3dc422725..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/utils/stringslice.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package utils - -// StringSliceIndex returns the index of the str, else -1. -func StringSliceIndex(slice []string, str string) int { - for i := range slice { - if slice[i] == str { - return i - } - } - return -1 -} - -// StringSliceContains returns true if the slice has the string. -func StringSliceContains(slice []string, str string) bool { - for _, s := range slice { - if s == str { - return true - } - } - return false -} - -// SameEndingSubSlice returns true if the slices end the same way, e.g. -// {"a", "b", "c"}, {"b", "c"} => true -// {"a", "b", "c"}, {"a", "b"} => false -// If one slice is empty and the other is not, return false. -func SameEndingSubSlice(shortest, longest []string) bool { - if len(shortest) > len(longest) { - longest, shortest = shortest, longest - } - diff := len(longest) - len(shortest) - if len(shortest) == 0 { - return diff == 0 - } - for i := len(shortest) - 1; i >= 0; i-- { - if longest[i+diff] != shortest[i] { - return false - } - } - return true -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/utils/timedcall.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/utils/timedcall.go deleted file mode 100644 index 0afadd0c3..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/utils/timedcall.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package utils - -import ( - "time" -) - -// TimedCall runs fn, failing if it doesn't complete in the given duration. -// The description is used in the timeout error message. 
-func TimedCall(description string, d time.Duration, fn func() error) error { - done := make(chan error) - timer := time.NewTimer(d) - defer timer.Stop() - go func() { done <- fn() }() - select { - case err := <-done: - return err - case <-timer.C: - return NewErrTimeOut(d, description) - } -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/internal/validate/fieldvalidator.go b/src/vendor/sigs.k8s.io/kustomize/api/internal/validate/fieldvalidator.go deleted file mode 100644 index 5ccfc3ce7..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/internal/validate/fieldvalidator.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package validate - -import ( - "sigs.k8s.io/kustomize/api/ifc" -) - -// FieldValidator implements ifc.Validator to check -// the values of various KRM string fields, -// e.g. labels, annotations, names, namespaces. -// -// TODO: Have this use kyaml/yaml/internal/k8sgen/pkg/labels -// which has label and annotation validation code, but is internal -// so this impl would need to move to kyaml (a fine idea). -type FieldValidator struct { -} - -var _ ifc.Validator = (*FieldValidator)(nil) - -func NewFieldValidator() *FieldValidator { - return &FieldValidator{} -} - -// TODO(#FieldValidator): implement MakeAnnotationValidator -func (f FieldValidator) MakeAnnotationValidator() func(map[string]string) error { - return func(x map[string]string) error { - return nil - } -} - -// TODO(#FieldValidator): implement MakeAnnotationNameValidator -func (f FieldValidator) MakeAnnotationNameValidator() func([]string) error { - return func(x []string) error { - return nil - } -} - -// TODO(#FieldValidator): implement MakeLabelValidator -func (f FieldValidator) MakeLabelValidator() func(map[string]string) error { - return func(x map[string]string) error { - return nil - } -} - -// TODO(#FieldValidator): implement MakeLabelNameValidator -func (f FieldValidator) MakeLabelNameValidator() func([]string) error { - return func(x []string) error { - return nil - } -} - -// TODO(#FieldValidator): implement ValidateNamespace -func (f FieldValidator) ValidateNamespace(s string) []string { - var errs []string - return errs -} - -// TODO(#FieldValidator): implement ErrIfInvalidKey -func (f FieldValidator) ErrIfInvalidKey(s string) error { - return nil -} - -// TODO(#FieldValidator): implement IsEnvVarName -func (f FieldValidator) IsEnvVarName(k string) error { - return nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/commonannotations.go b/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/commonannotations.go deleted file mode 100644 index 97c1d6b22..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/commonannotations.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package builtinpluginconsts - -const commonAnnotationFieldSpecs = ` -commonAnnotations: -- path: metadata/annotations - create: true - -- path: spec/template/metadata/annotations - create: true - version: v1 - kind: ReplicationController - -- path: spec/template/metadata/annotations - create: true - kind: Deployment - -- path: spec/template/metadata/annotations - create: true - kind: ReplicaSet - -- path: spec/template/metadata/annotations - create: true - kind: DaemonSet - -- path: spec/template/metadata/annotations - create: true - kind: StatefulSet - -- path: spec/template/metadata/annotations - create: true - group: batch - kind: Job - -- path: spec/jobTemplate/metadata/annotations - create: true - group: batch - kind: CronJob - -- path: spec/jobTemplate/spec/template/metadata/annotations - create: true - group: batch - kind: CronJob - -` diff --git a/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/commonlabels.go b/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/commonlabels.go deleted file mode 100644 index 7775a544f..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/commonlabels.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package builtinpluginconsts - -const commonLabelFieldSpecs = ` -commonLabels: -- path: metadata/labels - create: true - -- path: spec/selector - create: true - version: v1 - kind: Service - -- path: spec/selector - create: true - version: v1 - kind: ReplicationController - -- path: spec/template/metadata/labels - create: true - version: v1 - kind: ReplicationController - -- path: spec/selector/matchLabels - create: true - kind: Deployment - -- path: spec/template/metadata/labels - create: true - kind: Deployment - -- path: spec/template/spec/affinity/podAffinity/preferredDuringSchedulingIgnoredDuringExecution/podAffinityTerm/labelSelector/matchLabels - create: false - group: apps - kind: Deployment - -- path: spec/template/spec/affinity/podAffinity/requiredDuringSchedulingIgnoredDuringExecution/labelSelector/matchLabels - create: false - group: apps - kind: Deployment - -- path: spec/template/spec/affinity/podAntiAffinity/preferredDuringSchedulingIgnoredDuringExecution/podAffinityTerm/labelSelector/matchLabels - create: false - group: apps - kind: Deployment - -- path: spec/template/spec/affinity/podAntiAffinity/requiredDuringSchedulingIgnoredDuringExecution/labelSelector/matchLabels - create: false - group: apps - kind: Deployment - -- path: spec/template/spec/topologySpreadConstraints/labelSelector/matchLabels - create: false - group: apps - kind: Deployment - -- path: spec/selector/matchLabels - create: true - kind: ReplicaSet - -- path: spec/template/metadata/labels - create: true - kind: ReplicaSet - -- path: spec/selector/matchLabels - create: true - kind: DaemonSet - -- path: spec/template/metadata/labels - create: true - kind: DaemonSet - -- path: spec/selector/matchLabels - create: true - group: apps - kind: StatefulSet - -- path: spec/template/metadata/labels - create: true - group: apps - kind: StatefulSet - -- path: spec/template/spec/affinity/podAffinity/preferredDuringSchedulingIgnoredDuringExecution/podAffinityTerm/labelSelector/matchLabels - create: false - group: apps - kind: StatefulSet - -- path: spec/template/spec/affinity/podAffinity/requiredDuringSchedulingIgnoredDuringExecution/labelSelector/matchLabels - create: false - group: apps - kind: StatefulSet - -- path: 
spec/template/spec/affinity/podAntiAffinity/preferredDuringSchedulingIgnoredDuringExecution/podAffinityTerm/labelSelector/matchLabels - create: false - group: apps - kind: StatefulSet - -- path: spec/template/spec/affinity/podAntiAffinity/requiredDuringSchedulingIgnoredDuringExecution/labelSelector/matchLabels - create: false - group: apps - kind: StatefulSet - -- path: spec/template/spec/topologySpreadConstraints/labelSelector/matchLabels - create: false - group: apps - kind: StatefulSet - -- path: spec/volumeClaimTemplates[]/metadata/labels - create: true - group: apps - kind: StatefulSet - -- path: spec/selector/matchLabels - create: false - group: batch - kind: Job - -- path: spec/template/metadata/labels - create: true - group: batch - kind: Job - -- path: spec/jobTemplate/spec/selector/matchLabels - create: false - group: batch - kind: CronJob - -- path: spec/jobTemplate/metadata/labels - create: true - group: batch - kind: CronJob - -- path: spec/jobTemplate/spec/template/metadata/labels - create: true - group: batch - kind: CronJob - -- path: spec/selector/matchLabels - create: false - group: policy - kind: PodDisruptionBudget - -- path: spec/podSelector/matchLabels - create: false - group: networking.k8s.io - kind: NetworkPolicy - -- path: spec/ingress/from/podSelector/matchLabels - create: false - group: networking.k8s.io - kind: NetworkPolicy - -- path: spec/egress/to/podSelector/matchLabels - create: false - group: networking.k8s.io - kind: NetworkPolicy -` diff --git a/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/defaultconfig.go b/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/defaultconfig.go deleted file mode 100644 index 29673d76a..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/defaultconfig.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package builtinpluginconsts - -import ( - "bytes" -) - -// GetDefaultFieldSpecs returns default fieldSpecs. -func GetDefaultFieldSpecs() []byte { - configData := [][]byte{ - []byte(namePrefixFieldSpecs), - []byte(nameSuffixFieldSpecs), - []byte(commonLabelFieldSpecs), - []byte(commonAnnotationFieldSpecs), - []byte(namespaceFieldSpecs), - []byte(varReferenceFieldSpecs), - []byte(nameReferenceFieldSpecs), - []byte(imagesFieldSpecs), - []byte(replicasFieldSpecs), - } - return bytes.Join(configData, []byte("\n")) -} - -// GetDefaultFieldSpecsAsMap returns default fieldSpecs -// as a string->string map. -func GetDefaultFieldSpecsAsMap() map[string]string { - result := make(map[string]string) - result["nameprefix"] = namePrefixFieldSpecs - result["namesuffix"] = nameSuffixFieldSpecs - result["commonlabels"] = commonLabelFieldSpecs - result["commonannotations"] = commonAnnotationFieldSpecs - result["namespace"] = namespaceFieldSpecs - result["varreference"] = varReferenceFieldSpecs - result["namereference"] = nameReferenceFieldSpecs - result["images"] = imagesFieldSpecs - result["replicas"] = replicasFieldSpecs - return result -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/doc.go b/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/doc.go deleted file mode 100644 index 4b7b5faac..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package builtinpluginconsts provides builtin plugin -// configuration data. 
Builtin plugins can also be -// configured individually with plugin config files, -// in which case the constants in this package are ignored. -package builtinpluginconsts diff --git a/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/images.go b/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/images.go deleted file mode 100644 index b8d8bf1e3..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/images.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package builtinpluginconsts - -const ( - imagesFieldSpecs = ` -images: -- path: spec/containers[]/image - create: true -- path: spec/initContainers[]/image - create: true -- path: spec/template/spec/containers[]/image - create: true -- path: spec/template/spec/initContainers[]/image - create: true -` -) diff --git a/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/nameprefix.go b/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/nameprefix.go deleted file mode 100644 index 59a25a61f..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/nameprefix.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package builtinpluginconsts - -const ( - namePrefixFieldSpecs = ` -namePrefix: -- path: metadata/name -` -) diff --git a/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namereference.go b/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namereference.go deleted file mode 100644 index 658b0c34b..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namereference.go +++ /dev/null @@ -1,427 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package builtinpluginconsts - -// LINT.IfChange -const ( - nameReferenceFieldSpecs = ` -nameReference: -- kind: Deployment - fieldSpecs: - - path: spec/scaleTargetRef/name - kind: HorizontalPodAutoscaler - -- kind: ReplicationController - fieldSpecs: - - path: spec/scaleTargetRef/name - kind: HorizontalPodAutoscaler - -- kind: ReplicaSet - fieldSpecs: - - path: spec/scaleTargetRef/name - kind: HorizontalPodAutoscaler - -- kind: StatefulSet - fieldSpecs: - - path: spec/scaleTargetRef/name - kind: HorizontalPodAutoscaler - -- kind: ConfigMap - version: v1 - fieldSpecs: - - path: spec/volumes/configMap/name - version: v1 - kind: Pod - - path: spec/containers/env/valueFrom/configMapKeyRef/name - version: v1 - kind: Pod - - path: spec/initContainers/env/valueFrom/configMapKeyRef/name - version: v1 - kind: Pod - - path: spec/containers/envFrom/configMapRef/name - version: v1 - kind: Pod - - path: spec/initContainers/envFrom/configMapRef/name - version: v1 - kind: Pod - - path: spec/volumes/projected/sources/configMap/name - version: v1 - kind: Pod - - path: template/spec/volumes/configMap/name - kind: PodTemplate - - path: template/spec/containers/env/valueFrom/configMapKeyRef/name - kind: PodTemplate - - path: template/spec/initContainers/env/valueFrom/configMapKeyRef/name - kind: PodTemplate - - path: template/spec/containers/envFrom/configMapRef/name - kind: PodTemplate - - path: template/spec/initContainers/envFrom/configMapRef/name - kind: PodTemplate - - path: template/spec/volumes/projected/sources/configMap/name - kind: PodTemplate - - path: spec/template/spec/volumes/configMap/name - kind: Deployment - - path: spec/template/spec/containers/env/valueFrom/configMapKeyRef/name - kind: Deployment - - path: spec/template/spec/initContainers/env/valueFrom/configMapKeyRef/name - kind: Deployment - - path: spec/template/spec/containers/envFrom/configMapRef/name - kind: Deployment - - path: spec/template/spec/initContainers/envFrom/configMapRef/name - kind: Deployment - - path: spec/template/spec/volumes/projected/sources/configMap/name - kind: Deployment - - path: spec/template/spec/volumes/configMap/name - kind: ReplicaSet - - path: spec/template/spec/containers/env/valueFrom/configMapKeyRef/name - kind: ReplicaSet - - path: spec/template/spec/initContainers/env/valueFrom/configMapKeyRef/name - kind: ReplicaSet - - path: spec/template/spec/containers/envFrom/configMapRef/name - kind: ReplicaSet - - path: spec/template/spec/initContainers/envFrom/configMapRef/name - kind: ReplicaSet - - path: spec/template/spec/volumes/projected/sources/configMap/name - kind: ReplicaSet - - path: spec/template/spec/volumes/configMap/name - kind: DaemonSet - - path: spec/template/spec/containers/env/valueFrom/configMapKeyRef/name - kind: DaemonSet - - path: spec/template/spec/initContainers/env/valueFrom/configMapKeyRef/name - kind: DaemonSet - - path: spec/template/spec/containers/envFrom/configMapRef/name - kind: DaemonSet - - path: spec/template/spec/initContainers/envFrom/configMapRef/name - kind: DaemonSet - - path: spec/template/spec/volumes/projected/sources/configMap/name - kind: DaemonSet - - path: spec/template/spec/volumes/configMap/name - kind: StatefulSet - - path: spec/template/spec/containers/env/valueFrom/configMapKeyRef/name - kind: StatefulSet - - path: spec/template/spec/initContainers/env/valueFrom/configMapKeyRef/name - kind: StatefulSet - - path: spec/template/spec/containers/envFrom/configMapRef/name - kind: StatefulSet - - path: 
spec/template/spec/initContainers/envFrom/configMapRef/name - kind: StatefulSet - - path: spec/template/spec/volumes/projected/sources/configMap/name - kind: StatefulSet - - path: spec/template/spec/volumes/configMap/name - kind: Job - - path: spec/template/spec/containers/env/valueFrom/configMapKeyRef/name - kind: Job - - path: spec/template/spec/initContainers/env/valueFrom/configMapKeyRef/name - kind: Job - - path: spec/template/spec/containers/envFrom/configMapRef/name - kind: Job - - path: spec/template/spec/initContainers/envFrom/configMapRef/name - kind: Job - - path: spec/template/spec/volumes/projected/sources/configMap/name - kind: Job - - path: spec/jobTemplate/spec/template/spec/volumes/configMap/name - kind: CronJob - - path: spec/jobTemplate/spec/template/spec/volumes/projected/sources/configMap/name - kind: CronJob - - path: spec/jobTemplate/spec/template/spec/containers/env/valueFrom/configMapKeyRef/name - kind: CronJob - - path: spec/jobTemplate/spec/template/spec/initContainers/env/valueFrom/configMapKeyRef/name - kind: CronJob - - path: spec/jobTemplate/spec/template/spec/containers/envFrom/configMapRef/name - kind: CronJob - - path: spec/jobTemplate/spec/template/spec/initContainers/envFrom/configMapRef/name - kind: CronJob - - path: spec/configSource/configMap - kind: Node - - path: rules/resourceNames - kind: Role - - path: rules/resourceNames - kind: ClusterRole - - path: metadata/annotations/nginx.ingress.kubernetes.io\/fastcgi-params-configmap - kind: Ingress - -- kind: Secret - version: v1 - fieldSpecs: - - path: spec/volumes/secret/secretName - version: v1 - kind: Pod - - path: spec/containers/env/valueFrom/secretKeyRef/name - version: v1 - kind: Pod - - path: spec/initContainers/env/valueFrom/secretKeyRef/name - version: v1 - kind: Pod - - path: spec/containers/envFrom/secretRef/name - version: v1 - kind: Pod - - path: spec/initContainers/envFrom/secretRef/name - version: v1 - kind: Pod - - path: spec/imagePullSecrets/name - version: v1 - kind: Pod - - path: spec/volumes/projected/sources/secret/name - version: v1 - kind: Pod - - path: template/spec/volumes/secret/secretName - kind: PodTemplate - - path: template/spec/containers/env/valueFrom/secretKeyRef/name - kind: PodTemplate - - path: template/spec/initContainers/env/valueFrom/secretKeyRef/name - kind: PodTemplate - - path: template/spec/containers/envFrom/secretRef/name - kind: PodTemplate - - path: template/spec/initContainers/envFrom/secretRef/name - kind: PodTemplate - - path: template/spec/imagePullSecrets/name - kind: PodTemplate - - path: template/spec/volumes/projected/sources/secret/name - kind: PodTemplate - - path: spec/template/spec/volumes/secret/secretName - kind: Deployment - - path: spec/template/spec/containers/env/valueFrom/secretKeyRef/name - kind: Deployment - - path: spec/template/spec/initContainers/env/valueFrom/secretKeyRef/name - kind: Deployment - - path: spec/template/spec/containers/envFrom/secretRef/name - kind: Deployment - - path: spec/template/spec/initContainers/envFrom/secretRef/name - kind: Deployment - - path: spec/template/spec/imagePullSecrets/name - kind: Deployment - - path: spec/template/spec/volumes/projected/sources/secret/name - kind: Deployment - - path: spec/template/spec/volumes/secret/secretName - kind: ReplicaSet - - path: spec/template/spec/containers/env/valueFrom/secretKeyRef/name - kind: ReplicaSet - - path: spec/template/spec/initContainers/env/valueFrom/secretKeyRef/name - kind: ReplicaSet - - path: spec/template/spec/containers/envFrom/secretRef/name - 
kind: ReplicaSet - - path: spec/template/spec/initContainers/envFrom/secretRef/name - kind: ReplicaSet - - path: spec/template/spec/imagePullSecrets/name - kind: ReplicaSet - - path: spec/template/spec/volumes/projected/sources/secret/name - kind: ReplicaSet - - path: spec/template/spec/volumes/secret/secretName - kind: DaemonSet - - path: spec/template/spec/containers/env/valueFrom/secretKeyRef/name - kind: DaemonSet - - path: spec/template/spec/initContainers/env/valueFrom/secretKeyRef/name - kind: DaemonSet - - path: spec/template/spec/containers/envFrom/secretRef/name - kind: DaemonSet - - path: spec/template/spec/initContainers/envFrom/secretRef/name - kind: DaemonSet - - path: spec/template/spec/imagePullSecrets/name - kind: DaemonSet - - path: spec/template/spec/volumes/projected/sources/secret/name - kind: DaemonSet - - path: spec/template/spec/volumes/secret/secretName - kind: StatefulSet - - path: spec/template/spec/containers/env/valueFrom/secretKeyRef/name - kind: StatefulSet - - path: spec/template/spec/initContainers/env/valueFrom/secretKeyRef/name - kind: StatefulSet - - path: spec/template/spec/containers/envFrom/secretRef/name - kind: StatefulSet - - path: spec/template/spec/initContainers/envFrom/secretRef/name - kind: StatefulSet - - path: spec/template/spec/imagePullSecrets/name - kind: StatefulSet - - path: spec/template/spec/volumes/projected/sources/secret/name - kind: StatefulSet - - path: spec/template/spec/volumes/secret/secretName - kind: Job - - path: spec/template/spec/containers/env/valueFrom/secretKeyRef/name - kind: Job - - path: spec/template/spec/initContainers/env/valueFrom/secretKeyRef/name - kind: Job - - path: spec/template/spec/containers/envFrom/secretRef/name - kind: Job - - path: spec/template/spec/initContainers/envFrom/secretRef/name - kind: Job - - path: spec/template/spec/imagePullSecrets/name - kind: Job - - path: spec/template/spec/volumes/projected/sources/secret/name - kind: Job - - path: spec/jobTemplate/spec/template/spec/volumes/secret/secretName - kind: CronJob - - path: spec/jobTemplate/spec/template/spec/volumes/projected/sources/secret/name - kind: CronJob - - path: spec/jobTemplate/spec/template/spec/containers/env/valueFrom/secretKeyRef/name - kind: CronJob - - path: spec/jobTemplate/spec/template/spec/initContainers/env/valueFrom/secretKeyRef/name - kind: CronJob - - path: spec/jobTemplate/spec/template/spec/containers/envFrom/secretRef/name - kind: CronJob - - path: spec/jobTemplate/spec/template/spec/initContainers/envFrom/secretRef/name - kind: CronJob - - path: spec/jobTemplate/spec/template/spec/imagePullSecrets/name - kind: CronJob - - path: spec/tls/secretName - kind: Ingress - - path: metadata/annotations/ingress.kubernetes.io\/auth-secret - kind: Ingress - - path: metadata/annotations/nginx.ingress.kubernetes.io\/auth-secret - kind: Ingress - - path: metadata/annotations/nginx.ingress.kubernetes.io\/auth-tls-secret - kind: Ingress - - path: spec/tls/secretName - kind: Ingress - - path: imagePullSecrets/name - kind: ServiceAccount - - path: parameters/secretName - kind: StorageClass - - path: parameters/adminSecretName - kind: StorageClass - - path: parameters/userSecretName - kind: StorageClass - - path: parameters/secretRef - kind: StorageClass - - path: rules/resourceNames - kind: Role - - path: rules/resourceNames - kind: ClusterRole - - path: spec/template/spec/containers/env/valueFrom/secretKeyRef/name - kind: Service - group: serving.knative.dev - version: v1 - - path: spec/azureFile/secretName - kind: 
PersistentVolume - -- kind: Service - version: v1 - fieldSpecs: - - path: spec/serviceName - kind: StatefulSet - group: apps - - path: spec/rules/http/paths/backend/serviceName - kind: Ingress - - path: spec/backend/serviceName - kind: Ingress - - path: spec/rules/http/paths/backend/service/name - kind: Ingress - - path: spec/defaultBackend/service/name - kind: Ingress - - path: spec/service/name - kind: APIService - group: apiregistration.k8s.io - - path: webhooks/clientConfig/service - kind: ValidatingWebhookConfiguration - group: admissionregistration.k8s.io - - path: webhooks/clientConfig/service - kind: MutatingWebhookConfiguration - group: admissionregistration.k8s.io - -- kind: Role - group: rbac.authorization.k8s.io - fieldSpecs: - - path: roleRef/name - kind: RoleBinding - group: rbac.authorization.k8s.io - -- kind: ClusterRole - group: rbac.authorization.k8s.io - fieldSpecs: - - path: roleRef/name - kind: RoleBinding - group: rbac.authorization.k8s.io - - path: roleRef/name - kind: ClusterRoleBinding - group: rbac.authorization.k8s.io - -- kind: ServiceAccount - version: v1 - fieldSpecs: - - path: subjects - kind: RoleBinding - group: rbac.authorization.k8s.io - - path: subjects - kind: ClusterRoleBinding - group: rbac.authorization.k8s.io - - path: spec/serviceAccountName - kind: Pod - - path: spec/template/spec/serviceAccountName - kind: StatefulSet - - path: spec/template/spec/serviceAccountName - kind: Deployment - - path: spec/template/spec/serviceAccountName - kind: ReplicationController - - path: spec/jobTemplate/spec/template/spec/serviceAccountName - kind: CronJob - - path: spec/template/spec/serviceAccountName - kind: Job - - path: spec/template/spec/serviceAccountName - kind: DaemonSet - -- kind: PersistentVolumeClaim - version: v1 - fieldSpecs: - - path: spec/volumes/persistentVolumeClaim/claimName - kind: Pod - - path: spec/template/spec/volumes/persistentVolumeClaim/claimName - kind: StatefulSet - - path: spec/template/spec/volumes/persistentVolumeClaim/claimName - kind: Deployment - - path: spec/template/spec/volumes/persistentVolumeClaim/claimName - kind: ReplicationController - - path: spec/jobTemplate/spec/template/spec/volumes/persistentVolumeClaim/claimName - kind: CronJob - - path: spec/template/spec/volumes/persistentVolumeClaim/claimName - kind: Job - - path: spec/template/spec/volumes/persistentVolumeClaim/claimName - kind: DaemonSet - -- kind: PersistentVolume - version: v1 - fieldSpecs: - - path: spec/volumeName - kind: PersistentVolumeClaim - - path: rules/resourceNames - kind: ClusterRole - -- kind: StorageClass - version: v1 - group: storage.k8s.io - fieldSpecs: - - path: spec/storageClassName - kind: PersistentVolume - - path: spec/storageClassName - kind: PersistentVolumeClaim - - path: spec/volumeClaimTemplates/spec/storageClassName - kind: StatefulSet - -- kind: PriorityClass - version: v1 - group: scheduling.k8s.io - fieldSpecs: - - path: spec/priorityClassName - kind: Pod - - path: spec/template/spec/priorityClassName - kind: StatefulSet - - path: spec/template/spec/priorityClassName - kind: Deployment - - path: spec/template/spec/priorityClassName - kind: ReplicationController - - path: spec/jobTemplate/spec/template/spec/priorityClassName - kind: CronJob - - path: spec/template/spec/priorityClassName - kind: Job - - path: spec/template/spec/priorityClassName - kind: DaemonSet - -- kind: IngressClass - version: v1 - group: networking.k8s.io/v1 - fieldSpecs: - - path: spec/ingressClassName - kind: Ingress -` -) - -// 
LINT.ThenChange(/examples/transformerconfigs/README.md) diff --git a/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namespace.go b/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namespace.go deleted file mode 100644 index 35774a7db..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namespace.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package builtinpluginconsts - -const ( - namespaceFieldSpecs = ` -namespace: -- path: metadata/name - kind: Namespace - create: true -- path: spec/service/namespace - group: apiregistration.k8s.io - kind: APIService - create: true -- path: spec/conversion/webhook/clientConfig/service/namespace - group: apiextensions.k8s.io - kind: CustomResourceDefinition -` -) diff --git a/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namesuffix.go b/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namesuffix.go deleted file mode 100644 index 11592bd2b..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/namesuffix.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package builtinpluginconsts - -const ( - nameSuffixFieldSpecs = ` -nameSuffix: -- path: metadata/name -` -) diff --git a/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/replicas.go b/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/replicas.go deleted file mode 100644 index 76549c21f..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/replicas.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package builtinpluginconsts - -const replicasFieldSpecs = ` -replicas: -- path: spec/replicas - create: true - kind: Deployment - -- path: spec/replicas - create: true - kind: ReplicationController - -- path: spec/replicas - create: true - kind: ReplicaSet - -- path: spec/replicas - create: true - kind: StatefulSet -` diff --git a/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/varreference.go b/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/varreference.go deleted file mode 100644 index f4011d825..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts/varreference.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package builtinpluginconsts - -const ( - varReferenceFieldSpecs = ` -varReference: -- path: spec/jobTemplate/spec/template/spec/containers/args - kind: CronJob - -- path: spec/jobTemplate/spec/template/spec/containers/command - kind: CronJob - -- path: spec/jobTemplate/spec/template/spec/containers/env/value - kind: CronJob - -- path: spec/jobTemplate/spec/template/spec/containers/volumeMounts/mountPath - kind: CronJob - -- path: spec/jobTemplate/spec/template/spec/initContainers/args - kind: CronJob - -- path: spec/jobTemplate/spec/template/spec/initContainers/command - kind: CronJob - -- path: spec/jobTemplate/spec/template/spec/initContainers/env/value - kind: CronJob - -- path: spec/jobTemplate/spec/template/spec/initContainers/volumeMounts/mountPath - kind: CronJob - -- path: spec/jobTemplate/spec/template/volumes/nfs/server - kind: CronJob - -- path: spec/template/spec/containers/args - kind: DaemonSet - -- path: spec/template/spec/containers/command - kind: DaemonSet - -- path: spec/template/spec/containers/env/value - kind: DaemonSet - -- path: spec/template/spec/containers/volumeMounts/mountPath - kind: DaemonSet - -- path: spec/template/spec/initContainers/args - kind: DaemonSet - -- path: spec/template/spec/initContainers/command - kind: DaemonSet - -- path: spec/template/spec/initContainers/env/value - kind: DaemonSet - -- path: spec/template/spec/initContainers/volumeMounts/mountPath - kind: DaemonSet - -- path: spec/template/spec/volumes/nfs/server - kind: DaemonSet - -- path: spec/template/spec/containers/args - kind: Deployment - -- path: spec/template/spec/containers/command - kind: Deployment - -- path: spec/template/spec/containers/env/value - kind: Deployment - -- path: spec/template/spec/containers/volumeMounts/mountPath - kind: Deployment - -- path: spec/template/spec/initContainers/args - kind: Deployment - -- path: spec/template/spec/initContainers/command - kind: Deployment - -- path: spec/template/spec/initContainers/env/value - kind: Deployment - -- path: spec/template/spec/initContainers/volumeMounts/mountPath - kind: Deployment - -- path: spec/template/spec/volumes/nfs/server - kind: Deployment - -- path: spec/template/metadata/annotations - kind: Deployment - -- path: spec/rules/host - kind: Ingress - -- path: spec/tls/hosts - kind: Ingress - -- path: spec/tls/secretName - kind: Ingress - -- path: spec/template/spec/containers/args - kind: Job - -- path: spec/template/spec/containers/command - kind: Job - -- path: spec/template/spec/containers/env/value - kind: Job - -- path: spec/template/spec/containers/volumeMounts/mountPath - kind: Job - -- path: spec/template/spec/initContainers/args - kind: Job - -- path: spec/template/spec/initContainers/command - kind: Job - -- path: spec/template/spec/initContainers/env/value - kind: Job - -- path: spec/template/spec/initContainers/volumeMounts/mountPath - kind: Job - -- path: spec/template/spec/volumes/nfs/server - kind: Job - -- path: spec/containers/args - kind: Pod - -- path: spec/containers/command - kind: Pod - -- path: spec/containers/env/value - kind: Pod - -- path: spec/containers/volumeMounts/mountPath - kind: Pod - -- path: spec/initContainers/args - kind: Pod - -- path: spec/initContainers/command - kind: Pod - -- path: spec/initContainers/env/value - kind: Pod - -- path: spec/initContainers/volumeMounts/mountPath - kind: Pod - -- path: spec/volumes/nfs/server - kind: Pod - -- path: spec/template/spec/containers/args - kind: ReplicaSet - -- path: 
spec/template/spec/containers/command - kind: ReplicaSet - -- path: spec/template/spec/containers/env/value - kind: ReplicaSet - -- path: spec/template/spec/containers/volumeMounts/mountPath - kind: ReplicaSet - -- path: spec/template/spec/initContainers/args - kind: ReplicaSet - -- path: spec/template/spec/initContainers/command - kind: ReplicaSet - -- path: spec/template/spec/initContainers/env/value - kind: ReplicaSet - -- path: spec/template/spec/initContainers/volumeMounts/mountPath - kind: ReplicaSet - -- path: spec/template/spec/volumes/nfs/server - kind: ReplicaSet - -- path: spec/ports/port - kind: Service - -- path: spec/ports/targetPort - kind: Service - -- path: spec/template/spec/containers/args - kind: StatefulSet - -- path: spec/template/spec/containers/command - kind: StatefulSet - -- path: spec/template/spec/containers/env/value - kind: StatefulSet - -- path: spec/template/spec/containers/volumeMounts/mountPath - kind: StatefulSet - -- path: spec/template/spec/initContainers/args - kind: StatefulSet - -- path: spec/template/spec/initContainers/command - kind: StatefulSet - -- path: spec/template/spec/initContainers/env/value - kind: StatefulSet - -- path: spec/template/spec/initContainers/volumeMounts/mountPath - kind: StatefulSet - -- path: spec/volumeClaimTemplates/spec/nfs/server - kind: StatefulSet - -- path: spec/nfs/server - kind: PersistentVolume - -- path: metadata/labels - -- path: metadata/annotations -` -) diff --git a/src/vendor/sigs.k8s.io/kustomize/api/konfig/doc.go b/src/vendor/sigs.k8s.io/kustomize/api/konfig/doc.go deleted file mode 100644 index 8c5f8f2cd..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/konfig/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package konfig provides configuration methods and constants -// for the kustomize API, e.g. the set of file names to look for -// to identify a kustomization root. -package konfig diff --git a/src/vendor/sigs.k8s.io/kustomize/api/konfig/general.go b/src/vendor/sigs.k8s.io/kustomize/api/konfig/general.go deleted file mode 100644 index 712bfe789..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/konfig/general.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package konfig - -// RecognizedKustomizationFileNames is a list of file names -// that kustomize recognizes. -// To avoid ambiguity, a kustomization directory may not -// contain more than one match to this list. -func RecognizedKustomizationFileNames() []string { - return []string{ - "kustomization.yaml", - "kustomization.yml", - "Kustomization", - } -} - -func DefaultKustomizationFileName() string { - return RecognizedKustomizationFileNames()[0] -} - -const ( - // An environment variable to consult for kustomization - // configuration data. See: - // https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html - XdgConfigHomeEnv = "XDG_CONFIG_HOME" - - // Use this when XdgConfigHomeEnv not defined. - XdgConfigHomeEnvDefault = ".config" - - // A program name, for use in help, finding the XDG_CONFIG_DIR, etc. - ProgramName = "kustomize" - - // ConfigAnnoDomain is internal configuration-related annotation namespace. - // See https://github.com/kubernetes-sigs/kustomize/blob/master/cmd/config/docs/api-conventions/functions-spec.md. - ConfigAnnoDomain = "internal.config.kubernetes.io" - - // If a resource has this annotation, kustomize will drop it. 
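The builtinpluginconsts field specs removed above (commonAnnotations, commonLabels, nameReference, varReference, replicas, and so on) are consumed by kustomize as a name-to-YAML map via GetDefaultFieldSpecsAsMap in defaultconfig.go. A minimal, hedged sketch of how a caller could inspect that map; the printed layout is illustrative only and not part of this patch:

    package main

    import (
        "fmt"
        "sort"

        "sigs.k8s.io/kustomize/api/konfig/builtinpluginconsts"
    )

    func main() {
        // GetDefaultFieldSpecsAsMap returns one YAML document of fieldSpecs per
        // builtin transformer (commonlabels, commonannotations, images, ...).
        specs := builtinpluginconsts.GetDefaultFieldSpecsAsMap()
        names := make([]string, 0, len(specs))
        for name := range specs {
            names = append(names, name)
        }
        sort.Strings(names)
        for _, name := range names {
            fmt.Printf("%-18s %5d bytes of fieldSpec YAML\n", name, len(specs[name]))
        }
    }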
- IgnoredByKustomizeAnnotation = "config.kubernetes.io/local-config" - - // Label key that indicates the resources are built from Kustomize - ManagedbyLabelKey = "app.kubernetes.io/managed-by" - - // An environment variable to turn on/off adding the ManagedByLabelKey - EnableManagedbyLabelEnv = "KUSTOMIZE_ENABLE_MANAGEDBY_LABEL" - - // Label key that indicates the resources are validated by a validator - ValidatedByLabelKey = "validated-by" -) diff --git a/src/vendor/sigs.k8s.io/kustomize/api/konfig/plugins.go b/src/vendor/sigs.k8s.io/kustomize/api/konfig/plugins.go deleted file mode 100644 index 30bd3b6e3..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/konfig/plugins.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package konfig - -import ( - "os" - "path/filepath" - "runtime" - - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/filesys" -) - -const ( - // Symbol that must be used inside Go plugins. - PluginSymbol = "KustomizePlugin" - - // Name of environment variable used to set AbsPluginHome. - // See that variable for an explanation. - KustomizePluginHomeEnv = "KUSTOMIZE_PLUGIN_HOME" - - // Relative path below XDG_CONFIG_HOME/kustomize to find plugins. - // e.g. AbsPluginHome = XDG_CONFIG_HOME/kustomize/plugin - RelPluginHome = "plugin" - - // Location of builtin plugins below AbsPluginHome. - BuiltinPluginPackage = "builtin" - - // The value of kubernetes ApiVersion to use in configuration - // files for builtin plugins. - // The value for non-builtins can be anything. - BuiltinPluginApiVersion = BuiltinPluginPackage - - // Domain from which kustomize code is imported, for locating - // plugin source code under $GOPATH when GOPATH is defined. - DomainName = "sigs.k8s.io" - - // Injected into plugin paths when plugins are disabled. - // Provides a clue in flows that shouldn't happen. - NoPluginHomeSentinal = "/No/non-builtin/plugins!" -) - -type NotedFunc struct { - Note string - F func() string -} - -// DefaultAbsPluginHome returns the absolute path in the given file -// system to first directory that looks like a good candidate for -// the home of kustomize plugins. -func DefaultAbsPluginHome(fSys filesys.FileSystem) (string, error) { - return FirstDirThatExistsElseError( - "plugin root", fSys, []NotedFunc{ - { - Note: "homed in $" + KustomizePluginHomeEnv, - F: func() string { - return os.Getenv(KustomizePluginHomeEnv) - }, - }, - { - Note: "homed in $" + XdgConfigHomeEnv, - F: func() string { - if root := os.Getenv(XdgConfigHomeEnv); root != "" { - return filepath.Join(root, ProgramName, RelPluginHome) - } - // do not look in "kustomize/plugin" if XdgConfigHomeEnv is unset - return "" - }, - }, - { - Note: "homed in default value of $" + XdgConfigHomeEnv, - F: func() string { - return filepath.Join( - HomeDir(), XdgConfigHomeEnvDefault, - ProgramName, RelPluginHome) - }, - }, - { - Note: "homed in home directory", - F: func() string { - return filepath.Join( - HomeDir(), ProgramName, RelPluginHome) - }, - }, - }) -} - -// FirstDirThatExistsElseError tests different path functions for -// existence, returning the first that works, else error if all fail. 
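DefaultAbsPluginHome above documents a fixed search order for the plugin root. The following standalone sketch (not the vendored code) lists the same candidate directories; the KUSTOMIZE_PLUGIN_HOME and XDG_CONFIG_HOME names come from the constants shown above, everything else is illustrative:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // candidatePluginHomes mirrors the documented lookup order:
    // $KUSTOMIZE_PLUGIN_HOME, then $XDG_CONFIG_HOME/kustomize/plugin, then
    // $HOME/.config/kustomize/plugin, then $HOME/kustomize/plugin.
    func candidatePluginHomes(home string) []string {
        var dirs []string
        if v := os.Getenv("KUSTOMIZE_PLUGIN_HOME"); v != "" {
            dirs = append(dirs, v)
        }
        if v := os.Getenv("XDG_CONFIG_HOME"); v != "" {
            dirs = append(dirs, filepath.Join(v, "kustomize", "plugin"))
        }
        return append(dirs,
            filepath.Join(home, ".config", "kustomize", "plugin"),
            filepath.Join(home, "kustomize", "plugin"))
    }

    func main() {
        home, err := os.UserHomeDir()
        if err != nil {
            home = "~"
        }
        for i, dir := range candidatePluginHomes(home) {
            fmt.Printf("%d: %s\n", i+1, dir)
        }
    }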
-func FirstDirThatExistsElseError( - what string, - fSys filesys.FileSystem, - pathFuncs []NotedFunc) (string, error) { - var nope []types.Pair - for _, dt := range pathFuncs { - if dir := dt.F(); dir != "" { - if fSys.Exists(dir) { - return dir, nil - } - nope = append(nope, types.Pair{Key: dt.Note, Value: dir}) - } else { - nope = append(nope, types.Pair{Key: dt.Note, Value: ""}) - } - } - return "", types.NewErrUnableToFind(what, nope) -} - -func HomeDir() string { - home := os.Getenv(homeEnv()) - if len(home) > 0 { - return home - } - return "~" -} - -func homeEnv() string { - if runtime.GOOS == "windows" { - return "USERPROFILE" - } - return "HOME" -} - -func CurrentWorkingDir() string { - // Try for full path first to be explicit. - pwd := os.Getenv(pwdEnv()) - if len(pwd) > 0 { - return pwd - } - return filesys.SelfDir -} - -func pwdEnv() string { - if runtime.GOOS == "windows" { - return "CD" - } - return "PWD" -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/krusty/doc.go b/src/vendor/sigs.k8s.io/kustomize/api/krusty/doc.go deleted file mode 100644 index bf516ca94..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/krusty/doc.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package krusty is intended as the entry point package -// for those seeking to add kustomize ability to other -// programs. -// -// To use, follow the example of the kustomize CLI's 'build' -// command. Also, see the high level tests in this package, -// which serve a dual purpose as examples. -package krusty diff --git a/src/vendor/sigs.k8s.io/kustomize/api/krusty/kustomizer.go b/src/vendor/sigs.k8s.io/kustomize/api/krusty/kustomizer.go deleted file mode 100644 index 5f1f27ff2..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/krusty/kustomizer.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package krusty - -import ( - "fmt" - - "sigs.k8s.io/kustomize/api/internal/builtins" - pLdr "sigs.k8s.io/kustomize/api/internal/plugins/loader" - "sigs.k8s.io/kustomize/api/internal/target" - "sigs.k8s.io/kustomize/api/internal/utils" - "sigs.k8s.io/kustomize/api/konfig" - fLdr "sigs.k8s.io/kustomize/api/loader" - "sigs.k8s.io/kustomize/api/provenance" - "sigs.k8s.io/kustomize/api/provider" - "sigs.k8s.io/kustomize/api/resmap" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/filesys" - "sigs.k8s.io/kustomize/kyaml/openapi" -) - -// Kustomizer performs kustomizations. -// -// It's meant to behave similarly to the kustomize CLI, and can be -// used instead of performing an exec to a kustomize CLI subprocess. -// To use, load a filesystem with kustomization files (any -// number of overlays and bases), then make a Kustomizer -// injected with the given filesystem, then call Run. -type Kustomizer struct { - options *Options - depProvider *provider.DepProvider -} - -// MakeKustomizer returns an instance of Kustomizer. -func MakeKustomizer(o *Options) *Kustomizer { - return &Kustomizer{ - options: o, - depProvider: provider.NewDepProvider(), - } -} - -// Run performs a kustomization. -// -// It reads given path from the given file system, interprets it as -// a kustomization.yaml file, perform the kustomization it represents, -// and return the resulting resources. -// -// Any files referenced by the kustomization must be present on the -// filesystem. One may call Run any number of times, on any number -// of internal paths (e.g. 
the filesystem may contain multiple overlays, -// and Run can be called on each of them). -func (b *Kustomizer) Run( - fSys filesys.FileSystem, path string) (resmap.ResMap, error) { - resmapFactory := resmap.NewFactory(b.depProvider.GetResourceFactory()) - lr := fLdr.RestrictionNone - if b.options.LoadRestrictions == types.LoadRestrictionsRootOnly { - lr = fLdr.RestrictionRootOnly - } - ldr, err := fLdr.NewLoader(lr, path, fSys) - if err != nil { - return nil, err - } - defer ldr.Cleanup() - kt := target.NewKustTarget( - ldr, - b.depProvider.GetFieldValidator(), - resmapFactory, - // The plugin configs are always located on disk, regardless of the fSys passed in - pLdr.NewLoader(b.options.PluginConfig, resmapFactory, filesys.MakeFsOnDisk()), - ) - err = kt.Load() - if err != nil { - return nil, err - } - var bytes []byte - if openApiPath, exists := kt.Kustomization().OpenAPI["path"]; exists { - bytes, err = ldr.Load(openApiPath) - if err != nil { - return nil, err - } - } - err = openapi.SetSchema(kt.Kustomization().OpenAPI, bytes, true) - if err != nil { - return nil, err - } - var m resmap.ResMap - m, err = kt.MakeCustomizedResMap() - if err != nil { - return nil, err - } - if b.options.DoLegacyResourceSort { - err = builtins.NewLegacyOrderTransformerPlugin().Transform(m) - if err != nil { - return nil, err - } - } - if b.options.AddManagedbyLabel || utils.StringSliceContains(kt.Kustomization().BuildMetadata, types.ManagedByLabelOption) { - t := builtins.LabelTransformerPlugin{ - Labels: map[string]string{ - konfig.ManagedbyLabelKey: fmt.Sprintf("kustomize-%s", provenance.GetProvenance().Semver()), - }, - FieldSpecs: []types.FieldSpec{{ - Path: "metadata/labels", - CreateIfNotPresent: true, - }}, - } - err = t.Transform(m) - if err != nil { - return nil, err - } - } - m.RemoveBuildAnnotations() - if !utils.StringSliceContains(kt.Kustomization().BuildMetadata, types.OriginAnnotations) { - m.RemoveOriginAnnotations() - } - if !utils.StringSliceContains(kt.Kustomization().BuildMetadata, types.TransformerAnnotations) { - m.RemoveTransformerAnnotations() - } - return m, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/krusty/options.go b/src/vendor/sigs.k8s.io/kustomize/api/krusty/options.go deleted file mode 100644 index 438f6c102..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/krusty/options.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package krusty - -import ( - "sigs.k8s.io/kustomize/api/internal/plugins/builtinhelpers" - "sigs.k8s.io/kustomize/api/types" -) - -// Options holds high-level kustomize configuration options, -// e.g. are plugins enabled, should the loader be restricted -// to the kustomization root, etc. -type Options struct { - // When true, sort the resources before emitting them, - // per a particular sort order. When false, don't do the - // sort, and instead respect the depth-first resource input - // order as specified by the kustomization file(s). - DoLegacyResourceSort bool - - // When true, a label - // app.kubernetes.io/managed-by: kustomize- - // is added to all the resources in the build out. - AddManagedbyLabel bool - - // Restrictions on what can be loaded from the file system. - // See type definition. - LoadRestrictions types.LoadRestrictions - - // Create an inventory object for pruning. - DoPrune bool - - // Options related to kustomize plugins. - PluginConfig *types.PluginConfig -} - -// MakeDefaultOptions returns a default instance of Options. 
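Kustomizer.Run above is the programmatic equivalent of `kustomize build`. A hedged usage sketch against an in-memory filesystem; the kustomization and deployment fixtures are invented for illustration, only MakeKustomizer, MakeDefaultOptions and Run are taken from the API shown in this hunk:

    package main

    import (
        "fmt"
        "log"

        "sigs.k8s.io/kustomize/api/krusty"
        "sigs.k8s.io/kustomize/kyaml/filesys"
    )

    func main() {
        // Build a tiny kustomization entirely in memory.
        fSys := filesys.MakeFsInMemory()
        fSys.WriteFile("/app/kustomization.yaml", []byte(
            "namePrefix: demo-\nresources:\n- deployment.yaml\n"))
        fSys.WriteFile("/app/deployment.yaml", []byte(
            "apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: web\n"))

        k := krusty.MakeKustomizer(krusty.MakeDefaultOptions())
        resMap, err := k.Run(fSys, "/app")
        if err != nil {
            log.Fatal(err)
        }
        yml, err := resMap.AsYaml()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Print(string(yml)) // metadata.name comes out as demo-web
    }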
-func MakeDefaultOptions() *Options { - return &Options{ - DoLegacyResourceSort: false, - AddManagedbyLabel: false, - LoadRestrictions: types.LoadRestrictionsRootOnly, - DoPrune: false, - PluginConfig: types.DisabledPluginConfig(), - } -} - -// GetBuiltinPluginNames returns a list of builtin plugin names -func GetBuiltinPluginNames() []string { - var ret []string - for k := range builtinhelpers.GeneratorFactories { - ret = append(ret, k.String()) - } - for k := range builtinhelpers.TransformerFactories { - ret = append(ret, k.String()) - } - return ret -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/kv/kv.go b/src/vendor/sigs.k8s.io/kustomize/api/kv/kv.go deleted file mode 100644 index 303aede86..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/kv/kv.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package kv - -import ( - "bufio" - "bytes" - "fmt" - "os" - "path" - "strings" - "unicode" - "unicode/utf8" - - "github.com/pkg/errors" - "sigs.k8s.io/kustomize/api/ifc" - "sigs.k8s.io/kustomize/api/types" -) - -var utf8bom = []byte{0xEF, 0xBB, 0xBF} - -// loader reads and validates KV pairs. -type loader struct { - // Used to read the filesystem. - ldr ifc.Loader - - // Used to validate various k8s data fields. - validator ifc.Validator -} - -func NewLoader(ldr ifc.Loader, v ifc.Validator) ifc.KvLoader { - return &loader{ldr: ldr, validator: v} -} - -func (kvl *loader) Validator() ifc.Validator { - return kvl.validator -} - -func (kvl *loader) Load( - args types.KvPairSources) (all []types.Pair, err error) { - pairs, err := kvl.keyValuesFromEnvFiles(args.EnvSources) - if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf( - "env source files: %v", - args.EnvSources)) - } - all = append(all, pairs...) - - pairs, err = keyValuesFromLiteralSources(args.LiteralSources) - if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf( - "literal sources %v", args.LiteralSources)) - } - all = append(all, pairs...) - - pairs, err = kvl.keyValuesFromFileSources(args.FileSources) - if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf( - "file sources: %v", args.FileSources)) - } - return append(all, pairs...), nil -} - -func keyValuesFromLiteralSources(sources []string) ([]types.Pair, error) { - var kvs []types.Pair - for _, s := range sources { - k, v, err := parseLiteralSource(s) - if err != nil { - return nil, err - } - kvs = append(kvs, types.Pair{Key: k, Value: v}) - } - return kvs, nil -} - -func (kvl *loader) keyValuesFromFileSources(sources []string) ([]types.Pair, error) { - var kvs []types.Pair - for _, s := range sources { - k, fPath, err := parseFileSource(s) - if err != nil { - return nil, err - } - content, err := kvl.ldr.Load(fPath) - if err != nil { - return nil, err - } - kvs = append(kvs, types.Pair{Key: k, Value: string(content)}) - } - return kvs, nil -} - -func (kvl *loader) keyValuesFromEnvFiles(paths []string) ([]types.Pair, error) { - var kvs []types.Pair - for _, p := range paths { - content, err := kvl.ldr.Load(p) - if err != nil { - return nil, err - } - more, err := kvl.keyValuesFromLines(content) - if err != nil { - return nil, err - } - kvs = append(kvs, more...) - } - return kvs, nil -} - -// keyValuesFromLines parses given content in to a list of key-value pairs. 
-func (kvl *loader) keyValuesFromLines(content []byte) ([]types.Pair, error) { - var kvs []types.Pair - - scanner := bufio.NewScanner(bytes.NewReader(content)) - currentLine := 0 - for scanner.Scan() { - // Process the current line, retrieving a key/value pair if - // possible. - scannedBytes := scanner.Bytes() - kv, err := kvl.keyValuesFromLine(scannedBytes, currentLine) - if err != nil { - return nil, err - } - currentLine++ - - if len(kv.Key) == 0 { - // no key means line was empty or a comment - continue - } - - kvs = append(kvs, kv) - } - return kvs, nil -} - -// KeyValuesFromLine returns a kv with blank key if the line is empty or a comment. -func (kvl *loader) keyValuesFromLine(line []byte, currentLine int) (types.Pair, error) { - kv := types.Pair{} - - if !utf8.Valid(line) { - return kv, fmt.Errorf("line %d has invalid utf8 bytes : %v", line, string(line)) - } - - // We trim UTF8 BOM from the first line of the file but no others - if currentLine == 0 { - line = bytes.TrimPrefix(line, utf8bom) - } - - // trim the line from all leading whitespace first - line = bytes.TrimLeftFunc(line, unicode.IsSpace) - - // If the line is empty or a comment, we return a blank key/value pair. - if len(line) == 0 || line[0] == '#' { - return kv, nil - } - - data := strings.SplitN(string(line), "=", 2) - key := data[0] - if err := kvl.validator.IsEnvVarName(key); err != nil { - return kv, err - } - - if len(data) == 2 { - kv.Value = data[1] - } else { - // No value (no `=` in the line) is a signal to obtain the value - // from the environment. This behaviour was accidentally imported from kubectl code, and - // will be removed in the next major release of Kustomize. - _, _ = fmt.Fprintln(os.Stderr, "WARNING: "+ - "This Kustomization is relying on a bug that loads values from the environment "+ - "when they are omitted from an env file. "+ - "This behaviour will be removed in the next major release of Kustomize.") - kv.Value = os.Getenv(key) - } - kv.Key = key - return kv, nil -} - -// ParseFileSource parses the source given. -// -// Acceptable formats include: -// 1. source-path: the basename will become the key name -// 2. source-name=source-path: the source-name will become the key name and -// source-path is the path to the key file. -// -// Key names cannot include '='. -func parseFileSource(source string) (keyName, filePath string, err error) { - numSeparators := strings.Count(source, "=") - switch { - case numSeparators == 0: - return path.Base(source), source, nil - case numSeparators == 1 && strings.HasPrefix(source, "="): - return "", "", fmt.Errorf("key name for file path %v missing", strings.TrimPrefix(source, "=")) - case numSeparators == 1 && strings.HasSuffix(source, "="): - return "", "", fmt.Errorf("file path for key name %v missing", strings.TrimSuffix(source, "=")) - case numSeparators > 1: - return "", "", errors.New("key names or file paths cannot contain '='") - default: - components := strings.Split(source, "=") - return components[0], components[1], nil - } -} - -// ParseLiteralSource parses the source key=val pair into its component pieces. -// This functionality is distinguished from strings.SplitN(source, "=", 2) since -// it returns an error in the case of empty keys, values, or a missing equals sign. 
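parseFileSource and parseLiteralSource above define the key=value grammar used by the ConfigMap and Secret generators. A standalone sketch that mirrors the documented literal-source rules (a leading '=' or a missing '=' is an error, the value may itself contain '=', and one pair of matching surrounding quotes is stripped); it is illustrative, not the vendored implementation:

    package main

    import (
        "fmt"
        "strings"
    )

    // parseLiteral mirrors the rules documented for parseLiteralSource.
    func parseLiteral(source string) (key, value string, err error) {
        if strings.HasPrefix(source, "=") || !strings.Contains(source, "=") {
            return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source)
        }
        parts := strings.SplitN(source, "=", 2) // split once so values may contain '='
        v := parts[1]
        if len(v) > 1 && v[0] == v[len(v)-1] && (v[0] == '"' || v[0] == '\'') {
            v = v[1 : len(v)-1] // strip one pair of matching surrounding quotes
        }
        return parts[0], v, nil
    }

    func main() {
        for _, s := range []string{"FOO=bar", `URL="https://example.com?a=b"`, "=nokey", "missing"} {
            k, v, err := parseLiteral(s)
            fmt.Printf("%-32q -> key=%q value=%q err=%v\n", s, k, v, err)
        }
    }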
-func parseLiteralSource(source string) (keyName, value string, err error) { - // leading equal is invalid - if strings.Index(source, "=") == 0 { - return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source) - } - // split after the first equal (so values can have the = character) - items := strings.SplitN(source, "=", 2) - if len(items) != 2 { - return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source) - } - return items[0], removeQuotes(items[1]), nil -} - -// removeQuotes removes the surrounding quotes from the provided string only if it is surrounded on both sides -// rather than blindly trimming all quotation marks on either side. -func removeQuotes(str string) string { - if len(str) == 0 || str[0] != str[len(str)-1] { - return str - } - if str[0] == '"' || str[0] == '\'' { - return str[1 : len(str)-1] - } - return str -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/loader/errors.go b/src/vendor/sigs.k8s.io/kustomize/api/loader/errors.go deleted file mode 100644 index 2463debdb..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/loader/errors.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2022 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package loader - -import "sigs.k8s.io/kustomize/kyaml/errors" - -var ( - ErrHTTP = errors.Errorf("HTTP Error") - ErrRtNotDir = errors.Errorf("must build at directory") -) diff --git a/src/vendor/sigs.k8s.io/kustomize/api/loader/fileloader.go b/src/vendor/sigs.k8s.io/kustomize/api/loader/fileloader.go deleted file mode 100644 index 3201e8509..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/loader/fileloader.go +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package loader - -import ( - "fmt" - "io/ioutil" - "log" - "net/http" - "net/url" - "path/filepath" - "strings" - - "sigs.k8s.io/kustomize/api/ifc" - "sigs.k8s.io/kustomize/api/internal/git" - "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/filesys" -) - -// fileLoader is a kustomization's interface to files. -// -// The directory in which a kustomization file sits -// is referred to below as the kustomization's _root_. -// -// An instance of fileLoader has an immutable root, -// and offers a `New` method returning a new loader -// with a new root. -// -// A kustomization file refers to two kinds of files: -// -// * supplemental data paths -// -// `Load` is used to visit these paths. -// -// These paths refer to resources, patches, -// data for ConfigMaps and Secrets, etc. -// -// The loadRestrictor may disallow certain paths -// or classes of paths. -// -// * bases (other kustomizations) -// -// `New` is used to load bases. -// -// A base can be either a remote git repo URL, or -// a directory specified relative to the current -// root. In the former case, the repo is locally -// cloned, and the new loader is rooted on a path -// in that clone. -// -// As loaders create new loaders, a root history -// is established, and used to disallow: -// -// - A base that is a repository that, in turn, -// specifies a base repository seen previously -// in the loading stack (a cycle). -// -// - An overlay depending on a base positioned at -// or above it. I.e. '../foo' is OK, but '.', -// '..', '../..', etc. are disallowed. Allowing -// such a base has no advantages and encourages -// cycles, particularly if some future change -// were to introduce globbing to file -// specifications in the kustomization file. 
-// -// These restrictions assure that kustomizations -// are self-contained and relocatable, and impose -// some safety when relying on remote kustomizations, -// e.g. a remotely loaded ConfigMap generator specified -// to read from /etc/passwd will fail. -// -type fileLoader struct { - // Loader that spawned this loader. - // Used to avoid cycles. - referrer *fileLoader - - // An absolute, cleaned path to a directory. - // The Load function will read non-absolute - // paths relative to this directory. - root filesys.ConfirmedDir - - // Restricts behavior of Load function. - loadRestrictor LoadRestrictorFunc - - // If this is non-nil, the files were - // obtained from the given repository. - repoSpec *git.RepoSpec - - // File system utilities. - fSys filesys.FileSystem - - // Used to load from HTTP - http *http.Client - - // Used to clone repositories. - cloner git.Cloner - - // Used to clean up, as needed. - cleaner func() error -} - -// NewFileLoaderAtCwd returns a loader that loads from PWD. -// A convenience for kustomize edit commands. -func NewFileLoaderAtCwd(fSys filesys.FileSystem) *fileLoader { - return newLoaderOrDie( - RestrictionRootOnly, fSys, filesys.SelfDir) -} - -// NewFileLoaderAtRoot returns a loader that loads from "/". -// A convenience for tests. -func NewFileLoaderAtRoot(fSys filesys.FileSystem) *fileLoader { - return newLoaderOrDie( - RestrictionRootOnly, fSys, filesys.Separator) -} - -// Root returns the absolute path that is prepended to any -// relative paths used in Load. -func (fl *fileLoader) Root() string { - return fl.root.String() -} - -func newLoaderOrDie( - lr LoadRestrictorFunc, - fSys filesys.FileSystem, path string) *fileLoader { - root, err := filesys.ConfirmDir(fSys, path) - if err != nil { - log.Fatalf("unable to make loader at '%s'; %v", path, err) - } - return newLoaderAtConfirmedDir( - lr, root, fSys, nil, git.ClonerUsingGitExec) -} - -// newLoaderAtConfirmedDir returns a new fileLoader with given root. -func newLoaderAtConfirmedDir( - lr LoadRestrictorFunc, - root filesys.ConfirmedDir, fSys filesys.FileSystem, - referrer *fileLoader, cloner git.Cloner) *fileLoader { - return &fileLoader{ - loadRestrictor: lr, - root: root, - referrer: referrer, - fSys: fSys, - cloner: cloner, - cleaner: func() error { return nil }, - } -} - -// New returns a new Loader, rooted relative to current loader, -// or rooted in a temp directory holding a git repo clone. -func (fl *fileLoader) New(path string) (ifc.Loader, error) { - if path == "" { - return nil, errors.Errorf("new root cannot be empty") - } - - repoSpec, err := git.NewRepoSpecFromURL(path) - if err == nil { - // Treat this as git repo clone request. - if err = fl.errIfRepoCycle(repoSpec); err != nil { - return nil, err - } - return newLoaderAtGitClone( - repoSpec, fl.fSys, fl, fl.cloner) - } - - if filepath.IsAbs(path) { - return nil, fmt.Errorf("new root '%s' cannot be absolute", path) - } - root, err := filesys.ConfirmDir(fl.fSys, fl.root.Join(path)) - if err != nil { - return nil, errors.WrapPrefixf(err, ErrRtNotDir.Error()) - } - if err = fl.errIfGitContainmentViolation(root); err != nil { - return nil, err - } - if err = fl.errIfArgEqualOrHigher(root); err != nil { - return nil, err - } - return newLoaderAtConfirmedDir( - fl.loadRestrictor, root, fl.fSys, fl, fl.cloner), nil -} - -// newLoaderAtGitClone returns a new Loader pinned to a temporary -// directory holding a cloned git repo. 
-func newLoaderAtGitClone( - repoSpec *git.RepoSpec, fSys filesys.FileSystem, - referrer *fileLoader, cloner git.Cloner) (ifc.Loader, error) { - cleaner := repoSpec.Cleaner(fSys) - err := cloner(repoSpec) - if err != nil { - cleaner() - return nil, err - } - root, f, err := fSys.CleanedAbs(repoSpec.AbsPath()) - if err != nil { - cleaner() - return nil, err - } - // We don't know that the path requested in repoSpec - // is a directory until we actually clone it and look - // inside. That just happened, hence the error check - // is here. - if f != "" { - cleaner() - return nil, fmt.Errorf( - "'%s' refers to file '%s'; expecting directory", - repoSpec.AbsPath(), f) - } - return &fileLoader{ - // Clones never allowed to escape root. - loadRestrictor: RestrictionRootOnly, - root: root, - referrer: referrer, - repoSpec: repoSpec, - fSys: fSys, - cloner: cloner, - cleaner: cleaner, - }, nil -} - -func (fl *fileLoader) errIfGitContainmentViolation( - base filesys.ConfirmedDir) error { - containingRepo := fl.containingRepo() - if containingRepo == nil { - return nil - } - if !base.HasPrefix(containingRepo.CloneDir()) { - return fmt.Errorf( - "security; bases in kustomizations found in "+ - "cloned git repos must be within the repo, "+ - "but base '%s' is outside '%s'", - base, containingRepo.CloneDir()) - } - return nil -} - -// Looks back through referrers for a git repo, returning nil -// if none found. -func (fl *fileLoader) containingRepo() *git.RepoSpec { - if fl.repoSpec != nil { - return fl.repoSpec - } - if fl.referrer == nil { - return nil - } - return fl.referrer.containingRepo() -} - -// errIfArgEqualOrHigher tests whether the argument, -// is equal to or above the root of any ancestor. -func (fl *fileLoader) errIfArgEqualOrHigher( - candidateRoot filesys.ConfirmedDir) error { - if fl.root.HasPrefix(candidateRoot) { - return fmt.Errorf( - "cycle detected: candidate root '%s' contains visited root '%s'", - candidateRoot, fl.root) - } - if fl.referrer == nil { - return nil - } - return fl.referrer.errIfArgEqualOrHigher(candidateRoot) -} - -// TODO(monopole): Distinguish branches? -// I.e. Allow a distinction between git URI with -// path foo and tag bar and a git URI with the same -// path but a different tag? -func (fl *fileLoader) errIfRepoCycle(newRepoSpec *git.RepoSpec) error { - // TODO(monopole): Use parsed data instead of Raw(). - if fl.repoSpec != nil && - strings.HasPrefix(fl.repoSpec.Raw(), newRepoSpec.Raw()) { - return fmt.Errorf( - "cycle detected: URI '%s' referenced by previous URI '%s'", - newRepoSpec.Raw(), fl.repoSpec.Raw()) - } - if fl.referrer == nil { - return nil - } - return fl.referrer.errIfRepoCycle(newRepoSpec) -} - -// Load returns the content of file at the given path, -// else an error. Relative paths are taken relative -// to the root. 
-func (fl *fileLoader) Load(path string) ([]byte, error) { - if u, err := url.Parse(path); err == nil && (u.Scheme == "http" || u.Scheme == "https") { - var hc *http.Client - if fl.http != nil { - hc = fl.http - } else { - hc = &http.Client{} - } - resp, err := hc.Get(path) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode < 200 || resp.StatusCode > 299 { - _, err := git.NewRepoSpecFromURL(path) - if err == nil { - return nil, errors.Errorf("URL is a git repository") - } - return nil, fmt.Errorf("%w: status code %d (%s)", ErrHTTP, resp.StatusCode, http.StatusText(resp.StatusCode)) - } - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - return body, nil - } - if !filepath.IsAbs(path) { - path = fl.root.Join(path) - } - path, err := fl.loadRestrictor(fl.fSys, fl.root, path) - if err != nil { - return nil, err - } - return fl.fSys.ReadFile(path) -} - -// Cleanup runs the cleaner. -func (fl *fileLoader) Cleanup() error { - return fl.cleaner() -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/loader/loader.go b/src/vendor/sigs.k8s.io/kustomize/api/loader/loader.go deleted file mode 100644 index e10885b9b..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/loader/loader.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package loader has a data loading interface and various implementations. -package loader - -import ( - "sigs.k8s.io/kustomize/api/ifc" - "sigs.k8s.io/kustomize/api/internal/git" - "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/filesys" -) - -// NewLoader returns a Loader pointed at the given target. -// If the target is remote, the loader will be restricted -// to the root and below only. If the target is local, the -// loader will have the restrictions passed in. Regardless, -// if a local target attempts to transitively load remote bases, -// the remote bases will all be root-only restricted. -func NewLoader( - lr LoadRestrictorFunc, - target string, fSys filesys.FileSystem) (ifc.Loader, error) { - repoSpec, err := git.NewRepoSpecFromURL(target) - if err == nil { - // The target qualifies as a remote git target. - return newLoaderAtGitClone( - repoSpec, fSys, nil, git.ClonerUsingGitExec) - } - root, err := filesys.ConfirmDir(fSys, target) - if err != nil { - return nil, errors.WrapPrefixf(err, ErrRtNotDir.Error()) - } - return newLoaderAtConfirmedDir( - lr, root, fSys, nil, git.ClonerUsingGitExec), nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/loader/loadrestrictions.go b/src/vendor/sigs.k8s.io/kustomize/api/loader/loadrestrictions.go deleted file mode 100644 index a016a9625..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/loader/loadrestrictions.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package loader - -import ( - "fmt" - - "sigs.k8s.io/kustomize/kyaml/filesys" -) - -type LoadRestrictorFunc func( - filesys.FileSystem, filesys.ConfirmedDir, string) (string, error) - -func RestrictionRootOnly( - fSys filesys.FileSystem, root filesys.ConfirmedDir, path string) (string, error) { - d, f, err := fSys.CleanedAbs(path) - if err != nil { - return "", err - } - if f == "" { - return "", fmt.Errorf("'%s' must resolve to a file", path) - } - if !d.HasPrefix(root) { - return "", fmt.Errorf( - "security; file '%s' is not in or below '%s'", - path, root) - } - return d.Join(f), nil -} - -func RestrictionNone( - _ filesys.FileSystem, _ filesys.ConfirmedDir, path string) (string, error) { - return path, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/provenance/provenance.go b/src/vendor/sigs.k8s.io/kustomize/api/provenance/provenance.go deleted file mode 100644 index 4ad40395a..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/provenance/provenance.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package provenance - -import ( - "fmt" - "runtime" - "strings" -) - -var ( - version = "unknown" - // sha1 from git, output of $(git rev-parse HEAD) - gitCommit = "$Format:%H$" - // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') - buildDate = "1970-01-01T00:00:00Z" - goos = runtime.GOOS - goarch = runtime.GOARCH -) - -// Provenance holds information about the build of an executable. -type Provenance struct { - // Version of the kustomize binary. - Version string `json:"version,omitempty"` - // GitCommit is a git commit - GitCommit string `json:"gitCommit,omitempty"` - // BuildDate is date of the build. - BuildDate string `json:"buildDate,omitempty"` - // GoOs holds OS name. - GoOs string `json:"goOs,omitempty"` - // GoArch holds architecture name. - GoArch string `json:"goArch,omitempty"` -} - -// GetProvenance returns an instance of Provenance. -func GetProvenance() Provenance { - return Provenance{ - version, - gitCommit, - buildDate, - goos, - goarch, - } -} - -// Full returns the full provenance stamp. -func (v Provenance) Full() string { - return fmt.Sprintf("%+v", v) -} - -// Short returns the shortened provenance stamp. -func (v Provenance) Short() string { - return fmt.Sprintf( - "%v", - Provenance{ - Version: v.Version, - BuildDate: v.BuildDate, - }) -} - -// Semver returns the semantic version of kustomize. -// kustomize version is set in format "kustomize/vX.X.X" in every release. -// X.X.X is a semver. If the version string is not in this format, -// return the original version string -func (v Provenance) Semver() string { - return strings.TrimPrefix(v.Version, "kustomize/") -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/provider/depprovider.go b/src/vendor/sigs.k8s.io/kustomize/api/provider/depprovider.go deleted file mode 100644 index 0102c89ce..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/provider/depprovider.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package provider - -import ( - "sigs.k8s.io/kustomize/api/hasher" - "sigs.k8s.io/kustomize/api/ifc" - "sigs.k8s.io/kustomize/api/internal/validate" - "sigs.k8s.io/kustomize/api/resource" -) - -// DepProvider is a dependency provider, injecting different -// implementations depending on the context. 
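NewLoader and RestrictionRootOnly above enforce that a kustomization may only read files at or below its own root. A hedged sketch of that behaviour on an in-memory filesystem; the file names are invented fixtures:

    package main

    import (
        "fmt"

        "sigs.k8s.io/kustomize/api/loader"
        "sigs.k8s.io/kustomize/kyaml/filesys"
    )

    func main() {
        fSys := filesys.MakeFsInMemory()
        fSys.WriteFile("/app/kustomization.yaml", []byte("resources: []\n"))
        fSys.WriteFile("/app/patch.yaml", []byte("kind: Deployment\n"))
        fSys.WriteFile("/outside.yaml", []byte("kind: Secret\n"))

        ldr, err := loader.NewLoader(loader.RestrictionRootOnly, "/app", fSys)
        if err != nil {
            panic(err)
        }
        defer ldr.Cleanup()

        if _, err := ldr.Load("patch.yaml"); err == nil {
            fmt.Println("patch.yaml: allowed (inside the root)")
        }
        if _, err := ldr.Load("../outside.yaml"); err != nil {
            fmt.Println("../outside.yaml: rejected:", err)
        }
    }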
-type DepProvider struct { - resourceFactory *resource.Factory - // implemented by api/internal/validate.FieldValidator - // See TODO inside the validator for status. - // At time of writing, this is a do-nothing - // validator as it's not critical to kustomize function. - fieldValidator ifc.Validator -} - -func NewDepProvider() *DepProvider { - rf := resource.NewFactory(&hasher.Hasher{}) - return &DepProvider{ - resourceFactory: rf, - fieldValidator: validate.NewFieldValidator(), - } -} - -func NewDefaultDepProvider() *DepProvider { - return NewDepProvider() -} - -func (dp *DepProvider) GetResourceFactory() *resource.Factory { - return dp.resourceFactory -} - -func (dp *DepProvider) GetFieldValidator() ifc.Validator { - return dp.fieldValidator -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/resmap/factory.go b/src/vendor/sigs.k8s.io/kustomize/api/resmap/factory.go deleted file mode 100644 index ee6c68f10..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/resmap/factory.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resmap - -import ( - "github.com/pkg/errors" - "sigs.k8s.io/kustomize/api/ifc" - "sigs.k8s.io/kustomize/api/internal/kusterr" - "sigs.k8s.io/kustomize/api/resource" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// Factory makes instances of ResMap. -type Factory struct { - // Makes resources. - resF *resource.Factory -} - -// NewFactory returns a new resmap.Factory. -func NewFactory(rf *resource.Factory) *Factory { - return &Factory{resF: rf} -} - -// RF returns a resource.Factory. -func (rmF *Factory) RF() *resource.Factory { - return rmF.resF -} - -func New() ResMap { - return newOne() -} - -// FromResource returns a ResMap with one entry. -func (rmF *Factory) FromResource(res *resource.Resource) ResMap { - m, err := newResMapFromResourceSlice([]*resource.Resource{res}) - if err != nil { - panic(err) - } - return m -} - -// FromResourceSlice returns a ResMap with a slice of resources. -func (rmF *Factory) FromResourceSlice(ress []*resource.Resource) ResMap { - m, err := newResMapFromResourceSlice(ress) - if err != nil { - panic(err) - } - return m -} - -// FromFile returns a ResMap given a resource path. -func (rmF *Factory) FromFile( - loader ifc.Loader, path string) (ResMap, error) { - content, err := loader.Load(path) - if err != nil { - return nil, err - } - m, err := rmF.NewResMapFromBytes(content) - if err != nil { - return nil, kusterr.Handler(err, path) - } - return m, nil -} - -// NewResMapFromBytes decodes a list of objects in byte array format. -func (rmF *Factory) NewResMapFromBytes(b []byte) (ResMap, error) { - resources, err := rmF.resF.SliceFromBytes(b) - if err != nil { - return nil, err - } - return newResMapFromResourceSlice(resources) -} - -// NewResMapFromConfigMapArgs returns a Resource slice given -// a configmap metadata slice from kustomization file. -func (rmF *Factory) NewResMapFromConfigMapArgs( - kvLdr ifc.KvLoader, argList []types.ConfigMapArgs) (ResMap, error) { - var resources []*resource.Resource - for i := range argList { - res, err := rmF.resF.MakeConfigMap(kvLdr, &argList[i]) - if err != nil { - return nil, errors.Wrap(err, "NewResMapFromConfigMapArgs") - } - resources = append(resources, res) - } - return newResMapFromResourceSlice(resources) -} - -// FromConfigMapArgs creates a new ResMap containing one ConfigMap. 
-func (rmF *Factory) FromConfigMapArgs( - kvLdr ifc.KvLoader, args types.ConfigMapArgs) (ResMap, error) { - res, err := rmF.resF.MakeConfigMap(kvLdr, &args) - if err != nil { - return nil, err - } - return rmF.FromResource(res), nil -} - -// NewResMapFromSecretArgs takes a SecretArgs slice, generates -// secrets from each entry, and accumulates them in a ResMap. -func (rmF *Factory) NewResMapFromSecretArgs( - kvLdr ifc.KvLoader, argsList []types.SecretArgs) (ResMap, error) { - var resources []*resource.Resource - for i := range argsList { - res, err := rmF.resF.MakeSecret(kvLdr, &argsList[i]) - if err != nil { - return nil, errors.Wrap(err, "NewResMapFromSecretArgs") - } - resources = append(resources, res) - } - return newResMapFromResourceSlice(resources) -} - -// FromSecretArgs creates a new ResMap containing one secret. -func (rmF *Factory) FromSecretArgs( - kvLdr ifc.KvLoader, args types.SecretArgs) (ResMap, error) { - res, err := rmF.resF.MakeSecret(kvLdr, &args) - if err != nil { - return nil, err - } - return rmF.FromResource(res), nil -} - -func newResMapFromResourceSlice( - resources []*resource.Resource) (ResMap, error) { - result := New() - for _, res := range resources { - err := result.Append(res) - if err != nil { - return nil, err - } - } - return result, nil -} - -// NewResMapFromRNodeSlice returns a ResMap from a slice of RNodes -func (rmF *Factory) NewResMapFromRNodeSlice(s []*yaml.RNode) (ResMap, error) { - rs, err := rmF.resF.ResourcesFromRNodes(s) - if err != nil { - return nil, err - } - return newResMapFromResourceSlice(rs) -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/resmap/idslice.go b/src/vendor/sigs.k8s.io/kustomize/api/resmap/idslice.go deleted file mode 100644 index 8c25cbb2a..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/resmap/idslice.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resmap - -import ( - "sort" - - "sigs.k8s.io/kustomize/kyaml/resid" -) - -// IdSlice implements the sort interface. -type IdSlice []resid.ResId - -var _ sort.Interface = IdSlice{} - -func (a IdSlice) Len() int { return len(a) } -func (a IdSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a IdSlice) Less(i, j int) bool { - if !a[i].Gvk.Equals(a[j].Gvk) { - return a[i].Gvk.IsLessThan(a[j].Gvk) - } - return a[i].LegacySortString() < a[j].LegacySortString() -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/resmap/resmap.go b/src/vendor/sigs.k8s.io/kustomize/api/resmap/resmap.go deleted file mode 100644 index ea913ba6b..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/resmap/resmap.go +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package resmap implements a map from ResId to Resource that -// tracks all resources in a kustomization. 
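The resmap.Factory removed above is the usual entry point for turning raw YAML into a ResMap. A minimal sketch, assuming the hasher-backed resource.Factory wiring shown in depprovider.go and an invented two-document manifest:

package main

import (
    "fmt"

    "sigs.k8s.io/kustomize/api/hasher"
    "sigs.k8s.io/kustomize/api/resmap"
    "sigs.k8s.io/kustomize/api/resource"
)

func main() {
    // Wire the factories together the same way NewDepProvider does.
    rf := resmap.NewFactory(resource.NewFactory(&hasher.Hasher{}))

    m, err := rf.NewResMapFromBytes([]byte(`
apiVersion: v1
kind: ConfigMap
metadata:
  name: demo
---
apiVersion: v1
kind: Service
metadata:
  name: demo
`))
    if err != nil {
        panic(err)
    }

    fmt.Println(m.Size())   // 2: one entry per document, in append order
    fmt.Println(m.AllIds()) // the CurId of each resource

    // Serializing back out joins the resources with "---" separators.
    yml, _ := m.AsYaml()
    fmt.Println(string(yml))
}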
-package resmap - -import ( - "sigs.k8s.io/kustomize/api/ifc" - "sigs.k8s.io/kustomize/api/resource" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/resid" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// A Transformer modifies an instance of ResMap. -type Transformer interface { - // Transform modifies data in the argument, - // e.g. adding labels to resources that can be labelled. - Transform(m ResMap) error -} - -// A TransformerWithProperties contains a Transformer and stores -// some of its properties -type TransformerWithProperties struct { - Transformer - Origin *resource.Origin -} - -// A Generator creates an instance of ResMap. -type Generator interface { - Generate() (ResMap, error) -} - -// A GeneratorWithProperties contains a Generator and stores -// some of its properties -type GeneratorWithProperties struct { - Generator - Origin *resource.Origin -} - -// Something that's configurable accepts an -// instance of PluginHelpers and a raw config -// object (YAML in []byte form). -type Configurable interface { - Config(h *PluginHelpers, config []byte) error -} - -// NewPluginHelpers makes an instance of PluginHelpers. -func NewPluginHelpers( - ldr ifc.Loader, v ifc.Validator, rf *Factory, - pc *types.PluginConfig) *PluginHelpers { - return &PluginHelpers{ldr: ldr, v: v, rf: rf, pc: pc} -} - -// PluginHelpers holds things that any or all plugins might need. -// This should be available to each plugin, in addition to -// any plugin-specific configuration. -type PluginHelpers struct { - ldr ifc.Loader - v ifc.Validator - rf *Factory - pc *types.PluginConfig -} - -func (c *PluginHelpers) GeneralConfig() *types.PluginConfig { - return c.pc -} - -func (c *PluginHelpers) Loader() ifc.Loader { - return c.ldr -} - -func (c *PluginHelpers) ResmapFactory() *Factory { - return c.rf -} - -func (c *PluginHelpers) Validator() ifc.Validator { - return c.v -} - -type GeneratorPlugin interface { - Generator - Configurable -} - -type TransformerPlugin interface { - Transformer - Configurable -} - -// ResMap is an interface describing operations on the -// core kustomize data structure, a list of Resources. -// -// Every Resource has two ResIds: OrgId and CurId. -// -// In a ResMap, no two resources may have the same CurId, -// but they may have the same OrgId. The latter can happen -// when mixing two or more different overlays apply different -// transformations to a common base. When looking for a -// resource to transform, try the OrgId first, and if this -// fails or finds too many, it might make sense to then try -// the CurrId. Depends on the situation. -// -// TODO: get rid of this interface (use bare resWrangler). -// There aren't multiple implementations any more. -type ResMap interface { - // Size reports the number of resources. - Size() int - - // Resources provides a discardable slice - // of resource pointers, returned in the order - // as appended. - Resources() []*resource.Resource - - // Append adds a Resource. Error on CurId collision. - // - // A class invariant of ResMap is that all of its - // resources must differ in their value of - // CurId(), aka current Id. The Id is the tuple - // of {namespace, group, version, kind, name} - // (see ResId). - // - // This invariant reflects the invariant of a - // kubernetes cluster, where if one tries to add - // a resource to the cluster whose Id matches - // that of a resource already in the cluster, - // only two outcomes are allowed. 
Either the - // incoming resource is _merged_ into the existing - // one, or the incoming resource is rejected. - // One cannot end up with two resources - // in the cluster with the same Id. - Append(*resource.Resource) error - - // AppendAll appends another ResMap to self, - // failing on any CurId collision. - AppendAll(ResMap) error - - // AbsorbAll appends, replaces or merges the contents - // of another ResMap into self, - // allowing and sometimes demanding ID collisions. - // A collision would be demanded, say, when a generated - // ConfigMap has the "replace" option in its generation - // instructions, meaning it _must_ replace - // something in the known set of resources. - // If a resource id for resource X is found to already - // be in self, then the behavior field for X must - // be BehaviorMerge or BehaviorReplace. If X is not in - // self, then its behavior _cannot_ be merge or replace. - AbsorbAll(ResMap) error - - // AddOriginAnnotation will add the provided origin as - // an origin annotation to all resources in the ResMap, if - // the origin is not nil. - AddOriginAnnotation(origin *resource.Origin) error - - // RemoveOriginAnnotation will remove the origin annotation - // from all resources in the ResMap - RemoveOriginAnnotations() error - - // AddTransformerAnnotation will add the provided origin as - // an origin annotation if the resource doesn't have one; a - // transformer annotation otherwise; to all resources in - // ResMap - AddTransformerAnnotation(origin *resource.Origin) error - - // RemoveTransformerAnnotation will remove the transformer annotation - // from all resources in the ResMap - RemoveTransformerAnnotations() error - - // AnnotateAll annotates all resources in the ResMap with - // the provided key value pair. - AnnotateAll(key string, value string) error - - // AsYaml returns the yaml form of resources. - AsYaml() ([]byte, error) - - // GetByIndex returns a resource at the given index, - // nil if out of range. - GetByIndex(int) *resource.Resource - - // GetIndexOfCurrentId returns the index of the resource - // with the given CurId. - // Returns error if there is more than one match. - // Returns (-1, nil) if there is no match. - GetIndexOfCurrentId(id resid.ResId) (int, error) - - // GetMatchingResourcesByCurrentId returns the resources - // who's CurId is matched by the argument. - GetMatchingResourcesByCurrentId(matches IdMatcher) []*resource.Resource - - // GetMatchingResourcesByAnyId returns the resources - // who's current or previous IDs is matched by the argument. - GetMatchingResourcesByAnyId(matches IdMatcher) []*resource.Resource - - // GetByCurrentId is shorthand for calling - // GetMatchingResourcesByCurrentId with a matcher requiring - // an exact match, returning an error on multiple or no matches. - GetByCurrentId(resid.ResId) (*resource.Resource, error) - - // GetById is shorthand for calling - // GetMatchingResourcesByAnyId with a matcher requiring - // an exact match, returning an error on multiple or no matches. - GetById(resid.ResId) (*resource.Resource, error) - - // GroupedByCurrentNamespace returns a map of namespace - // to a slice of *Resource in that namespace. - // Cluster-scoped Resources are not included (see ClusterScoped). - // Resources with an empty namespace are placed - // in the resid.DefaultNamespace entry. 
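The Append contract spelled out above (no two resources in a ResMap may share a CurId) is easy to see directly. A short sketch, assuming the same factory wiring and an invented ConfigMap:

package main

import (
    "fmt"

    "sigs.k8s.io/kustomize/api/hasher"
    "sigs.k8s.io/kustomize/api/resmap"
    "sigs.k8s.io/kustomize/api/resource"
)

func main() {
    rf := resource.NewFactory(&hasher.Hasher{})
    res := rf.FromMap(map[string]interface{}{
        "apiVersion": "v1",
        "kind":       "ConfigMap",
        "metadata":   map[string]interface{}{"name": "demo"},
    })

    m := resmap.New()
    if err := m.Append(res); err != nil {
        panic(err)
    }

    // A second resource with the same {group, version, kind, name, namespace}
    // violates the CurId invariant, so Append refuses it.
    err := m.Append(res.DeepCopy())
    fmt.Println(err) // may not add resource with an already registered id: ...
}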
- GroupedByCurrentNamespace() map[string][]*resource.Resource - - // GroupedByOriginalNamespace performs as GroupByNamespace - // but use the original namespace instead of the current - // one to perform the grouping. - GroupedByOriginalNamespace() map[string][]*resource.Resource - - // ClusterScoped returns a slice of resources that - // cannot be placed in a namespace, e.g. - // Node, ClusterRole, Namespace itself, etc. - ClusterScoped() []*resource.Resource - - // AllIds returns all CurrentIds. - AllIds() []resid.ResId - - // Replace replaces the resource with the matching CurId. - // Error if there's no match or more than one match. - // Returns the index where the replacement happened. - Replace(*resource.Resource) (int, error) - - // Remove removes the resource whose CurId matches the argument. - // Error if not found. - Remove(resid.ResId) error - - // Clear removes all resources and Ids. - Clear() - - // DropEmpties drops empty resources from the ResMap. - DropEmpties() - - // SubsetThatCouldBeReferencedByResource returns a ResMap subset - // of self with resources that could be referenced by the - // resource argument. - // This is a filter; it excludes things that cannot be - // referenced by the resource, e.g. objects in other - // namespaces. Cluster wide objects are never excluded. - SubsetThatCouldBeReferencedByResource(*resource.Resource) (ResMap, error) - - // DeAnchor replaces YAML aliases with structured data copied from anchors. - // This cannot be undone; if desired, call DeepCopy first. - // Subsequent marshalling to YAML will no longer have anchor - // definitions ('&') or aliases ('*'). - // - // Anchors are not expected to work across YAML 'documents'. - // If three resources are loaded from one file containing three YAML docs: - // - // {resourceA} - // --- - // {resourceB} - // --- - // {resourceC} - // - // then anchors defined in A cannot be seen from B and C and vice versa. - // OTOH, cross-resource links (a field in B referencing fields in A) will - // work if the resources are gathered in a ResourceList: - // - // apiVersion: config.kubernetes.io/v1 - // kind: ResourceList - // metadata: - // name: someList - // items: - // - {resourceA} - // - {resourceB} - // - {resourceC} - // - DeAnchor() error - - // DeepCopy copies the ResMap and underlying resources. - DeepCopy() ResMap - - // ShallowCopy copies the ResMap but - // not the underlying resources. - ShallowCopy() ResMap - - // ErrorIfNotEqualSets returns an error if the - // argument doesn't have the same resources as self. - // Ordering is _not_ taken into account, - // as this function was solely used in tests written - // before internal resource order was maintained, - // and those tests are initialized with maps which - // by definition have random ordering, and will - // fail spuriously. - // TODO: modify tests to not use resmap.FromMap, - // TODO: - and replace this with a stricter equals. - ErrorIfNotEqualSets(ResMap) error - - // ErrorIfNotEqualLists returns an error if the - // argument doesn't have the resource objects - // data as self, in the same order. - // Meta information is ignored; this is similar - // to comparing the AsYaml() strings, but allows - // for more informed errors on not equals. - ErrorIfNotEqualLists(ResMap) error - - // Debug prints the ResMap. - Debug(title string) - - // Select returns a list of resources that - // are selected by a Selector - Select(types.Selector) ([]*resource.Resource, error) - - // ToRNodeSlice returns a copy of the resources as RNodes. 
- ToRNodeSlice() []*yaml.RNode - - // ApplySmPatch applies a strategic-merge patch to the - // selected set of resources. - ApplySmPatch( - selectedSet *resource.IdSet, patch *resource.Resource) error - - // RemoveBuildAnnotations removes annotations created by the build process. - RemoveBuildAnnotations() - - // ApplyFilter applies an RNode filter to all Resources in the ResMap. - // TODO: Send/recover ancillary Resource data to/from subprocesses. - // Assure that the ancillary data in Resource (everything not in the RNode) - // is sent to and re-captured from transformer subprocess (as the process - // might edit that information). One way to do this would be to solely use - // RNode metadata annotation reading and writing instead of using Resource - // struct data members, i.e. the Resource struct is replaced by RNode - // and use of (slow) k8s metadata annotations inside the RNode. - ApplyFilter(f kio.Filter) error -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/resmap/reswrangler.go b/src/vendor/sigs.k8s.io/kustomize/api/resmap/reswrangler.go deleted file mode 100644 index e80299448..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/resmap/reswrangler.go +++ /dev/null @@ -1,764 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resmap - -import ( - "bytes" - "fmt" - "reflect" - - "github.com/pkg/errors" - "sigs.k8s.io/kustomize/api/filters/annotations" - "sigs.k8s.io/kustomize/api/resource" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/resid" - kyaml "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// resWrangler implements ResMap. -type resWrangler struct { - // Resource list maintained in load (append) order. - // This is important for transformers, which must - // be performed in a specific order, and for users - // who for whatever reasons wish the order they - // specify in kustomizations to be maintained and - // available as an option for final YAML rendering. - rList []*resource.Resource -} - -func newOne() *resWrangler { - result := &resWrangler{} - result.Clear() - return result -} - -// Clear implements ResMap. -func (m *resWrangler) Clear() { - m.rList = nil -} - -// DropEmpties quickly drops empty resources. -// It doesn't use Append, which checks for Id collisions. -func (m *resWrangler) DropEmpties() { - var rList []*resource.Resource - for _, r := range m.rList { - if !r.IsNilOrEmpty() { - rList = append(rList, r) - } - } - m.rList = rList -} - -// Size implements ResMap. -func (m *resWrangler) Size() int { - return len(m.rList) -} - -func (m *resWrangler) indexOfResource(other *resource.Resource) int { - for i, r := range m.rList { - if r == other { - return i - } - } - return -1 -} - -// Resources implements ResMap. -func (m *resWrangler) Resources() []*resource.Resource { - tmp := make([]*resource.Resource, len(m.rList)) - copy(tmp, m.rList) - return tmp -} - -// Append implements ResMap. -func (m *resWrangler) Append(res *resource.Resource) error { - id := res.CurId() - if r := m.GetMatchingResourcesByCurrentId(id.Equals); len(r) > 0 { - return fmt.Errorf( - "may not add resource with an already registered id: %s", id) - } - m.append(res) - return nil -} - -// append appends without performing an Id check -func (m *resWrangler) append(res *resource.Resource) { - m.rList = append(m.rList, res) -} - -// Remove implements ResMap. 
-func (m *resWrangler) Remove(adios resid.ResId) error { - var rList []*resource.Resource - for _, r := range m.rList { - if r.CurId() != adios { - rList = append(rList, r) - } - } - if len(rList) != m.Size()-1 { - return fmt.Errorf("id %s not found in removal", adios) - } - m.rList = rList - return nil -} - -// Replace implements ResMap. -func (m *resWrangler) Replace(res *resource.Resource) (int, error) { - id := res.CurId() - i, err := m.GetIndexOfCurrentId(id) - if err != nil { - return -1, errors.Wrap(err, "in Replace") - } - if i < 0 { - return -1, fmt.Errorf("cannot find resource with id %s to replace", id) - } - m.rList[i] = res - return i, nil -} - -// AllIds implements ResMap. -func (m *resWrangler) AllIds() (ids []resid.ResId) { - ids = make([]resid.ResId, m.Size()) - for i, r := range m.rList { - ids[i] = r.CurId() - } - return -} - -// Debug implements ResMap. -func (m *resWrangler) Debug(title string) { - fmt.Println("--------------------------- " + title) - firstObj := true - for i, r := range m.rList { - if firstObj { - firstObj = false - } else { - fmt.Println("---") - } - fmt.Printf("# %d %s\n%s\n", i, r.OrgId(), r.String()) - } -} - -type IdMatcher func(resid.ResId) bool - -// GetByIndex implements ResMap. -func (m *resWrangler) GetByIndex(i int) *resource.Resource { - if i < 0 || i >= m.Size() { - return nil - } - return m.rList[i] -} - -// GetIndexOfCurrentId implements ResMap. -func (m *resWrangler) GetIndexOfCurrentId(id resid.ResId) (int, error) { - count := 0 - result := -1 - for i, r := range m.rList { - if id.Equals(r.CurId()) { - count++ - result = i - } - } - if count > 1 { - return -1, fmt.Errorf("id matched %d resources", count) - } - return result, nil -} - -type IdFromResource func(r *resource.Resource) resid.ResId - -func GetCurrentId(r *resource.Resource) resid.ResId { return r.CurId() } - -// GetMatchingResourcesByCurrentId implements ResMap. -func (m *resWrangler) GetMatchingResourcesByCurrentId( - matches IdMatcher) []*resource.Resource { - return m.filteredById(matches, GetCurrentId) -} - -// GetMatchingResourcesByAnyId implements ResMap. -func (m *resWrangler) GetMatchingResourcesByAnyId( - matches IdMatcher) []*resource.Resource { - var result []*resource.Resource - for _, r := range m.rList { - for _, id := range append(r.PrevIds(), r.CurId()) { - if matches(id) { - result = append(result, r) - break - } - } - } - return result -} - -func (m *resWrangler) filteredById( - matches IdMatcher, idGetter IdFromResource) []*resource.Resource { - var result []*resource.Resource - for _, r := range m.rList { - if matches(idGetter(r)) { - result = append(result, r) - } - } - return result -} - -// GetByCurrentId implements ResMap. -func (m *resWrangler) GetByCurrentId( - id resid.ResId) (*resource.Resource, error) { - return demandOneMatch(m.GetMatchingResourcesByCurrentId, id, "Current") -} - -// GetById implements ResMap. 
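Lookups, replacements and removals in the deleted resWrangler all key off exact ResId matches. A sketch, assuming the kyaml resid helpers (NewGvk is assumed; NewResIdWithNamespace appears in the removed code) and an invented ConfigMap:

package main

import (
    "fmt"

    "sigs.k8s.io/kustomize/api/hasher"
    "sigs.k8s.io/kustomize/api/resmap"
    "sigs.k8s.io/kustomize/api/resource"
    "sigs.k8s.io/kustomize/kyaml/resid"
)

func main() {
    rf := resmap.NewFactory(resource.NewFactory(&hasher.Hasher{}))
    m, err := rf.NewResMapFromBytes([]byte(`
apiVersion: v1
kind: ConfigMap
metadata:
  name: demo
`))
    if err != nil {
        panic(err)
    }

    // Exact-match lookup by the resource's current id.
    id := resid.NewResIdWithNamespace(resid.NewGvk("", "v1", "ConfigMap"), "demo", "")
    res, err := m.GetByCurrentId(id)
    fmt.Println(res.GetName(), err) // demo <nil>

    // Remove drops the resource whose CurId matches; the map is then empty.
    if err := m.Remove(id); err != nil {
        panic(err)
    }
    fmt.Println(m.Size()) // 0
}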
-func (m *resWrangler) GetById( - id resid.ResId) (*resource.Resource, error) { - r, err := demandOneMatch(m.GetMatchingResourcesByAnyId, id, "Id") - if err != nil { - return nil, fmt.Errorf( - "%s; failed to find unique target for patch %s", - err.Error(), id.String()) - } - return r, nil -} - -type resFinder func(IdMatcher) []*resource.Resource - -func demandOneMatch( - f resFinder, id resid.ResId, s string) (*resource.Resource, error) { - r := f(id.Equals) - if len(r) == 1 { - return r[0], nil - } - if len(r) > 1 { - return nil, fmt.Errorf("multiple matches for %s %s", s, id) - } - return nil, fmt.Errorf("no matches for %s %s", s, id) -} - -// GroupedByCurrentNamespace implements ResMap. -func (m *resWrangler) GroupedByCurrentNamespace() map[string][]*resource.Resource { - items := m.groupedByCurrentNamespace() - delete(items, resid.TotallyNotANamespace) - return items -} - -// ClusterScoped implements ResMap. -func (m *resWrangler) ClusterScoped() []*resource.Resource { - return m.groupedByCurrentNamespace()[resid.TotallyNotANamespace] -} - -func (m *resWrangler) groupedByCurrentNamespace() map[string][]*resource.Resource { - byNamespace := make(map[string][]*resource.Resource) - for _, res := range m.rList { - namespace := res.CurId().EffectiveNamespace() - if _, found := byNamespace[namespace]; !found { - byNamespace[namespace] = []*resource.Resource{} - } - byNamespace[namespace] = append(byNamespace[namespace], res) - } - return byNamespace -} - -// GroupedByOriginalNamespace implements ResMap. -func (m *resWrangler) GroupedByOriginalNamespace() map[string][]*resource.Resource { - items := m.groupedByOriginalNamespace() - delete(items, resid.TotallyNotANamespace) - return items -} - -func (m *resWrangler) groupedByOriginalNamespace() map[string][]*resource.Resource { - byNamespace := make(map[string][]*resource.Resource) - for _, res := range m.rList { - namespace := res.OrgId().EffectiveNamespace() - if _, found := byNamespace[namespace]; !found { - byNamespace[namespace] = []*resource.Resource{} - } - byNamespace[namespace] = append(byNamespace[namespace], res) - } - return byNamespace -} - -// AsYaml implements ResMap. -func (m *resWrangler) AsYaml() ([]byte, error) { - firstObj := true - var b []byte - buf := bytes.NewBuffer(b) - for _, res := range m.rList { - out, err := res.AsYAML() - if err != nil { - m, _ := res.Map() - return nil, errors.Wrapf(err, "%#v", m) - } - if firstObj { - firstObj = false - } else { - if _, err = buf.WriteString("---\n"); err != nil { - return nil, err - } - } - if _, err = buf.Write(out); err != nil { - return nil, err - } - } - return buf.Bytes(), nil -} - -// ErrorIfNotEqualSets implements ResMap. 
-func (m *resWrangler) ErrorIfNotEqualSets(other ResMap) error { - m2, ok := other.(*resWrangler) - if !ok { - return fmt.Errorf("bad cast to resWrangler 1") - } - if m.Size() != m2.Size() { - return fmt.Errorf( - "lists have different number of entries: %#v doesn't equal %#v", - m.rList, m2.rList) - } - seen := make(map[int]bool) - for _, r1 := range m.rList { - id := r1.CurId() - others := m2.GetMatchingResourcesByCurrentId(id.Equals) - if len(others) == 0 { - return fmt.Errorf( - "id in self missing from other; id: %s", id) - } - if len(others) > 1 { - return fmt.Errorf( - "id in self matches %d in other; id: %s", len(others), id) - } - r2 := others[0] - if !reflect.DeepEqual(r1.RNode, r2.RNode) { - return fmt.Errorf( - "nodes unequal: \n -- %s,\n -- %s\n\n--\n%#v\n------\n%#v\n", - r1, r2, r1, r2) - } - seen[m2.indexOfResource(r2)] = true - } - if len(seen) != m.Size() { - return fmt.Errorf("counting problem %d != %d", len(seen), m.Size()) - } - return nil -} - -// ErrorIfNotEqualLists implements ResMap. -func (m *resWrangler) ErrorIfNotEqualLists(other ResMap) error { - m2, ok := other.(*resWrangler) - if !ok { - return fmt.Errorf("bad cast to resWrangler 2") - } - if m.Size() != m2.Size() { - return fmt.Errorf( - "lists have different number of entries: %#v doesn't equal %#v", - m.rList, m2.rList) - } - for i, r1 := range m.rList { - r2 := m2.rList[i] - if err := r1.ErrIfNotEquals(r2); err != nil { - return err - } - } - return nil -} - -type resCopier func(r *resource.Resource) *resource.Resource - -// ShallowCopy implements ResMap. -func (m *resWrangler) ShallowCopy() ResMap { - return m.makeCopy( - func(r *resource.Resource) *resource.Resource { - return r - }) -} - -// DeepCopy implements ResMap. -func (m *resWrangler) DeepCopy() ResMap { - return m.makeCopy( - func(r *resource.Resource) *resource.Resource { - return r.DeepCopy() - }) -} - -// makeCopy copies the ResMap. -func (m *resWrangler) makeCopy(copier resCopier) ResMap { - result := &resWrangler{} - result.rList = make([]*resource.Resource, m.Size()) - for i, r := range m.rList { - result.rList[i] = copier(r) - } - return result -} - -// SubsetThatCouldBeReferencedByResource implements ResMap. -func (m *resWrangler) SubsetThatCouldBeReferencedByResource( - referrer *resource.Resource) (ResMap, error) { - referrerId := referrer.CurId() - if referrerId.IsClusterScoped() { - // A cluster scoped resource can refer to anything. - return m, nil - } - result := newOne() - roleBindingNamespaces, err := getNamespacesForRoleBinding(referrer) - if err != nil { - return nil, err - } - for _, possibleTarget := range m.rList { - id := possibleTarget.CurId() - if id.IsClusterScoped() { - // A cluster-scoped resource can be referred to by anything. - result.append(possibleTarget) - continue - } - if id.IsNsEquals(referrerId) { - // The two objects are in the same namespace. - result.append(possibleTarget) - continue - } - // The two objects are namespaced (not cluster-scoped), AND - // are in different namespaces. - // There's still a chance they can refer to each other. 
- if roleBindingNamespaces[possibleTarget.GetNamespace()] { - result.append(possibleTarget) - } - } - return result, nil -} - -// getNamespacesForRoleBinding returns referenced ServiceAccount namespaces -// if the resource is a RoleBinding -func getNamespacesForRoleBinding(r *resource.Resource) (map[string]bool, error) { - result := make(map[string]bool) - if r.GetKind() != "RoleBinding" { - return result, nil - } - subjects, err := r.GetSlice("subjects") - if err != nil || subjects == nil { - return result, nil - } - for _, s := range subjects { - subject := s.(map[string]interface{}) - if ns, ok1 := subject["namespace"]; ok1 { - if kind, ok2 := subject["kind"]; ok2 { - if kind.(string) == "ServiceAccount" { - if n, ok3 := ns.(string); ok3 { - result[n] = true - } else { - return nil, errors.Errorf("Invalid Input: namespace is blank for resource %q\n", r.CurId()) - } - } - } - } - } - return result, nil -} - -// AppendAll implements ResMap. -func (m *resWrangler) AppendAll(other ResMap) error { - if other == nil { - return nil - } - m2, ok := other.(*resWrangler) - if !ok { - return fmt.Errorf("bad cast to resWrangler 3") - } - return m.appendAll(m2.rList) -} - -// appendAll appends all the resources, error on Id collision. -func (m *resWrangler) appendAll(list []*resource.Resource) error { - for _, res := range list { - if err := m.Append(res); err != nil { - return err - } - } - return nil -} - -// AbsorbAll implements ResMap. -func (m *resWrangler) AbsorbAll(other ResMap) error { - if other == nil { - return nil - } - m2, ok := other.(*resWrangler) - if !ok { - return fmt.Errorf("bad cast to resWrangler 4") - } - for _, r := range m2.rList { - err := m.appendReplaceOrMerge(r) - if err != nil { - return err - } - } - return nil -} - -// AddOriginAnnotation implements ResMap. 
-func (m *resWrangler) AddOriginAnnotation(origin *resource.Origin) error { - if origin == nil { - return nil - } - for _, res := range m.rList { - or, err := res.GetOrigin() - if or != nil || err != nil { - // if any resources already have an origin annotation, - // skip it - continue - } - if err := res.SetOrigin(origin); err != nil { - return err - } - } - return nil -} - -// RemoveOriginAnnotation implements ResMap -func (m *resWrangler) RemoveOriginAnnotations() error { - for _, res := range m.rList { - if err := res.SetOrigin(nil); err != nil { - return err - } - } - return nil -} - -// AddTransformerAnnotation implements ResMap -func (m *resWrangler) AddTransformerAnnotation(origin *resource.Origin) error { - for _, res := range m.rList { - or, err := res.GetOrigin() - if err != nil { - return err - } - if or == nil { - // the resource does not have an origin annotation, so - // we assume that the transformer generated the resource - // rather than modifying it - err = res.SetOrigin(origin) - } else { - // the resource already has an origin annotation, so we - // record the provided origin as a transformation - err = res.AddTransformation(origin) - } - if err != nil { - return err - } - } - return nil -} - -// RemoveTransformerAnnotations implements ResMap -func (m *resWrangler) RemoveTransformerAnnotations() error { - for _, res := range m.rList { - if err := res.ClearTransformations(); err != nil { - return err - } - } - return nil -} - -func (m *resWrangler) appendReplaceOrMerge(res *resource.Resource) error { - id := res.CurId() - matches := m.GetMatchingResourcesByAnyId(id.Equals) - switch len(matches) { - case 0: - switch res.Behavior() { - case types.BehaviorMerge, types.BehaviorReplace: - return fmt.Errorf( - "id %#v does not exist; cannot merge or replace", id) - default: - // presumably types.BehaviorCreate - return m.Append(res) - } - case 1: - old := matches[0] - if old == nil { - return fmt.Errorf("id lookup failure") - } - index := m.indexOfResource(old) - if index < 0 { - return fmt.Errorf("indexing problem") - } - switch res.Behavior() { - case types.BehaviorReplace: - res.CopyMergeMetaDataFieldsFrom(old) - case types.BehaviorMerge: - // ensure the origin annotation doesn't get overwritten - orig, err := old.GetOrigin() - if err != nil { - return err - } - res.CopyMergeMetaDataFieldsFrom(old) - res.MergeDataMapFrom(old) - res.MergeBinaryDataMapFrom(old) - if orig != nil { - res.SetOrigin(orig) - } - - default: - return fmt.Errorf( - "id %#v exists; behavior must be merge or replace", id) - } - i, err := m.Replace(res) - if err != nil { - return err - } - if i != index { - return fmt.Errorf("unexpected target index in replacement") - } - return nil - default: - return fmt.Errorf( - "found multiple objects %v that could accept merge of %v", - matches, id) - } -} - -// AnnotateAll implements ResMap -func (m *resWrangler) AnnotateAll(key string, value string) error { - return m.ApplyFilter(annotations.Filter{ - Annotations: map[string]string{ - key: value, - }, - FsSlice: []types.FieldSpec{{ - Path: "metadata/annotations", - CreateIfNotPresent: true, - }}, - }) -} - -// Select returns a list of resources that -// are selected by a Selector -func (m *resWrangler) Select(s types.Selector) ([]*resource.Resource, error) { - var result []*resource.Resource - sr, err := types.NewSelectorRegex(&s) - if err != nil { - return nil, err - } - for _, r := range m.rList { - curId := r.CurId() - orgId := r.OrgId() - - // It first tries to match with the original namespace - // then 
matches with the current namespace - if !sr.MatchNamespace(orgId.EffectiveNamespace()) && - !sr.MatchNamespace(curId.EffectiveNamespace()) { - continue - } - - // It first tries to match with the original name - // then matches with the current name - if !sr.MatchName(orgId.Name) && - !sr.MatchName(curId.Name) { - continue - } - - // matches the GVK - if !sr.MatchGvk(r.GetGvk()) { - continue - } - - // matches the label selector - matched, err := r.MatchesLabelSelector(s.LabelSelector) - if err != nil { - return nil, err - } - if !matched { - continue - } - - // matches the annotation selector - matched, err = r.MatchesAnnotationSelector(s.AnnotationSelector) - if err != nil { - return nil, err - } - if !matched { - continue - } - result = append(result, r) - } - return result, nil -} - -// ToRNodeSlice returns a copy of the resources as RNodes. -func (m *resWrangler) ToRNodeSlice() []*kyaml.RNode { - result := make([]*kyaml.RNode, len(m.rList)) - for i := range m.rList { - result[i] = m.rList[i].Copy() - } - return result -} - -// DeAnchor implements ResMap. -func (m *resWrangler) DeAnchor() (err error) { - for i := range m.rList { - if err = m.rList[i].DeAnchor(); err != nil { - return err - } - } - return nil -} - -// ApplySmPatch applies the patch, and errors on Id collisions. -func (m *resWrangler) ApplySmPatch( - selectedSet *resource.IdSet, patch *resource.Resource) error { - var list []*resource.Resource - for _, res := range m.rList { - if selectedSet.Contains(res.CurId()) { - patchCopy := patch.DeepCopy() - patchCopy.CopyMergeMetaDataFieldsFrom(patch) - patchCopy.SetGvk(res.GetGvk()) - patchCopy.SetKind(patch.GetKind()) - if err := res.ApplySmPatch(patchCopy); err != nil { - return err - } - } - if !res.IsNilOrEmpty() { - list = append(list, res) - } - } - m.Clear() - return m.appendAll(list) -} - -func (m *resWrangler) RemoveBuildAnnotations() { - for _, r := range m.rList { - r.RemoveBuildAnnotations() - } -} - -// ApplyFilter implements ResMap. -func (m *resWrangler) ApplyFilter(f kio.Filter) error { - reverseLookup := make(map[*kyaml.RNode]*resource.Resource, len(m.rList)) - nodes := make([]*kyaml.RNode, len(m.rList)) - for i, r := range m.rList { - ptr := &(r.RNode) - nodes[i] = ptr - reverseLookup[ptr] = r - } - // The filter can modify nodes, but also delete and create them. - // The filtered list might be smaller or larger than the nodes list. - filtered, err := f.Filter(nodes) - if err != nil { - return err - } - // Rebuild the resmap from the filtered RNodes. - var nRList []*resource.Resource - for _, rn := range filtered { - if rn.IsNilOrEmpty() { - // A node might make it through the filter as an object, - // but still be empty. Drop such entries. - continue - } - res, ok := reverseLookup[rn] - if !ok { - // A node was created; make a Resource to wrap it. - res = &resource.Resource{ - RNode: *rn, - // Leave remaining fields empty. - // At at time of writing, seeking to eliminate those fields. - // Alternatively, could just return error on creation attempt - // until remaining fields eliminated. - } - } - nRList = append(nRList, res) - } - m.rList = nRList - return nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/resource/doc.go b/src/vendor/sigs.k8s.io/kustomize/api/resource/doc.go deleted file mode 100644 index 32d34b162..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/resource/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -// Package resource implements representations of k8s API resources. -package resource diff --git a/src/vendor/sigs.k8s.io/kustomize/api/resource/factory.go b/src/vendor/sigs.k8s.io/kustomize/api/resource/factory.go deleted file mode 100644 index cbda87237..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/resource/factory.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resource - -import ( - "encoding/json" - "fmt" - "log" - "strings" - - "sigs.k8s.io/kustomize/api/ifc" - "sigs.k8s.io/kustomize/api/internal/generators" - "sigs.k8s.io/kustomize/api/internal/kusterr" - "sigs.k8s.io/kustomize/api/konfig" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/resid" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// Factory makes instances of Resource. -type Factory struct { - hasher ifc.KustHasher - - // When set to true, IncludeLocalConfigs indicates - // that Factory should include resources with the - // annotation 'config.kubernetes.io/local-config'. - // By default these resources are ignored. - IncludeLocalConfigs bool -} - -// NewFactory makes an instance of Factory. -func NewFactory(h ifc.KustHasher) *Factory { - return &Factory{hasher: h} -} - -// Hasher returns an ifc.KustHasher -func (rf *Factory) Hasher() ifc.KustHasher { - return rf.hasher -} - -// FromMap returns a new instance of Resource. -func (rf *Factory) FromMap(m map[string]interface{}) *Resource { - return rf.FromMapAndOption(m, nil) -} - -// FromMapWithName returns a new instance with the given "original" name. -func (rf *Factory) FromMapWithName(n string, m map[string]interface{}) *Resource { - return rf.FromMapWithNamespaceAndName(resid.DefaultNamespace, n, m) -} - -// FromMapWithNamespaceAndName returns a new instance with the given "original" namespace. -func (rf *Factory) FromMapWithNamespaceAndName(ns string, n string, m map[string]interface{}) *Resource { - r := rf.FromMapAndOption(m, nil) - return r.setPreviousId(ns, n, r.GetKind()) -} - -// FromMapAndOption returns a new instance of Resource with given options. -func (rf *Factory) FromMapAndOption( - m map[string]interface{}, args *types.GeneratorArgs) *Resource { - n, err := yaml.FromMap(m) - if err != nil { - // TODO: return err instead of log. - log.Fatal(err) - } - return rf.makeOne(n, args) -} - -// makeOne returns a new instance of Resource. -func (rf *Factory) makeOne(rn *yaml.RNode, o *types.GeneratorArgs) *Resource { - if rn == nil { - log.Fatal("RNode must not be null") - } - resource := &Resource{RNode: *rn} - if o != nil { - if o.Options == nil || !o.Options.DisableNameSuffixHash { - resource.EnableHashSuffix() - } - resource.SetBehavior(types.NewGenerationBehavior(o.Behavior)) - } - - return resource -} - -// SliceFromPatches returns a slice of resources given a patch path -// slice from a kustomization file. -func (rf *Factory) SliceFromPatches( - ldr ifc.Loader, paths []types.PatchStrategicMerge) ([]*Resource, error) { - var result []*Resource - for _, path := range paths { - content, err := ldr.Load(string(path)) - if err != nil { - return nil, err - } - res, err := rf.SliceFromBytes(content) - if err != nil { - return nil, kusterr.Handler(err, string(path)) - } - result = append(result, res...) - } - return result, nil -} - -// FromBytes unmarshalls bytes into one Resource. 
-func (rf *Factory) FromBytes(in []byte) (*Resource, error) { - result, err := rf.SliceFromBytes(in) - if err != nil { - return nil, err - } - if len(result) != 1 { - return nil, fmt.Errorf( - "expected 1 resource, found %d in %v", len(result), in) - } - return result[0], nil -} - -// SliceFromBytes unmarshals bytes into a Resource slice. -func (rf *Factory) SliceFromBytes(in []byte) ([]*Resource, error) { - nodes, err := rf.RNodesFromBytes(in) - if err != nil { - return nil, err - } - return rf.resourcesFromRNodes(nodes), nil -} - -// DropLocalNodes removes the local nodes by default. Local nodes are detected via the annotation `config.kubernetes.io/local-config: "true"` -func (rf *Factory) DropLocalNodes(nodes []*yaml.RNode) ([]*Resource, error) { - var result []*yaml.RNode - for _, node := range nodes { - if node.IsNilOrEmpty() { - continue - } - md, err := node.GetValidatedMetadata() - if err != nil { - return nil, err - } - - if rf.IncludeLocalConfigs { - result = append(result, node) - continue - } - localConfig, exist := md.ObjectMeta.Annotations[konfig.IgnoredByKustomizeAnnotation] - if !exist || localConfig == "false" { - result = append(result, node) - } - } - return rf.resourcesFromRNodes(result), nil -} - -// ResourcesFromRNodes converts RNodes to Resources. -func (rf *Factory) ResourcesFromRNodes( - nodes []*yaml.RNode) (result []*Resource, err error) { - return rf.DropLocalNodes(nodes) -} - -// resourcesFromRNode assumes all nodes are good. -func (rf *Factory) resourcesFromRNodes( - nodes []*yaml.RNode) (result []*Resource) { - for _, n := range nodes { - result = append(result, rf.makeOne(n, nil)) - } - return -} - -func (rf *Factory) RNodesFromBytes(b []byte) ([]*yaml.RNode, error) { - nodes, err := kio.FromBytes(b) - if err != nil { - return nil, err - } - nodes, err = rf.dropBadNodes(nodes) - if err != nil { - return nil, err - } - return rf.inlineAnyEmbeddedLists(nodes) -} - -// inlineAnyEmbeddedLists scans the RNode slice for nodes named FooList. -// Such nodes are expected to be lists of resources, each of type Foo. -// These lists are replaced in the result by their inlined resources. -func (rf *Factory) inlineAnyEmbeddedLists( - nodes []*yaml.RNode) (result []*yaml.RNode, err error) { - var n0 *yaml.RNode - for len(nodes) > 0 { - n0, nodes = nodes[0], nodes[1:] - kind := n0.GetKind() - if !strings.HasSuffix(kind, "List") { - result = append(result, n0) - continue - } - // Convert a FooList into a slice of Foo. - var m map[string]interface{} - m, err = n0.Map() - if err != nil { - return nil, fmt.Errorf("trouble expanding list of %s; %w", kind, err) - } - items, ok := m["items"] - if !ok { - // treat as an empty list - continue - } - slice, ok := items.([]interface{}) - if !ok { - if items == nil { - // an empty list - continue - } - return nil, fmt.Errorf( - "expected array in %s/items, but found %T", kind, items) - } - innerNodes, err := rf.convertObjectSliceToNodeSlice(slice) - if err != nil { - return nil, err - } - nodes = append(nodes, innerNodes...) - } - return result, nil -} - -// convertObjectSlice converts a list of objects to a list of RNode. 
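SliceFromBytes, shown above, does not keep a FooList object as a resource; inlineAnyEmbeddedLists expands its items into individual resources. A small sketch with an invented ConfigMapList:

package main

import (
    "fmt"

    "sigs.k8s.io/kustomize/api/hasher"
    "sigs.k8s.io/kustomize/api/resource"
)

func main() {
    rf := resource.NewFactory(&hasher.Hasher{})

    // The list itself disappears; its two items come back as resources.
    resources, err := rf.SliceFromBytes([]byte(`
apiVersion: v1
kind: ConfigMapList
metadata:
  name: cms
items:
- apiVersion: v1
  kind: ConfigMap
  metadata:
    name: a
- apiVersion: v1
  kind: ConfigMap
  metadata:
    name: b
`))
    if err != nil {
        panic(err)
    }
    for _, r := range resources {
        fmt.Println(r.CurId()) // the ConfigMap named "a", then the one named "b"
    }
}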
-func (rf *Factory) convertObjectSliceToNodeSlice( - objects []interface{}) (result []*yaml.RNode, err error) { - var bytes []byte - var nodes []*yaml.RNode - for _, obj := range objects { - bytes, err = json.Marshal(obj) - if err != nil { - return - } - nodes, err = kio.FromBytes(bytes) - if err != nil { - return - } - nodes, err = rf.dropBadNodes(nodes) - if err != nil { - return - } - result = append(result, nodes...) - } - return -} - -// dropBadNodes may drop some nodes from its input argument. -func (rf *Factory) dropBadNodes(nodes []*yaml.RNode) ([]*yaml.RNode, error) { - var result []*yaml.RNode - for _, n := range nodes { - if n.IsNilOrEmpty() { - continue - } - if _, err := n.GetValidatedMetadata(); err != nil { - return nil, err - } - if foundNil, path := n.HasNilEntryInList(); foundNil { - return nil, fmt.Errorf("empty item at %v in object %v", path, n) - } - result = append(result, n) - } - return result, nil -} - -// SliceFromBytesWithNames unmarshals bytes into a Resource slice with specified original -// name. -func (rf *Factory) SliceFromBytesWithNames(names []string, in []byte) ([]*Resource, error) { - result, err := rf.SliceFromBytes(in) - if err != nil { - return nil, err - } - if len(names) != len(result) { - return nil, fmt.Errorf("number of names doesn't match number of resources") - } - for i, res := range result { - res.setPreviousId(resid.DefaultNamespace, names[i], res.GetKind()) - } - return result, nil -} - -// MakeConfigMap makes an instance of Resource for ConfigMap -func (rf *Factory) MakeConfigMap(kvLdr ifc.KvLoader, args *types.ConfigMapArgs) (*Resource, error) { - rn, err := generators.MakeConfigMap(kvLdr, args) - if err != nil { - return nil, err - } - return rf.makeOne(rn, &args.GeneratorArgs), nil -} - -// MakeSecret makes an instance of Resource for Secret -func (rf *Factory) MakeSecret(kvLdr ifc.KvLoader, args *types.SecretArgs) (*Resource, error) { - rn, err := generators.MakeSecret(kvLdr, args) - if err != nil { - return nil, err - } - return rf.makeOne(rn, &args.GeneratorArgs), nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/resource/idset.go b/src/vendor/sigs.k8s.io/kustomize/api/resource/idset.go deleted file mode 100644 index 5d6bd63ed..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/resource/idset.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resource - -import "sigs.k8s.io/kustomize/kyaml/resid" - -type IdSet struct { - ids map[resid.ResId]bool -} - -func MakeIdSet(slice []*Resource) *IdSet { - set := make(map[resid.ResId]bool) - for _, r := range slice { - id := r.CurId() - if _, ok := set[id]; !ok { - set[id] = true - } - } - return &IdSet{ids: set} -} - -func (s IdSet) Contains(id resid.ResId) bool { - _, ok := s.ids[id] - return ok -} - -func (s IdSet) Size() int { - return len(s.ids) -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/resource/origin.go b/src/vendor/sigs.k8s.io/kustomize/api/resource/origin.go deleted file mode 100644 index 3f4cfe340..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/resource/origin.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package resource - -import ( - "path/filepath" - "strings" - - "sigs.k8s.io/kustomize/api/internal/git" - kyaml "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// Origin retains information about the origin of resources and transformer configs -// that contributed to the output of `kustomize build` -type Origin struct { - // Path is the path to the resource. If a local resource, this path is - // rooted from the directory upon which `kustomize build` was invoked. If a - // remote resource, this path is rooted from the root of the remote repo. - Path string `json:"path,omitempty" yaml:"path,omitempty"` - - // Repo is the remote repository that the resource or transformer originated from if it is - // not from a local file - Repo string `json:"repo,omitempty" yaml:"repo,omitempty"` - - // Ref is the ref of the remote repository that the resource or transformer originated from - // if it is not from a local file - Ref string `json:"ref,omitempty" yaml:"ref,omitempty"` - - // The following fields only apply to resources that have been - // generated by fields other than the `resources` field, or to transformer - // configs. - - // ConfiguredIn is the file path to the generator or transformer config that created the - // resource - ConfiguredIn string `json:"configuredIn,omitempty" yaml:"configuredIn,omitempty"` - - // ConfiguredBy is the ObjectReference of the generator or transformer config - ConfiguredBy kyaml.ResourceIdentifier `json:"configuredBy,omitempty" yaml:"configuredBy,omitempty"` -} - -// Copy returns a copy of origin -func (origin *Origin) Copy() Origin { - if origin == nil { - return Origin{} - } - return *origin -} - -// Append returns a copy of origin with a path appended to it -func (origin *Origin) Append(path string) *Origin { - originCopy := origin.Copy() - repoSpec, err := git.NewRepoSpecFromURL(path) - if err == nil { - originCopy.Repo = repoSpec.Host + repoSpec.OrgRepo - absPath := repoSpec.AbsPath() - path = absPath[strings.Index(absPath[1:], "/")+1:][1:] - originCopy.Path = "" - originCopy.Ref = repoSpec.Ref - } - originCopy.Path = filepath.Join(originCopy.Path, path) - return &originCopy -} - -// String returns a string version of origin -func (origin *Origin) String() (string, error) { - anno, err := kyaml.Marshal(origin) - return string(anno), err -} - -// Transformations is a list of Origin -type Transformations []*Origin - -// String returns a string version of Transformations -func (transformations *Transformations) String() (string, error) { - anno, err := kyaml.Marshal(transformations) - return string(anno), err -} - -// OriginFromCustomPlugin takes a custom plugin defined as a resource -// and returns an origin object to describe it -func OriginFromCustomPlugin(res *Resource) (*Origin, error) { - origin, err := res.GetOrigin() - if err != nil { - return nil, err - } - var result *Origin - if origin != nil { - result = &Origin{ - Repo: origin.Repo, - Ref: origin.Ref, - ConfiguredIn: origin.Path, - ConfiguredBy: kyaml.ResourceIdentifier{ - TypeMeta: kyaml.TypeMeta{ - APIVersion: res.GetApiVersion(), - Kind: res.GetKind(), - }, - NameMeta: kyaml.NameMeta{ - Name: res.GetName(), - Namespace: res.GetNamespace(), - }, - }, - } - } - return result, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/resource/resource.go b/src/vendor/sigs.k8s.io/kustomize/api/resource/resource.go deleted file mode 100644 index a88ce31d2..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/resource/resource.go +++ /dev/null @@ -1,527 +0,0 @@ 
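The Origin type deleted above backs the origin annotation; Append grows the path for local resources, while for a remote URL it records Repo and Ref parsed from the URL instead. A minimal sketch with invented paths (the remote case is omitted because it depends on git URL parsing):

package main

import (
    "fmt"

    "sigs.k8s.io/kustomize/api/resource"
)

func main() {
    // Origin of a resource declared in a local kustomization root.
    o := &resource.Origin{Path: "base"}

    // A plain relative path is not a git URL, so Append simply joins it.
    o = o.Append("deployment.yaml")
    fmt.Println(o.Path) // base/deployment.yaml

    // String is the YAML form stored in the origin annotation.
    s, err := o.String()
    fmt.Println(s, err) // path: base/deployment.yaml
}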
-// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resource - -import ( - "fmt" - "log" - "strings" - - "sigs.k8s.io/kustomize/api/filters/patchstrategicmerge" - "sigs.k8s.io/kustomize/api/ifc" - "sigs.k8s.io/kustomize/api/internal/utils" - "sigs.k8s.io/kustomize/api/types" - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/kio/kioutil" - "sigs.k8s.io/kustomize/kyaml/resid" - kyaml "sigs.k8s.io/kustomize/kyaml/yaml" - "sigs.k8s.io/yaml" -) - -// Resource is an RNode, representing a Kubernetes Resource Model object, -// paired with metadata used by kustomize. -type Resource struct { - kyaml.RNode - refVarNames []string -} - -var BuildAnnotations = []string{ - utils.BuildAnnotationPreviousKinds, - utils.BuildAnnotationPreviousNames, - utils.BuildAnnotationPrefixes, - utils.BuildAnnotationSuffixes, - utils.BuildAnnotationPreviousNamespaces, - utils.BuildAnnotationAllowNameChange, - utils.BuildAnnotationAllowKindChange, - utils.BuildAnnotationsRefBy, - utils.BuildAnnotationsGenBehavior, - utils.BuildAnnotationsGenAddHashSuffix, - - kioutil.PathAnnotation, - kioutil.IndexAnnotation, - kioutil.SeqIndentAnnotation, - kioutil.IdAnnotation, - kioutil.InternalAnnotationsMigrationResourceIDAnnotation, - - kioutil.LegacyPathAnnotation, - kioutil.LegacyIndexAnnotation, - kioutil.LegacyIdAnnotation, -} - -func (r *Resource) ResetRNode(incoming *Resource) { - r.RNode = *incoming.Copy() -} - -func (r *Resource) GetGvk() resid.Gvk { - return resid.GvkFromNode(&r.RNode) -} - -func (r *Resource) Hash(h ifc.KustHasher) (string, error) { - return h.Hash(&r.RNode) -} - -func (r *Resource) SetGvk(gvk resid.Gvk) { - r.SetKind(gvk.Kind) - r.SetApiVersion(gvk.ApiVersion()) -} - -func (r *Resource) GetOrigin() (*Origin, error) { - annotations := r.GetAnnotations() - originAnnotations, ok := annotations[utils.OriginAnnotationKey] - if !ok { - return nil, nil - } - var origin Origin - if err := yaml.Unmarshal([]byte(originAnnotations), &origin); err != nil { - return nil, err - } - return &origin, nil -} - -func (r *Resource) SetOrigin(origin *Origin) error { - annotations := r.GetAnnotations() - if origin == nil { - delete(annotations, utils.OriginAnnotationKey) - } else { - originStr, err := origin.String() - if err != nil { - return err - } - annotations[utils.OriginAnnotationKey] = originStr - } - return r.SetAnnotations(annotations) -} - -func (r *Resource) GetTransformations() (Transformations, error) { - annotations := r.GetAnnotations() - transformerAnnotations, ok := annotations[utils.TransformerAnnotationKey] - if !ok { - return nil, nil - } - var transformations Transformations - if err := yaml.Unmarshal([]byte(transformerAnnotations), &transformations); err != nil { - return nil, err - } - return transformations, nil -} - -func (r *Resource) AddTransformation(origin *Origin) error { - annotations := r.GetAnnotations() - transformations, err := r.GetTransformations() - if err != nil { - return err - } - if transformations == nil { - transformations = Transformations{} - } - transformations = append(transformations, origin) - transformationStr, err := transformations.String() - if err != nil { - return err - } - annotations[utils.TransformerAnnotationKey] = transformationStr - return r.SetAnnotations(annotations) -} - -func (r *Resource) ClearTransformations() error { - annotations := r.GetAnnotations() - delete(annotations, utils.TransformerAnnotationKey) - return r.SetAnnotations(annotations) -} - -// ResCtx is an interface describing the 
contextual added -// kept kustomize in the context of each Resource object. -// Currently mainly the name prefix and name suffix are added. -type ResCtx interface { - AddNamePrefix(p string) - AddNameSuffix(s string) - GetNamePrefixes() []string - GetNameSuffixes() []string -} - -// ResCtxMatcher returns true if two Resources are being -// modified in the same kustomize context. -type ResCtxMatcher func(ResCtx) bool - -// DeepCopy returns a new copy of resource -func (r *Resource) DeepCopy() *Resource { - rc := &Resource{ - RNode: *r.Copy(), - } - rc.copyKustomizeSpecificFields(r) - return rc -} - -// CopyMergeMetaDataFieldsFrom copies everything but the non-metadata in -// the resource. -// TODO: move to RNode, use GetMeta to improve performance. -// TODO: make a version of mergeStringMaps that is build-annotation aware -// to avoid repeatedly setting refby and genargs annotations -// Must remove the kustomize bit at the end. -func (r *Resource) CopyMergeMetaDataFieldsFrom(other *Resource) error { - if err := r.SetLabels( - mergeStringMaps(other.GetLabels(), r.GetLabels())); err != nil { - return fmt.Errorf("copyMerge cannot set labels - %w", err) - } - if err := r.SetAnnotations( - mergeStringMapsWithBuildAnnotations(other.GetAnnotations(), r.GetAnnotations())); err != nil { - return fmt.Errorf("copyMerge cannot set annotations - %w", err) - } - if err := r.SetName(other.GetName()); err != nil { - return fmt.Errorf("copyMerge cannot set name - %w", err) - } - if err := r.SetNamespace(other.GetNamespace()); err != nil { - return fmt.Errorf("copyMerge cannot set namespace - %w", err) - } - r.copyKustomizeSpecificFields(other) - return nil -} - -func (r *Resource) copyKustomizeSpecificFields(other *Resource) { - r.refVarNames = copyStringSlice(other.refVarNames) -} - -func (r *Resource) MergeDataMapFrom(o *Resource) { - r.SetDataMap(mergeStringMaps(o.GetDataMap(), r.GetDataMap())) -} - -func (r *Resource) MergeBinaryDataMapFrom(o *Resource) { - r.SetBinaryDataMap(mergeStringMaps(o.GetBinaryDataMap(), r.GetBinaryDataMap())) -} - -func (r *Resource) ErrIfNotEquals(o *Resource) error { - meYaml, err := r.AsYAML() - if err != nil { - return err - } - otherYaml, err := o.AsYAML() - if err != nil { - return err - } - if !r.ReferencesEqual(o) { - return fmt.Errorf( - `unequal references - self: -%sreferenced by: %s ---- other: -%sreferenced by: %s -`, meYaml, r.GetRefBy(), otherYaml, o.GetRefBy()) - } - if string(meYaml) != string(otherYaml) { - return fmt.Errorf(`--- self: -%s ---- other: -%s -`, meYaml, otherYaml) - } - return nil -} - -func (r *Resource) ReferencesEqual(other *Resource) bool { - setSelf := make(map[resid.ResId]bool) - setOther := make(map[resid.ResId]bool) - for _, ref := range other.GetRefBy() { - setOther[ref] = true - } - for _, ref := range r.GetRefBy() { - if _, ok := setOther[ref]; !ok { - return false - } - setSelf[ref] = true - } - return len(setSelf) == len(setOther) -} - -func copyStringSlice(s []string) []string { - if s == nil { - return nil - } - c := make([]string, len(s)) - copy(c, s) - return c -} - -// Implements ResCtx AddNamePrefix -func (r *Resource) AddNamePrefix(p string) { - r.appendCsvAnnotation(utils.BuildAnnotationPrefixes, p) -} - -// Implements ResCtx AddNameSuffix -func (r *Resource) AddNameSuffix(s string) { - r.appendCsvAnnotation(utils.BuildAnnotationSuffixes, s) -} - -func (r *Resource) appendCsvAnnotation(name, value string) { - if value == "" { - return - } - currentValue := r.getCsvAnnotation(name) - newValue := 
strings.Join(append(currentValue, value), ",") - if err := r.RNode.PipeE(kyaml.SetAnnotation(name, newValue)); err != nil { - panic(err) - } -} - -// Implements ResCtx GetNamePrefixes -func (r *Resource) GetNamePrefixes() []string { - return r.getCsvAnnotation(utils.BuildAnnotationPrefixes) -} - -// Implements ResCtx GetNameSuffixes -func (r *Resource) GetNameSuffixes() []string { - return r.getCsvAnnotation(utils.BuildAnnotationSuffixes) -} - -func (r *Resource) getCsvAnnotation(name string) []string { - annotations := r.GetAnnotations() - if _, ok := annotations[name]; !ok { - return nil - } - return strings.Split(annotations[name], ",") -} - -// PrefixesSuffixesEquals is conceptually doing the same task -// as OutermostPrefixSuffix but performs a deeper comparison -// of the suffix and prefix slices. -func (r *Resource) PrefixesSuffixesEquals(o ResCtx) bool { - return utils.SameEndingSubSlice(r.GetNamePrefixes(), o.GetNamePrefixes()) && - utils.SameEndingSubSlice(r.GetNameSuffixes(), o.GetNameSuffixes()) -} - -// RemoveBuildAnnotations removes annotations created by the build process. -// These are internal-only to kustomize, added to the data pipeline to -// track name changes so name references can be fixed. -func (r *Resource) RemoveBuildAnnotations() { - annotations := r.GetAnnotations() - if len(annotations) == 0 { - return - } - for _, a := range BuildAnnotations { - delete(annotations, a) - } - if err := r.SetAnnotations(annotations); err != nil { - panic(err) - } -} - -func (r *Resource) setPreviousId(ns string, n string, k string) *Resource { - r.appendCsvAnnotation(utils.BuildAnnotationPreviousNames, n) - r.appendCsvAnnotation(utils.BuildAnnotationPreviousNamespaces, ns) - r.appendCsvAnnotation(utils.BuildAnnotationPreviousKinds, k) - return r -} - -// AllowNameChange allows name changes to the resource. -func (r *Resource) AllowNameChange() { - r.enable(utils.BuildAnnotationAllowNameChange) -} - -// NameChangeAllowed checks if a patch resource is allowed to change another resource's name. -func (r *Resource) NameChangeAllowed() bool { - return r.isEnabled(utils.BuildAnnotationAllowNameChange) -} - -// AllowKindChange allows kind changes to the resource. -func (r *Resource) AllowKindChange() { - r.enable(utils.BuildAnnotationAllowKindChange) -} - -// KindChangeAllowed checks if a patch resource is allowed to change another resource's kind. -func (r *Resource) KindChangeAllowed() bool { - return r.isEnabled(utils.BuildAnnotationAllowKindChange) -} - -func (r *Resource) isEnabled(annoKey string) bool { - annotations := r.GetAnnotations() - v, ok := annotations[annoKey] - return ok && v == utils.Enabled -} - -func (r *Resource) enable(annoKey string) { - annotations := r.GetAnnotations() - annotations[annoKey] = utils.Enabled - if err := r.SetAnnotations(annotations); err != nil { - panic(err) - } -} - -// String returns resource as JSON. -func (r *Resource) String() string { - bs, err := r.MarshalJSON() - if err != nil { - return "<" + err.Error() + ">" - } - return strings.TrimSpace(string(bs)) -} - -// AsYAML returns the resource in Yaml form. -// Easier to read than JSON. -func (r *Resource) AsYAML() ([]byte, error) { - json, err := r.MarshalJSON() - if err != nil { - return nil, err - } - return yaml.JSONToYAML(json) -} - -// MustYaml returns YAML or panics. -func (r *Resource) MustYaml() string { - yml, err := r.AsYAML() - if err != nil { - log.Fatal(err) - } - return string(yml) -} - -// Behavior returns the behavior for the resource. 
-func (r *Resource) Behavior() types.GenerationBehavior { - annotations := r.GetAnnotations() - if v, ok := annotations[utils.BuildAnnotationsGenBehavior]; ok { - return types.NewGenerationBehavior(v) - } - return types.NewGenerationBehavior("") -} - -// SetBehavior sets the behavior for the resource. -func (r *Resource) SetBehavior(behavior types.GenerationBehavior) { - annotations := r.GetAnnotations() - annotations[utils.BuildAnnotationsGenBehavior] = behavior.String() - if err := r.SetAnnotations(annotations); err != nil { - panic(err) - } -} - -// NeedHashSuffix returns true if a resource content -// hash should be appended to the name of the resource. -func (r *Resource) NeedHashSuffix() bool { - return r.isEnabled(utils.BuildAnnotationsGenAddHashSuffix) -} - -// EnableHashSuffix marks the resource as needing a content -// hash to be appended to the name of the resource. -func (r *Resource) EnableHashSuffix() { - r.enable(utils.BuildAnnotationsGenAddHashSuffix) -} - -// OrgId returns the original, immutable ResId for the resource. -// This doesn't have to be unique in a ResMap. -func (r *Resource) OrgId() resid.ResId { - ids := r.PrevIds() - if len(ids) > 0 { - return ids[0] - } - return r.CurId() -} - -// PrevIds returns a list of ResIds that includes every -// previous ResId the resource has had through all of its -// GVKN transformations, in the order that it had that ID. -// I.e. the oldest ID is first. -// The returned array does not include the resource's current -// ID. If there are no previous IDs, this will return nil. -func (r *Resource) PrevIds() []resid.ResId { - prevIds, err := utils.PrevIds(&r.RNode) - if err != nil { - // this should never happen - panic(err) - } - return prevIds -} - -// StorePreviousId stores the resource's current ID via build annotations. -func (r *Resource) StorePreviousId() { - id := r.CurId() - r.setPreviousId(id.EffectiveNamespace(), id.Name, id.Kind) -} - -// CurId returns a ResId for the resource using the -// mutable parts of the resource. -// This should be unique in any ResMap. -func (r *Resource) CurId() resid.ResId { - return resid.NewResIdWithNamespace( - r.GetGvk(), r.GetName(), r.GetNamespace()) -} - -// GetRefBy returns the ResIds that referred to current resource -func (r *Resource) GetRefBy() []resid.ResId { - var resIds []resid.ResId - asStrings := r.getCsvAnnotation(utils.BuildAnnotationsRefBy) - for _, s := range asStrings { - resIds = append(resIds, resid.FromString(s)) - } - return resIds -} - -// AppendRefBy appends a ResId into the refBy list -// Using any type except fmt.Stringer here results in a compilation error -func (r *Resource) AppendRefBy(id fmt.Stringer) { - r.appendCsvAnnotation(utils.BuildAnnotationsRefBy, id.String()) -} - -// GetRefVarNames returns vars that refer to current resource -func (r *Resource) GetRefVarNames() []string { - return r.refVarNames -} - -// AppendRefVarName appends a name of a var into the refVar list -func (r *Resource) AppendRefVarName(variable types.Var) { - r.refVarNames = append(r.refVarNames, variable.Name) -} - -// ApplySmPatch applies the provided strategic merge patch. 
-func (r *Resource) ApplySmPatch(patch *Resource) error { - n, ns, k := r.GetName(), r.GetNamespace(), r.GetKind() - if patch.NameChangeAllowed() || patch.KindChangeAllowed() { - r.StorePreviousId() - } - if err := r.ApplyFilter(patchstrategicmerge.Filter{ - Patch: &patch.RNode, - }); err != nil { - return err - } - if r.IsNilOrEmpty() { - return nil - } - if !patch.KindChangeAllowed() { - r.SetKind(k) - } - if !patch.NameChangeAllowed() { - r.SetName(n) - } - r.SetNamespace(ns) - return nil -} - -func (r *Resource) ApplyFilter(f kio.Filter) error { - l, err := f.Filter([]*kyaml.RNode{&r.RNode}) - if len(l) == 0 { - // The node was deleted, which means the entire resource - // must be deleted. Signal that via the following: - r.SetYNode(nil) - } - return err -} - -func mergeStringMaps(maps ...map[string]string) map[string]string { - result := map[string]string{} - for _, m := range maps { - for key, value := range m { - result[key] = value - } - } - return result -} - -func mergeStringMapsWithBuildAnnotations(maps ...map[string]string) map[string]string { - result := mergeStringMaps(maps...) - for i := range BuildAnnotations { - if len(maps) > 0 { - if v, ok := maps[0][BuildAnnotations[i]]; ok { - result[BuildAnnotations[i]] = v - continue - } - } - delete(result, BuildAnnotations[i]) - } - return result -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/builtinpluginloadingoptions_string.go b/src/vendor/sigs.k8s.io/kustomize/api/types/builtinpluginloadingoptions_string.go deleted file mode 100644 index 033a45123..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/builtinpluginloadingoptions_string.go +++ /dev/null @@ -1,25 +0,0 @@ -// Code generated by "stringer -type=BuiltinPluginLoadingOptions"; DO NOT EDIT. - -package types - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[BploUndefined-0] - _ = x[BploUseStaticallyLinked-1] - _ = x[BploLoadFromFileSys-2] -} - -const _BuiltinPluginLoadingOptions_name = "BploUndefinedBploUseStaticallyLinkedBploLoadFromFileSys" - -var _BuiltinPluginLoadingOptions_index = [...]uint8{0, 13, 36, 55} - -func (i BuiltinPluginLoadingOptions) String() string { - if i < 0 || i >= BuiltinPluginLoadingOptions(len(_BuiltinPluginLoadingOptions_index)-1) { - return "BuiltinPluginLoadingOptions(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _BuiltinPluginLoadingOptions_name[_BuiltinPluginLoadingOptions_index[i]:_BuiltinPluginLoadingOptions_index[i+1]] -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/configmapargs.go b/src/vendor/sigs.k8s.io/kustomize/api/types/configmapargs.go deleted file mode 100644 index 69877769f..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/configmapargs.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -// ConfigMapArgs contains the metadata of how to generate a configmap. -type ConfigMapArgs struct { - // GeneratorArgs for the configmap. - GeneratorArgs `json:",inline,omitempty" yaml:",inline,omitempty"` -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/doc.go b/src/vendor/sigs.k8s.io/kustomize/api/types/doc.go deleted file mode 100644 index 22c38a651..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -// Package types holds the definition of the kustomization struct and -// supporting structs. It's the k8s API conformant object that describes -// a set of generation and transformation operations to create and/or -// modify k8s resources. -// A kustomization file is a serialization of this struct. -package types diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/erronlybuiltinpluginsallowed.go b/src/vendor/sigs.k8s.io/kustomize/api/types/erronlybuiltinpluginsallowed.go deleted file mode 100644 index 73ee95fd7..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/erronlybuiltinpluginsallowed.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -import ( - "fmt" - - "github.com/pkg/errors" -) - -type errOnlyBuiltinPluginsAllowed struct { - name string -} - -func (e *errOnlyBuiltinPluginsAllowed) Error() string { - return fmt.Sprintf( - "external plugins disabled; unable to load external plugin '%s'", - e.name) -} - -func NewErrOnlyBuiltinPluginsAllowed(n string) *errOnlyBuiltinPluginsAllowed { - return &errOnlyBuiltinPluginsAllowed{name: n} -} - -func IsErrOnlyBuiltinPluginsAllowed(err error) bool { - _, ok := err.(*errOnlyBuiltinPluginsAllowed) - if ok { - return true - } - _, ok = errors.Cause(err).(*errOnlyBuiltinPluginsAllowed) - return ok -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/errunabletofind.go b/src/vendor/sigs.k8s.io/kustomize/api/types/errunabletofind.go deleted file mode 100644 index f95b8edd5..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/errunabletofind.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -import ( - "fmt" - "strings" - - "github.com/pkg/errors" -) - -type errUnableToFind struct { - // What are we unable to find? - what string - // What things did we try? - attempts []Pair -} - -func (e *errUnableToFind) Error() string { - var m []string - for _, p := range e.attempts { - m = append(m, "('"+p.Value+"'; "+p.Key+")") - } - return fmt.Sprintf( - "unable to find %s - tried: %s", e.what, strings.Join(m, ", ")) -} - -func NewErrUnableToFind(w string, a []Pair) *errUnableToFind { - return &errUnableToFind{what: w, attempts: a} -} - -func IsErrUnableToFind(err error) bool { - _, ok := err.(*errUnableToFind) - if ok { - return true - } - _, ok = errors.Cause(err).(*errUnableToFind) - return ok -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/fieldspec.go b/src/vendor/sigs.k8s.io/kustomize/api/types/fieldspec.go deleted file mode 100644 index 8d3579544..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/fieldspec.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -import ( - "fmt" - - "sigs.k8s.io/kustomize/kyaml/resid" -) - -// FieldSpec completely specifies a kustomizable field in a k8s API object. -// It helps define the operands of transformations. -// -// For example, a directive to add a common label to objects -// will need to know that a 'Deployment' object (in API group -// 'apps', any version) can have labels at field path -// 'spec/template/metadata/labels', and further that it is OK -// (or not OK) to add that field path to the object if the -// field path doesn't exist already. 
-// -// This would look like -// { -// group: apps -// kind: Deployment -// path: spec/template/metadata/labels -// create: true -// } -type FieldSpec struct { - resid.Gvk `json:",inline,omitempty" yaml:",inline,omitempty"` - Path string `json:"path,omitempty" yaml:"path,omitempty"` - CreateIfNotPresent bool `json:"create,omitempty" yaml:"create,omitempty"` -} - -func (fs FieldSpec) String() string { - return fmt.Sprintf( - "%s:%v:%s", fs.Gvk.String(), fs.CreateIfNotPresent, fs.Path) -} - -// If true, the primary key is the same, but other fields might not be. -func (fs FieldSpec) effectivelyEquals(other FieldSpec) bool { - return fs.IsSelected(&other.Gvk) && fs.Path == other.Path -} - -type FsSlice []FieldSpec - -func (s FsSlice) Len() int { return len(s) } -func (s FsSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s FsSlice) Less(i, j int) bool { - return s[i].Gvk.IsLessThan(s[j].Gvk) -} - -// MergeAll merges the argument into this, returning the result. -// Items already present are ignored. -// Items that conflict (primary key matches, but remain data differs) -// result in an error. -func (s FsSlice) MergeAll(incoming FsSlice) (result FsSlice, err error) { - result = s - for _, x := range incoming { - result, err = result.MergeOne(x) - if err != nil { - return nil, err - } - } - return result, nil -} - -// MergeOne merges the argument into this, returning the result. -// If the item's primary key is already present, and there are no -// conflicts, it is ignored (we don't want duplicates). -// If there is a conflict, the merge fails. -func (s FsSlice) MergeOne(x FieldSpec) (FsSlice, error) { - i := s.index(x) - if i > -1 { - // It's already there. - if s[i].CreateIfNotPresent != x.CreateIfNotPresent { - return nil, fmt.Errorf("conflicting fieldspecs") - } - return s, nil - } - return append(s, x), nil -} - -func (s FsSlice) index(fs FieldSpec) int { - for i, x := range s { - if x.effectivelyEquals(fs) { - return i - } - } - return -1 -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/fix.go b/src/vendor/sigs.k8s.io/kustomize/api/types/fix.go deleted file mode 100644 index de70467ed..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/fix.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -import ( - "regexp" - - "sigs.k8s.io/yaml" -) - -// FixKustomizationPreUnmarshalling modifies the raw data -// before marshalling - e.g. changes old field names to -// new field names. 
-func FixKustomizationPreUnmarshalling(data []byte) ([]byte, error) { - deprecatedFieldsMap := map[string]string{ - "imageTags:": "images:", - } - for oldname, newname := range deprecatedFieldsMap { - pattern := regexp.MustCompile(oldname) - data = pattern.ReplaceAll(data, []byte(newname)) - } - doLegacy, err := useLegacyPatch(data) - if err != nil { - return nil, err - } - if doLegacy { - pattern := regexp.MustCompile("patches:") - data = pattern.ReplaceAll(data, []byte("patchesStrategicMerge:")) - } - return data, nil -} - -func useLegacyPatch(data []byte) (bool, error) { - found := false - var object map[string]interface{} - err := yaml.Unmarshal(data, &object) - if err != nil { - return false, err - } - if rawPatches, ok := object["patches"]; ok { - patches, ok := rawPatches.([]interface{}) - if !ok { - return false, err - } - for _, p := range patches { - _, ok := p.(string) - if ok { - found = true - } - } - } - return found, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/generationbehavior.go b/src/vendor/sigs.k8s.io/kustomize/api/types/generationbehavior.go deleted file mode 100644 index f8f362780..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/generationbehavior.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -// GenerationBehavior specifies generation behavior of configmaps, secrets and maybe other resources. -type GenerationBehavior int - -const ( - // BehaviorUnspecified is an Unspecified behavior; typically treated as a Create. - BehaviorUnspecified GenerationBehavior = iota - // BehaviorCreate makes a new resource. - BehaviorCreate - // BehaviorReplace replaces a resource. - BehaviorReplace - // BehaviorMerge attempts to merge a new resource with an existing resource. - BehaviorMerge -) - -// String converts a GenerationBehavior to a string. -func (b GenerationBehavior) String() string { - switch b { - case BehaviorReplace: - return "replace" - case BehaviorMerge: - return "merge" - case BehaviorCreate: - return "create" - default: - return "unspecified" - } -} - -// NewGenerationBehavior converts a string to a GenerationBehavior. -func NewGenerationBehavior(s string) GenerationBehavior { - switch s { - case "replace": - return BehaviorReplace - case "merge": - return BehaviorMerge - case "create": - return BehaviorCreate - default: - return BehaviorUnspecified - } -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/generatorargs.go b/src/vendor/sigs.k8s.io/kustomize/api/types/generatorargs.go deleted file mode 100644 index a4145db3d..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/generatorargs.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -// GeneratorArgs contains arguments common to ConfigMap and Secret generators. -type GeneratorArgs struct { - // Namespace for the configmap, optional - Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` - - // Name - actually the partial name - of the generated resource. - // The full name ends up being something like - // NamePrefix + this.Name + hash(content of generated resource). 
- Name string `json:"name,omitempty" yaml:"name,omitempty"` - - // Behavior of generated resource, must be one of: - // 'create': create a new one - // 'replace': replace the existing one - // 'merge': merge with the existing one - Behavior string `json:"behavior,omitempty" yaml:"behavior,omitempty"` - - // KvPairSources for the generator. - KvPairSources `json:",inline,omitempty" yaml:",inline,omitempty"` - - // Local overrides to global generatorOptions field. - Options *GeneratorOptions `json:"options,omitempty" yaml:"options,omitempty"` -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/generatoroptions.go b/src/vendor/sigs.k8s.io/kustomize/api/types/generatoroptions.go deleted file mode 100644 index 683d89bfd..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/generatoroptions.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -// GeneratorOptions modify behavior of all ConfigMap and Secret generators. -type GeneratorOptions struct { - // Labels to add to all generated resources. - Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` - - // Annotations to add to all generated resources. - Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"` - - // DisableNameSuffixHash if true disables the default behavior of adding a - // suffix to the names of generated resources that is a hash of the - // resource contents. - DisableNameSuffixHash bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty"` - - // Immutable if true add to all generated resources. - Immutable bool `json:"immutable,omitempty" yaml:"immutable,omitempty"` -} - -// MergeGlobalOptionsIntoLocal merges two instances of GeneratorOptions. -// Values in the first 'local' argument cannot be overridden by the second -// 'global' argument, except in the case of booleans. -// -// With booleans, there's no way to distinguish an 'intentional' -// false from 'default' false. So the rule is, if the global value -// of the value of a boolean is true, i.e. disable, it trumps the -// local value. If the global value is false, then the local value is -// respected. Bottom line: a local false cannot override a global true. -// -// boolean fields are always a bad idea; should always use enums instead. -func MergeGlobalOptionsIntoLocal( - localOpts *GeneratorOptions, - globalOpts *GeneratorOptions) *GeneratorOptions { - if globalOpts == nil { - return localOpts - } - if localOpts == nil { - localOpts = &GeneratorOptions{} - } - overrideMap(&localOpts.Labels, globalOpts.Labels) - overrideMap(&localOpts.Annotations, globalOpts.Annotations) - if globalOpts.DisableNameSuffixHash { - localOpts.DisableNameSuffixHash = true - } - if globalOpts.Immutable { - localOpts.Immutable = true - } - return localOpts -} - -func overrideMap(localMap *map[string]string, globalMap map[string]string) { - if *localMap == nil { - if globalMap != nil { - *localMap = CopyMap(globalMap) - } - return - } - for k, v := range globalMap { - _, ok := (*localMap)[k] - if !ok { - (*localMap)[k] = v - } - } -} - -// CopyMap copies a map. 
-func CopyMap(in map[string]string) map[string]string { - out := make(map[string]string) - for k, v := range in { - out[k] = v - } - return out -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/helmchartargs.go b/src/vendor/sigs.k8s.io/kustomize/api/types/helmchartargs.go deleted file mode 100644 index 05d540462..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/helmchartargs.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -type HelmGlobals struct { - // ChartHome is a file path, relative to the kustomization root, - // to a directory containing a subdirectory for each chart to be - // included in the kustomization. - // The default value of this field is "charts". - // So, for example, kustomize looks for the minecraft chart - // at {kustomizationRoot}/{ChartHome}/minecraft. - // If the chart is there at build time, kustomize will use it as found, - // and not check version numbers or dates. - // If the chart is not there, kustomize will attempt to pull it - // using the version number specified in the kustomization file, - // and put it there. To suppress the pull attempt, simply assure - // that the chart is already there. - ChartHome string `json:"chartHome,omitempty" yaml:"chartHome,omitempty"` - - // ConfigHome defines a value that kustomize should pass to helm via - // the HELM_CONFIG_HOME environment variable. kustomize doesn't attempt - // to read or write this directory. - // If omitted, {tmpDir}/helm is used, where {tmpDir} is some temporary - // directory created by kustomize for the benefit of helm. - // Likewise, kustomize sets - // HELM_CACHE_HOME={ConfigHome}/.cache - // HELM_DATA_HOME={ConfigHome}/.data - // for the helm subprocess. - ConfigHome string `json:"configHome,omitempty" yaml:"configHome,omitempty"` -} - -type HelmChart struct { - // Name is the name of the chart, e.g. 'minecraft'. - Name string `json:"name,omitempty" yaml:"name,omitempty"` - - // Version is the version of the chart, e.g. '3.1.3' - Version string `json:"version,omitempty" yaml:"version,omitempty"` - - // Repo is a URL locating the chart on the internet. - // This is the argument to helm's `--repo` flag, e.g. - // `https://itzg.github.io/minecraft-server-charts`. - Repo string `json:"repo,omitempty" yaml:"repo,omitempty"` - - // ReleaseName replaces RELEASE-NAME in chart template output, - // making a particular inflation of a chart unique with respect to - // other inflations of the same chart in a cluster. It's the first - // argument to the helm `install` and `template` commands, i.e. - // helm install {RELEASE-NAME} {chartName} - // helm template {RELEASE-NAME} {chartName} - // If omitted, the flag --generate-name is passed to 'helm template'. - ReleaseName string `json:"releaseName,omitempty" yaml:"releaseName,omitempty"` - - // Namespace set the target namespace for a release. It is .Release.Namespace - // in the helm template - Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` - - // ValuesFile is local file path to a values file to use _instead of_ - // the default values that accompanied the chart. - // The default values are in '{ChartHome}/{Name}/values.yaml'. - ValuesFile string `json:"valuesFile,omitempty" yaml:"valuesFile,omitempty"` - - // ValuesInline holds value mappings specified directly, - // rather than in a separate file. 
- ValuesInline map[string]interface{} `json:"valuesInline,omitempty" yaml:"valuesInline,omitempty"` - - // ValuesMerge specifies how to treat ValuesInline with respect to Values. - // Legal values: 'merge', 'override', 'replace'. - // Defaults to 'override'. - ValuesMerge string `json:"valuesMerge,omitempty" yaml:"valuesMerge,omitempty"` - - // IncludeCRDs specifies if Helm should also generate CustomResourceDefinitions. - // Defaults to 'false'. - IncludeCRDs bool `json:"includeCRDs,omitempty" yaml:"includeCRDs,omitempty"` // nolint: tagliatelle -} - -// HelmChartArgs contains arguments to helm. -// Deprecated. Use HelmGlobals and HelmChart instead. -type HelmChartArgs struct { - ChartName string `json:"chartName,omitempty" yaml:"chartName,omitempty"` - ChartVersion string `json:"chartVersion,omitempty" yaml:"chartVersion,omitempty"` - ChartRepoURL string `json:"chartRepoUrl,omitempty" yaml:"chartRepoUrl,omitempty"` - ChartHome string `json:"chartHome,omitempty" yaml:"chartHome,omitempty"` - ChartRepoName string `json:"chartRepoName,omitempty" yaml:"chartRepoName,omitempty"` - HelmBin string `json:"helmBin,omitempty" yaml:"helmBin,omitempty"` - HelmHome string `json:"helmHome,omitempty" yaml:"helmHome,omitempty"` - Values string `json:"values,omitempty" yaml:"values,omitempty"` - ValuesLocal map[string]interface{} `json:"valuesLocal,omitempty" yaml:"valuesLocal,omitempty"` - ValuesMerge string `json:"valuesMerge,omitempty" yaml:"valuesMerge,omitempty"` - ReleaseName string `json:"releaseName,omitempty" yaml:"releaseName,omitempty"` - ReleaseNamespace string `json:"releaseNamespace,omitempty" yaml:"releaseNamespace,omitempty"` - ExtraArgs []string `json:"extraArgs,omitempty" yaml:"extraArgs,omitempty"` -} - -// SplitHelmParameters splits helm parameters into -// per-chart params and global chart-independent parameters. -func SplitHelmParameters( - oldArgs []HelmChartArgs) (charts []HelmChart, globals HelmGlobals) { - for i, old := range oldArgs { - charts = append(charts, makeHelmChartFromHca(&oldArgs[i])) - if old.HelmHome != "" { - // last non-empty wins - globals.ConfigHome = old.HelmHome - } - if old.ChartHome != "" { - // last non-empty wins - globals.ChartHome = old.ChartHome - } - } - return charts, globals -} - -func makeHelmChartFromHca(old *HelmChartArgs) (c HelmChart) { - c.Name = old.ChartName - c.Version = old.ChartVersion - c.Repo = old.ChartRepoURL - c.ValuesFile = old.Values - c.ValuesInline = old.ValuesLocal - c.ValuesMerge = old.ValuesMerge - c.ReleaseName = old.ReleaseName - return -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/iampolicygenerator.go b/src/vendor/sigs.k8s.io/kustomize/api/types/iampolicygenerator.go deleted file mode 100644 index f1d27ba7b..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/iampolicygenerator.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -type Cloud string - -const GKE Cloud = "gke" - -// IAMPolicyGeneratorArgs contains arguments to generate a GKE service account resource. -type IAMPolicyGeneratorArgs struct { - // which cloud provider to generate for (e.g. 
"gke") - Cloud `json:"cloud" yaml:"cloud"` - - // information about the kubernetes cluster for this object - KubernetesService `json:"kubernetesService" yaml:"kubernetesService"` - - // information about the service account and project - ServiceAccount `json:"serviceAccount" yaml:"serviceAccount"` -} - -type KubernetesService struct { - // the name used for the Kubernetes service account - Name string `json:"name" yaml:"name"` - - // the name of the Kubernetes namespace for this object - Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` -} - -type ServiceAccount struct { - // the name of the new cloud provider service account - Name string `json:"name" yaml:"name"` - - // The ID of the project - ProjectId string `json:"projectId" yaml:"projectId"` -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/image.go b/src/vendor/sigs.k8s.io/kustomize/api/types/image.go deleted file mode 100644 index e40ed324d..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/image.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -// Image contains an image name, a new name, a new tag or digest, -// which will replace the original name and tag. -type Image struct { - // Name is a tag-less image name. - Name string `json:"name,omitempty" yaml:"name,omitempty"` - - // NewName is the value used to replace the original name. - NewName string `json:"newName,omitempty" yaml:"newName,omitempty"` - - // TagSuffix is the value used to suffix the original tag - // If Digest and NewTag is present an error is thrown - TagSuffix string `json:"tagSuffix,omitempty" yaml:"tagSuffix,omitempty"` - - // NewTag is the value used to replace the original tag. - NewTag string `json:"newTag,omitempty" yaml:"newTag,omitempty"` - - // Digest is the value used to replace the original image tag. - // If digest is present NewTag value is ignored. - Digest string `json:"digest,omitempty" yaml:"digest,omitempty"` -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/inventory.go b/src/vendor/sigs.k8s.io/kustomize/api/types/inventory.go deleted file mode 100644 index 544deb5e5..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/inventory.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -// Inventory records all objects touched in a build operation. -type Inventory struct { - Type string `json:"type,omitempty" yaml:"type,omitempty"` - ConfigMap NameArgs `json:"configMap,omitempty" yaml:"configMap,omitempty"` -} - -// NameArgs holds both namespace and name. -type NameArgs struct { - Name string `json:"name,omitempty" yaml:"name,omitempty"` - Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/kustomization.go b/src/vendor/sigs.k8s.io/kustomize/api/types/kustomization.go deleted file mode 100644 index c194dcc3c..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/kustomization.go +++ /dev/null @@ -1,274 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package types - -import ( - "bytes" - "encoding/json" - "fmt" - - "sigs.k8s.io/yaml" -) - -const ( - KustomizationVersion = "kustomize.config.k8s.io/v1beta1" - KustomizationKind = "Kustomization" - ComponentVersion = "kustomize.config.k8s.io/v1alpha1" - ComponentKind = "Component" - MetadataNamespacePath = "metadata/namespace" - - OriginAnnotations = "originAnnotations" - TransformerAnnotations = "transformerAnnotations" - ManagedByLabelOption = "managedByLabel" -) - -var BuildMetadataOptions = []string{OriginAnnotations, TransformerAnnotations, ManagedByLabelOption} - -// Kustomization holds the information needed to generate customized k8s api resources. -type Kustomization struct { - TypeMeta `json:",inline" yaml:",inline"` - - // MetaData is a pointer to avoid marshalling empty struct - MetaData *ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"` - - // OpenAPI contains information about what kubernetes schema to use. - OpenAPI map[string]string `json:"openapi,omitempty" yaml:"openapi,omitempty"` - - // - // Operators - what kustomize can do. - // - - // NamePrefix will prefix the names of all resources mentioned in the kustomization - // file including generated configmaps and secrets. - NamePrefix string `json:"namePrefix,omitempty" yaml:"namePrefix,omitempty"` - - // NameSuffix will suffix the names of all resources mentioned in the kustomization - // file including generated configmaps and secrets. - NameSuffix string `json:"nameSuffix,omitempty" yaml:"nameSuffix,omitempty"` - - // Namespace to add to all objects. - Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` - - // CommonLabels to add to all objects and selectors. - CommonLabels map[string]string `json:"commonLabels,omitempty" yaml:"commonLabels,omitempty"` - - // Labels to add to all objects but not selectors. - Labels []Label `json:"labels,omitempty" yaml:"labels,omitempty"` - - // CommonAnnotations to add to all objects. - CommonAnnotations map[string]string `json:"commonAnnotations,omitempty" yaml:"commonAnnotations,omitempty"` - - // PatchesStrategicMerge specifies the relative path to a file - // containing a strategic merge patch. Format documented at - // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md - // URLs and globs are not supported. - PatchesStrategicMerge []PatchStrategicMerge `json:"patchesStrategicMerge,omitempty" yaml:"patchesStrategicMerge,omitempty"` - - // JSONPatches is a list of JSONPatch for applying JSON patch. - // Format documented at https://tools.ietf.org/html/rfc6902 - // and http://jsonpatch.com - PatchesJson6902 []Patch `json:"patchesJson6902,omitempty" yaml:"patchesJson6902,omitempty"` - - // Patches is a list of patches, where each one can be either a - // Strategic Merge Patch or a JSON patch. - // Each patch can be applied to multiple target objects. - Patches []Patch `json:"patches,omitempty" yaml:"patches,omitempty"` - - // Images is a list of (image name, new name, new tag or digest) - // for changing image names, tags or digests. This can also be achieved with a - // patch, but this operator is simpler to specify. - Images []Image `json:"images,omitempty" yaml:"images,omitempty"` - - // Replacements is a list of replacements, which will copy nodes from a - // specified source to N specified targets. 
- Replacements []ReplacementField `json:"replacements,omitempty" yaml:"replacements,omitempty"` - - // Replicas is a list of {resourcename, count} that allows for simpler replica - // specification. This can also be done with a patch. - Replicas []Replica `json:"replicas,omitempty" yaml:"replicas,omitempty"` - - // Vars allow things modified by kustomize to be injected into a - // kubernetes object specification. A var is a name (e.g. FOO) associated - // with a field in a specific resource instance. The field must - // contain a value of type string/bool/int/float, and defaults to the name field - // of the instance. Any appearance of "$(FOO)" in the object - // spec will be replaced at kustomize build time, after the final - // value of the specified field has been determined. - Vars []Var `json:"vars,omitempty" yaml:"vars,omitempty"` - - // - // Operands - what kustomize operates on. - // - - // Resources specifies relative paths to files holding YAML representations - // of kubernetes API objects, or specifications of other kustomizations - // via relative paths, absolute paths, or URLs. - Resources []string `json:"resources,omitempty" yaml:"resources,omitempty"` - - // Components specifies relative paths to specifications of other Components - // via relative paths, absolute paths, or URLs. - Components []string `json:"components,omitempty" yaml:"components,omitempty"` - - // Crds specifies relative paths to Custom Resource Definition files. - // This allows custom resources to be recognized as operands, making - // it possible to add them to the Resources list. - // CRDs themselves are not modified. - Crds []string `json:"crds,omitempty" yaml:"crds,omitempty"` - - // Deprecated. - // Anything that would have been specified here should - // be specified in the Resources field instead. - Bases []string `json:"bases,omitempty" yaml:"bases,omitempty"` - - // - // Generators (operators that create operands) - // - - // ConfigMapGenerator is a list of configmaps to generate from - // local data (one configMap per list item). - // The resulting resource is a normal operand, subject to - // name prefixing, patching, etc. By default, the name of - // the map will have a suffix hash generated from its contents. - ConfigMapGenerator []ConfigMapArgs `json:"configMapGenerator,omitempty" yaml:"configMapGenerator,omitempty"` - - // SecretGenerator is a list of secrets to generate from - // local data (one secret per list item). - // The resulting resource is a normal operand, subject to - // name prefixing, patching, etc. By default, the name of - // the map will have a suffix hash generated from its contents. - SecretGenerator []SecretArgs `json:"secretGenerator,omitempty" yaml:"secretGenerator,omitempty"` - - // HelmGlobals contains helm configuration that isn't chart specific. - HelmGlobals *HelmGlobals `json:"helmGlobals,omitempty" yaml:"helmGlobals,omitempty"` - - // HelmCharts is a list of helm chart configuration instances. - HelmCharts []HelmChart `json:"helmCharts,omitempty" yaml:"helmCharts,omitempty"` - - // HelmChartInflationGenerator is a list of helm chart configurations. - // Deprecated. Auto-converted to HelmGlobals and HelmCharts. - HelmChartInflationGenerator []HelmChartArgs `json:"helmChartInflationGenerator,omitempty" yaml:"helmChartInflationGenerator,omitempty"` - - // GeneratorOptions modify behavior of all ConfigMap and Secret generators. 
- GeneratorOptions *GeneratorOptions `json:"generatorOptions,omitempty" yaml:"generatorOptions,omitempty"` - - // Configurations is a list of transformer configuration files - Configurations []string `json:"configurations,omitempty" yaml:"configurations,omitempty"` - - // Generators is a list of files containing custom generators - Generators []string `json:"generators,omitempty" yaml:"generators,omitempty"` - - // Transformers is a list of files containing transformers - Transformers []string `json:"transformers,omitempty" yaml:"transformers,omitempty"` - - // Validators is a list of files containing validators - Validators []string `json:"validators,omitempty" yaml:"validators,omitempty"` - - // Inventory appends an object that contains the record - // of all other objects, which can be used in apply, prune and delete - Inventory *Inventory `json:"inventory,omitempty" yaml:"inventory,omitempty"` - - // BuildMetadata is a list of strings used to toggle different build options - BuildMetadata []string `json:"buildMetadata,omitempty" yaml:"buildMetadata,omitempty"` -} - -// FixKustomizationPostUnmarshalling fixes things -// like empty fields that should not be empty, or -// moving content of deprecated fields to newer -// fields. -func (k *Kustomization) FixKustomizationPostUnmarshalling() { - if k.Kind == "" { - k.Kind = KustomizationKind - } - if k.APIVersion == "" { - if k.Kind == ComponentKind { - k.APIVersion = ComponentVersion - } else { - k.APIVersion = KustomizationVersion - } - } - k.Resources = append(k.Resources, k.Bases...) - k.Bases = nil - for i, g := range k.ConfigMapGenerator { - if g.EnvSource != "" { - k.ConfigMapGenerator[i].EnvSources = - append(g.EnvSources, g.EnvSource) //nolint:gocritic - k.ConfigMapGenerator[i].EnvSource = "" - } - } - for i, g := range k.SecretGenerator { - if g.EnvSource != "" { - k.SecretGenerator[i].EnvSources = - append(g.EnvSources, g.EnvSource) //nolint:gocritic - k.SecretGenerator[i].EnvSource = "" - } - } - charts, globals := SplitHelmParameters(k.HelmChartInflationGenerator) - if k.HelmGlobals == nil { - if globals.ChartHome != "" || globals.ConfigHome != "" { - k.HelmGlobals = &globals - } - } - k.HelmCharts = append(k.HelmCharts, charts...) - // Wipe it for the fix command. - k.HelmChartInflationGenerator = nil -} - -// FixKustomizationPreMarshalling fixes things -// that should occur after the kustomization file -// has been processed. -func (k *Kustomization) FixKustomizationPreMarshalling() error { - // PatchesJson6902 should be under the Patches field. - k.Patches = append(k.Patches, k.PatchesJson6902...) - k.PatchesJson6902 = nil - - // this fix is not in FixKustomizationPostUnmarshalling because - // it will break some commands like `create` and `add`. 
those - // commands depend on 'commonLabels' field - if cl := labelFromCommonLabels(k.CommonLabels); cl != nil { - // check conflicts between commonLabels and labels - for _, l := range k.Labels { - for k := range l.Pairs { - if _, exist := cl.Pairs[k]; exist { - return fmt.Errorf("label name '%s' exists in both commonLabels and labels", k) - } - } - } - k.Labels = append(k.Labels, *cl) - k.CommonLabels = nil - } - - return nil -} - -func (k *Kustomization) EnforceFields() []string { - var errs []string - if k.Kind != "" && k.Kind != KustomizationKind && k.Kind != ComponentKind { - errs = append(errs, "kind should be "+KustomizationKind+" or "+ComponentKind) - } - requiredVersion := KustomizationVersion - if k.Kind == ComponentKind { - requiredVersion = ComponentVersion - } - if k.APIVersion != "" && k.APIVersion != requiredVersion { - errs = append(errs, "apiVersion for "+k.Kind+" should be "+requiredVersion) - } - return errs -} - -// Unmarshal replace k with the content in YAML input y -func (k *Kustomization) Unmarshal(y []byte) error { - j, err := yaml.YAMLToJSON(y) - if err != nil { - return err - } - dec := json.NewDecoder(bytes.NewReader(j)) - dec.DisallowUnknownFields() - var nk Kustomization - err = dec.Decode(&nk) - if err != nil { - return err - } - *k = nk - return nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/kvpairsources.go b/src/vendor/sigs.k8s.io/kustomize/api/types/kvpairsources.go deleted file mode 100644 index 9898defad..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/kvpairsources.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -// KvPairSources defines places to obtain key value pairs. -type KvPairSources struct { - // LiteralSources is a list of literal - // pair sources. Each literal source should - // be a key and literal value, e.g. `key=value` - LiteralSources []string `json:"literals,omitempty" yaml:"literals,omitempty"` - - // FileSources is a list of file "sources" to - // use in creating a list of key, value pairs. - // A source takes the form: [{key}=]{path} - // If the "key=" part is missing, the key is the - // path's basename. If they "key=" part is present, - // it becomes the key (replacing the basename). - // In either case, the value is the file contents. - // Specifying a directory will iterate each named - // file in the directory whose basename is a - // valid configmap key. - FileSources []string `json:"files,omitempty" yaml:"files,omitempty"` - - // EnvSources is a list of file paths. - // The contents of each file should be one - // key=value pair per line, e.g. a Docker - // or npm ".env" file or a ".ini" file - // (wikipedia.org/wiki/INI_file) - EnvSources []string `json:"envs,omitempty" yaml:"envs,omitempty"` - - // Older, singular form of EnvSources. - // On edits (e.g. `kustomize fix`) this is merged into the plural form - // for consistency with LiteralSources and FileSources. - EnvSource string `json:"env,omitempty" yaml:"env,omitempty"` -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/labels.go b/src/vendor/sigs.k8s.io/kustomize/api/types/labels.go deleted file mode 100644 index 05ba890f9..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/labels.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package types - -type Label struct { - // Pairs contains the key-value pairs for labels to add - Pairs map[string]string `json:"pairs,omitempty" yaml:"pairs,omitempty"` - // IncludeSelectors inidicates should transformer include the - // fieldSpecs for selectors. Custom fieldSpecs specified by - // FieldSpecs will be merged with builtin fieldSpecs if this - // is true. - IncludeSelectors bool `json:"includeSelectors,omitempty" yaml:"includeSelectors,omitempty"` - // IncludeTemplates inidicates should transformer include the - // spec/template/metadata fieldSpec. Custom fieldSpecs specified by - // FieldSpecs will be merged with spec/template/metadata fieldSpec if this - // is true. If IncludeSelectors is true, IncludeTemplates is not needed. - IncludeTemplates bool `json:"includeTemplates,omitempty" yaml:"includeTemplates,omitempty"` - FieldSpecs []FieldSpec `json:"fields,omitempty" yaml:"fields,omitempty"` -} - -func labelFromCommonLabels(commonLabels map[string]string) *Label { - if len(commonLabels) == 0 { - return nil - } - return &Label{ - Pairs: commonLabels, - IncludeSelectors: true, - } -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/loadrestrictions.go b/src/vendor/sigs.k8s.io/kustomize/api/types/loadrestrictions.go deleted file mode 100644 index 6617abdac..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/loadrestrictions.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -// Restrictions on what things can be referred to -// in a kustomization file. -// -//go:generate stringer -type=LoadRestrictions -type LoadRestrictions int - -const ( - LoadRestrictionsUnknown LoadRestrictions = iota - - // Files referenced by a kustomization file must be in - // or under the directory holding the kustomization - // file itself. - LoadRestrictionsRootOnly - - // The kustomization file may specify absolute or - // relative paths to patch or resources files outside - // its own tree. - LoadRestrictionsNone -) diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/loadrestrictions_string.go b/src/vendor/sigs.k8s.io/kustomize/api/types/loadrestrictions_string.go deleted file mode 100644 index d2355950b..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/loadrestrictions_string.go +++ /dev/null @@ -1,25 +0,0 @@ -// Code generated by "stringer -type=LoadRestrictions"; DO NOT EDIT. - -package types - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[LoadRestrictionsUnknown-0] - _ = x[LoadRestrictionsRootOnly-1] - _ = x[LoadRestrictionsNone-2] -} - -const _LoadRestrictions_name = "LoadRestrictionsUnknownLoadRestrictionsRootOnlyLoadRestrictionsNone" - -var _LoadRestrictions_index = [...]uint8{0, 23, 47, 67} - -func (i LoadRestrictions) String() string { - if i < 0 || i >= LoadRestrictions(len(_LoadRestrictions_index)-1) { - return "LoadRestrictions(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _LoadRestrictions_name[_LoadRestrictions_index[i]:_LoadRestrictions_index[i+1]] -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/objectmeta.go b/src/vendor/sigs.k8s.io/kustomize/api/types/objectmeta.go deleted file mode 100644 index 4f5d41f4a..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/objectmeta.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -// ObjectMeta partially copies apimachinery/pkg/apis/meta/v1.ObjectMeta -// No need for a direct dependence; the fields are stable. -type ObjectMeta struct { - Name string `json:"name,omitempty" yaml:"name,omitempty"` - Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` - Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` - Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"` -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/pair.go b/src/vendor/sigs.k8s.io/kustomize/api/types/pair.go deleted file mode 100644 index 63cfb776e..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/pair.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -// Pair is a key value pair. -type Pair struct { - Key string - Value string -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/patch.go b/src/vendor/sigs.k8s.io/kustomize/api/types/patch.go deleted file mode 100644 index 5310a6e66..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/patch.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -import "reflect" - -// Patch represent either a Strategic Merge Patch or a JSON patch -// and its targets. -// The content of the patch can either be from a file -// or from an inline string. -type Patch struct { - // Path is a relative file path to the patch file. - Path string `json:"path,omitempty" yaml:"path,omitempty"` - - // Patch is the content of a patch. - Patch string `json:"patch,omitempty" yaml:"patch,omitempty"` - - // Target points to the resources that the patch is applied to - Target *Selector `json:"target,omitempty" yaml:"target,omitempty"` - - // Options is a list of options for the patch - Options map[string]bool `json:"options,omitempty" yaml:"options,omitempty"` -} - -// Equals return true if p equals o. 
-func (p *Patch) Equals(o Patch) bool { - targetEqual := (p.Target == o.Target) || - (p.Target != nil && o.Target != nil && *p.Target == *o.Target) - return p.Path == o.Path && - p.Patch == o.Patch && - targetEqual && - reflect.DeepEqual(p.Options, o.Options) -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/patchstrategicmerge.go b/src/vendor/sigs.k8s.io/kustomize/api/types/patchstrategicmerge.go deleted file mode 100644 index 81a5ba456..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/patchstrategicmerge.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -// PatchStrategicMerge represents a relative path to a -// stategic merge patch with the format -// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md -type PatchStrategicMerge string diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/pluginconfig.go b/src/vendor/sigs.k8s.io/kustomize/api/types/pluginconfig.go deleted file mode 100644 index 741e5debc..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/pluginconfig.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -type HelmConfig struct { - Enabled bool - Command string -} - -// PluginConfig holds plugin configuration. -type PluginConfig struct { - // PluginRestrictions distinguishes plugin restrictions. - PluginRestrictions PluginRestrictions - - // BpLoadingOptions distinguishes builtin plugin behaviors. - BpLoadingOptions BuiltinPluginLoadingOptions - - // FnpLoadingOptions sets the way function-based plugin behaviors. - FnpLoadingOptions FnPluginLoadingOptions - - // HelmConfig contains metadata needed for allowing and running helm. - HelmConfig HelmConfig -} - -func EnabledPluginConfig(b BuiltinPluginLoadingOptions) (pc *PluginConfig) { - pc = MakePluginConfig(PluginRestrictionsNone, b) - pc.FnpLoadingOptions.EnableStar = true - pc.HelmConfig.Enabled = true - // If this command is not on PATH, tests needing it should skip. - pc.HelmConfig.Command = "helmV3" - return -} - -func DisabledPluginConfig() *PluginConfig { - return MakePluginConfig( - PluginRestrictionsBuiltinsOnly, - BploUseStaticallyLinked) -} - -func MakePluginConfig(pr PluginRestrictions, - b BuiltinPluginLoadingOptions) *PluginConfig { - return &PluginConfig{ - PluginRestrictions: pr, - BpLoadingOptions: b, - } -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/pluginrestrictions.go b/src/vendor/sigs.k8s.io/kustomize/api/types/pluginrestrictions.go deleted file mode 100644 index 88b03b3f5..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/pluginrestrictions.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -// Some plugin classes -// - builtin: plugins defined in the kustomize repo. -// May be freely used and re-configured. -// - local: plugins that aren't builtin but are -// locally defined (presumably by the user), meaning -// the kustomization refers to them via a relative -// file path, not a URL. -// - remote: require a build-time download to obtain. -// Unadvised, unless one controls the -// serving site. -// -//go:generate stringer -type=PluginRestrictions -type PluginRestrictions int - -const ( - PluginRestrictionsUnknown PluginRestrictions = iota - - // Non-builtin plugins completely disabled. 
- PluginRestrictionsBuiltinsOnly - - // No restrictions, do whatever you want. - PluginRestrictionsNone -) - -// BuiltinPluginLoadingOptions distinguish ways in which builtin plugins are used. -//go:generate stringer -type=BuiltinPluginLoadingOptions -type BuiltinPluginLoadingOptions int - -const ( - BploUndefined BuiltinPluginLoadingOptions = iota - - // Desired in production use for performance. - BploUseStaticallyLinked - - // Desired in testing and development cycles where it's undesirable - // to generate static code. - BploLoadFromFileSys -) - -// FnPluginLoadingOptions set way functions-based plugins are restricted -type FnPluginLoadingOptions struct { - // Allow to run executables - EnableExec bool - // Allow to run starlark - EnableStar bool - // Allow container access to network - Network bool - NetworkName string - // list of mounts - Mounts []string - // list of env variables to pass to fn - Env []string - // Run as uid and gid of the command executor - AsCurrentUser bool - // Run in this working directory - WorkingDir string -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/pluginrestrictions_string.go b/src/vendor/sigs.k8s.io/kustomize/api/types/pluginrestrictions_string.go deleted file mode 100644 index b9dba7dfc..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/pluginrestrictions_string.go +++ /dev/null @@ -1,25 +0,0 @@ -// Code generated by "stringer -type=PluginRestrictions"; DO NOT EDIT. - -package types - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[PluginRestrictionsUnknown-0] - _ = x[PluginRestrictionsBuiltinsOnly-1] - _ = x[PluginRestrictionsNone-2] -} - -const _PluginRestrictions_name = "PluginRestrictionsUnknownPluginRestrictionsBuiltinsOnlyPluginRestrictionsNone" - -var _PluginRestrictions_index = [...]uint8{0, 25, 55, 77} - -func (i PluginRestrictions) String() string { - if i < 0 || i >= PluginRestrictions(len(_PluginRestrictions_index)-1) { - return "PluginRestrictions(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _PluginRestrictions_name[_PluginRestrictions_index[i]:_PluginRestrictions_index[i+1]] -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/replacement.go b/src/vendor/sigs.k8s.io/kustomize/api/types/replacement.go deleted file mode 100644 index cb4163429..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/replacement.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2021 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -import ( - "fmt" - "strings" - - "sigs.k8s.io/kustomize/kyaml/resid" -) - -const DefaultReplacementFieldPath = "metadata.name" - -// Replacement defines how to perform a substitution -// where it is from and where it is to. -type Replacement struct { - // The source of the value. - Source *SourceSelector `json:"source,omitempty" yaml:"source,omitempty"` - - // The N fields to write the value to. - Targets []*TargetSelector `json:"targets,omitempty" yaml:"targets,omitempty"` -} - -// SourceSelector is the source of the replacement transformer. -type SourceSelector struct { - // A specific object to read it from. - resid.ResId `json:",inline,omitempty" yaml:",inline,omitempty"` - - // Structured field path expected in the allowed object. - FieldPath string `json:"fieldPath,omitempty" yaml:"fieldPath,omitempty"` - - // Used to refine the interpretation of the field. 
- Options *FieldOptions `json:"options,omitempty" yaml:"options,omitempty"` -} - -func (s *SourceSelector) String() string { - if s == nil { - return "" - } - result := []string{s.ResId.String()} - if s.FieldPath != "" { - result = append(result, s.FieldPath) - } - if opts := s.Options.String(); opts != "" { - result = append(result, opts) - } - return strings.Join(result, ":") -} - -// TargetSelector specifies fields in one or more objects. -type TargetSelector struct { - // Include objects that match this. - Select *Selector `json:"select" yaml:"select"` - - // From the allowed set, remove objects that match this. - Reject []*Selector `json:"reject,omitempty" yaml:"reject,omitempty"` - - // Structured field paths expected in each allowed object. - FieldPaths []string `json:"fieldPaths,omitempty" yaml:"fieldPaths,omitempty"` - - // Used to refine the interpretation of the field. - Options *FieldOptions `json:"options,omitempty" yaml:"options,omitempty"` -} - -// FieldOptions refine the interpretation of FieldPaths. -type FieldOptions struct { - // Used to split/join the field. - Delimiter string `json:"delimiter,omitempty" yaml:"delimiter,omitempty"` - - // Which position in the split to consider. - Index int `json:"index,omitempty" yaml:"index,omitempty"` - - // TODO (#3492): Implement use of this option - // None, Base64, URL, Hex, etc - Encoding string `json:"encoding,omitempty" yaml:"encoding,omitempty"` - - // If field missing, add it. - Create bool `json:"create,omitempty" yaml:"create,omitempty"` -} - -func (fo *FieldOptions) String() string { - if fo == nil || (fo.Delimiter == "" && !fo.Create) { - return "" - } - return fmt.Sprintf("%s(%d), create=%t", fo.Delimiter, fo.Index, fo.Create) -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/replacementfield.go b/src/vendor/sigs.k8s.io/kustomize/api/types/replacementfield.go deleted file mode 100644 index 303e5c9e2..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/replacementfield.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2022 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -type ReplacementField struct { - Replacement `json:",inline,omitempty" yaml:",inline,omitempty"` - Path string `json:"path,omitempty" yaml:"path,omitempty"` -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/replica.go b/src/vendor/sigs.k8s.io/kustomize/api/types/replica.go deleted file mode 100644 index 8267366b5..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/replica.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -// Replica specifies a modification to a replica config. -// The number of replicas of a resource whose name matches will be set to count. -// This struct is used by the ReplicaCountTransform, and is meant to supplement -// the existing patch functionality with a simpler syntax for replica configuration. -type Replica struct { - // The name of the resource to change the replica count - Name string `json:"name,omitempty" yaml:"name,omitempty"` - - // The number of replicas required. - Count int64 `json:"count" yaml:"count"` -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/secretargs.go b/src/vendor/sigs.k8s.io/kustomize/api/types/secretargs.go deleted file mode 100644 index 62dbe26a7..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/secretargs.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package types - -// SecretArgs contains the metadata of how to generate a secret. -type SecretArgs struct { - // GeneratorArgs for the secret. - GeneratorArgs `json:",inline,omitempty" yaml:",inline,omitempty"` - - // Type of the secret. - // - // This is the same field as the secret type field in v1/Secret: - // It can be "Opaque" (default), or "kubernetes.io/tls". - // - // If type is "kubernetes.io/tls", then "literals" or "files" must have exactly two - // keys: "tls.key" and "tls.crt" - Type string `json:"type,omitempty" yaml:"type,omitempty"` -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/selector.go b/src/vendor/sigs.k8s.io/kustomize/api/types/selector.go deleted file mode 100644 index 2c07f0b01..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/selector.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -import ( - "fmt" - "regexp" - - "sigs.k8s.io/kustomize/kyaml/resid" -) - -// Selector specifies a set of resources. -// Any resource that matches intersection of all conditions -// is included in this set. -type Selector struct { - // ResId refers to a GVKN/Ns of a resource. - resid.ResId `json:",inline,omitempty" yaml:",inline,omitempty"` - - // AnnotationSelector is a string that follows the label selection expression - // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#api - // It matches with the resource annotations. - AnnotationSelector string `json:"annotationSelector,omitempty" yaml:"annotationSelector,omitempty"` - - // LabelSelector is a string that follows the label selection expression - // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#api - // It matches with the resource labels. - LabelSelector string `json:"labelSelector,omitempty" yaml:"labelSelector,omitempty"` -} - -func (s *Selector) Copy() Selector { - return *s -} - -func (s *Selector) String() string { - return fmt.Sprintf( - "%s:a=%s:l=%s", s.ResId, s.AnnotationSelector, s.LabelSelector) -} - -// SelectorRegex is a Selector with regex in GVK -// Any resource that matches intersection of all conditions -// is included in this set. -type SelectorRegex struct { - selector *Selector - groupRegex *regexp.Regexp - versionRegex *regexp.Regexp - kindRegex *regexp.Regexp - nameRegex *regexp.Regexp - namespaceRegex *regexp.Regexp -} - -// NewSelectorRegex returns a pointer to a new SelectorRegex -// which uses the same condition as s. -func NewSelectorRegex(s *Selector) (*SelectorRegex, error) { - sr := new(SelectorRegex) - var err error - sr.selector = s - sr.groupRegex, err = regexp.Compile(anchorRegex(s.Gvk.Group)) - if err != nil { - return nil, err - } - sr.versionRegex, err = regexp.Compile(anchorRegex(s.Gvk.Version)) - if err != nil { - return nil, err - } - sr.kindRegex, err = regexp.Compile(anchorRegex(s.Gvk.Kind)) - if err != nil { - return nil, err - } - sr.nameRegex, err = regexp.Compile(anchorRegex(s.Name)) - if err != nil { - return nil, err - } - sr.namespaceRegex, err = regexp.Compile(anchorRegex(s.Namespace)) - if err != nil { - return nil, err - } - return sr, nil -} - -func anchorRegex(pattern string) string { - if pattern == "" { - return pattern - } - return "^(?:" + pattern + ")$" -} - -// MatchGvk return true if gvk can be matched by s. 
-func (s *SelectorRegex) MatchGvk(gvk resid.Gvk) bool { - if len(s.selector.Gvk.Group) > 0 { - if !s.groupRegex.MatchString(gvk.Group) { - return false - } - } - if len(s.selector.Gvk.Version) > 0 { - if !s.versionRegex.MatchString(gvk.Version) { - return false - } - } - if len(s.selector.Gvk.Kind) > 0 { - if !s.kindRegex.MatchString(gvk.Kind) { - return false - } - } - return true -} - -// MatchName returns true if the name in selector is -// empty or the n can be matches by the name in selector -func (s *SelectorRegex) MatchName(n string) bool { - if s.selector.Name == "" { - return true - } - return s.nameRegex.MatchString(n) -} - -// MatchNamespace returns true if the namespace in selector is -// empty or the ns can be matches by the namespace in selector -func (s *SelectorRegex) MatchNamespace(ns string) bool { - if s.selector.Namespace == "" { - return true - } - return s.namespaceRegex.MatchString(ns) -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/typemeta.go b/src/vendor/sigs.k8s.io/kustomize/api/types/typemeta.go deleted file mode 100644 index 0ddafd3d8..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/typemeta.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -// TypeMeta partially copies apimachinery/pkg/apis/meta/v1.TypeMeta -// No need for a direct dependence; the fields are stable. -type TypeMeta struct { - Kind string `json:"kind,omitempty" yaml:"kind,omitempty"` - APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty"` -} diff --git a/src/vendor/sigs.k8s.io/kustomize/api/types/var.go b/src/vendor/sigs.k8s.io/kustomize/api/types/var.go deleted file mode 100644 index 0ca5579c0..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/api/types/var.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package types - -import ( - "fmt" - "reflect" - "sort" - "strings" - - "sigs.k8s.io/kustomize/kyaml/resid" -) - -// Var represents a variable whose value will be sourced -// from a field in a Kubernetes object. -type Var struct { - // Value of identifier name e.g. FOO used in container args, annotations - // Appears in pod template as $(FOO) - Name string `json:"name" yaml:"name"` - - // ObjRef must refer to a Kubernetes resource under the - // purview of this kustomization. ObjRef should use the - // raw name of the object (the name specified in its YAML, - // before addition of a namePrefix and a nameSuffix). - ObjRef Target `json:"objref" yaml:"objref"` - - // FieldRef refers to the field of the object referred to by - // ObjRef whose value will be extracted for use in - // replacing $(FOO). 
- // If unspecified, this defaults to fieldPath: $defaultFieldPath - FieldRef FieldSelector `json:"fieldref,omitempty" yaml:"fieldref,omitempty"` -} - -// Target refers to a kubernetes object by Group, Version, Kind and Name -// gvk.Gvk contains Group, Version and Kind -// APIVersion is added to keep the backward compatibility of using ObjectReference -// for Var.ObjRef -type Target struct { - APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty"` - resid.Gvk `json:",inline,omitempty" yaml:",inline,omitempty"` - Name string `json:"name" yaml:"name"` - Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` -} - -// GVK returns the Gvk object in Target -func (t *Target) GVK() resid.Gvk { - if t.APIVersion == "" { - return t.Gvk - } - versions := strings.Split(t.APIVersion, "/") - if len(versions) == 2 { - t.Group = versions[0] - t.Version = versions[1] - } - if len(versions) == 1 { - t.Version = versions[0] - } - return t.Gvk -} - -// FieldSelector contains the fieldPath to an object field. -// This struct is added to keep the backward compatibility of using ObjectFieldSelector -// for Var.FieldRef -type FieldSelector struct { - FieldPath string `json:"fieldPath,omitempty" yaml:"fieldPath,omitempty"` -} - -// defaulting sets reference to field used by default. -func (v *Var) Defaulting() { - if v.FieldRef.FieldPath == "" { - v.FieldRef.FieldPath = DefaultReplacementFieldPath - } - v.ObjRef.GVK() -} - -// DeepEqual returns true if var a and b are Equals. -// Note 1: The objects are unchanged by the VarEqual -// Note 2: Should be normalize be FieldPath before doing -// the DeepEqual. spec.a[b] is supposed to be the same -// as spec.a.b -func (v Var) DeepEqual(other Var) bool { - v.Defaulting() - other.Defaulting() - return reflect.DeepEqual(v, other) -} - -// VarSet is a set of Vars where no var.Name is repeated. -type VarSet struct { - set map[string]Var -} - -// NewVarSet returns an initialized VarSet -func NewVarSet() VarSet { - return VarSet{set: map[string]Var{}} -} - -// AsSlice returns the vars as a slice. -func (vs *VarSet) AsSlice() []Var { - s := make([]Var, len(vs.set)) - i := 0 - for _, v := range vs.set { - s[i] = v - i++ - } - sort.Sort(byName(s)) - return s -} - -// Copy returns a copy of the var set. -func (vs *VarSet) Copy() VarSet { - newSet := make(map[string]Var, len(vs.set)) - for k, v := range vs.set { - newSet[k] = v - } - return VarSet{set: newSet} -} - -// MergeSet absorbs other vars with error on name collision. -func (vs *VarSet) MergeSet(incoming VarSet) error { - for _, incomingVar := range incoming.set { - if err := vs.Merge(incomingVar); err != nil { - return err - } - } - return nil -} - -// MergeSlice absorbs a Var slice with error on name collision. -// Empty fields in incoming vars are defaulted. -func (vs *VarSet) MergeSlice(incoming []Var) error { - for _, v := range incoming { - if err := vs.Merge(v); err != nil { - return err - } - } - return nil -} - -// Merge absorbs another Var with error on name collision. -// Empty fields in incoming Var is defaulted. -func (vs *VarSet) Merge(v Var) error { - if vs.Contains(v) { - return fmt.Errorf( - "var '%s' already encountered", v.Name) - } - v.Defaulting() - vs.set[v.Name] = v - return nil -} - -// AbsorbSet absorbs other vars with error on (name,value) collision. 
-func (vs *VarSet) AbsorbSet(incoming VarSet) error { - for _, v := range incoming.set { - if err := vs.Absorb(v); err != nil { - return err - } - } - return nil -} - -// AbsorbSlice absorbs a Var slice with error on (name,value) collision. -// Empty fields in incoming vars are defaulted. -func (vs *VarSet) AbsorbSlice(incoming []Var) error { - for _, v := range incoming { - if err := vs.Absorb(v); err != nil { - return err - } - } - return nil -} - -// Absorb absorbs another Var with error on (name,value) collision. -// Empty fields in incoming Var is defaulted. -func (vs *VarSet) Absorb(v Var) error { - conflicting := vs.Get(v.Name) - if conflicting == nil { - // no conflict. The var is valid. - v.Defaulting() - vs.set[v.Name] = v - return nil - } - - if !reflect.DeepEqual(v, *conflicting) { - // two vars with the same name are pointing at two - // different resources. - return fmt.Errorf( - "var '%s' already encountered", v.Name) - } - return nil -} - -// Contains is true if the set has the other var. -func (vs *VarSet) Contains(other Var) bool { - return vs.Get(other.Name) != nil -} - -// Get returns the var with the given name, else nil. -func (vs *VarSet) Get(name string) *Var { - if v, found := vs.set[name]; found { - return &v - } - return nil -} - -// byName is a sort interface which sorts Vars by name alphabetically -type byName []Var - -func (v byName) Len() int { return len(v) } -func (v byName) Swap(i, j int) { v[i], v[j] = v[j], v[i] } -func (v byName) Less(i, j int) bool { return v[i].Name < v[j].Name } diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/LICENSE b/src/vendor/sigs.k8s.io/kustomize/kyaml/LICENSE deleted file mode 100644 index 8dada3eda..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/comments/comments.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/comments/comments.go deleted file mode 100644 index 97334d0f0..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/comments/comments.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package comments - -import ( - "sigs.k8s.io/kustomize/kyaml/openapi" - "sigs.k8s.io/kustomize/kyaml/yaml" - "sigs.k8s.io/kustomize/kyaml/yaml/walk" -) - -// CopyComments recursively copies the comments on fields in from to fields in to -func CopyComments(from, to *yaml.RNode) error { - // from node should not be modified, it should be just used as a reference - fromCopy := from.Copy() - copyFieldComments(fromCopy, to) - // walk the fields copying comments - _, err := walk.Walker{ - Sources: []*yaml.RNode{fromCopy, to}, - Visitor: &copier{}, - VisitKeysAsScalars: true}.Walk() - return err -} - -// copier implements walk.Visitor, and copies comments to fields shared between 2 instances -// of a resource -type copier struct{} - -func (c *copier) VisitMap(s walk.Sources, _ *openapi.ResourceSchema) (*yaml.RNode, error) { - copyFieldComments(s.Dest(), s.Origin()) - return s.Dest(), nil -} - -func (c *copier) VisitScalar(s walk.Sources, _ *openapi.ResourceSchema) (*yaml.RNode, error) { - to := s.Origin() - // TODO: File a bug with upstream yaml to handle comments for FoldedStyle scalar nodes - // Hack: convert FoldedStyle scalar node to DoubleQuotedStyle as the line comments are - // being serialized without space - // https://github.com/GoogleContainerTools/kpt/issues/766 - if to != nil && to.Document().Style == yaml.FoldedStyle { - to.Document().Style = yaml.DoubleQuotedStyle - } - - copyFieldComments(s.Dest(), to) - return s.Dest(), nil -} - -func (c *copier) VisitList(s walk.Sources, _ *openapi.ResourceSchema, _ walk.ListKind) ( - *yaml.RNode, error) { - copyFieldComments(s.Dest(), s.Origin()) - destItems := s.Dest().Content() - originItems := s.Origin().Content() - - for i := 0; i < len(destItems) && i < len(originItems); i++ { - dest := destItems[i] - origin := originItems[i] - - if dest.Value == origin.Value { - // We copy the comments recursively on each node in the list. - if err := CopyComments(yaml.NewRNode(dest), yaml.NewRNode(origin)); err != nil { - return nil, err - } - } - } - - return s.Dest(), nil -} - -// copyFieldComments copies the comment from one field to another -func copyFieldComments(from, to *yaml.RNode) { - if from == nil || to == nil { - return - } - if to.Document().LineComment == "" { - to.Document().LineComment = from.Document().LineComment - } - if to.Document().HeadComment == "" { - to.Document().HeadComment = from.Document().HeadComment - } - if to.Document().FootComment == "" { - to.Document().FootComment = from.Document().FootComment - } -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/errors/errors.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/errors/errors.go deleted file mode 100644 index f072c3c97..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/errors/errors.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package errors provides libraries for working with the go-errors/errors library. -package errors - -import ( - "fmt" - - goerrors "github.com/go-errors/errors" -) - -// Wrap returns err wrapped in a go-error. If err is nil, returns nil. -func Wrap(err interface{}) error { - if err == nil { - return nil - } - return goerrors.Wrap(err, 1) -} - -// WrapPrefixf returns err wrapped in a go-error with a message prefix. If err is nil, returns nil. 
-func WrapPrefixf(err interface{}, msg string, args ...interface{}) error { - if err == nil { - return nil - } - return goerrors.WrapPrefix(err, fmt.Sprintf(msg, args...), 1) -} - -// Errorf returns a new go-error. -func Errorf(msg string, args ...interface{}) error { - return goerrors.Wrap(fmt.Errorf(msg, args...), 1) -} - -// GetStack returns a stack trace for the error if it has one -func GetStack(err error) string { - if e, ok := err.(*goerrors.Error); ok { - return string(e.Stack()) - } - return "" -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/ext/ext.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/ext/ext.go deleted file mode 100644 index c946577cc..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/ext/ext.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package ext - -// IgnoreFileName returns the name for ignore files in -// packages. It can be overridden by tools using this library. -var IgnoreFileName = func() string { - return ".krmignore" -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/fieldmeta/fieldmeta.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/fieldmeta/fieldmeta.go deleted file mode 100644 index c537c3372..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/fieldmeta/fieldmeta.go +++ /dev/null @@ -1,275 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package fieldmeta - -import ( - "encoding/json" - "fmt" - "reflect" - "strconv" - "strings" - - "k8s.io/kube-openapi/pkg/validation/spec" - "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/openapi" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// FieldMeta contains metadata that may be attached to fields as comments -type FieldMeta struct { - Schema spec.Schema - - Extensions XKustomize - - SettersSchema *spec.Schema -} - -type XKustomize struct { - SetBy string `yaml:"setBy,omitempty" json:"setBy,omitempty"` - PartialFieldSetters []PartialFieldSetter `yaml:"partialSetters,omitempty" json:"partialSetters,omitempty"` - FieldSetter *PartialFieldSetter `yaml:"setter,omitempty" json:"setter,omitempty"` -} - -// PartialFieldSetter defines how to set part of a field rather than the full field -// value. e.g. the tag part of an image field -type PartialFieldSetter struct { - // Name is the name of this setter. - Name string `yaml:"name" json:"name"` - - // Value is the current value that has been set. - Value string `yaml:"value" json:"value"` -} - -// IsEmpty returns true if the FieldMeta has any empty Schema -func (fm *FieldMeta) IsEmpty() bool { - if fm == nil { - return true - } - return reflect.DeepEqual(fm.Schema, spec.Schema{}) -} - -// Read reads the FieldMeta from a node -func (fm *FieldMeta) Read(n *yaml.RNode) error { - // check for metadata on head and line comments - comments := []string{n.YNode().LineComment, n.YNode().HeadComment} - for _, c := range comments { - if c == "" { - continue - } - c := strings.TrimLeft(c, "#") - - // check for new short hand notation or fall back to openAPI ref format - if !fm.processShortHand(c) { - // if it doesn't Unmarshal that is fine, it means there is no metadata - // other comments are valid, they just don't parse - // TODO: consider more sophisticated parsing techniques similar to what is used - // for go struct tags. 
- if err := fm.Schema.UnmarshalJSON([]byte(c)); err != nil { - // note: don't return an error if the comment isn't a fieldmeta struct - return nil - } - } - fe := fm.Schema.VendorExtensible.Extensions["x-kustomize"] - if fe == nil { - return nil - } - b, err := json.Marshal(fe) - if err != nil { - return errors.Wrap(err) - } - return json.Unmarshal(b, &fm.Extensions) - } - return nil -} - -// processShortHand parses the comment for short hand ref, loads schema to fm -// and returns true if successful, returns false for any other cases and not throw -// error, as the comment might not be a setter ref -func (fm *FieldMeta) processShortHand(comment string) bool { - input := map[string]string{} - err := json.Unmarshal([]byte(comment), &input) - if err != nil { - return false - } - name := input[shortHandRef] - if name == "" { - return false - } - - // check if setter with the name exists, else check for a substitution - // setter and substitution can't have same name in shorthand - - setterRef, err := spec.NewRef(DefinitionsPrefix + SetterDefinitionPrefix + name) - if err != nil { - return false - } - - setterRefBytes, err := setterRef.MarshalJSON() - if err != nil { - return false - } - - if _, err := openapi.Resolve(&setterRef, fm.SettersSchema); err == nil { - setterErr := fm.Schema.UnmarshalJSON(setterRefBytes) - return setterErr == nil - } - - substRef, err := spec.NewRef(DefinitionsPrefix + SubstitutionDefinitionPrefix + name) - if err != nil { - return false - } - - substRefBytes, err := substRef.MarshalJSON() - if err != nil { - return false - } - - if _, err := openapi.Resolve(&substRef, fm.SettersSchema); err == nil { - substErr := fm.Schema.UnmarshalJSON(substRefBytes) - return substErr == nil - } - return false -} - -func isExtensionEmpty(x XKustomize) bool { - if x.FieldSetter != nil { - return false - } - if x.SetBy != "" { - return false - } - if len(x.PartialFieldSetters) > 0 { - return false - } - return true -} - -// Write writes the FieldMeta to a node -func (fm *FieldMeta) Write(n *yaml.RNode) error { - if !isExtensionEmpty(fm.Extensions) { - return fm.WriteV1Setters(n) - } - - // Ref is removed when a setter is deleted, so the Ref string could be empty. 
- if fm.Schema.Ref.String() != "" { - // Ex: {"$ref":"#/definitions/io.k8s.cli.setters.replicas"} should be converted to - // {"$openAPI":"replicas"} and added to the line comment - ref := fm.Schema.Ref.String() - var shortHandRefValue string - switch { - case strings.HasPrefix(ref, DefinitionsPrefix+SetterDefinitionPrefix): - shortHandRefValue = strings.TrimPrefix(ref, DefinitionsPrefix+SetterDefinitionPrefix) - case strings.HasPrefix(ref, DefinitionsPrefix+SubstitutionDefinitionPrefix): - shortHandRefValue = strings.TrimPrefix(ref, DefinitionsPrefix+SubstitutionDefinitionPrefix) - default: - return fmt.Errorf("unexpected ref format: %s", ref) - } - n.YNode().LineComment = fmt.Sprintf(`{"%s":"%s"}`, shortHandRef, - shortHandRefValue) - } else { - n.YNode().LineComment = "" - } - - return nil -} - -// WriteV1Setters is the v1 setters way of writing setter definitions -// TODO: pmarupaka - remove this method after migration -func (fm *FieldMeta) WriteV1Setters(n *yaml.RNode) error { - fm.Schema.VendorExtensible.AddExtension("x-kustomize", fm.Extensions) - b, err := json.Marshal(fm.Schema) - if err != nil { - return errors.Wrap(err) - } - n.YNode().LineComment = string(b) - return nil -} - -// FieldValueType defines the type of input to register -type FieldValueType string - -const ( - // String defines a string flag - String FieldValueType = "string" - // Bool defines a bool flag - Bool = "boolean" - // Int defines an int flag - Int = "integer" -) - -func (it FieldValueType) String() string { - if it == "" { - return "string" - } - return string(it) -} - -func (it FieldValueType) Validate(value string) error { - switch it { - case Int: - if _, err := strconv.Atoi(value); err != nil { - return errors.WrapPrefixf(err, "value must be an int") - } - case Bool: - if _, err := strconv.ParseBool(value); err != nil { - return errors.WrapPrefixf(err, "value must be a bool") - } - } - return nil -} - -func (it FieldValueType) Tag() string { - switch it { - case String: - return yaml.NodeTagString - case Bool: - return yaml.NodeTagBool - case Int: - return yaml.NodeTagInt - } - return "" -} - -func (it FieldValueType) TagForValue(value string) string { - switch it { - case String: - return yaml.NodeTagString - case Bool: - if _, err := strconv.ParseBool(string(it)); err != nil { - return "" - } - return yaml.NodeTagBool - case Int: - if _, err := strconv.ParseInt(string(it), 0, 32); err != nil { - return "" - } - return yaml.NodeTagInt - } - return "" -} - -const ( - // CLIDefinitionsPrefix is the prefix for cli definition keys. - CLIDefinitionsPrefix = "io.k8s.cli." - - // SetterDefinitionPrefix is the prefix for setter definition keys. - SetterDefinitionPrefix = CLIDefinitionsPrefix + "setters." - - // SubstitutionDefinitionPrefix is the prefix for substitution definition keys. - SubstitutionDefinitionPrefix = CLIDefinitionsPrefix + "substitutions." 
- - // DefinitionsPrefix is the prefix used to reference definitions in the OpenAPI - DefinitionsPrefix = "#/definitions/" -) - -// shortHandRef is the shorthand reference to setters and substitutions -var shortHandRef = "$openapi" - -func SetShortHandRef(ref string) { - shortHandRef = ref -} - -func ShortHandRef() string { - return shortHandRef -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/confirmeddir.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/confirmeddir.go deleted file mode 100644 index 4b0008427..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/confirmeddir.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package filesys - -import ( - "io/ioutil" - "path/filepath" - "strings" -) - -// ConfirmedDir is a clean, absolute, delinkified path -// that was confirmed to point to an existing directory. -type ConfirmedDir string - -// NewTmpConfirmedDir returns a temporary dir, else error. -// The directory is cleaned, no symlinks, etc. so it's -// returned as a ConfirmedDir. -func NewTmpConfirmedDir() (ConfirmedDir, error) { - n, err := ioutil.TempDir("", "kustomize-") - if err != nil { - return "", err - } - - // In MacOs `ioutil.TempDir` creates a directory - // with root in the `/var` folder, which is in turn - // a symlinked path to `/private/var`. - // Function `filepath.EvalSymlinks`is used to - // resolve the real absolute path. - deLinked, err := filepath.EvalSymlinks(n) - return ConfirmedDir(deLinked), err -} - -// HasPrefix returns true if the directory argument -// is a prefix of self (d) from the point of view of -// a file system. -// -// I.e., it's true if the argument equals or contains -// self (d) in a file path sense. -// -// HasPrefix emulates the semantics of strings.HasPrefix -// such that the following are true: -// -// strings.HasPrefix("foobar", "foobar") -// strings.HasPrefix("foobar", "foo") -// strings.HasPrefix("foobar", "") -// -// d := fSys.ConfirmDir("/foo/bar") -// d.HasPrefix("/foo/bar") -// d.HasPrefix("/foo") -// d.HasPrefix("/") -// -// Not contacting a file system here to check for -// actual path existence. -// -// This is tested on linux, but will have trouble -// on other operating systems. -// TODO(monopole) Refactor when #golang/go/18358 closes. -// See also: -// https://github.com/golang/go/issues/18358 -// https://github.com/golang/dep/issues/296 -// https://github.com/golang/dep/blob/master/internal/fs/fs.go#L33 -// https://codereview.appspot.com/5712045 -func (d ConfirmedDir) HasPrefix(path ConfirmedDir) bool { - if path.String() == string(filepath.Separator) || path == d { - return true - } - return strings.HasPrefix( - string(d), - string(path)+string(filepath.Separator)) -} - -func (d ConfirmedDir) Join(path string) string { - return filepath.Join(string(d), path) -} - -func (d ConfirmedDir) String() string { - return string(d) -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/doc.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/doc.go deleted file mode 100644 index bd3963441..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package filesys provides a file system abstraction, -// a subset of that provided by golang.org/pkg/os, -// with an on-disk and in-memory representation. 
-package filesys diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/file.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/file.go deleted file mode 100644 index 5044c653e..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/file.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package filesys - -import ( - "io" - "os" -) - -// File groups the basic os.File methods. -type File interface { - io.ReadWriteCloser - Stat() (os.FileInfo, error) -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fileinfo.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fileinfo.go deleted file mode 100644 index 57646d244..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fileinfo.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package filesys - -import ( - "os" - "time" -) - -var _ os.FileInfo = fileInfo{} - -// fileInfo implements os.FileInfo for a fileInMemory instance. -type fileInfo struct { - node *fsNode -} - -// Name returns the name of the file -func (fi fileInfo) Name() string { return fi.node.Name() } - -// Size returns the size of the file -func (fi fileInfo) Size() int64 { return fi.node.Size() } - -// Mode returns the file mode -func (fi fileInfo) Mode() os.FileMode { return 0777 } - -// ModTime returns a bogus time -func (fi fileInfo) ModTime() time.Time { return time.Time{} } - -// IsDir returns true if it is a directory -func (fi fileInfo) IsDir() bool { return fi.node.isNodeADir() } - -// Sys should return underlying data source, but it now returns nil -func (fi fileInfo) Sys() interface{} { return nil } diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fileondisk.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fileondisk.go deleted file mode 100644 index 8ed92d90e..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fileondisk.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package filesys - -import ( - "os" -) - -var _ File = &fileOnDisk{} - -// fileOnDisk implements File using the local filesystem. -type fileOnDisk struct { - file *os.File -} - -// Close closes a file. -func (f *fileOnDisk) Close() error { return f.file.Close() } - -// Read reads a file's content. -func (f *fileOnDisk) Read(p []byte) (n int, err error) { return f.file.Read(p) } - -// Write writes bytes to a file -func (f *fileOnDisk) Write(p []byte) (n int, err error) { return f.file.Write(p) } - -// Stat returns an interface which has all the information regarding the file. -func (f *fileOnDisk) Stat() (os.FileInfo, error) { return f.file.Stat() } diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/filesystem.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/filesystem.go deleted file mode 100644 index 79dfc53bf..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/filesystem.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package filesys - -import ( - "fmt" - "path/filepath" - - "sigs.k8s.io/kustomize/kyaml/errors" -) - -const ( - Separator = string(filepath.Separator) - SelfDir = "." - ParentDir = ".." -) - -// FileSystem groups basic os filesystem methods. -// It's supposed be functional subset of https://golang.org/pkg/os -type FileSystem interface { - - // Create a file. - Create(path string) (File, error) - - // MkDir makes a directory. 
- Mkdir(path string) error - - // MkDirAll makes a directory path, creating intervening directories. - MkdirAll(path string) error - - // RemoveAll removes path and any children it contains. - RemoveAll(path string) error - - // Open opens the named file for reading. - Open(path string) (File, error) - - // IsDir returns true if the path is a directory. - IsDir(path string) bool - - // ReadDir returns a list of files and directories within a directory. - ReadDir(path string) ([]string, error) - - // CleanedAbs converts the given path into a - // directory and a file name, where the directory - // is represented as a ConfirmedDir and all that implies. - // If the entire path is a directory, the file component - // is an empty string. - CleanedAbs(path string) (ConfirmedDir, string, error) - - // Exists is true if the path exists in the file system. - Exists(path string) bool - - // Glob returns the list of matching files, - // emulating https://golang.org/pkg/path/filepath/#Glob - Glob(pattern string) ([]string, error) - - // ReadFile returns the contents of the file at the given path. - ReadFile(path string) ([]byte, error) - - // WriteFile writes the data to a file at the given path, - // overwriting anything that's already there. - WriteFile(path string, data []byte) error - - // Walk walks the file system with the given WalkFunc. - Walk(path string, walkFn filepath.WalkFunc) error -} - -// ConfirmDir returns an error if the user-specified path is not an existing directory on fSys. -// Otherwise, ConfirmDir returns path, which can be relative, as a ConfirmedDir and all that implies. -func ConfirmDir(fSys FileSystem, path string) (ConfirmedDir, error) { - if path == "" { - return "", errors.Errorf("directory path cannot be empty") - } - - d, f, err := fSys.CleanedAbs(path) - if err != nil { - return "", errors.WrapPrefixf(err, "not a valid directory") - } - if f != "" { - return "", errors.WrapPrefixf(errors.Errorf("file is not directory"), fmt.Sprintf("'%s'", path)) - } - return d, nil -} - -// FileSystemOrOnDisk satisfies the FileSystem interface by forwarding -// all of its method calls to the given FileSystem whenever it's not nil. -// If it's nil, the call is forwarded to the OS's underlying file system. -type FileSystemOrOnDisk struct { - FileSystem FileSystem -} - -// Set sets the given FileSystem as the target for all the FileSystem method calls. 
-func (fs *FileSystemOrOnDisk) Set(f FileSystem) { fs.FileSystem = f } - -func (fs FileSystemOrOnDisk) fs() FileSystem { - if fs.FileSystem != nil { - return fs.FileSystem - } - return MakeFsOnDisk() -} - -func (fs FileSystemOrOnDisk) Create(path string) (File, error) { - return fs.fs().Create(path) -} - -func (fs FileSystemOrOnDisk) Mkdir(path string) error { - return fs.fs().Mkdir(path) -} - -func (fs FileSystemOrOnDisk) MkdirAll(path string) error { - return fs.fs().MkdirAll(path) -} - -func (fs FileSystemOrOnDisk) RemoveAll(path string) error { - return fs.fs().RemoveAll(path) -} - -func (fs FileSystemOrOnDisk) Open(path string) (File, error) { - return fs.fs().Open(path) -} - -func (fs FileSystemOrOnDisk) IsDir(path string) bool { - return fs.fs().IsDir(path) -} - -func (fs FileSystemOrOnDisk) ReadDir(path string) ([]string, error) { - return fs.fs().ReadDir(path) -} - -func (fs FileSystemOrOnDisk) CleanedAbs(path string) (ConfirmedDir, string, error) { - return fs.fs().CleanedAbs(path) -} - -func (fs FileSystemOrOnDisk) Exists(path string) bool { - return fs.fs().Exists(path) -} - -func (fs FileSystemOrOnDisk) Glob(pattern string) ([]string, error) { - return fs.fs().Glob(pattern) -} - -func (fs FileSystemOrOnDisk) ReadFile(path string) ([]byte, error) { - return fs.fs().ReadFile(path) -} - -func (fs FileSystemOrOnDisk) WriteFile(path string, data []byte) error { - return fs.fs().WriteFile(path, data) -} - -func (fs FileSystemOrOnDisk) Walk(path string, walkFn filepath.WalkFunc) error { - return fs.fs().Walk(path, walkFn) -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsnode.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsnode.go deleted file mode 100644 index 608b8e38a..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsnode.go +++ /dev/null @@ -1,647 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package filesys - -import ( - "bytes" - "fmt" - "io" - "log" - "os" - "path/filepath" - "regexp" - "sort" - "strings" - - "sigs.k8s.io/kustomize/kyaml/errors" -) - -var _ File = &fsNode{} -var _ FileSystem = &fsNode{} - -// fsNode is either a file or a directory. -type fsNode struct { - // What node owns me? - parent *fsNode - - // Value to return as the Name() when the - // parent is nil. - nilParentName string - - // A directory mapping names to nodes. - // If dir is nil, then self node is a file. - // If dir is non-nil, then self node is a directory, - // albeit possibly an empty directory. - dir map[string]*fsNode - - // if this node is a file, this is the content. - content []byte - - // if offset is not nil the file is open and it tracks - // the current file offset. - offset *int -} - -// MakeEmptyDirInMemory returns an empty directory. -// The paths of nodes in this object will never -// report a leading Separator, meaning they -// aren't "absolute" in the sense defined by -// https://golang.org/pkg/path/filepath/#IsAbs. -func MakeEmptyDirInMemory() *fsNode { - return &fsNode{ - dir: make(map[string]*fsNode), - } -} - -// MakeFsInMemory returns an empty 'file system'. -// The paths of nodes in this object will always -// report a leading Separator, meaning they -// are "absolute" in the sense defined by -// https://golang.org/pkg/path/filepath/#IsAbs. -// This is a relevant difference when using Walk, -// Glob, Match, etc. -func MakeFsInMemory() FileSystem { - return &fsNode{ - nilParentName: Separator, - dir: make(map[string]*fsNode), - } -} - -// Name returns the name of the node. 
-func (n *fsNode) Name() string { - if n.parent == nil { - // Unable to lookup name in parent. - return n.nilParentName - } - if !n.parent.isNodeADir() { - log.Fatal("parent not a dir") - } - for key, value := range n.parent.dir { - if value == n { - return key - } - } - log.Fatal("unable to find fsNode name") - return "" -} - -// Path returns the full path to the node. -func (n *fsNode) Path() string { - if n.parent == nil { - return n.nilParentName - } - if !n.parent.isNodeADir() { - log.Fatal("parent not a dir, structural error") - } - return filepath.Join(n.parent.Path(), n.Name()) -} - -// mySplit trims trailing separators from the directory -// result of filepath.Split. -func mySplit(s string) (string, string) { - dName, fName := filepath.Split(s) - return StripTrailingSeps(dName), fName -} - -func (n *fsNode) addFile(name string, c []byte) (result *fsNode, err error) { - parent := n - dName, fileName := mySplit(name) - if dName != "" { - parent, err = parent.addDir(dName) - if err != nil { - return nil, err - } - } - if !isLegalFileNameForCreation(fileName) { - return nil, fmt.Errorf( - "illegal name '%s' in file creation", fileName) - } - result, ok := parent.dir[fileName] - if ok { - // File already exists; overwrite it. - if result.offset != nil { - return nil, fmt.Errorf("cannot add already opened file '%s'", n.Path()) - } - result.content = append(result.content[:0], c...) - return result, nil - } - result = &fsNode{ - content: append([]byte(nil), c...), - parent: parent, - } - parent.dir[fileName] = result - return result, nil -} - -// Create implements FileSystem. -// Create makes an empty file. -func (n *fsNode) Create(path string) (result File, err error) { - f, err := n.AddFile(path, nil) - if err != nil { - return f, err - } - f.offset = new(int) - return f, nil -} - -// WriteFile implements FileSystem. -func (n *fsNode) WriteFile(path string, d []byte) error { - _, err := n.AddFile(path, d) - return err -} - -// AddFile adds a file and any necessary containing -// directories to the node. -func (n *fsNode) AddFile( - name string, c []byte) (result *fsNode, err error) { - if n.dir == nil { - return nil, fmt.Errorf( - "cannot add a file to a non-directory '%s'", n.Name()) - } - return n.addFile(cleanQueryPath(name), c) -} - -func (n *fsNode) addDir(path string) (result *fsNode, err error) { - parent := n - dName, subDirName := mySplit(path) - if dName != "" { - parent, err = n.addDir(dName) - if err != nil { - return nil, err - } - } - switch subDirName { - case "", SelfDir: - return n, nil - case ParentDir: - if n.parent == nil { - return nil, fmt.Errorf( - "cannot add a directory above '%s'", n.Path()) - } - return n.parent, nil - default: - if !isLegalFileNameForCreation(subDirName) { - return nil, fmt.Errorf( - "illegal name '%s' in directory creation", subDirName) - } - result, ok := parent.dir[subDirName] - if ok { - if result.isNodeADir() { - // it's already there. - return result, nil - } - return nil, fmt.Errorf( - "cannot make dir '%s'; a file of that name already exists in '%s'", - subDirName, parent.Name()) - } - result = &fsNode{ - dir: make(map[string]*fsNode), - parent: parent, - } - parent.dir[subDirName] = result - return result, nil - } -} - -// Mkdir implements FileSystem. -// Mkdir creates a directory. -func (n *fsNode) Mkdir(path string) error { - _, err := n.AddDir(path) - return err -} - -// MkdirAll implements FileSystem. -// MkdirAll creates a directory. 
-func (n *fsNode) MkdirAll(path string) error { - _, err := n.AddDir(path) - return err -} - -// AddDir adds a directory to the node, not complaining -// if it is already there. -func (n *fsNode) AddDir(path string) (result *fsNode, err error) { - if n.dir == nil { - return nil, fmt.Errorf( - "cannot add a directory to file node '%s'", n.Name()) - } - return n.addDir(cleanQueryPath(path)) -} - -// CleanedAbs implements FileSystem. -func (n *fsNode) CleanedAbs(path string) (ConfirmedDir, string, error) { - node, err := n.Find(path) - if err != nil { - return "", "", errors.WrapPrefixf(err, "unable to clean") - } - if node == nil { - return "", "", notExistError(path) - } - if node.isNodeADir() { - return ConfirmedDir(node.Path()), "", nil - } - return ConfirmedDir(node.parent.Path()), node.Name(), nil -} - -// Exists implements FileSystem. -// Exists returns true if the path exists. -func (n *fsNode) Exists(path string) bool { - if !n.isNodeADir() { - return n.Name() == path - } - result, err := n.Find(path) - if err != nil { - return false - } - return result != nil -} - -func cleanQueryPath(path string) string { - // Always ignore leading separator? - // Remember that filepath.Clean returns "." if - // given an empty string argument. - return filepath.Clean(StripLeadingSeps(path)) -} - -// Find finds the given node, else nil if not found. -// Return error on structural/argument errors. -func (n *fsNode) Find(path string) (*fsNode, error) { - if !n.isNodeADir() { - return nil, fmt.Errorf("can only find inside a dir") - } - if path == "" { - // Special case; check *before* cleaning and *before* - // comparison to nilParentName. - return nil, nil - } - if (n.parent == nil && path == n.nilParentName) || path == SelfDir { - // Special case - return n, nil - } - return n.findIt(cleanQueryPath(path)) -} - -func (n *fsNode) findIt(path string) (result *fsNode, err error) { - parent := n - dName, item := mySplit(path) - if dName != "" { - parent, err = n.findIt(dName) - if err != nil { - return nil, err - } - if parent == nil { - // all done, target doesn't exist. - return nil, nil - } - } - if !parent.isNodeADir() { - return nil, fmt.Errorf("'%s' is not a directory", parent.Path()) - } - return parent.dir[item], nil -} - -// RemoveAll implements FileSystem. -// RemoveAll removes an item and everything it contains. -func (n *fsNode) RemoveAll(path string) error { - result, err := n.Find(path) - if err != nil { - return err - } - if result == nil { - // If the path doesn't exist, no need to remove anything. - return nil - } - return result.Remove() -} - -// Remove drop the node, and everything it contains, from its parent. -func (n *fsNode) Remove() error { - if n.parent == nil { - return fmt.Errorf("cannot remove a root node") - } - if !n.parent.isNodeADir() { - log.Fatal("parent not a dir") - } - for key, value := range n.parent.dir { - if value == n { - delete(n.parent.dir, key) - return nil - } - } - log.Fatal("unable to find self in parent") - return nil -} - -// isNodeADir returns true if the node is a directory. -// Cannot collide with the poorly named "IsDir". -func (n *fsNode) isNodeADir() bool { - return n.dir != nil -} - -// IsDir implements FileSystem. -// IsDir returns true if the argument resolves -// to a directory rooted at the node. -func (n *fsNode) IsDir(path string) bool { - result, err := n.Find(path) - if err != nil || result == nil { - return false - } - return result.isNodeADir() -} - -// ReadDir implements FileSystem. 
-func (n *fsNode) ReadDir(path string) ([]string, error) { - if !n.Exists(path) { - return nil, notExistError(path) - } - if !n.IsDir(path) { - return nil, fmt.Errorf("%s is not a directory", path) - } - - dir, err := n.Find(path) - if err != nil { - return nil, err - } - if dir == nil { - return nil, fmt.Errorf("could not find directory %s", path) - } - - keys := make([]string, len(dir.dir)) - i := 0 - for k := range dir.dir { - keys[i] = k - i++ - } - return keys, nil -} - -// Size returns the size of the node. -func (n *fsNode) Size() int64 { - if n.isNodeADir() { - return int64(len(n.dir)) - } - return int64(len(n.content)) -} - -// Open implements FileSystem. -// Open opens the node in read-write mode and sets the offset its start. -// Writing right after opening the file will replace the original content -// and move the offset forward, as with a file opened with O_RDWR | O_CREATE. -// -// As an example, let's consider a file with content "content": -// - open: sets offset to start, content is "content" -// - write "@": offset increases by one, the content is now "@ontent" -// - read the rest: since offset is 1, the read operation returns "ontent" -// - write "$": offset is at EOF, so "$" is appended and content is now "@ontent$" -// - read the rest: returns 0 bytes and EOF -// - close: the content is still "@ontent$" -func (n *fsNode) Open(path string) (File, error) { - result, err := n.Find(path) - if err != nil { - return nil, err - } - if result == nil { - return nil, notExistError(path) - } - if result.offset != nil { - return nil, fmt.Errorf("cannot open previously opened file '%s'", path) - } - result.offset = new(int) - return result, nil -} - -// Close marks the node closed. -func (n *fsNode) Close() error { - if n.offset == nil { - return fmt.Errorf("cannot close already closed file '%s'", n.Path()) - } - n.offset = nil - return nil -} - -// ReadFile implements FileSystem. -func (n *fsNode) ReadFile(path string) (c []byte, err error) { - result, err := n.Find(path) - if err != nil { - return nil, err - } - if result == nil { - return nil, notExistError(path) - } - if result.isNodeADir() { - return nil, fmt.Errorf("cannot read content from non-file '%s'", n.Path()) - } - c = make([]byte, len(result.content)) - copy(c, result.content) - return c, nil -} - -// Read returns the content of the file node. -func (n *fsNode) Read(d []byte) (c int, err error) { - if n.isNodeADir() { - return 0, fmt.Errorf( - "cannot read content from non-file '%s'", n.Path()) - } - if n.offset == nil { - return 0, fmt.Errorf("cannot read from closed file '%s'", n.Path()) - } - - rest := n.content[*n.offset:] - if len(d) < len(rest) { - rest = rest[:len(d)] - } else { - err = io.EOF - } - copy(d, rest) - *n.offset += len(rest) - return len(rest), err -} - -// Write saves the contents of the argument to the file node. -func (n *fsNode) Write(p []byte) (c int, err error) { - if n.isNodeADir() { - return 0, fmt.Errorf( - "cannot write content to non-file '%s'", n.Path()) - } - if n.offset == nil { - return 0, fmt.Errorf("cannot write to closed file '%s'", n.Path()) - } - n.content = append(n.content[:*n.offset], p...) - *n.offset = len(n.content) - return len(p), nil -} - -// ContentMatches returns true if v matches fake file's content. -func (n *fsNode) ContentMatches(v []byte) bool { - return bytes.Equal(v, n.content) -} - -// GetContent the content of a fake file. -func (n *fsNode) GetContent() []byte { - return n.content -} - -// Stat returns an instance of FileInfo. 
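The fsNode code above is kyaml's in-memory file system. A minimal usage sketch of the surface exercised by the removed methods (WriteFile, Exists, IsDir, ReadDir, ReadFile), assuming the package's MakeFsInMemory constructor, which is declared elsewhere in filesys and not in this hunk:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/kyaml/filesys"
)

func main() {
	// Assumption: MakeFsInMemory (defined elsewhere in this package) returns
	// a FileSystem backed by the fsNode tree shown above.
	fSys := filesys.MakeFsInMemory()

	// WriteFile creates any missing parent directories via AddFile/addDir.
	if err := fSys.WriteFile("app/config/kustomization.yaml", []byte("resources: []\n")); err != nil {
		panic(err)
	}

	fmt.Println(fSys.IsDir("app/config"))                      // true
	fmt.Println(fSys.Exists("app/config/kustomization.yaml")) // true

	entries, _ := fSys.ReadDir("app/config")
	fmt.Println(entries) // [kustomization.yaml]

	data, _ := fSys.ReadFile("app/config/kustomization.yaml")
	fmt.Print(string(data)) // resources: []
}
```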
-func (n *fsNode) Stat() (os.FileInfo, error) { - return fileInfo{node: n}, nil -} - -// Walk implements FileSystem. -func (n *fsNode) Walk(path string, walkFn filepath.WalkFunc) error { - result, err := n.Find(path) - if err != nil { - return err - } - if result == nil { - return notExistError(path) - } - return result.WalkMe(walkFn) -} - -// Walk runs the given walkFn on each node. -func (n *fsNode) WalkMe(walkFn filepath.WalkFunc) error { - fi, err := n.Stat() - // always visit self first - err = walkFn(n.Path(), fi, err) - if !n.isNodeADir() { - // it's a file, so nothing more to do - return err - } - // process self as a directory - if err == filepath.SkipDir { - return nil - } - // Walk is supposed to visit in lexical order. - for _, k := range n.sortedDirEntries() { - if err := n.dir[k].WalkMe(walkFn); err != nil { - if err == filepath.SkipDir { - // stop processing this directory - break - } - // bail out completely - return err - } - } - return nil -} - -func (n *fsNode) sortedDirEntries() []string { - keys := make([]string, len(n.dir)) - i := 0 - for k := range n.dir { - keys[i] = k - i++ - } - sort.Strings(keys) - return keys -} - -// FileCount returns a count of files. -// Directories, empty or otherwise, not counted. -func (n *fsNode) FileCount() int { - count := 0 - n.WalkMe(func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { - count++ - } - return nil - }) - return count -} - -func (n *fsNode) DebugPrint() { - n.WalkMe(func(path string, info os.FileInfo, err error) error { - if err != nil { - fmt.Printf("err '%v' at path %q\n", err, path) - return nil - } - if info.IsDir() { - if info.Size() == 0 { - fmt.Println("empty dir: " + path) - } - } else { - fmt.Println(" file: " + path) - } - return nil - }) -} - -var legalFileNamePattern = regexp.MustCompile("^[a-zA-Z0-9-_.:]+$") - -// This rules enforced here should be simpler and tighter -// than what's allowed on a real OS. -// Should be fine for testing or in-memory purposes. -func isLegalFileNameForCreation(n string) bool { - if n == "" || n == SelfDir || !legalFileNamePattern.MatchString(n) { - return false - } - return !strings.Contains(n, ParentDir) -} - -// RegExpGlob returns a list of file paths matching the regexp. -// Excludes directories. -func (n *fsNode) RegExpGlob(pattern string) ([]string, error) { - var result []string - var expression = regexp.MustCompile(pattern) - err := n.WalkMe(func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { - if expression.MatchString(path) { - result = append(result, path) - } - } - return nil - }) - if err != nil { - return nil, err - } - sort.Strings(result) - return result, nil -} - -// Glob implements FileSystem. -// Glob returns the list of file paths matching -// per filepath.Match semantics, i.e. unlike RegExpGlob, -// Match("foo/a*") will not match sub-sub directories of foo. -// This is how /bin/ls behaves. 
-func (n *fsNode) Glob(pattern string) ([]string, error) { - var result []string - var allFiles []string - err := n.WalkMe(func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { - match, err := filepath.Match(pattern, path) - if err != nil { - return err - } - if match { - allFiles = append(allFiles, path) - } - } - return nil - }) - if err != nil { - return nil, err - } - if IsHiddenFilePath(pattern) { - result = allFiles - } else { - result = RemoveHiddenFiles(allFiles) - } - sort.Strings(result) - return result, nil -} - -// notExistError indicates that a file or directory does not exist. -// Unwrapping returns os.ErrNotExist so errors.Is(err, os.ErrNotExist) works correctly. -type notExistError string - -func (err notExistError) Error() string { return fmt.Sprintf("'%s' doesn't exist", string(err)) } -func (err notExistError) Unwrap() error { return os.ErrNotExist } diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsondisk.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsondisk.go deleted file mode 100644 index 4808f8d2e..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsondisk.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package filesys - -import ( - "fmt" - "io/ioutil" - "log" - "os" - "path/filepath" - - "sigs.k8s.io/kustomize/kyaml/errors" -) - -var _ FileSystem = fsOnDisk{} - -// fsOnDisk implements FileSystem using the local filesystem. -type fsOnDisk struct{} - -// MakeFsOnDisk makes an instance of fsOnDisk. -func MakeFsOnDisk() FileSystem { - return fsOnDisk{} -} - -// Create delegates to os.Create. -func (fsOnDisk) Create(name string) (File, error) { return os.Create(name) } - -// Mkdir delegates to os.Mkdir. -func (fsOnDisk) Mkdir(name string) error { - return os.Mkdir(name, 0777|os.ModeDir) -} - -// MkdirAll delegates to os.MkdirAll. -func (fsOnDisk) MkdirAll(name string) error { - return os.MkdirAll(name, 0777|os.ModeDir) -} - -// RemoveAll delegates to os.RemoveAll. -func (fsOnDisk) RemoveAll(name string) error { - return os.RemoveAll(name) -} - -// Open delegates to os.Open. -func (fsOnDisk) Open(name string) (File, error) { return os.Open(name) } - -// CleanedAbs converts the given path into a -// directory and a file name, where the directory -// is represented as a ConfirmedDir and all that implies. -// If the entire path is a directory, the file component -// is an empty string. -func (x fsOnDisk) CleanedAbs( - path string) (ConfirmedDir, string, error) { - absRoot, err := filepath.Abs(path) - if err != nil { - return "", "", fmt.Errorf( - "abs path error on '%s' : %v", path, err) - } - deLinked, err := filepath.EvalSymlinks(absRoot) - if err != nil { - return "", "", fmt.Errorf( - "evalsymlink failure on '%s' : %w", path, err) - } - if x.IsDir(deLinked) { - return ConfirmedDir(deLinked), "", nil - } - d := filepath.Dir(deLinked) - if !x.IsDir(d) { - // Programmer/assumption error. - log.Fatalf("first part of '%s' not a directory", deLinked) - } - if d == deLinked { - // Programmer/assumption error. - log.Fatalf("d '%s' should be a subset of deLinked", d) - } - f := filepath.Base(deLinked) - if filepath.Join(d, f) != deLinked { - // Programmer/assumption error. - log.Fatalf("these should be equal: '%s', '%s'", - filepath.Join(d, f), deLinked) - } - return ConfirmedDir(d), f, nil -} - -// Exists returns true if os.Stat succeeds. 
-func (fsOnDisk) Exists(name string) bool { - _, err := os.Stat(name) - return err == nil -} - -// Glob returns the list of matching files -func (fsOnDisk) Glob(pattern string) ([]string, error) { - var result []string - allFilePaths, err := filepath.Glob(pattern) - if err != nil { - return nil, err - } - if IsHiddenFilePath(pattern) { - result = allFilePaths - } else { - result = RemoveHiddenFiles(allFilePaths) - } - return result, nil -} - -// IsDir delegates to os.Stat and FileInfo.IsDir -func (fsOnDisk) IsDir(name string) bool { - info, err := os.Stat(name) - if err != nil { - return false - } - return info.IsDir() -} - -// ReadDir delegates to os.ReadDir -func (fsOnDisk) ReadDir(name string) ([]string, error) { - dirEntries, err := os.ReadDir(name) - if err != nil { - return nil, err - } - result := make([]string, len(dirEntries)) - for i := range dirEntries { - result[i] = dirEntries[i].Name() - } - return result, nil -} - -// ReadFile delegates to ioutil.ReadFile. -func (fsOnDisk) ReadFile(name string) ([]byte, error) { return ioutil.ReadFile(name) } - -// WriteFile delegates to ioutil.WriteFile with read/write permissions. -func (fsOnDisk) WriteFile(name string, c []byte) error { - return errors.Wrap(ioutil.WriteFile(name, c, 0666)) //nolint:gosec -} - -// Walk delegates to filepath.Walk. -func (fsOnDisk) Walk(path string, walkFn filepath.WalkFunc) error { - return filepath.Walk(path, walkFn) -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsondisk_unix.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsondisk_unix.go deleted file mode 100644 index 15935a462..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsondisk_unix.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2022 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -//go:build !windows -// +build !windows - -package filesys - -import ( - "path/filepath" -) - -func getOSRoot() (string, error) { - return string(filepath.Separator), nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsondisk_windows.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsondisk_windows.go deleted file mode 100644 index 8c8a33c4d..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/fsondisk_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2022 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package filesys - -import ( - "path/filepath" - - "golang.org/x/sys/windows" -) - -func getOSRoot() (string, error) { - sysDir, err := windows.GetSystemDirectory() - if err != nil { - return "", err - } - return filepath.VolumeName(sysDir) + `\`, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/util.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/util.go deleted file mode 100644 index fdbe5e2c7..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/filesys/util.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package filesys - -import ( - "os" - "path/filepath" - "strings" -) - -// RootedPath returns a rooted path, e.g. "/foo/bar" as -// opposed to "foo/bar". -func RootedPath(elem ...string) string { - return Separator + filepath.Join(elem...) -} - -// StripTrailingSeps trims trailing filepath separators from input. -func StripTrailingSeps(s string) string { - k := len(s) - for k > 0 && s[k-1] == filepath.Separator { - k-- - } - return s[:k] -} - -// StripLeadingSeps trims leading filepath separators from input. 
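For the on-disk implementation, CleanedAbs resolves symlinks and splits a path into a ConfirmedDir plus a file name, and Glob filters out hidden files unless the pattern itself targets them. A small sketch against a temporary directory (paths are illustrative):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"sigs.k8s.io/kustomize/kyaml/filesys"
)

func main() {
	fSys := filesys.MakeFsOnDisk()

	tmp, err := os.MkdirTemp("", "filesys-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(tmp)

	path := filepath.Join(tmp, "kustomization.yaml")
	if err := fSys.WriteFile(path, []byte("resources: []\n")); err != nil {
		panic(err)
	}

	// CleanedAbs returns the (symlink-resolved) directory and the file name.
	dir, file, err := fSys.CleanedAbs(path)
	fmt.Println(dir, file, err)

	// Glob only returns non-hidden files for a non-hidden pattern.
	matches, _ := fSys.Glob(filepath.Join(tmp, "*.yaml"))
	fmt.Println(matches)
}
```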
-func StripLeadingSeps(s string) string { - k := 0 - for k < len(s) && s[k] == filepath.Separator { - k++ - } - return s[k:] -} - -// PathSplit converts a file path to a slice of string. -// If the path is absolute (if the path has a leading slash), -// then the first entry in the result is an empty string. -// Desired: path == PathJoin(PathSplit(path)) -func PathSplit(incoming string) []string { - if incoming == "" { - return []string{} - } - dir, path := filepath.Split(incoming) - if dir == string(os.PathSeparator) { - if path == "" { - return []string{""} - } - return []string{"", path} - } - dir = strings.TrimSuffix(dir, string(os.PathSeparator)) - if dir == "" { - return []string{path} - } - return append(PathSplit(dir), path) -} - -// PathJoin converts a slice of string to a file path. -// If the first entry is an empty string, then the returned -// path is absolute (it has a leading slash). -// Desired: path == PathJoin(PathSplit(path)) -func PathJoin(incoming []string) string { - if len(incoming) == 0 { - return "" - } - if incoming[0] == "" { - return string(os.PathSeparator) + filepath.Join(incoming[1:]...) - } - return filepath.Join(incoming...) -} - -// InsertPathPart inserts 'part' at position 'pos' in the given filepath. -// The first position is 0. -// -// E.g. if part == 'PEACH' -// -// OLD : NEW : POS -// -------------------------------------------------------- -// {empty} : PEACH : irrelevant -// / : /PEACH : irrelevant -// pie : PEACH/pie : 0 (or negative) -// /pie : /PEACH/pie : 0 (or negative) -// raw : raw/PEACH : 1 (or larger) -// /raw : /raw/PEACH : 1 (or larger) -// a/nice/warm/pie : a/nice/warm/PEACH/pie : 3 -// /a/nice/warm/pie : /a/nice/warm/PEACH/pie : 3 -// -// * An empty part results in no change. -// -// * Absolute paths get their leading '/' stripped, treated like -// relative paths, and the leading '/' is re-added on output. -// The meaning of pos is intentionally the same in either absolute or -// relative paths; if it weren't, this function could convert absolute -// paths to relative paths, which is not desirable. -// -// * For robustness (liberal input, conservative output) Pos values that -// that are too small (large) to index the split filepath result in a -// prefix (postfix) rather than an error. Use extreme position values -// to assure a prefix or postfix (e.g. 0 will always prefix, and -// 9999 will presumably always postfix). -func InsertPathPart(path string, pos int, part string) string { - if part == "" { - return path - } - parts := PathSplit(path) - if pos < 0 { - pos = 0 - } else if pos > len(parts) { - pos = len(parts) - } - if len(parts) > 0 && parts[0] == "" && pos < len(parts) { - // An empty string at 0 indicates an absolute path, and means - // we must increment pos. This change means that a position - // specification has the same meaning in relative and absolute paths. - // E.g. in either the path 'a/b/c' or the path '/a/b/c', - // 'a' is at 0, 'b' is at 1 and 'c' is at 2, and inserting at - // zero means a new first field _without_ changing an absolute - // path to a relative path. 
- pos++ - } - result := make([]string, len(parts)+1) - copy(result, parts[0:pos]) - result[pos] = part - return PathJoin(append(result, parts[pos:]...)) // nolint: makezero -} - -func IsHiddenFilePath(pattern string) bool { - return strings.HasPrefix(filepath.Base(pattern), ".") -} - -// Removes paths containing hidden files/folders from a list of paths -func RemoveHiddenFiles(paths []string) []string { - if len(paths) == 0 { - return paths - } - var result []string - for _, path := range paths { - if !IsHiddenFilePath(path) { - result = append(result, path) - } - } - return result -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/container/container.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/container/container.go deleted file mode 100644 index 76b51a8c5..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/container/container.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package container - -import ( - "fmt" - "os" - "path/filepath" - - "sigs.k8s.io/kustomize/kyaml/errors" - runtimeexec "sigs.k8s.io/kustomize/kyaml/fn/runtime/exec" - "sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// Filter filters Resources using a container image. -// The container must start a process that reads the list of -// input Resources from stdin, reads the Configuration from the env -// API_CONFIG, and writes the filtered Resources to stdout. -// If there is a error or validation failure, the process must exit -// non-zero. -// The full set of environment variables from the parent process -// are passed to the container. -// -// Function Scoping: -// Filter applies the function only to Resources to which it is scoped. -// -// Resources are scoped to a function if any of the following are true: -// - the Resource were read from the same directory as the function config -// - the Resource were read from a subdirectory of the function config directory -// - the function config is in a directory named "functions" and -// they were read from a subdirectory of "functions" parent -// - the function config doesn't have a path annotation (considered globally scoped) -// - the Filter has GlobalScope == true -// -// In Scope Examples: -// -// Example 1: deployment.yaml and service.yaml in function.yaml scope -// same directory as the function config directory -// . -// ├── function.yaml -// ├── deployment.yaml -// └── service.yaml -// -// Example 2: apps/deployment.yaml and apps/service.yaml in function.yaml scope -// subdirectory of the function config directory -// . -// ├── function.yaml -// └── apps -//    ├── deployment.yaml -//    └── service.yaml -// -// Example 3: apps/deployment.yaml and apps/service.yaml in functions/function.yaml scope -// function config is in a directory named "functions" -// . -// ├── functions -// │   └── function.yaml -// └── apps -//    ├── deployment.yaml -//    └── service.yaml -// -// Out of Scope Examples: -// -// Example 1: apps/deployment.yaml and apps/service.yaml NOT in stuff/function.yaml scope -// . -// ├── stuff -// │   └── function.yaml -// └── apps -//    ├── deployment.yaml -//    └── service.yaml -// -// Example 2: apps/deployment.yaml and apps/service.yaml NOT in stuff/functions/function.yaml scope -// . 
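The PathSplit/PathJoin/InsertPathPart helpers above are easiest to read with a concrete round-trip. A short sketch on a Unix-like separator, mirroring the PEACH table in the doc comment:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/kyaml/filesys"
)

func main() {
	// A leading "" element marks an absolute path, so PathJoin(PathSplit(p)) == p.
	parts := filesys.PathSplit("/a/nice/warm/pie")
	fmt.Println(parts)                   // [ a nice warm pie] (first element is "")
	fmt.Println(filesys.PathJoin(parts)) // /a/nice/warm/pie

	// Position 3 means "before the fourth component" for both relative and
	// absolute paths, per the table in the doc comment.
	fmt.Println(filesys.InsertPathPart("/a/nice/warm/pie", 3, "PEACH")) // /a/nice/warm/PEACH/pie
	fmt.Println(filesys.InsertPathPart("pie", 0, "PEACH"))              // PEACH/pie
}
```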
-// ├── stuff -// │   └── functions -// │    └── function.yaml -// └── apps -//    ├── deployment.yaml -//    └── service.yaml -// -// Default Paths: -// Resources emitted by functions will have default path applied as annotations -// if none is present. -// The default path will be the function-dir/ (or parent directory in the case of "functions") -// + function-file-name/ + namespace/ + kind_name.yaml -// -// Example 1: Given a function in fn.yaml that produces a Deployment name foo and a Service named bar -// dir -// └── fn.yaml -// -// Would default newly generated Resources to: -// -// dir -// ├── fn.yaml -// └── fn -//    ├── deployment_foo.yaml -//    └── service_bar.yaml -// -// Example 2: Given a function in functions/fn.yaml that produces a Deployment name foo and a Service named bar -// dir -// └── fn.yaml -// -// Would default newly generated Resources to: -// -// dir -// ├── functions -// │   └── fn.yaml -// └── fn -//    ├── deployment_foo.yaml -//    └── service_bar.yaml -// -// Example 3: Given a function in fn.yaml that produces a Deployment name foo, namespace baz and a Service named bar namespace baz -// dir -// └── fn.yaml -// -// Would default newly generated Resources to: -// -// dir -// ├── fn.yaml -// └── fn -// └── baz -//    ├── deployment_foo.yaml -//    └── service_bar.yaml -type Filter struct { - runtimeutil.ContainerSpec `json:",inline" yaml:",inline"` - - Exec runtimeexec.Filter - - UIDGID string -} - -func (c Filter) String() string { - if c.Exec.DeferFailure { - return fmt.Sprintf("%s deferFailure: %v", c.Image, c.Exec.DeferFailure) - } - return c.Image -} -func (c Filter) GetExit() error { - return c.Exec.GetExit() -} - -func (c *Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { - if err := c.setupExec(); err != nil { - return nil, err - } - return c.Exec.Filter(nodes) -} - -func (c *Filter) setupExec() error { - // don't init 2x - if c.Exec.Path != "" { - return nil - } - - if c.Exec.WorkingDir == "" { - wd, err := os.Getwd() - if err != nil { - return errors.Wrap(err) - } - c.Exec.WorkingDir = wd - } - - path, args := c.getCommand() - c.Exec.Path = path - c.Exec.Args = args - return nil -} - -// getArgs returns the command + args to run to spawn the container -func (c *Filter) getCommand() (string, []string) { - network := runtimeutil.NetworkNameNone - if c.ContainerSpec.Network { - network = runtimeutil.NetworkNameHost - } - // run the container using docker. this is simpler than using the docker - // libraries, and ensures things like auth work the same as if the container - // was run from the cli. - args := []string{"run", - "--rm", // delete the container afterward - "-i", "-a", "STDIN", "-a", "STDOUT", "-a", "STDERR", // attach stdin, stdout, stderr - "--network", string(network), - - // added security options - "--user", c.UIDGID, - "--security-opt=no-new-privileges", // don't allow the user to escalate privileges - // note: don't make fs readonly because things like heredoc rely on writing tmp files - } - - for _, storageMount := range c.StorageMounts { - // convert declarative relative paths to absolute (otherwise docker will throw an error) - if !filepath.IsAbs(storageMount.Src) { - storageMount.Src = filepath.Join(c.Exec.WorkingDir, storageMount.Src) - } - args = append(args, "--mount", storageMount.String()) - } - - args = append(args, runtimeutil.NewContainerEnvFromStringSlice(c.Env).GetDockerFlags()...) 
- a := append(args, c.Image) //nolint:gocritic - return "docker", a -} - -// NewContainer returns a new container filter -func NewContainer(spec runtimeutil.ContainerSpec, uidgid string) Filter { - f := Filter{ContainerSpec: spec, UIDGID: uidgid} - - return f -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/exec/doc.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/exec/doc.go deleted file mode 100644 index 2747a96fb..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/exec/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package exec contains the exec function implementation. -package exec diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/exec/exec.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/exec/exec.go deleted file mode 100644 index e9546820f..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/exec/exec.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package exec - -import ( - "io" - "os" - "os/exec" - "path/filepath" - - "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -type Filter struct { - // Path is the path to the executable to run - Path string `yaml:"path,omitempty"` - - // Args are the arguments to the executable - Args []string `yaml:"args,omitempty"` - - // WorkingDir is the working directory that the executable - // should run in - WorkingDir string - - runtimeutil.FunctionFilter -} - -func (c *Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { - c.FunctionFilter.Run = c.Run - return c.FunctionFilter.Filter(nodes) -} - -func (c *Filter) Run(reader io.Reader, writer io.Writer) error { - cmd := exec.Command(c.Path, c.Args...) // nolint:gosec - cmd.Stdin = reader - cmd.Stdout = writer - cmd.Stderr = os.Stderr - if c.WorkingDir == "" { - return errors.Errorf("no working directory set for exec function") - } - if !filepath.IsAbs(c.WorkingDir) { - return errors.Errorf( - "relative working directory %s not allowed", c.WorkingDir) - } - if c.WorkingDir == "/" { - return errors.Errorf( - "root working directory '/' not allowed") - } - cmd.Dir = c.WorkingDir - return cmd.Run() -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/doc.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/doc.go deleted file mode 100644 index 89f9036a4..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package runtimeutil contains libraries for implementing function runtimes. -package runtimeutil diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/functiontypes.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/functiontypes.go deleted file mode 100644 index 39cb24195..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/functiontypes.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. 
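The exec runtime above shells out to a local binary that speaks the wrapped ResourceList protocol on stdin/stdout. A sketch that uses /bin/cat as a stand-in function (it simply echoes the ResourceList back unchanged), assuming a Unix-like host; the function-config kind and name are illustrative:

```go
package main

import (
	"fmt"
	"os"

	"sigs.k8s.io/kustomize/kyaml/fn/runtime/exec"
	"sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil"
	"sigs.k8s.io/kustomize/kyaml/yaml"
)

func main() {
	node := yaml.MustParse("apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: cm\n")
	// A function config without a path annotation is treated as globally scoped.
	fnConfig := yaml.MustParse("apiVersion: v1\nkind: MyFnConfig\nmetadata:\n  name: fn\n")

	wd, _ := os.Getwd() // WorkingDir must be absolute and must not be "/"

	f := &exec.Filter{
		Path:           "/bin/cat", // stand-in for a real KRM function binary
		WorkingDir:     wd,
		FunctionFilter: runtimeutil.FunctionFilter{FunctionConfig: fnConfig},
	}
	out, err := f.Filter([]*yaml.RNode{node})
	fmt.Println(len(out), err) // 1 <nil>
}
```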
-// SPDX-License-Identifier: Apache-2.0 - -package runtimeutil - -import ( - "fmt" - "os" - "sort" - "strings" - - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -const ( - FunctionAnnotationKey = "config.kubernetes.io/function" - oldFunctionAnnotationKey = "config.k8s.io/function" -) - -var functionAnnotationKeys = []string{FunctionAnnotationKey, oldFunctionAnnotationKey} - -// ContainerNetworkName is a type for network name used in container -type ContainerNetworkName string - -const ( - NetworkNameNone ContainerNetworkName = "none" - NetworkNameHost ContainerNetworkName = "host" -) -const defaultEnvValue string = "true" - -// ContainerEnv defines the environment present in a container. -type ContainerEnv struct { - // EnvVars is a key-value map that will be set as env in container - EnvVars map[string]string - - // VarsToExport are only env key. Value will be the value in the host system - VarsToExport []string -} - -// GetDockerFlags returns docker run style env flags -func (ce *ContainerEnv) GetDockerFlags() []string { - envs := ce.EnvVars - if envs == nil { - envs = make(map[string]string) - } - - flags := []string{} - // return in order to keep consistent among different runs - keys := []string{} - for k := range envs { - keys = append(keys, k) - } - sort.Strings(keys) - for _, key := range keys { - flags = append(flags, "-e", key+"="+envs[key]) - } - - for _, key := range ce.VarsToExport { - flags = append(flags, "-e", key) - } - - return flags -} - -// AddKeyValue adds a key-value pair into the envs -func (ce *ContainerEnv) AddKeyValue(key, value string) { - if ce.EnvVars == nil { - ce.EnvVars = make(map[string]string) - } - ce.EnvVars[key] = value -} - -// HasExportedKey returns true if the key is a exported key -func (ce *ContainerEnv) HasExportedKey(key string) bool { - for _, k := range ce.VarsToExport { - if k == key { - return true - } - } - return false -} - -// AddKey adds a key into the envs -func (ce *ContainerEnv) AddKey(key string) { - if !ce.HasExportedKey(key) { - ce.VarsToExport = append(ce.VarsToExport, key) - } -} - -// Raw returns a slice of string which represents the envs. -// Example: [foo=bar, baz] -func (ce *ContainerEnv) Raw() []string { - var ret []string - for k, v := range ce.EnvVars { - ret = append(ret, k+"="+v) - } - - ret = append(ret, ce.VarsToExport...) - return ret -} - -// NewContainerEnv returns a pointer to a new ContainerEnv -func NewContainerEnv() *ContainerEnv { - var ce ContainerEnv - ce.EnvVars = make(map[string]string) - // default envs - ce.EnvVars["LOG_TO_STDERR"] = defaultEnvValue - ce.EnvVars["STRUCTURED_RESULTS"] = defaultEnvValue - return &ce -} - -// NewContainerEnvFromStringSlice returns a new ContainerEnv pointer with parsing -// input envStr. 
envStr example: ["foo=bar", "baz"] -func NewContainerEnvFromStringSlice(envStr []string) *ContainerEnv { - ce := NewContainerEnv() - for _, e := range envStr { - parts := strings.SplitN(e, "=", 2) - if len(parts) == 1 { - ce.AddKey(e) - } else { - ce.AddKeyValue(parts[0], parts[1]) - } - } - return ce -} - -// FunctionSpec defines a spec for running a function -type FunctionSpec struct { - DeferFailure bool `json:"deferFailure,omitempty" yaml:"deferFailure,omitempty"` - - // Container is the spec for running a function as a container - Container ContainerSpec `json:"container,omitempty" yaml:"container,omitempty"` - - // Starlark is the spec for running a function as a starlark script - Starlark StarlarkSpec `json:"starlark,omitempty" yaml:"starlark,omitempty"` - - // ExecSpec is the spec for running a function as an executable - Exec ExecSpec `json:"exec,omitempty" yaml:"exec,omitempty"` -} - -type ExecSpec struct { - Path string `json:"path,omitempty" yaml:"path,omitempty"` -} - -// ContainerSpec defines a spec for running a function as a container -type ContainerSpec struct { - // Image is the container image to run - Image string `json:"image,omitempty" yaml:"image,omitempty"` - - // Network defines network specific configuration - Network bool `json:"network,omitempty" yaml:"network,omitempty"` - - // Mounts are the storage or directories to mount into the container - StorageMounts []StorageMount `json:"mounts,omitempty" yaml:"mounts,omitempty"` - - // Env is a slice of env string that will be exposed to container - Env []string `json:"envs,omitempty" yaml:"envs,omitempty"` -} - -// StarlarkSpec defines how to run a function as a starlark program -type StarlarkSpec struct { - Name string `json:"name,omitempty" yaml:"name,omitempty"` - - // Path specifies a path to a starlark script - Path string `json:"path,omitempty" yaml:"path,omitempty"` - - // URL specifies a url containing a starlark script - URL string `json:"url,omitempty" yaml:"url,omitempty"` -} - -// StorageMount represents a container's mounted storage option(s) -type StorageMount struct { - // Type of mount e.g. bind mount, local volume, etc. - MountType string `json:"type,omitempty" yaml:"type,omitempty"` - - // Source for the storage to be mounted. - // For named volumes, this is the name of the volume. - // For anonymous volumes, this field is omitted (empty string). - // For bind mounts, this is the path to the file or directory on the host. - Src string `json:"src,omitempty" yaml:"src,omitempty"` - - // The path where the file or directory is mounted in the container. - DstPath string `json:"dst,omitempty" yaml:"dst,omitempty"` - - // Mount in ReadWrite mode if it's explicitly configured - // See https://docs.docker.com/storage/bind-mounts/#use-a-read-only-bind-mount - ReadWriteMode bool `json:"rw,omitempty" yaml:"rw,omitempty"` -} - -func (s *StorageMount) String() string { - mode := "" - if !s.ReadWriteMode { - mode = ",readonly" - } - return fmt.Sprintf("type=%s,source=%s,target=%s%s", s.MountType, s.Src, s.DstPath, mode) -} - -// GetFunctionSpec returns the FunctionSpec for a resource. Returns -// nil if the resource does not have a FunctionSpec. 
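ContainerEnv and StorageMount above translate directly into `docker run` flags. A short sketch of what they render to, using illustrative values:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil"
)

func main() {
	// "foo=bar" becomes an explicit key/value; "BAZ" is exported from the host env.
	ce := runtimeutil.NewContainerEnvFromStringSlice([]string{"foo=bar", "BAZ"})
	fmt.Println(ce.GetDockerFlags())
	// [-e LOG_TO_STDERR=true -e STRUCTURED_RESULTS=true -e foo=bar -e BAZ]

	// Mounts render as --mount arguments and are read-only unless rw is set.
	sm := runtimeutil.StorageMount{MountType: "bind", Src: "/tmp/config", DstPath: "/config"}
	fmt.Println(sm.String())
	// type=bind,source=/tmp/config,target=/config,readonly
}
```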
-// -// The FunctionSpec is read from the resource metadata.annotation -// "config.kubernetes.io/function" -func GetFunctionSpec(n *yaml.RNode) *FunctionSpec { - meta, err := n.GetMeta() - if err != nil { - return nil - } - if fn := getFunctionSpecFromAnnotation(n, meta); fn != nil { - return fn - } - - // legacy function specification for backwards compatibility - container := meta.Annotations["config.kubernetes.io/container"] - if container != "" { - return &FunctionSpec{Container: ContainerSpec{Image: container}} - } - return nil -} - -// getFunctionSpecFromAnnotation parses the config function from an annotation -// if it is found -func getFunctionSpecFromAnnotation(n *yaml.RNode, meta yaml.ResourceMeta) *FunctionSpec { - var fs FunctionSpec - for _, s := range functionAnnotationKeys { - fn := meta.Annotations[s] - if fn != "" { - err := yaml.Unmarshal([]byte(fn), &fs) - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - } - return &fs - } - } - n, err := n.Pipe(yaml.Lookup("metadata", "configFn")) - if err != nil || yaml.IsMissingOrNull(n) { - return nil - } - s, err := n.String() - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - } - err = yaml.Unmarshal([]byte(s), &fs) - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - } - return &fs -} - -func StringToStorageMount(s string) StorageMount { - m := make(map[string]string) - options := strings.Split(s, ",") - for _, option := range options { - keyVal := strings.SplitN(option, "=", 2) - if len(keyVal) == 2 { - m[keyVal[0]] = keyVal[1] - } - } - var sm StorageMount - for key, value := range m { - switch { - case key == "type": - sm.MountType = value - case key == "src" || key == "source": - sm.Src = value - case key == "dst" || key == "target": - sm.DstPath = value - case key == "rw" && value == "true": - sm.ReadWriteMode = true - } - } - return sm -} - -// IsReconcilerFilter filters Resources based on whether or not they are Reconciler Resource. -// Resources with an apiVersion starting with '*.gcr.io', 'gcr.io' or 'docker.io' are considered -// Reconciler Resources. -type IsReconcilerFilter struct { - // ExcludeReconcilers if set to true, then Reconcilers will be excluded -- e.g. - // Resources with a reconcile container through the apiVersion (gcr.io prefix) or - // through the annotations - ExcludeReconcilers bool `yaml:"excludeReconcilers,omitempty"` - - // IncludeNonReconcilers if set to true, the NonReconciler will be included. - IncludeNonReconcilers bool `yaml:"includeNonReconcilers,omitempty"` -} - -// Filter implements kio.Filter -func (c *IsReconcilerFilter) Filter(inputs []*yaml.RNode) ([]*yaml.RNode, error) { - var out []*yaml.RNode - for i := range inputs { - isFnResource := GetFunctionSpec(inputs[i]) != nil - if isFnResource && !c.ExcludeReconcilers { - out = append(out, inputs[i]) - } - if !isFnResource && c.IncludeNonReconcilers { - out = append(out, inputs[i]) - } - } - return out, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/runtimeutil.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/runtimeutil.go deleted file mode 100644 index ae6fd93ba..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/runtimeutil.go +++ /dev/null @@ -1,281 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. 
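GetFunctionSpec above reads the spec from the `config.kubernetes.io/function` annotation on the function config. A sketch with an illustrative image name (not part of this package):

```go
package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil"
	"sigs.k8s.io/kustomize/kyaml/yaml"
)

func main() {
	fnConfig := yaml.MustParse(`
apiVersion: example.com/v1
kind: MyFnConfig
metadata:
  name: my-fn
  annotations:
    config.kubernetes.io/function: |
      container:
        image: example.com/my-fn:v0.1.0
`)
	spec := runtimeutil.GetFunctionSpec(fnConfig)
	fmt.Println(spec.Container.Image) // example.com/my-fn:v0.1.0
}
```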
-// SPDX-License-Identifier: Apache-2.0 - -package runtimeutil - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "path" - "strings" - - "sigs.k8s.io/kustomize/kyaml/comments" - "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/kio/kioutil" - "sigs.k8s.io/kustomize/kyaml/order" - - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// FunctionFilter wraps another filter to be invoked in the context of a function. -// FunctionFilter manages scoping the function, deferring failures, and saving results -// to files. -type FunctionFilter struct { - // Run implements the function. - Run func(reader io.Reader, writer io.Writer) error - - // FunctionConfig is passed to the function through ResourceList.functionConfig. - FunctionConfig *yaml.RNode `yaml:"functionConfig,omitempty"` - - // GlobalScope explicitly scopes the function to all input resources rather than only those - // resources scoped to it by path. - GlobalScope bool - - // ResultsFile is the file to write function ResourceList.results to. - // If unset, results will not be written. - ResultsFile string - - // DeferFailure will cause the Filter to return a nil error even if Run returns an error. - // The Run error will be available through GetExit(). - DeferFailure bool - - // results saves the results emitted from Run - Results *yaml.RNode - - // exit saves the error returned from Run - exit error - - ids map[string]*yaml.RNode -} - -// GetExit returns the error from Run -func (c FunctionFilter) GetExit() error { - return c.exit -} - -// functionsDirectoryName is keyword directory name for functions scoped 1 directory higher -const functionsDirectoryName = "functions" - -// getFunctionScope returns the path of the directory containing the function config, -// or its parent directory if the base directory is named "functions" -func (c *FunctionFilter) getFunctionScope() (string, error) { - m, err := c.FunctionConfig.GetMeta() - if err != nil { - return "", errors.Wrap(err) - } - var p string - var found bool - p, found = m.Annotations[kioutil.PathAnnotation] - if !found { - p, found = m.Annotations[kioutil.LegacyPathAnnotation] - if !found { - return "", nil - } - } - - functionDir := path.Clean(path.Dir(p)) - - if path.Base(functionDir) == functionsDirectoryName { - // the scope of functions in a directory called "functions" is 1 level higher - // this is similar to how the golang "internal" directory scoping works - functionDir = path.Dir(functionDir) - } - return functionDir, nil -} - -// scope partitions the input nodes into 2 slices. The first slice contains only Resources -// which are scoped under dir, and the second slice contains the Resources which are not. -func (c *FunctionFilter) scope(dir string, nodes []*yaml.RNode) ([]*yaml.RNode, []*yaml.RNode, error) { - // scope container filtered Resources to Resources under that directory - var input, saved []*yaml.RNode - if c.GlobalScope { - return nodes, nil, nil - } - - // global function - if dir == "" || dir == "." 
{ - return nodes, nil, nil - } - - // identify Resources read from directories under the function configuration - for i := range nodes { - m, err := nodes[i].GetMeta() - if err != nil { - return nil, nil, err - } - var p string - var found bool - p, found = m.Annotations[kioutil.PathAnnotation] - if !found { - p, found = m.Annotations[kioutil.LegacyPathAnnotation] - if !found { - // this Resource isn't scoped under the function -- don't know where it came from - // consider it out of scope - saved = append(saved, nodes[i]) - continue - } - } - - resourceDir := path.Clean(path.Dir(p)) - if path.Base(resourceDir) == functionsDirectoryName { - // Functions in the `functions` directory are scoped to - // themselves, and should see themselves as input - resourceDir = path.Dir(resourceDir) - } - if !strings.HasPrefix(resourceDir, dir) { - // this Resource doesn't fall under the function scope if it - // isn't in a subdirectory of where the function lives - saved = append(saved, nodes[i]) - continue - } - - // this input is scoped under the function - input = append(input, nodes[i]) - } - - return input, saved, nil -} - -func (c *FunctionFilter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { - in := &bytes.Buffer{} - out := &bytes.Buffer{} - - // only process Resources scoped to this function, save the others - functionDir, err := c.getFunctionScope() - if err != nil { - return nil, err - } - input, saved, err := c.scope(functionDir, nodes) - if err != nil { - return nil, err - } - - // set ids on each input so it is possible to copy comments from inputs back to outputs - if err := c.setIds(input); err != nil { - return nil, err - } - - // write the input - err = kio.ByteWriter{ - WrappingAPIVersion: kio.ResourceListAPIVersion, - WrappingKind: kio.ResourceListKind, - Writer: in, - KeepReaderAnnotations: true, - FunctionConfig: c.FunctionConfig}.Write(input) - if err != nil { - return nil, err - } - - // capture the command stdout for the return value - r := &kio.ByteReader{Reader: out} - - // don't exit immediately if the function fails -- write out the validation - c.exit = c.Run(in, out) - - output, err := r.Read() - if err != nil { - return nil, err - } - - // copy the comments and sync the order of fields from the inputs to the outputs - if err := c.copyCommentsAndSyncOrder(output); err != nil { - return nil, err - } - - if err := c.doResults(r); err != nil { - return nil, err - } - - if c.exit != nil && !c.DeferFailure { - return append(output, saved...), c.exit - } - - // annotate any generated Resources with a path and index if they don't already have one - if err := kioutil.DefaultPathAnnotation(functionDir, output); err != nil { - return nil, err - } - - // emit both the Resources output from the function, and the out-of-scope Resources - // which were not provided to the function - return append(output, saved...), nil -} - -func (c *FunctionFilter) setIds(nodes []*yaml.RNode) error { - // set the id on each node to map inputs to outputs - var id int - c.ids = map[string]*yaml.RNode{} - for i := range nodes { - id++ - idStr := fmt.Sprintf("%v", id) - err := nodes[i].PipeE(yaml.SetAnnotation(kioutil.IdAnnotation, idStr)) - if err != nil { - return errors.Wrap(err) - } - err = nodes[i].PipeE(yaml.SetAnnotation(kioutil.LegacyIdAnnotation, idStr)) - if err != nil { - return errors.Wrap(err) - } - c.ids[idStr] = nodes[i] - } - return nil -} - -func (c *FunctionFilter) copyCommentsAndSyncOrder(nodes []*yaml.RNode) error { - for i := range nodes { - node := nodes[i] - anID, err := 
node.Pipe(yaml.GetAnnotation(kioutil.IdAnnotation)) - if err != nil { - return errors.Wrap(err) - } - if anID == nil { - anID, err = node.Pipe(yaml.GetAnnotation(kioutil.LegacyIdAnnotation)) - if err != nil { - return errors.Wrap(err) - } - if anID == nil { - continue - } - } - - var in *yaml.RNode - var found bool - if in, found = c.ids[anID.YNode().Value]; !found { - continue - } - if err := comments.CopyComments(in, node); err != nil { - return errors.Wrap(err) - } - if err := order.SyncOrder(in, node); err != nil { - return errors.Wrap(err) - } - if err := node.PipeE(yaml.ClearAnnotation(kioutil.IdAnnotation)); err != nil { - return errors.Wrap(err) - } - if err := node.PipeE(yaml.ClearAnnotation(kioutil.LegacyIdAnnotation)); err != nil { - return errors.Wrap(err) - } - } - return nil -} - -func (c *FunctionFilter) doResults(r *kio.ByteReader) error { - // Write the results to a file if configured to do so - if c.ResultsFile != "" && r.Results != nil { - results, err := r.Results.String() - if err != nil { - return err - } - err = ioutil.WriteFile(c.ResultsFile, []byte(results), 0600) - if err != nil { - return err - } - } - - if r.Results != nil { - c.Results = r.Results - } - return nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/types.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/types.go deleted file mode 100644 index 5edc4ebc3..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil/types.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package runtimeutil - -type DeferFailureFunction interface { - GetExit() error -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark/context.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark/context.go deleted file mode 100644 index bbf07f66c..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark/context.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. 
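FunctionFilter above is the piece the exec, container and starlark runtimes all embed; per its field comments, DeferFailure swallows the Run error and exposes it through GetExit instead. A minimal sketch with an inline Run that always fails (the resource and function-config values are illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"io"

	"sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil"
	"sigs.k8s.io/kustomize/kyaml/yaml"
)

func main() {
	node := yaml.MustParse("apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: cm\n")
	fnConfig := yaml.MustParse("apiVersion: v1\nkind: MyFnConfig\nmetadata:\n  name: fn\n")

	ff := runtimeutil.FunctionFilter{
		FunctionConfig: fnConfig,
		DeferFailure:   true, // don't fail the pipeline; surface the error via GetExit
		Run: func(r io.Reader, w io.Writer) error {
			return errors.New("function failed")
		},
	}
	out, err := ff.Filter([]*yaml.RNode{node})
	fmt.Println(len(out), err) // 0 <nil> since the failure was deferred
	fmt.Println(ff.GetExit())  // function failed
}
```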
-// SPDX-License-Identifier: Apache-2.0 - -package starlark - -import ( - "encoding/json" - "os" - "strings" - - "go.starlark.net/starlark" - "go.starlark.net/starlarkstruct" - "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util" - "sigs.k8s.io/kustomize/kyaml/openapi" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -type Context struct { - resourceList starlark.Value -} - -func (c *Context) predeclared() (starlark.StringDict, error) { - e, err := env() - if err != nil { - return nil, err - } - oa, err := oa() - if err != nil { - return nil, err - } - dict := starlark.StringDict{ - "resource_list": c.resourceList, - "open_api": oa, - "environment": e, - } - - return starlark.StringDict{ - "ctx": starlarkstruct.FromStringDict(starlarkstruct.Default, dict), - }, nil -} - -func oa() (starlark.Value, error) { - return interfaceToValue(openapi.Schema()) -} - -func env() (starlark.Value, error) { - env := map[string]interface{}{} - for _, e := range os.Environ() { - pair := strings.SplitN(e, "=", 2) - if len(pair) < 2 { - continue - } - env[pair[0]] = pair[1] - } - value, err := util.Marshal(env) - if err != nil { - return nil, errors.Wrap(err) - } - return value, nil -} - -func interfaceToValue(i interface{}) (starlark.Value, error) { - b, err := json.Marshal(i) - if err != nil { - return nil, err - } - - var in map[string]interface{} - if err := yaml.Unmarshal(b, &in); err != nil { - return nil, errors.Wrap(err) - } - - value, err := util.Marshal(in) - if err != nil { - return nil, errors.Wrap(err) - } - return value, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark/doc.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark/doc.go deleted file mode 100644 index 70f0a2c7b..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark/doc.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package starlark contains a kio.Filter which can be applied to resources to transform -// them through starlark program. -// -// Starlark has become a popular runtime embedding in go programs, especially for Kubernetes -// and data processing. -// Examples: https://github.com/cruise-automation/isopod, https://qri.io/docs/starlark/starlib, -// https://github.com/stripe/skycfg, https://github.com/k14s/ytt -// -// The resources are provided to the starlark program through the global variable "resourceList". -// "resourceList" is a dictionary containing an "items" field with a list of resources. -// The starlark modified "resourceList" is the Filter output. -// -// After being run through the starlark program, the filter will copy the comments from the input -// resources to restore them -- due to them being dropped as a result of serializing the resources -// as starlark values. -// -// "resourceList" may also contain a "functionConfig" entry to configure the starlark script itself. -// Changes made by the starlark program to the "functionConfig" will be reflected in the -// Filter.FunctionConfig value. -// -// The Filter will also format the output so that output has the preferred field ordering -// rather than an alphabetical field ordering. 
-// -// The resourceList variable adheres to the kustomize function spec as specified by: -// https://github.com/kubernetes-sigs/kustomize/blob/master/cmd/config/docs/api-conventions/functions-spec.md -// -// All items in the resourceList are resources represented as starlark dictionaries/ -// The items in the resourceList respect the io spec specified by: -// https://github.com/kubernetes-sigs/kustomize/blob/master/cmd/config/docs/api-conventions/config-io.md -// -// The starlark language spec can be found here: -// https://github.com/google/starlark-go/blob/master/doc/spec.md -package starlark diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark/starlark.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark/starlark.go deleted file mode 100644 index 9d7450a48..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark/starlark.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package starlark - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - - "go.starlark.net/starlark" - "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil" - "sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util" - "sigs.k8s.io/kustomize/kyaml/kio/filters" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// Filter transforms a set of resources through the provided program -type Filter struct { - Name string - - // Program is a starlark script which will be run against the resources - Program string - - // URL is the url of a starlark program to fetch and run - URL string - - // Path is the path to a starlark program to read and run - Path string - - runtimeutil.FunctionFilter -} - -func (sf *Filter) String() string { - return fmt.Sprintf( - "name: %v path: %v url: %v program: %v", sf.Name, sf.Path, sf.URL, sf.Program) -} - -func (sf *Filter) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { - if err := sf.setup(); err != nil { - return nil, err - } - sf.FunctionFilter.Run = sf.Run - - return sf.FunctionFilter.Filter(nodes) -} - -func (sf *Filter) setup() error { - if (sf.URL != "" && sf.Path != "") || - (sf.URL != "" && sf.Program != "") || - (sf.Path != "" && sf.Program != "") { - return errors.Errorf("Filter Path, Program and URL are mutually exclusive") - } - - // read the program from a file - if sf.Path != "" { - b, err := ioutil.ReadFile(sf.Path) - if err != nil { - return err - } - sf.Program = string(b) - } - - // read the program from a URL - if sf.URL != "" { - err := func() error { - resp, err := http.Get(sf.URL) - if err != nil { - return err - } - defer resp.Body.Close() - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - sf.Program = string(b) - return nil - }() - if err != nil { - return err - } - } - - return nil -} - -func (sf *Filter) Run(reader io.Reader, writer io.Writer) error { - // retain map of inputs to outputs by id so if the name is changed by the - // starlark program, we are able to match the same resources - value, err := sf.readResourceList(reader) - if err != nil { - return errors.Wrap(err) - } - - // run the starlark as program as transformation function - thread := &starlark.Thread{Name: sf.Name} - - ctx := &Context{resourceList: value} - pd, err := ctx.predeclared() - if err != nil { - return errors.Wrap(err) - } - _, err = starlark.ExecFile(thread, sf.Name, sf.Program, pd) - if err != nil { - return errors.Wrap(err) - } - - return sf.writeResourceList(value, writer) -} - -// 
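The starlark runtime described above exposes the ResourceList to the script as `ctx.resource_list`. A sketch with an illustrative inline program that stamps a label on every item; the script, label, and function-config values are assumptions, not part of this package:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil"
	"sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark"
	"sigs.k8s.io/kustomize/kyaml/yaml"
)

func main() {
	node := yaml.MustParse("apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: cm\n")
	fnConfig := yaml.MustParse("apiVersion: v1\nkind: MyFnConfig\nmetadata:\n  name: fn\n")

	f := &starlark.Filter{
		Name: "label",
		// Illustrative program: mutate ctx.resource_list in place.
		Program: `
def run(items):
    for item in items:
        item["metadata"]["labels"] = {"touched": "true"}

run(ctx.resource_list["items"])
`,
		FunctionFilter: runtimeutil.FunctionFilter{FunctionConfig: fnConfig},
	}
	out, err := f.Filter([]*yaml.RNode{node})
	if err != nil {
		panic(err)
	}
	s, _ := out[0].String()
	fmt.Print(s) // the ConfigMap should now carry metadata.labels.touched: "true"
}
```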
inputToResourceList transforms input into a starlark.Value -func (sf *Filter) readResourceList(reader io.Reader) (starlark.Value, error) { - // read and parse the inputs - rl := bytes.Buffer{} - _, err := rl.ReadFrom(reader) - if err != nil { - return nil, errors.Wrap(err) - } - rn, err := yaml.Parse(rl.String()) - if err != nil { - return nil, errors.Wrap(err) - } - - // convert to a starlark value - b, err := yaml.Marshal(rn.Document()) // convert to bytes - if err != nil { - return nil, errors.Wrap(err) - } - var in map[string]interface{} - err = yaml.Unmarshal(b, &in) // convert to map[string]interface{} - if err != nil { - return nil, errors.Wrap(err) - } - return util.Marshal(in) // convert to starlark value -} - -// resourceListToOutput converts the output of the starlark program to the filter output -func (sf *Filter) writeResourceList(value starlark.Value, writer io.Writer) error { - // convert the modified resourceList back into a slice of RNodes - // by first converting to a map[string]interface{} - out, err := util.Unmarshal(value) - if err != nil { - return errors.Wrap(err) - } - b, err := yaml.Marshal(out) - if err != nil { - return errors.Wrap(err) - } - - rl, err := yaml.Parse(string(b)) - if err != nil { - return errors.Wrap(err) - } - - // preserve the comments from the input - items, err := rl.Pipe(yaml.Lookup("items")) - if err != nil { - return errors.Wrap(err) - } - err = items.VisitElements(func(node *yaml.RNode) error { - // starlark will serialize the resources sorting the fields alphabetically, - // format them to have a better ordering - _, err := filters.FormatFilter{}.Filter([]*yaml.RNode{node}) - return err - }) - if err != nil { - return errors.Wrap(err) - } - - s, err := rl.String() - if err != nil { - return errors.Wrap(err) - } - - _, err = writer.Write([]byte(s)) - return err -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/LICENSE b/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/LICENSE deleted file mode 100644 index 2683e4bb1..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/LICENSE +++ /dev/null @@ -1,50 +0,0 @@ - -This project is covered by two different licenses: MIT and Apache. - -#### MIT License #### - -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original MIT license, with the additional -copyright staring in 2011 when the project was ported over: - - apic.go emitterc.go parserc.go readerc.go scannerc.go - writerc.go yamlh.go yamlprivateh.go - -Copyright (c) 2006-2010 Kirill Simonov -Copyright (c) 2006-2011 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -### Apache License ### - -All the remaining project files are covered by the Apache license: - -Copyright (c) 2011-2019 Canonical Ltd - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/NOTICE b/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/NOTICE deleted file mode 100644 index 866d74a7a..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/NOTICE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2011-2016 Canonical Ltd. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/README.md b/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/README.md deleted file mode 100644 index 08eb1babd..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/README.md +++ /dev/null @@ -1,150 +0,0 @@ -# YAML support for the Go language - -Introduction ------------- - -The yaml package enables Go programs to comfortably encode and decode YAML -values. It was developed within [Canonical](https://www.canonical.com) as -part of the [juju](https://juju.ubuntu.com) project, and is based on a -pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) -C library to parse and generate YAML data quickly and reliably. - -Compatibility -------------- - -The yaml package supports most of YAML 1.2, but preserves some behavior -from 1.1 for backwards compatibility. - -Specifically, as of v3 of the yaml package: - - - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being - decoded into a typed bool value. Otherwise they behave as a string. Booleans - in YAML 1.2 are _true/false_ only. - - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_ - as specified in YAML 1.2, because most parsers still use the old format. - Octals in the _0o777_ format are supported though, so new files work. - - Does not support base-60 floats. These are gone from YAML 1.2, and were - actually never supported by this package as it's clearly a poor choice. - -and offers backwards -compatibility with YAML 1.1 in some cases. 
-1.2, including support for -anchors, tags, map merging, etc. Multi-document unmarshalling is not yet -implemented, and base-60 floats from YAML 1.1 are purposefully not -supported since they're a poor design and are gone in YAML 1.2. - -Installation and usage ----------------------- - -The import path for the package is *gopkg.in/yaml.v3*. - -To install it, run: - - go get gopkg.in/yaml.v3 - -API documentation ------------------ - -If opened in a browser, the import path itself leads to the API documentation: - - - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3) - -API stability -------------- - -The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in). - - -License -------- - -The yaml package is licensed under the MIT and Apache License 2.0 licenses. -Please see the LICENSE file for details. - - -Example -------- - -```Go -package main - -import ( - "fmt" - "log" - - "gopkg.in/yaml.v3" -) - -var data = ` -a: Easy! -b: - c: 2 - d: [3, 4] -` - -// Note: struct fields must be public in order for unmarshal to -// correctly populate the data. -type T struct { - A string - B struct { - RenamedC int `yaml:"c"` - D []int `yaml:",flow"` - } -} - -func main() { - t := T{} - - err := yaml.Unmarshal([]byte(data), &t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t:\n%v\n\n", t) - - d, err := yaml.Marshal(&t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t dump:\n%s\n\n", string(d)) - - m := make(map[interface{}]interface{}) - - err = yaml.Unmarshal([]byte(data), &m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m:\n%v\n\n", m) - - d, err = yaml.Marshal(&m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m dump:\n%s\n\n", string(d)) -} -``` - -This example will generate the following output: - -``` ---- t: -{Easy! {2 [3 4]}} - ---- t dump: -a: Easy! -b: - c: 2 - d: [3, 4] - - ---- m: -map[a:Easy! b:map[c:2 d:[3 4]]] - ---- m dump: -a: Easy! -b: - c: 2 - d: - - 3 - - 4 -``` - diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/apic.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/apic.go deleted file mode 100644 index ae7d049f1..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/apic.go +++ /dev/null @@ -1,747 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -import ( - "io" -) - -func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { - //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) - - // Check if we can move the queue at the beginning of the buffer. - if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { - if parser.tokens_head != len(parser.tokens) { - copy(parser.tokens, parser.tokens[parser.tokens_head:]) - } - parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] - parser.tokens_head = 0 - } - parser.tokens = append(parser.tokens, *token) - if pos < 0 { - return - } - copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) - parser.tokens[parser.tokens_head+pos] = *token -} - -// Create a new parser object. -func yaml_parser_initialize(parser *yaml_parser_t) bool { - *parser = yaml_parser_t{ - raw_buffer: make([]byte, 0, input_raw_buffer_size), - buffer: make([]byte, 0, input_buffer_size), - } - return true -} - -// Destroy a parser object. -func yaml_parser_delete(parser *yaml_parser_t) { - *parser = yaml_parser_t{} -} - -// String read handler. -func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - if parser.input_pos == len(parser.input) { - return 0, io.EOF - } - n = copy(buffer, parser.input[parser.input_pos:]) - parser.input_pos += n - return n, nil -} - -// Reader read handler. -func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - return parser.input_reader.Read(buffer) -} - -// Set a string input. -func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_string_read_handler - parser.input = input - parser.input_pos = 0 -} - -// Set a file input. -func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_reader_read_handler - parser.input_reader = r -} - -// Set the source encoding. -func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { - if parser.encoding != yaml_ANY_ENCODING { - panic("must set the encoding only once") - } - parser.encoding = encoding -} - -// Create a new emitter object. -func yaml_emitter_initialize(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{ - buffer: make([]byte, output_buffer_size), - raw_buffer: make([]byte, 0, output_raw_buffer_size), - states: make([]yaml_emitter_state_t, 0, initial_stack_size), - events: make([]yaml_event_t, 0, initial_queue_size), - best_width: -1, - } -} - -// Destroy an emitter object. -func yaml_emitter_delete(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{} -} - -// String write handler. -func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - *emitter.output_buffer = append(*emitter.output_buffer, buffer...) - return nil -} - -// yaml_writer_write_handler uses emitter.output_writer to write the -// emitted text. 
-func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_writer.Write(buffer) - return err -} - -// Set a string output. -func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_string_write_handler - emitter.output_buffer = output_buffer -} - -// Set a file output. -func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_writer_write_handler - emitter.output_writer = w -} - -// Set the output encoding. -func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { - if emitter.encoding != yaml_ANY_ENCODING { - panic("must set the output encoding only once") - } - emitter.encoding = encoding -} - -// Set the canonical output style. -func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { - emitter.canonical = canonical -} - -// Set the indentation increment. -func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { - if indent < 2 || indent > 9 { - indent = 2 - } - emitter.best_indent = indent -} - -// Set the preferred line width. -func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { - if width < 0 { - width = -1 - } - emitter.best_width = width -} - -// Set if unescaped non-ASCII characters are allowed. -func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { - emitter.unicode = unicode -} - -// Set the preferred line break character. -func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { - emitter.line_break = line_break -} - -///* -// * Destroy a token object. -// */ -// -//YAML_DECLARE(void) -//yaml_token_delete(yaml_token_t *token) -//{ -// assert(token); // Non-NULL token object expected. -// -// switch (token.type) -// { -// case YAML_TAG_DIRECTIVE_TOKEN: -// yaml_free(token.data.tag_directive.handle); -// yaml_free(token.data.tag_directive.prefix); -// break; -// -// case YAML_ALIAS_TOKEN: -// yaml_free(token.data.alias.value); -// break; -// -// case YAML_ANCHOR_TOKEN: -// yaml_free(token.data.anchor.value); -// break; -// -// case YAML_TAG_TOKEN: -// yaml_free(token.data.tag.handle); -// yaml_free(token.data.tag.suffix); -// break; -// -// case YAML_SCALAR_TOKEN: -// yaml_free(token.data.scalar.value); -// break; -// -// default: -// break; -// } -// -// memset(token, 0, sizeof(yaml_token_t)); -//} -// -///* -// * Check if a string is a valid UTF-8 sequence. -// * -// * Check 'reader.c' for more details on UTF-8 encoding. -// */ -// -//static int -//yaml_check_utf8(yaml_char_t *start, size_t length) -//{ -// yaml_char_t *end = start+length; -// yaml_char_t *pointer = start; -// -// while (pointer < end) { -// unsigned char octet; -// unsigned int width; -// unsigned int value; -// size_t k; -// -// octet = pointer[0]; -// width = (octet & 0x80) == 0x00 ? 1 : -// (octet & 0xE0) == 0xC0 ? 2 : -// (octet & 0xF0) == 0xE0 ? 3 : -// (octet & 0xF8) == 0xF0 ? 4 : 0; -// value = (octet & 0x80) == 0x00 ? octet & 0x7F : -// (octet & 0xE0) == 0xC0 ? octet & 0x1F : -// (octet & 0xF0) == 0xE0 ? octet & 0x0F : -// (octet & 0xF8) == 0xF0 ? 
octet & 0x07 : 0; -// if (!width) return 0; -// if (pointer+width > end) return 0; -// for (k = 1; k < width; k ++) { -// octet = pointer[k]; -// if ((octet & 0xC0) != 0x80) return 0; -// value = (value << 6) + (octet & 0x3F); -// } -// if (!((width == 1) || -// (width == 2 && value >= 0x80) || -// (width == 3 && value >= 0x800) || -// (width == 4 && value >= 0x10000))) return 0; -// -// pointer += width; -// } -// -// return 1; -//} -// - -// Create STREAM-START. -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - encoding: encoding, - } -} - -// Create STREAM-END. -func yaml_stream_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - } -} - -// Create DOCUMENT-START. -func yaml_document_start_event_initialize( - event *yaml_event_t, - version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, - implicit bool, -) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: implicit, - } -} - -// Create DOCUMENT-END. -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - implicit: implicit, - } -} - -// Create ALIAS. -func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { - *event = yaml_event_t{ - typ: yaml_ALIAS_EVENT, - anchor: anchor, - } - return true -} - -// Create SCALAR. -func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - anchor: anchor, - tag: tag, - value: value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-START. -func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-END. -func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - } - return true -} - -// Create MAPPING-START. -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } -} - -// Create MAPPING-END. -func yaml_mapping_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - } -} - -// Destroy an event object. -func yaml_event_delete(event *yaml_event_t) { - *event = yaml_event_t{} -} - -///* -// * Create a document object. 
-// */ -// -//YAML_DECLARE(int) -//yaml_document_initialize(document *yaml_document_t, -// version_directive *yaml_version_directive_t, -// tag_directives_start *yaml_tag_directive_t, -// tag_directives_end *yaml_tag_directive_t, -// start_implicit int, end_implicit int) -//{ -// struct { -// error yaml_error_type_t -// } context -// struct { -// start *yaml_node_t -// end *yaml_node_t -// top *yaml_node_t -// } nodes = { NULL, NULL, NULL } -// version_directive_copy *yaml_version_directive_t = NULL -// struct { -// start *yaml_tag_directive_t -// end *yaml_tag_directive_t -// top *yaml_tag_directive_t -// } tag_directives_copy = { NULL, NULL, NULL } -// value yaml_tag_directive_t = { NULL, NULL } -// mark yaml_mark_t = { 0, 0, 0 } -// -// assert(document) // Non-NULL document object is expected. -// assert((tag_directives_start && tag_directives_end) || -// (tag_directives_start == tag_directives_end)) -// // Valid tag directives are expected. -// -// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error -// -// if (version_directive) { -// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) -// if (!version_directive_copy) goto error -// version_directive_copy.major = version_directive.major -// version_directive_copy.minor = version_directive.minor -// } -// -// if (tag_directives_start != tag_directives_end) { -// tag_directive *yaml_tag_directive_t -// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) -// goto error -// for (tag_directive = tag_directives_start -// tag_directive != tag_directives_end; tag_directive ++) { -// assert(tag_directive.handle) -// assert(tag_directive.prefix) -// if (!yaml_check_utf8(tag_directive.handle, -// strlen((char *)tag_directive.handle))) -// goto error -// if (!yaml_check_utf8(tag_directive.prefix, -// strlen((char *)tag_directive.prefix))) -// goto error -// value.handle = yaml_strdup(tag_directive.handle) -// value.prefix = yaml_strdup(tag_directive.prefix) -// if (!value.handle || !value.prefix) goto error -// if (!PUSH(&context, tag_directives_copy, value)) -// goto error -// value.handle = NULL -// value.prefix = NULL -// } -// } -// -// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, -// tag_directives_copy.start, tag_directives_copy.top, -// start_implicit, end_implicit, mark, mark) -// -// return 1 -// -//error: -// STACK_DEL(&context, nodes) -// yaml_free(version_directive_copy) -// while (!STACK_EMPTY(&context, tag_directives_copy)) { -// value yaml_tag_directive_t = POP(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// } -// STACK_DEL(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// -// return 0 -//} -// -///* -// * Destroy a document object. -// */ -// -//YAML_DECLARE(void) -//yaml_document_delete(document *yaml_document_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// tag_directive *yaml_tag_directive_t -// -// context.error = YAML_NO_ERROR // Eliminate a compiler warning. -// -// assert(document) // Non-NULL document object is expected. 
-// -// while (!STACK_EMPTY(&context, document.nodes)) { -// node yaml_node_t = POP(&context, document.nodes) -// yaml_free(node.tag) -// switch (node.type) { -// case YAML_SCALAR_NODE: -// yaml_free(node.data.scalar.value) -// break -// case YAML_SEQUENCE_NODE: -// STACK_DEL(&context, node.data.sequence.items) -// break -// case YAML_MAPPING_NODE: -// STACK_DEL(&context, node.data.mapping.pairs) -// break -// default: -// assert(0) // Should not happen. -// } -// } -// STACK_DEL(&context, document.nodes) -// -// yaml_free(document.version_directive) -// for (tag_directive = document.tag_directives.start -// tag_directive != document.tag_directives.end -// tag_directive++) { -// yaml_free(tag_directive.handle) -// yaml_free(tag_directive.prefix) -// } -// yaml_free(document.tag_directives.start) -// -// memset(document, 0, sizeof(yaml_document_t)) -//} -// -///** -// * Get a document node. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_node(document *yaml_document_t, index int) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (index > 0 && document.nodes.start + index <= document.nodes.top) { -// return document.nodes.start + index - 1 -// } -// return NULL -//} -// -///** -// * Get the root object. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_root_node(document *yaml_document_t) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (document.nodes.top != document.nodes.start) { -// return document.nodes.start -// } -// return NULL -//} -// -///* -// * Add a scalar node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_scalar(document *yaml_document_t, -// tag *yaml_char_t, value *yaml_char_t, length int, -// style yaml_scalar_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// value_copy *yaml_char_t = NULL -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// assert(value) // Non-NULL value is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (length < 0) { -// length = strlen((char *)value) -// } -// -// if (!yaml_check_utf8(value, length)) goto error -// value_copy = yaml_malloc(length+1) -// if (!value_copy) goto error -// memcpy(value_copy, value, length) -// value_copy[length] = '\0' -// -// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// yaml_free(tag_copy) -// yaml_free(value_copy) -// -// return 0 -//} -// -///* -// * Add a sequence node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_sequence(document *yaml_document_t, -// tag *yaml_char_t, style yaml_sequence_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_item_t -// end *yaml_node_item_t -// top *yaml_node_item_t -// } items = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. 
-// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error -// -// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, items) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Add a mapping node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_mapping(document *yaml_document_t, -// tag *yaml_char_t, style yaml_mapping_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_pair_t -// end *yaml_node_pair_t -// top *yaml_node_pair_t -// } pairs = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error -// -// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, pairs) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Append an item to a sequence node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_sequence_item(document *yaml_document_t, -// sequence int, item int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// assert(document) // Non-NULL document is required. -// assert(sequence > 0 -// && document.nodes.start + sequence <= document.nodes.top) -// // Valid sequence id is required. -// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) -// // A sequence node is required. -// assert(item > 0 && document.nodes.start + item <= document.nodes.top) -// // Valid item id is required. -// -// if (!PUSH(&context, -// document.nodes.start[sequence-1].data.sequence.items, item)) -// return 0 -// -// return 1 -//} -// -///* -// * Append a pair of a key and a value to a mapping node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_mapping_pair(document *yaml_document_t, -// mapping int, key int, value int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// pair yaml_node_pair_t -// -// assert(document) // Non-NULL document is required. -// assert(mapping > 0 -// && document.nodes.start + mapping <= document.nodes.top) -// // Valid mapping id is required. -// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) -// // A mapping node is required. -// assert(key > 0 && document.nodes.start + key <= document.nodes.top) -// // Valid key id is required. -// assert(value > 0 && document.nodes.start + value <= document.nodes.top) -// // Valid value id is required. 
-// -// pair.key = key -// pair.value = value -// -// if (!PUSH(&context, -// document.nodes.start[mapping-1].data.mapping.pairs, pair)) -// return 0 -// -// return 1 -//} -// -// diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/decode.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/decode.go deleted file mode 100644 index df36e3a30..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/decode.go +++ /dev/null @@ -1,950 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package yaml - -import ( - "encoding" - "encoding/base64" - "fmt" - "io" - "math" - "reflect" - "strconv" - "time" -) - -// ---------------------------------------------------------------------------- -// Parser, produces a node tree out of a libyaml event stream. - -type parser struct { - parser yaml_parser_t - event yaml_event_t - doc *Node - anchors map[string]*Node - doneInit bool - textless bool -} - -func newParser(b []byte) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - if len(b) == 0 { - b = []byte{'\n'} - } - yaml_parser_set_input_string(&p.parser, b) - return &p -} - -func newParserFromReader(r io.Reader) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - yaml_parser_set_input_reader(&p.parser, r) - return &p -} - -func (p *parser) init() { - if p.doneInit { - return - } - p.anchors = make(map[string]*Node) - p.expect(yaml_STREAM_START_EVENT) - p.doneInit = true -} - -func (p *parser) destroy() { - if p.event.typ != yaml_NO_EVENT { - yaml_event_delete(&p.event) - } - yaml_parser_delete(&p.parser) -} - -// expect consumes an event from the event stream and -// checks that it's of the expected type. -func (p *parser) expect(e yaml_event_type_t) { - if p.event.typ == yaml_NO_EVENT { - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } - } - if p.event.typ == yaml_STREAM_END_EVENT { - failf("attempted to go past the end of stream; corrupted value?") - } - if p.event.typ != e { - p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) - p.fail() - } - yaml_event_delete(&p.event) - p.event.typ = yaml_NO_EVENT -} - -// peek peeks at the next event in the event stream, -// puts the results into p.event and returns the event type. 
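The parser in decode.go turns the libyaml event stream into the package's Node tree, preserving tags, comments, and source positions. A minimal sketch of reading that tree through the public API (shown against upstream gopkg.in/yaml.v3; the forked copy removed here is assumed to expose the same Node type):

```Go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v3"
)

func main() {
	src := []byte("name: demo\nreplicas: 3\n")

	// Decoding into a yaml.Node keeps the document structure instead of
	// collapsing it into plain Go values.
	var doc yaml.Node
	if err := yaml.Unmarshal(src, &doc); err != nil {
		log.Fatal(err)
	}

	root := doc.Content[0] // the single mapping node of the document
	for i := 0; i < len(root.Content); i += 2 {
		k, v := root.Content[i], root.Content[i+1]
		fmt.Printf("line %d: %s = %s (%s)\n", k.Line, k.Value, v.Value, v.Tag)
	}
}
```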
-func (p *parser) peek() yaml_event_type_t { - if p.event.typ != yaml_NO_EVENT { - return p.event.typ - } - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } - return p.event.typ -} - -func (p *parser) fail() { - var where string - var line int - if p.parser.context_mark.line != 0 { - line = p.parser.context_mark.line - // Scanner errors don't iterate line before returning error - if p.parser.error == yaml_SCANNER_ERROR { - line++ - } - } else if p.parser.problem_mark.line != 0 { - line = p.parser.problem_mark.line - // Scanner errors don't iterate line before returning error - if p.parser.error == yaml_SCANNER_ERROR { - line++ - } - } - if line != 0 { - where = "line " + strconv.Itoa(line) + ": " - } - var msg string - if len(p.parser.problem) > 0 { - msg = p.parser.problem - } else { - msg = "unknown problem parsing YAML content" - } - failf("%s%s", where, msg) -} - -func (p *parser) anchor(n *Node, anchor []byte) { - if anchor != nil { - n.Anchor = string(anchor) - p.anchors[n.Anchor] = n - } -} - -func (p *parser) parse() *Node { - p.init() - switch p.peek() { - case yaml_SCALAR_EVENT: - return p.scalar() - case yaml_ALIAS_EVENT: - return p.alias() - case yaml_MAPPING_START_EVENT: - return p.mapping() - case yaml_SEQUENCE_START_EVENT: - return p.sequence() - case yaml_DOCUMENT_START_EVENT: - return p.document() - case yaml_STREAM_END_EVENT: - // Happens when attempting to decode an empty buffer. - return nil - case yaml_TAIL_COMMENT_EVENT: - panic("internal error: unexpected tail comment event (please report)") - default: - panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) - } -} - -func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { - var style Style - if tag != "" && tag != "!" 
{ - tag = shortTag(tag) - style = TaggedStyle - } else if defaultTag != "" { - tag = defaultTag - } else if kind == ScalarNode { - tag, _ = resolve("", value) - } - n := &Node{ - Kind: kind, - Tag: tag, - Value: value, - Style: style, - } - if !p.textless { - n.Line = p.event.start_mark.line + 1 - n.Column = p.event.start_mark.column + 1 - n.HeadComment = string(p.event.head_comment) - n.LineComment = string(p.event.line_comment) - n.FootComment = string(p.event.foot_comment) - } - return n -} - -func (p *parser) parseChild(parent *Node) *Node { - child := p.parse() - parent.Content = append(parent.Content, child) - return child -} - -func (p *parser) document() *Node { - n := p.node(DocumentNode, "", "", "") - p.doc = n - p.expect(yaml_DOCUMENT_START_EVENT) - p.parseChild(n) - if p.peek() == yaml_DOCUMENT_END_EVENT { - n.FootComment = string(p.event.foot_comment) - } - p.expect(yaml_DOCUMENT_END_EVENT) - return n -} - -func (p *parser) alias() *Node { - n := p.node(AliasNode, "", "", string(p.event.anchor)) - n.Alias = p.anchors[n.Value] - if n.Alias == nil { - failf("unknown anchor '%s' referenced", n.Value) - } - p.expect(yaml_ALIAS_EVENT) - return n -} - -func (p *parser) scalar() *Node { - var parsedStyle = p.event.scalar_style() - var nodeStyle Style - switch { - case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: - nodeStyle = DoubleQuotedStyle - case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: - nodeStyle = SingleQuotedStyle - case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: - nodeStyle = LiteralStyle - case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: - nodeStyle = FoldedStyle - } - var nodeValue = string(p.event.value) - var nodeTag = string(p.event.tag) - var defaultTag string - if nodeStyle == 0 { - if nodeValue == "<<" { - defaultTag = mergeTag - } - } else { - defaultTag = strTag - } - n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) - n.Style |= nodeStyle - p.anchor(n, p.event.anchor) - p.expect(yaml_SCALAR_EVENT) - return n -} - -func (p *parser) sequence() *Node { - n := p.node(SequenceNode, seqTag, string(p.event.tag), "") - if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { - n.Style |= FlowStyle - } - p.anchor(n, p.event.anchor) - p.expect(yaml_SEQUENCE_START_EVENT) - for p.peek() != yaml_SEQUENCE_END_EVENT { - p.parseChild(n) - } - n.LineComment = string(p.event.line_comment) - n.FootComment = string(p.event.foot_comment) - p.expect(yaml_SEQUENCE_END_EVENT) - return n -} - -func (p *parser) mapping() *Node { - n := p.node(MappingNode, mapTag, string(p.event.tag), "") - block := true - if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { - block = false - n.Style |= FlowStyle - } - p.anchor(n, p.event.anchor) - p.expect(yaml_MAPPING_START_EVENT) - for p.peek() != yaml_MAPPING_END_EVENT { - k := p.parseChild(n) - if block && k.FootComment != "" { - // Must be a foot comment for the prior value when being dedented. 
- if len(n.Content) > 2 { - n.Content[len(n.Content)-3].FootComment = k.FootComment - k.FootComment = "" - } - } - v := p.parseChild(n) - if k.FootComment == "" && v.FootComment != "" { - k.FootComment = v.FootComment - v.FootComment = "" - } - if p.peek() == yaml_TAIL_COMMENT_EVENT { - if k.FootComment == "" { - k.FootComment = string(p.event.foot_comment) - } - p.expect(yaml_TAIL_COMMENT_EVENT) - } - } - n.LineComment = string(p.event.line_comment) - n.FootComment = string(p.event.foot_comment) - if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { - n.Content[len(n.Content)-2].FootComment = n.FootComment - n.FootComment = "" - } - p.expect(yaml_MAPPING_END_EVENT) - return n -} - -// ---------------------------------------------------------------------------- -// Decoder, unmarshals a node into a provided value. - -type decoder struct { - doc *Node - aliases map[*Node]bool - terrors []string - - stringMapType reflect.Type - generalMapType reflect.Type - - knownFields bool - uniqueKeys bool - decodeCount int - aliasCount int - aliasDepth int -} - -var ( - nodeType = reflect.TypeOf(Node{}) - durationType = reflect.TypeOf(time.Duration(0)) - stringMapType = reflect.TypeOf(map[string]interface{}{}) - generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) - ifaceType = generalMapType.Elem() - timeType = reflect.TypeOf(time.Time{}) - ptrTimeType = reflect.TypeOf(&time.Time{}) -) - -func newDecoder() *decoder { - d := &decoder{ - stringMapType: stringMapType, - generalMapType: generalMapType, - uniqueKeys: true, - } - d.aliases = make(map[*Node]bool) - return d -} - -func (d *decoder) terror(n *Node, tag string, out reflect.Value) { - if n.Tag != "" { - tag = n.Tag - } - value := n.Value - if tag != seqTag && tag != mapTag { - if len(value) > 10 { - value = " `" + value[:7] + "...`" - } else { - value = " `" + value + "`" - } - } - d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) -} - -func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { - err := u.UnmarshalYAML(n) - if e, ok := err.(*TypeError); ok { - d.terrors = append(d.terrors, e.Errors...) - return false - } - if err != nil { - fail(err) - } - return true -} - -func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { - terrlen := len(d.terrors) - err := u.UnmarshalYAML(func(v interface{}) (err error) { - defer handleErr(&err) - d.unmarshal(n, reflect.ValueOf(v)) - if len(d.terrors) > terrlen { - issues := d.terrors[terrlen:] - d.terrors = d.terrors[:terrlen] - return &TypeError{issues} - } - return nil - }) - if e, ok := err.(*TypeError); ok { - d.terrors = append(d.terrors, e.Errors...) - return false - } - if err != nil { - fail(err) - } - return true -} - -// d.prepare initializes and dereferences pointers and calls UnmarshalYAML -// if a value is found to implement it. -// It returns the initialized and dereferenced out value, whether -// unmarshalling was already done by UnmarshalYAML, and if so whether -// its types unmarshalled appropriately. -// -// If n holds a null value, prepare returns before doing anything. 
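The prepare step below is what gives custom types a chance to take over decoding: if the target implements the package's Unmarshaler interface, its UnmarshalYAML method is called with the raw node before any default handling. A hedged sketch of that hook, using upstream gopkg.in/yaml.v3 types; the CSV type and field names are made up for illustration:

```Go
package main

import (
	"fmt"
	"log"
	"strings"

	"gopkg.in/yaml.v3"
)

// CSV decodes a single comma-separated scalar into a slice, via the
// Unmarshaler hook that the decoder checks for before normal decoding.
type CSV []string

func (c *CSV) UnmarshalYAML(value *yaml.Node) error {
	var raw string
	if err := value.Decode(&raw); err != nil {
		return err
	}
	*c = strings.Split(raw, ",")
	return nil
}

func main() {
	var cfg struct {
		Tags CSV `yaml:"tags"`
	}
	if err := yaml.Unmarshal([]byte(`tags: "a,b,c"`), &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Tags) // [a b c]
}
```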
-func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { - if n.ShortTag() == nullTag { - return out, false, false - } - again := true - for again { - again = false - if out.Kind() == reflect.Ptr { - if out.IsNil() { - out.Set(reflect.New(out.Type().Elem())) - } - out = out.Elem() - again = true - } - if out.CanAddr() { - outi := out.Addr().Interface() - if u, ok := outi.(Unmarshaler); ok { - good = d.callUnmarshaler(n, u) - return out, true, good - } - if u, ok := outi.(obsoleteUnmarshaler); ok { - good = d.callObsoleteUnmarshaler(n, u) - return out, true, good - } - } - } - return out, false, false -} - -func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { - if n.ShortTag() == nullTag { - return reflect.Value{} - } - for _, num := range index { - for { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - continue - } - break - } - v = v.Field(num) - } - return v -} - -const ( - // 400,000 decode operations is ~500kb of dense object declarations, or - // ~5kb of dense object declarations with 10000% alias expansion - alias_ratio_range_low = 400000 - - // 4,000,000 decode operations is ~5MB of dense object declarations, or - // ~4.5MB of dense object declarations with 10% alias expansion - alias_ratio_range_high = 4000000 - - // alias_ratio_range is the range over which we scale allowed alias ratios - alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) -) - -func allowedAliasRatio(decodeCount int) float64 { - switch { - case decodeCount <= alias_ratio_range_low: - // allow 99% to come from alias expansion for small-to-medium documents - return 0.99 - case decodeCount >= alias_ratio_range_high: - // allow 10% to come from alias expansion for very large documents - return 0.10 - default: - // scale smoothly from 99% down to 10% over the range. - // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. - // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). - return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) - } -} - -func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { - d.decodeCount++ - if d.aliasDepth > 0 { - d.aliasCount++ - } - if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { - failf("document contains excessive aliasing") - } - if out.Type() == nodeType { - out.Set(reflect.ValueOf(n).Elem()) - return true - } - switch n.Kind { - case DocumentNode: - return d.document(n, out) - case AliasNode: - return d.alias(n, out) - } - out, unmarshaled, good := d.prepare(n, out) - if unmarshaled { - return good - } - switch n.Kind { - case ScalarNode: - good = d.scalar(n, out) - case MappingNode: - good = d.mapping(n, out) - case SequenceNode: - good = d.sequence(n, out) - case 0: - if n.IsZero() { - return d.null(out) - } - fallthrough - default: - failf("cannot decode node with unknown kind %d", n.Kind) - } - return good -} - -func (d *decoder) document(n *Node, out reflect.Value) (good bool) { - if len(n.Content) == 1 { - d.doc = n - d.unmarshal(n.Content[0], out) - return true - } - return false -} - -func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { - if d.aliases[n] { - // TODO this could actually be allowed in some circumstances. 
- failf("anchor '%s' value contains itself", n.Value) - } - d.aliases[n] = true - d.aliasDepth++ - good = d.unmarshal(n.Alias, out) - d.aliasDepth-- - delete(d.aliases, n) - return good -} - -var zeroValue reflect.Value - -func resetMap(out reflect.Value) { - for _, k := range out.MapKeys() { - out.SetMapIndex(k, zeroValue) - } -} - -func (d *decoder) null(out reflect.Value) bool { - if out.CanAddr() { - switch out.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - out.Set(reflect.Zero(out.Type())) - return true - } - } - return false -} - -func (d *decoder) scalar(n *Node, out reflect.Value) bool { - var tag string - var resolved interface{} - if n.indicatedString() { - tag = strTag - resolved = n.Value - } else { - tag, resolved = resolve(n.Tag, n.Value) - if tag == binaryTag { - data, err := base64.StdEncoding.DecodeString(resolved.(string)) - if err != nil { - failf("!!binary value contains invalid base64 data") - } - resolved = string(data) - } - } - if resolved == nil { - return d.null(out) - } - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - // We've resolved to exactly the type we want, so use that. - out.Set(resolvedv) - return true - } - // Perhaps we can use the value as a TextUnmarshaler to - // set its value. - if out.CanAddr() { - u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) - if ok { - var text []byte - if tag == binaryTag { - text = []byte(resolved.(string)) - } else { - // We let any value be unmarshaled into TextUnmarshaler. - // That might be more lax than we'd like, but the - // TextUnmarshaler itself should bowl out any dubious values. - text = []byte(n.Value) - } - err := u.UnmarshalText(text) - if err != nil { - fail(err) - } - return true - } - } - switch out.Kind() { - case reflect.String: - if tag == binaryTag { - out.SetString(resolved.(string)) - return true - } - out.SetString(n.Value) - return true - case reflect.Interface: - out.Set(reflect.ValueOf(resolved)) - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - // This used to work in v2, but it's very unfriendly. 
- isDuration := out.Type() == durationType - - switch resolved := resolved.(type) { - case int: - if !isDuration && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case int64: - if !isDuration && !out.OverflowInt(resolved) { - out.SetInt(resolved) - return true - } - case uint64: - if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case float64: - if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case string: - if out.Type() == durationType { - d, err := time.ParseDuration(resolved) - if err == nil { - out.SetInt(int64(d)) - return true - } - } - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - switch resolved := resolved.(type) { - case int: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case int64: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case uint64: - if !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case float64: - if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - } - case reflect.Bool: - switch resolved := resolved.(type) { - case bool: - out.SetBool(resolved) - return true - case string: - // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). - // It only works if explicitly attempting to unmarshal into a typed bool value. - switch resolved { - case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": - out.SetBool(true) - return true - case "n", "N", "no", "No", "NO", "off", "Off", "OFF": - out.SetBool(false) - return true - } - } - case reflect.Float32, reflect.Float64: - switch resolved := resolved.(type) { - case int: - out.SetFloat(float64(resolved)) - return true - case int64: - out.SetFloat(float64(resolved)) - return true - case uint64: - out.SetFloat(float64(resolved)) - return true - case float64: - out.SetFloat(resolved) - return true - } - case reflect.Struct: - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - out.Set(resolvedv) - return true - } - case reflect.Ptr: - panic("yaml internal error: please report the issue") - } - d.terror(n, tag, out) - return false -} - -func settableValueOf(i interface{}) reflect.Value { - v := reflect.ValueOf(i) - sv := reflect.New(v.Type()).Elem() - sv.Set(v) - return sv -} - -func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { - l := len(n.Content) - - var iface reflect.Value - switch out.Kind() { - case reflect.Slice: - out.Set(reflect.MakeSlice(out.Type(), l, l)) - case reflect.Array: - if l != out.Len() { - failf("invalid array: want %d elements but got %d", out.Len(), l) - } - case reflect.Interface: - // No type hints. Will have to use a generic sequence. 
- iface = out - out = settableValueOf(make([]interface{}, l)) - default: - d.terror(n, seqTag, out) - return false - } - et := out.Type().Elem() - - j := 0 - for i := 0; i < l; i++ { - e := reflect.New(et).Elem() - if ok := d.unmarshal(n.Content[i], e); ok { - out.Index(j).Set(e) - j++ - } - } - if out.Kind() != reflect.Array { - out.Set(out.Slice(0, j)) - } - if iface.IsValid() { - iface.Set(out) - } - return true -} - -func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { - l := len(n.Content) - if d.uniqueKeys { - nerrs := len(d.terrors) - for i := 0; i < l; i += 2 { - ni := n.Content[i] - for j := i + 2; j < l; j += 2 { - nj := n.Content[j] - if ni.Kind == nj.Kind && ni.Value == nj.Value { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) - } - } - } - if len(d.terrors) > nerrs { - return false - } - } - switch out.Kind() { - case reflect.Struct: - return d.mappingStruct(n, out) - case reflect.Map: - // okay - case reflect.Interface: - iface := out - if isStringMap(n) { - out = reflect.MakeMap(d.stringMapType) - } else { - out = reflect.MakeMap(d.generalMapType) - } - iface.Set(out) - default: - d.terror(n, mapTag, out) - return false - } - - outt := out.Type() - kt := outt.Key() - et := outt.Elem() - - stringMapType := d.stringMapType - generalMapType := d.generalMapType - if outt.Elem() == ifaceType { - if outt.Key().Kind() == reflect.String { - d.stringMapType = outt - } else if outt.Key() == ifaceType { - d.generalMapType = outt - } - } - - mapIsNew := false - if out.IsNil() { - out.Set(reflect.MakeMap(outt)) - mapIsNew = true - } - for i := 0; i < l; i += 2 { - if isMerge(n.Content[i]) { - d.merge(n.Content[i+1], out) - continue - } - k := reflect.New(kt).Elem() - if d.unmarshal(n.Content[i], k) { - kkind := k.Kind() - if kkind == reflect.Interface { - kkind = k.Elem().Kind() - } - if kkind == reflect.Map || kkind == reflect.Slice { - failf("invalid map key: %#v", k.Interface()) - } - e := reflect.New(et).Elem() - if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { - out.SetMapIndex(k, e) - } - } - } - d.stringMapType = stringMapType - d.generalMapType = generalMapType - return true -} - -func isStringMap(n *Node) bool { - if n.Kind != MappingNode { - return false - } - l := len(n.Content) - for i := 0; i < l; i += 2 { - if n.Content[i].ShortTag() != strTag { - return false - } - } - return true -} - -func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { - sinfo, err := getStructInfo(out.Type()) - if err != nil { - panic(err) - } - - var inlineMap reflect.Value - var elemType reflect.Type - if sinfo.InlineMap != -1 { - inlineMap = out.Field(sinfo.InlineMap) - inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) - elemType = inlineMap.Type().Elem() - } - - for _, index := range sinfo.InlineUnmarshalers { - field := d.fieldByIndex(n, out, index) - d.prepare(n, field) - } - - var doneFields []bool - if d.uniqueKeys { - doneFields = make([]bool, len(sinfo.FieldsList)) - } - name := settableValueOf("") - l := len(n.Content) - for i := 0; i < l; i += 2 { - ni := n.Content[i] - if isMerge(ni) { - d.merge(n.Content[i+1], out) - continue - } - if !d.unmarshal(ni, name) { - continue - } - if info, ok := sinfo.FieldsMap[name.String()]; ok { - if d.uniqueKeys { - if doneFields[info.Id] { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) - 
continue - } - doneFields[info.Id] = true - } - var field reflect.Value - if info.Inline == nil { - field = out.Field(info.Num) - } else { - field = d.fieldByIndex(n, out, info.Inline) - } - d.unmarshal(n.Content[i+1], field) - } else if sinfo.InlineMap != -1 { - if inlineMap.IsNil() { - inlineMap.Set(reflect.MakeMap(inlineMap.Type())) - } - value := reflect.New(elemType).Elem() - d.unmarshal(n.Content[i+1], value) - inlineMap.SetMapIndex(name, value) - } else if d.knownFields { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) - } - } - return true -} - -func failWantMap() { - failf("map merge requires map or sequence of maps as the value") -} - -func (d *decoder) merge(n *Node, out reflect.Value) { - switch n.Kind { - case MappingNode: - d.unmarshal(n, out) - case AliasNode: - if n.Alias != nil && n.Alias.Kind != MappingNode { - failWantMap() - } - d.unmarshal(n, out) - case SequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.Content) - 1; i >= 0; i-- { - ni := n.Content[i] - if ni.Kind == AliasNode { - if ni.Alias != nil && ni.Alias.Kind != MappingNode { - failWantMap() - } - } else if ni.Kind != MappingNode { - failWantMap() - } - d.unmarshal(ni, out) - } - default: - failWantMap() - } -} - -func isMerge(n *Node) bool { - return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag) -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/emitterc.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/emitterc.go deleted file mode 100644 index f0f3d1867..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/emitterc.go +++ /dev/null @@ -1,2028 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -import ( - "bytes" - "fmt" -) - -// Flush the buffer if needed. -func flush(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) { - return yaml_emitter_flush(emitter) - } - return true -} - -// Put a character to the output buffer. 
-func put(emitter *yaml_emitter_t, value byte) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - emitter.buffer[emitter.buffer_pos] = value - emitter.buffer_pos++ - emitter.column++ - return true -} - -// Put a line break to the output buffer. -func put_break(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - switch emitter.line_break { - case yaml_CR_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer_pos += 1 - case yaml_LN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos += 1 - case yaml_CRLN_BREAK: - emitter.buffer[emitter.buffer_pos+0] = '\r' - emitter.buffer[emitter.buffer_pos+1] = '\n' - emitter.buffer_pos += 2 - default: - panic("unknown line break setting") - } - if emitter.column == 0 { - emitter.space_above = true - } - emitter.column = 0 - emitter.line++ - // [Go] Do this here and below and drop from everywhere else (see commented lines). - emitter.indention = true - return true -} - -// Copy a character from a string into buffer. -func write(emitter *yaml_emitter_t, s []byte, i *int) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - p := emitter.buffer_pos - w := width(s[*i]) - switch w { - case 4: - emitter.buffer[p+3] = s[*i+3] - fallthrough - case 3: - emitter.buffer[p+2] = s[*i+2] - fallthrough - case 2: - emitter.buffer[p+1] = s[*i+1] - fallthrough - case 1: - emitter.buffer[p+0] = s[*i+0] - default: - panic("unknown character width") - } - emitter.column++ - emitter.buffer_pos += w - *i += w - return true -} - -// Write a whole string into buffer. -func write_all(emitter *yaml_emitter_t, s []byte) bool { - for i := 0; i < len(s); { - if !write(emitter, s, &i) { - return false - } - } - return true -} - -// Copy a line break character from a string into buffer. -func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { - if s[*i] == '\n' { - if !put_break(emitter) { - return false - } - *i++ - } else { - if !write(emitter, s, i) { - return false - } - if emitter.column == 0 { - emitter.space_above = true - } - emitter.column = 0 - emitter.line++ - // [Go] Do this here and above and drop from everywhere else (see commented lines). - emitter.indention = true - } - return true -} - -// Set an emitter error and return false. -func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_EMITTER_ERROR - emitter.problem = problem - return false -} - -// Emit an event. -func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.events = append(emitter.events, *event) - for !yaml_emitter_need_more_events(emitter) { - event := &emitter.events[emitter.events_head] - if !yaml_emitter_analyze_event(emitter, event) { - return false - } - if !yaml_emitter_state_machine(emitter, event) { - return false - } - yaml_event_delete(event) - emitter.events_head++ - } - return true -} - -// Check if we need to accumulate more events before emitting. 
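These buffer primitives (put, write, write_break) and the emit loop are what ultimately back the package's Marshal function. A small sketch of the output they produce for a plain struct, again written against upstream gopkg.in/yaml.v3 rather than the deleted fork:

```Go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v3"
)

func main() {
	cfg := struct {
		Name     string `yaml:"name"`
		Replicas int    `yaml:"replicas"`
		Ports    []int  `yaml:"ports,flow"`
	}{Name: "demo", Replicas: 3, Ports: []int{80, 443}}

	// Marshal drives the emitter: events are queued, analyzed, and then
	// flushed through the output buffer.
	out, err := yaml.Marshal(cfg)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(out))
	// name: demo
	// replicas: 3
	// ports: [80, 443]
}
```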
-// -// We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { - if emitter.events_head == len(emitter.events) { - return true - } - var accumulate int - switch emitter.events[emitter.events_head].typ { - case yaml_DOCUMENT_START_EVENT: - accumulate = 1 - break - case yaml_SEQUENCE_START_EVENT: - accumulate = 2 - break - case yaml_MAPPING_START_EVENT: - accumulate = 3 - break - default: - return false - } - if len(emitter.events)-emitter.events_head > accumulate { - return false - } - var level int - for i := emitter.events_head; i < len(emitter.events); i++ { - switch emitter.events[i].typ { - case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: - level++ - case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: - level-- - } - if level == 0 { - return false - } - } - return true -} - -// Append a directive to the directives stack. -func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { - for i := 0; i < len(emitter.tag_directives); i++ { - if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") - } - } - - // [Go] Do we actually need to copy this given garbage collection - // and the lack of deallocating destructors? - tag_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(tag_copy.handle, value.handle) - copy(tag_copy.prefix, value.prefix) - emitter.tag_directives = append(emitter.tag_directives, tag_copy) - return true -} - -// Increase the indentation level. -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool, compact_seq bool) bool { - emitter.indents = append(emitter.indents, emitter.indent) - if emitter.indent < 0 { - if flow { - emitter.indent = emitter.best_indent - } else { - emitter.indent = 0 - } - } else if !indentless { - // [Go] This was changed so that indentations are more regular. - if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { - // The first indent inside a sequence will just skip the "- " indicator. - emitter.indent += 2 - } else { - // Everything else aligns to the chosen indentation. - emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent) - } - if compact_seq { - emitter.indent = emitter.indent - 2 - } - } - return true -} - -// State dispatcher. 
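Note that yaml_emitter_increase_indent in this fork takes an extra compact_seq argument that the upstream emitter lacks; judging by the `emitter.indent - 2` adjustment it appears to support the compact, Kubernetes-style sequence layout where "- " aligns with the parent key. The indentation amount itself is still driven through the encoder, roughly as in this sketch (upstream gopkg.in/yaml.v3 API; the exact output shape is indicative):

```Go
package main

import (
	"log"
	"os"

	"gopkg.in/yaml.v3"
)

func main() {
	v := struct {
		Spec struct {
			Ports []int `yaml:"ports"`
		} `yaml:"spec"`
	}{}
	v.Spec.Ports = []int{80, 443}

	// SetIndent feeds the emitter's best_indent; stream start clamps
	// values outside 2..9 back to 2.
	enc := yaml.NewEncoder(os.Stdout)
	enc.SetIndent(2)
	if err := enc.Encode(v); err != nil {
		log.Fatal(err)
	}
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}
	// spec:
	//   ports:
	//     - 80
	//     - 443
}
```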
-func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { - switch emitter.state { - default: - case yaml_EMIT_STREAM_START_STATE: - return yaml_emitter_emit_stream_start(emitter, event) - - case yaml_EMIT_FIRST_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, true) - - case yaml_EMIT_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, false) - - case yaml_EMIT_DOCUMENT_CONTENT_STATE: - return yaml_emitter_emit_document_content(emitter, event) - - case yaml_EMIT_DOCUMENT_END_STATE: - return yaml_emitter_emit_document_end(emitter, event) - - case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false) - - case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true) - - case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false) - - case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false) - - case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true) - - case yaml_EMIT_FLOW_MAPPING_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false) - - case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, false) - - case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, true) - - case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, false) - - case yaml_EMIT_END_STATE: - return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") - } - panic("invalid emitter state") -} - -// Expect STREAM-START. 
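The document-start handling below decides when the explicit "---" marker is written: the first document may stay implicit, later ones may not. Through the public encoder this is what makes multi-document output work, sketched here with upstream gopkg.in/yaml.v3 (the ConfigMap/Secret values are arbitrary examples):

```Go
package main

import (
	"log"
	"os"

	"gopkg.in/yaml.v3"
)

func main() {
	enc := yaml.NewEncoder(os.Stdout)

	// Each Encode call emits one document; from the second one on the
	// emitter writes an explicit "---" separator.
	for _, d := range []map[string]string{
		{"kind": "ConfigMap"},
		{"kind": "Secret"},
	} {
		if err := enc.Encode(d); err != nil {
			log.Fatal(err)
		}
	}
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}
	// kind: ConfigMap
	// ---
	// kind: Secret
}
```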
-func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_STREAM_START_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") - } - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = event.encoding - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = yaml_UTF8_ENCODING - } - } - if emitter.best_indent < 2 || emitter.best_indent > 9 { - emitter.best_indent = 2 - } - if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { - emitter.best_width = 80 - } - if emitter.best_width < 0 { - emitter.best_width = 1<<31 - 1 - } - if emitter.line_break == yaml_ANY_BREAK { - emitter.line_break = yaml_LN_BREAK - } - - emitter.indent = -1 - emitter.line = 0 - emitter.column = 0 - emitter.whitespace = true - emitter.indention = true - emitter.space_above = true - emitter.foot_indent = -1 - - if emitter.encoding != yaml_UTF8_ENCODING { - if !yaml_emitter_write_bom(emitter) { - return false - } - } - emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE - return true -} - -// Expect DOCUMENT-START or STREAM-END. -func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - - if event.typ == yaml_DOCUMENT_START_EVENT { - - if event.version_directive != nil { - if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { - return false - } - } - - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { - return false - } - if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { - return false - } - } - - for i := 0; i < len(default_tag_directives); i++ { - tag_directive := &default_tag_directives[i] - if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { - return false - } - } - - implicit := event.implicit - if !first || emitter.canonical { - implicit = false - } - - if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if event.version_directive != nil { - implicit = false - if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if len(event.tag_directives) > 0 { - implicit = false - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { - return false - } - if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if yaml_emitter_check_empty_document(emitter) { - implicit = false - } - if !implicit { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { - return false - } - if emitter.canonical || true { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if len(emitter.head_comment) > 0 { - if !yaml_emitter_process_head_comment(emitter) { - return false 
- } - if !put_break(emitter) { - return false - } - } - - emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE - return true - } - - if event.typ == yaml_STREAM_END_EVENT { - if emitter.open_ended { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_END_STATE - return true - } - - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") -} - -// Expect the root node. -func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) - - if !yaml_emitter_process_head_comment(emitter) { - return false - } - if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { - return false - } - if !yaml_emitter_process_line_comment(emitter, false) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - return true -} - -// Expect DOCUMENT-END. -func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_DOCUMENT_END_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") - } - // [Go] Force document foot separation. - emitter.foot_indent = 0 - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - emitter.foot_indent = -1 - if !yaml_emitter_write_indent(emitter) { - return false - } - if !event.implicit { - // [Go] Allocate the slice elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_DOCUMENT_START_STATE - emitter.tag_directives = emitter.tag_directives[:0] - return true -} - -// Expect a flow item node. 
-func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_SEQUENCE_END_EVENT { - if emitter.canonical && !first && !trail { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.column == 0 || emitter.canonical && !first { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { - return false - } - if !yaml_emitter_process_line_comment(emitter, false) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first && !trail { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - - if !yaml_emitter_process_head_comment(emitter) { - return false - } - if emitter.column == 0 { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE) - } else { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) - } - if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { - return false - } - if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if !yaml_emitter_process_line_comment(emitter, false) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - return true -} - -// Expect a flow key node. 
-func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_MAPPING_END_EVENT { - if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if !yaml_emitter_process_head_comment(emitter) { - return false - } - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { - return false - } - if !yaml_emitter_process_line_comment(emitter, false) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - - if !first && !trail { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - - if !yaml_emitter_process_head_comment(emitter) { - return false - } - - if emitter.column == 0 { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a flow value node. -func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { - return false - } - } - if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE) - } else { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) - } - if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { - return false - } - if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if !yaml_emitter_process_line_comment(emitter, false) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - return true -} - -// Expect a block item node. 
-func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - seq := emitter.mapping_context && (emitter.column == 0 || !emitter.indention) && - emitter.compact_sequence_indent - if !yaml_emitter_increase_indent(emitter, false, false, seq){ - return false - } - } - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_process_head_comment(emitter) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) - if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { - return false - } - if !yaml_emitter_process_line_comment(emitter, false) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - return true -} - -// Expect a block key node. -func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, false, false) { - return false - } - } - if !yaml_emitter_process_head_comment(emitter) { - return false - } - if event.typ == yaml_MAPPING_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if len(emitter.line_comment) > 0 { - // [Go] A line comment was provided for the key. That's unusual as the - // scanner associates line comments with the value. Either way, - // save the line comment and render it appropriately later. - emitter.key_line_comment = emitter.line_comment - emitter.line_comment = nil - } - if yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block value node. -func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { - return false - } - } - if len(emitter.key_line_comment) > 0 { - // [Go] Line comments are generally associated with the value, but when there's - // no value on the same line as a mapping key they end up attached to the - // key itself. - if event.typ == yaml_SCALAR_EVENT { - if len(emitter.line_comment) == 0 { - // A scalar is coming and it has no line comments by itself yet, - // so just let it handle the line comment as usual. 
If it has a - // line comment, we can't have both so the one from the key is lost. - emitter.line_comment = emitter.key_line_comment - emitter.key_line_comment = nil - } - } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) { - // An indented block follows, so write the comment right now. - emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment - if !yaml_emitter_process_line_comment(emitter, false) { - return false - } - emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment - } - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) - if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { - return false - } - if !yaml_emitter_process_line_comment(emitter, false) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - return true -} - -func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { - return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0 -} - -// Expect a node. -func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, - root bool, sequence bool, mapping bool, simple_key bool) bool { - - emitter.root_context = root - emitter.sequence_context = sequence - emitter.mapping_context = mapping - emitter.simple_key_context = simple_key - - switch event.typ { - case yaml_ALIAS_EVENT: - return yaml_emitter_emit_alias(emitter, event) - case yaml_SCALAR_EVENT: - return yaml_emitter_emit_scalar(emitter, event) - case yaml_SEQUENCE_START_EVENT: - return yaml_emitter_emit_sequence_start(emitter, event) - case yaml_MAPPING_START_EVENT: - return yaml_emitter_emit_mapping_start(emitter, event) - default: - return yaml_emitter_set_emitter_error(emitter, - fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) - } -} - -// Expect ALIAS. -func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SCALAR. -func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_select_scalar_style(emitter, event) { - return false - } - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false, false) { - return false - } - if !yaml_emitter_process_scalar(emitter) { - return false - } - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SEQUENCE-START. 
-func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || - yaml_emitter_check_empty_sequence(emitter) { - emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - } - return true -} - -// Expect MAPPING-START. -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || - yaml_emitter_check_empty_mapping(emitter) { - emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - } - return true -} - -// Check if the document content is an empty scalar. -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { - return false // [Go] Huh? -} - -// Check if the next events represent an empty sequence. -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT -} - -// Check if the next events represent an empty mapping. -func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT -} - -// Check if the next node can be expressed as a simple key. -func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { - length := 0 - switch emitter.events[emitter.events_head].typ { - case yaml_ALIAS_EVENT: - length += len(emitter.anchor_data.anchor) - case yaml_SCALAR_EVENT: - if emitter.scalar_data.multiline { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) + - len(emitter.scalar_data.value) - case yaml_SEQUENCE_START_EVENT: - if !yaml_emitter_check_empty_sequence(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - case yaml_MAPPING_START_EVENT: - if !yaml_emitter_check_empty_mapping(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - default: - return false - } - return length <= 128 -} - -// Determine an acceptable scalar style. 
-func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 - if no_tag && !event.implicit && !event.quoted_implicit { - return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") - } - - style := event.scalar_style() - if style == yaml_ANY_SCALAR_STYLE { - style = yaml_PLAIN_SCALAR_STYLE - } - if emitter.canonical { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - if emitter.simple_key_context && emitter.scalar_data.multiline { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - if style == yaml_PLAIN_SCALAR_STYLE { - if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || - emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if no_tag && !event.implicit { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { - if !emitter.scalar_data.single_quoted_allowed { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { - if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - - if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { - emitter.tag_data.handle = []byte{'!'} - } - emitter.scalar_data.style = style - return true -} - -// Write an anchor. -func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { - if emitter.anchor_data.anchor == nil { - return true - } - c := []byte{'&'} - if emitter.anchor_data.alias { - c[0] = '*' - } - if !yaml_emitter_write_indicator(emitter, c, true, false, false) { - return false - } - return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) -} - -// Write a tag. -func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { - if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { - return true - } - if len(emitter.tag_data.handle) > 0 { - if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { - return false - } - if len(emitter.tag_data.suffix) > 0 { - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - } - } else { - // [Go] Allocate these slices elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { - return false - } - } - return true -} - -// Write a scalar. 
-func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { - switch emitter.scalar_data.style { - case yaml_PLAIN_SCALAR_STYLE: - return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_SINGLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_DOUBLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_LITERAL_SCALAR_STYLE: - return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) - - case yaml_FOLDED_SCALAR_STYLE: - return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) - } - panic("unknown scalar style") -} - -// Write a head comment. -func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool { - if len(emitter.tail_comment) > 0 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_comment(emitter, emitter.tail_comment) { - return false - } - emitter.tail_comment = emitter.tail_comment[:0] - emitter.foot_indent = emitter.indent - if emitter.foot_indent < 0 { - emitter.foot_indent = 0 - } - } - - if len(emitter.head_comment) == 0 { - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_comment(emitter, emitter.head_comment) { - return false - } - emitter.head_comment = emitter.head_comment[:0] - return true -} - -// Write an line comment. -func yaml_emitter_process_line_comment(emitter *yaml_emitter_t, linebreak bool) bool { - if len(emitter.line_comment) == 0 { - if linebreak && !put_break(emitter) { - return false - } - return true - } - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !yaml_emitter_write_comment(emitter, emitter.line_comment) { - return false - } - emitter.line_comment = emitter.line_comment[:0] - return true -} - -// Write a foot comment. -func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool { - if len(emitter.foot_comment) == 0 { - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_comment(emitter, emitter.foot_comment) { - return false - } - emitter.foot_comment = emitter.foot_comment[:0] - emitter.foot_indent = emitter.indent - if emitter.foot_indent < 0 { - emitter.foot_indent = 0 - } - return true -} - -// Check if a %YAML directive is valid. -func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { - if version_directive.major != 1 || version_directive.minor != 1 { - return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") - } - return true -} - -// Check if a %TAG directive is valid. -func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { - handle := tag_directive.handle - prefix := tag_directive.prefix - if len(handle) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") - } - if handle[0] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") - } - if handle[len(handle)-1] != '!' 
{ - return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") - } - for i := 1; i < len(handle)-1; i += width(handle[i]) { - if !is_alpha(handle, i) { - return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") - } - } - if len(prefix) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") - } - return true -} - -// Check if an anchor is valid. -func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { - if len(anchor) == 0 { - problem := "anchor value must not be empty" - if alias { - problem = "alias value must not be empty" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - for i := 0; i < len(anchor); i += width(anchor[i]) { - if !is_alpha(anchor, i) { - problem := "anchor value must contain alphanumerical characters only" - if alias { - problem = "alias value must contain alphanumerical characters only" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - } - emitter.anchor_data.anchor = anchor - emitter.anchor_data.alias = alias - return true -} - -// Check if a tag is valid. -func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { - if len(tag) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") - } - for i := 0; i < len(emitter.tag_directives); i++ { - tag_directive := &emitter.tag_directives[i] - if bytes.HasPrefix(tag, tag_directive.prefix) { - emitter.tag_data.handle = tag_directive.handle - emitter.tag_data.suffix = tag[len(tag_directive.prefix):] - return true - } - } - emitter.tag_data.suffix = tag - return true -} - -// Check if a scalar is valid. -func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { - var ( - block_indicators = false - flow_indicators = false - line_breaks = false - special_characters = false - tab_characters = false - - leading_space = false - leading_break = false - trailing_space = false - trailing_break = false - break_space = false - space_break = false - - preceded_by_whitespace = false - followed_by_whitespace = false - previous_space = false - previous_break = false - ) - - emitter.scalar_data.value = value - - if len(value) == 0 { - emitter.scalar_data.multiline = false - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = false - return true - } - - if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { - block_indicators = true - flow_indicators = true - } - - preceded_by_whitespace = true - for i, w := 0, 0; i < len(value); i += w { - w = width(value[i]) - followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) - - if i == 0 { - switch value[i] { - case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': - flow_indicators = true - block_indicators = true - case '?', ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '-': - if followed_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } else { - switch value[i] { - case ',', '?', '[', ']', '{', '}': - flow_indicators = true - case ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '#': - if preceded_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } - - if value[i] == '\t' { - tab_characters = true - } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { - special_characters = true - } - if is_space(value, i) { - if i == 0 { - leading_space = true - } - if i+width(value[i]) == len(value) { - trailing_space = true - } - if previous_break { - break_space = true - } - previous_space = true - previous_break = false - } else if is_break(value, i) { - line_breaks = true - if i == 0 { - leading_break = true - } - if i+width(value[i]) == len(value) { - trailing_break = true - } - if previous_space { - space_break = true - } - previous_space = false - previous_break = true - } else { - previous_space = false - previous_break = false - } - - // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. - preceded_by_whitespace = is_blankz(value, i) - } - - emitter.scalar_data.multiline = line_breaks - emitter.scalar_data.flow_plain_allowed = true - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = true - - if leading_space || leading_break || trailing_space || trailing_break { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if trailing_space { - emitter.scalar_data.block_allowed = false - } - if break_space { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - if space_break || tab_characters || special_characters { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - if space_break || special_characters { - emitter.scalar_data.block_allowed = false - } - if line_breaks { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if flow_indicators { - emitter.scalar_data.flow_plain_allowed = false - } - if block_indicators { - emitter.scalar_data.block_plain_allowed = false - } - return true -} - -// Check if the event data is valid. 
-func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - emitter.anchor_data.anchor = nil - emitter.tag_data.handle = nil - emitter.tag_data.suffix = nil - emitter.scalar_data.value = nil - - if len(event.head_comment) > 0 { - emitter.head_comment = event.head_comment - } - if len(event.line_comment) > 0 { - emitter.line_comment = event.line_comment - } - if len(event.foot_comment) > 0 { - emitter.foot_comment = event.foot_comment - } - if len(event.tail_comment) > 0 { - emitter.tail_comment = event.tail_comment - } - - switch event.typ { - case yaml_ALIAS_EVENT: - if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { - return false - } - - case yaml_SCALAR_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - if !yaml_emitter_analyze_scalar(emitter, event.value) { - return false - } - - case yaml_SEQUENCE_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - - case yaml_MAPPING_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - } - return true -} - -// Write the BOM character. -func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - pos := emitter.buffer_pos - emitter.buffer[pos+0] = '\xEF' - emitter.buffer[pos+1] = '\xBB' - emitter.buffer[pos+2] = '\xBF' - emitter.buffer_pos += 3 - return true -} - -func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { - indent := emitter.indent - if indent < 0 { - indent = 0 - } - if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { - if !put_break(emitter) { - return false - } - } - if emitter.foot_indent == indent { - if !put_break(emitter) { - return false - } - } - for emitter.column < indent { - if !put(emitter, ' ') { - return false - } - } - emitter.whitespace = true - //emitter.indention = true - emitter.space_above = false - emitter.foot_indent = -1 - return true -} - -func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, indicator) { - return false - } - emitter.whitespace = is_whitespace - emitter.indention = (emitter.indention && is_indention) - emitter.open_ended = false - return true -} - -func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func 
yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - for i := 0; i < len(value); { - var must_write bool - switch value[i] { - case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': - must_write = true - default: - must_write = is_alpha(value, i) - } - if must_write { - if !write(emitter, value, &i) { - return false - } - } else { - w := width(value[i]) - for k := 0; k < w; k++ { - octet := value[i] - i++ - if !put(emitter, '%') { - return false - } - - c := octet >> 4 - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - c = octet & 0x0f - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - } - } - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - if len(value) > 0 && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - //emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - - if len(value) > 0 { - emitter.whitespace = false - } - emitter.indention = false - if emitter.root_context { - emitter.open_ended = true - } - - return true -} - -func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { - return false - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - //emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if value[i] == '\'' { - if !put(emitter, '\'') { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - spaces := false - if 
!yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { - return false - } - - for i := 0; i < len(value); { - if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || - is_bom(value, i) || is_break(value, i) || - value[i] == '"' || value[i] == '\\' { - - octet := value[i] - - var w int - var v rune - switch { - case octet&0x80 == 0x00: - w, v = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, v = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, v = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, v = 4, rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = value[i+k] - v = (v << 6) + (rune(octet) & 0x3F) - } - i += w - - if !put(emitter, '\\') { - return false - } - - var ok bool - switch v { - case 0x00: - ok = put(emitter, '0') - case 0x07: - ok = put(emitter, 'a') - case 0x08: - ok = put(emitter, 'b') - case 0x09: - ok = put(emitter, 't') - case 0x0A: - ok = put(emitter, 'n') - case 0x0b: - ok = put(emitter, 'v') - case 0x0c: - ok = put(emitter, 'f') - case 0x0d: - ok = put(emitter, 'r') - case 0x1b: - ok = put(emitter, 'e') - case 0x22: - ok = put(emitter, '"') - case 0x5c: - ok = put(emitter, '\\') - case 0x85: - ok = put(emitter, 'N') - case 0xA0: - ok = put(emitter, '_') - case 0x2028: - ok = put(emitter, 'L') - case 0x2029: - ok = put(emitter, 'P') - default: - if v <= 0xFF { - ok = put(emitter, 'x') - w = 2 - } else if v <= 0xFFFF { - ok = put(emitter, 'u') - w = 4 - } else { - ok = put(emitter, 'U') - w = 8 - } - for k := (w - 1) * 4; ok && k >= 0; k -= 4 { - digit := byte((v >> uint(k)) & 0x0F) - if digit < 10 { - ok = put(emitter, digit+'0') - } else { - ok = put(emitter, digit+'A'-10) - } - } - } - if !ok { - return false - } - spaces = false - } else if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if is_space(value, i+1) { - if !put(emitter, '\\') { - return false - } - } - i += width(value[i]) - } else if !write(emitter, value, &i) { - return false - } - spaces = true - } else { - if !write(emitter, value, &i) { - return false - } - spaces = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { - if is_space(value, 0) || is_break(value, 0) { - indent_hint := []byte{'0' + byte(emitter.best_indent)} - if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { - return false - } - } - - emitter.open_ended = false - - var chomp_hint [1]byte - if len(value) == 0 { - chomp_hint[0] = '-' - } else { - i := len(value) - 1 - for value[i]&0xC0 == 0x80 { - i-- - } - if !is_break(value, i) { - chomp_hint[0] = '-' - } else if i == 0 { - chomp_hint[0] = '+' - emitter.open_ended = true - } else { - i-- - for value[i]&0xC0 == 0x80 { - i-- - } - if is_break(value, i) { - chomp_hint[0] = '+' - emitter.open_ended = true - } - } - } - if chomp_hint[0] != 0 { - if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { - return false - } - } - return true -} - -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - if 
!yaml_emitter_process_line_comment(emitter, true) { - return false - } - //emitter.indention = true - emitter.whitespace = true - breaks := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !write_break(emitter, value, &i) { - return false - } - //emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - - return true -} - -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - if !yaml_emitter_process_line_comment(emitter, true) { - return false - } - - //emitter.indention = true - emitter.whitespace = true - - breaks := true - leading_spaces := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !breaks && !leading_spaces && value[i] == '\n' { - k := 0 - for is_break(value, k) { - k += width(value[k]) - } - if !is_blankz(value, k) { - if !put_break(emitter) { - return false - } - } - } - if !write_break(emitter, value, &i) { - return false - } - //emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - leading_spaces = is_blank(value, i) - } - if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - emitter.indention = false - breaks = false - } - } - return true -} - -func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool { - breaks := false - pound := false - for i := 0; i < len(comment); { - if is_break(comment, i) { - if !write_break(emitter, comment, &i) { - return false - } - //emitter.indention = true - breaks = true - pound = false - } else { - if breaks && !yaml_emitter_write_indent(emitter) { - return false - } - if !pound { - if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) { - return false - } - pound = true - } - if !write(emitter, comment, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - if !breaks && !put_break(emitter) { - return false - } - - emitter.whitespace = true - //emitter.indention = true - return true -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/encode.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/encode.go deleted file mode 100644 index de9e72a3e..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/encode.go +++ /dev/null @@ -1,577 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package yaml - -import ( - "encoding" - "fmt" - "io" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -type encoder struct { - emitter yaml_emitter_t - event yaml_event_t - out []byte - flow bool - indent int - doneInit bool -} - -func newEncoder() *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_string(&e.emitter, &e.out) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func newEncoderWithWriter(w io.Writer) *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_writer(&e.emitter, w) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func (e *encoder) init() { - if e.doneInit { - return - } - if e.indent == 0 { - e.indent = 4 - } - e.emitter.best_indent = e.indent - yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) - e.emit() - e.doneInit = true -} - -func (e *encoder) finish() { - e.emitter.open_ended = false - yaml_stream_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) destroy() { - yaml_emitter_delete(&e.emitter) -} - -func (e *encoder) emit() { - // This will internally delete the e.event value. - e.must(yaml_emitter_emit(&e.emitter, &e.event)) -} - -func (e *encoder) must(ok bool) { - if !ok { - msg := e.emitter.problem - if msg == "" { - msg = "unknown problem generating YAML content" - } - failf("%s", msg) - } -} - -func (e *encoder) marshalDoc(tag string, in reflect.Value) { - e.init() - var node *Node - if in.IsValid() { - node, _ = in.Interface().(*Node) - } - if node != nil && node.Kind == DocumentNode { - e.nodev(in) - } else { - yaml_document_start_event_initialize(&e.event, nil, nil, true) - e.emit() - e.marshal(tag, in) - yaml_document_end_event_initialize(&e.event, true) - e.emit() - } -} - -func (e *encoder) marshal(tag string, in reflect.Value) { - tag = shortTag(tag) - if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { - e.nilv() - return - } - iface := in.Interface() - switch value := iface.(type) { - case *Node: - e.nodev(in) - return - case Node: - if !in.CanAddr() { - var n = reflect.New(in.Type()).Elem() - n.Set(in) - in = n - } - e.nodev(in.Addr()) - return - case time.Time: - e.timev(tag, in) - return - case *time.Time: - e.timev(tag, in.Elem()) - return - case time.Duration: - e.stringv(tag, reflect.ValueOf(value.String())) - return - case Marshaler: - v, err := value.MarshalYAML() - if err != nil { - fail(err) - } - if v == nil { - e.nilv() - return - } - e.marshal(tag, reflect.ValueOf(v)) - return - case encoding.TextMarshaler: - text, err := value.MarshalText() - if err != nil { - fail(err) - } - in = reflect.ValueOf(string(text)) - case nil: - e.nilv() - return - } - switch in.Kind() { - case reflect.Interface: - e.marshal(tag, in.Elem()) - case reflect.Map: - e.mapv(tag, in) - case reflect.Ptr: - e.marshal(tag, in.Elem()) - case reflect.Struct: - e.structv(tag, in) - case reflect.Slice, reflect.Array: - e.slicev(tag, in) - case reflect.String: - e.stringv(tag, in) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - e.intv(tag, in) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - e.uintv(tag, in) - case reflect.Float32, reflect.Float64: - e.floatv(tag, in) - case reflect.Bool: - e.boolv(tag, in) - default: - panic("cannot marshal type: " + in.Type().String()) - } -} - -func (e *encoder) mapv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - keys := keyList(in.MapKeys()) - 
sort.Sort(keys) - for _, k := range keys { - e.marshal("", k) - e.marshal("", in.MapIndex(k)) - } - }) -} - -func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) { - for _, num := range index { - for { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - return reflect.Value{} - } - v = v.Elem() - continue - } - break - } - v = v.Field(num) - } - return v -} - -func (e *encoder) structv(tag string, in reflect.Value) { - sinfo, err := getStructInfo(in.Type()) - if err != nil { - panic(err) - } - e.mappingv(tag, func() { - for _, info := range sinfo.FieldsList { - var value reflect.Value - if info.Inline == nil { - value = in.Field(info.Num) - } else { - value = e.fieldByIndex(in, info.Inline) - if !value.IsValid() { - continue - } - } - if info.OmitEmpty && isZero(value) { - continue - } - e.marshal("", reflect.ValueOf(info.Key)) - e.flow = info.Flow - e.marshal("", value) - } - if sinfo.InlineMap >= 0 { - m := in.Field(sinfo.InlineMap) - if m.Len() > 0 { - e.flow = false - keys := keyList(m.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - if _, found := sinfo.FieldsMap[k.String()]; found { - panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String())) - } - e.marshal("", k) - e.flow = false - e.marshal("", m.MapIndex(k)) - } - } - } - }) -} - -func (e *encoder) mappingv(tag string, f func()) { - implicit := tag == "" - style := yaml_BLOCK_MAPPING_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_MAPPING_STYLE - } - yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) - e.emit() - f() - yaml_mapping_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) slicev(tag string, in reflect.Value) { - implicit := tag == "" - style := yaml_BLOCK_SEQUENCE_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_SEQUENCE_STYLE - } - e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) - e.emit() - n := in.Len() - for i := 0; i < n; i++ { - e.marshal("", in.Index(i)) - } - e.must(yaml_sequence_end_event_initialize(&e.event)) - e.emit() -} - -// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. -// -// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported -// in YAML 1.2 and by this package, but these should be marshalled quoted for -// the time being for compatibility with other parsers. -func isBase60Float(s string) (result bool) { - // Fast path. - if s == "" { - return false - } - c := s[0] - if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { - return false - } - // Do the full match. - return base60float.MatchString(s) -} - -// From http://yaml.org/type/float.html, except the regular expression there -// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. -var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) - -// isOldBool returns whether s is bool notation as defined in YAML 1.1. -// -// We continue to force strings that YAML 1.1 would interpret as booleans to be -// rendered as quotes strings so that the marshalled output valid for YAML 1.1 -// parsing. 
-func isOldBool(s string) (result bool) { - switch s { - case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON", - "n", "N", "no", "No", "NO", "off", "Off", "OFF": - return true - default: - return false - } -} - -func (e *encoder) stringv(tag string, in reflect.Value) { - var style yaml_scalar_style_t - s := in.String() - canUsePlain := true - switch { - case !utf8.ValidString(s): - if tag == binaryTag { - failf("explicitly tagged !!binary data must be base64-encoded") - } - if tag != "" { - failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) - } - // It can't be encoded directly as YAML so use a binary tag - // and encode it as base64. - tag = binaryTag - s = encodeBase64(s) - case tag == "": - // Check to see if it would resolve to a specific - // tag when encoded unquoted. If it doesn't, - // there's no need to quote it. - rtag, _ := resolve("", s) - canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s)) - } - // Note: it's possible for user code to emit invalid YAML - // if they explicitly specify a tag and a string containing - // text that's incompatible with that tag. - switch { - case strings.Contains(s, "\n"): - if e.flow { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } else { - style = yaml_LITERAL_SCALAR_STYLE - } - case canUsePlain: - style = yaml_PLAIN_SCALAR_STYLE - default: - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - e.emitScalar(s, "", tag, style, nil, nil, nil, nil) -} - -func (e *encoder) boolv(tag string, in reflect.Value) { - var s string - if in.Bool() { - s = "true" - } else { - s = "false" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) -} - -func (e *encoder) intv(tag string, in reflect.Value) { - s := strconv.FormatInt(in.Int(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) -} - -func (e *encoder) uintv(tag string, in reflect.Value) { - s := strconv.FormatUint(in.Uint(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) -} - -func (e *encoder) timev(tag string, in reflect.Value) { - t := in.Interface().(time.Time) - s := t.Format(time.RFC3339Nano) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) -} - -func (e *encoder) floatv(tag string, in reflect.Value) { - // Issue #352: When formatting, use the precision of the underlying value - precision := 64 - if in.Kind() == reflect.Float32 { - precision = 32 - } - - s := strconv.FormatFloat(in.Float(), 'g', -1, precision) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) -} - -func (e *encoder) nilv() { - e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) -} - -func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) { - // TODO Kill this function. Replace all initialize calls by their underlining Go literals. - implicit := tag == "" - if !implicit { - tag = longTag(tag) - } - e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) - e.event.head_comment = head - e.event.line_comment = line - e.event.foot_comment = foot - e.event.tail_comment = tail - e.emit() -} - -func (e *encoder) nodev(in reflect.Value) { - e.node(in.Interface().(*Node), "") -} - -func (e *encoder) node(node *Node, tail string) { - // Zero nodes behave as nil. 
- if node.Kind == 0 && node.IsZero() { - e.nilv() - return - } - - // If the tag was not explicitly requested, and dropping it won't change the - // implicit tag of the value, don't include it in the presentation. - var tag = node.Tag - var stag = shortTag(tag) - var forceQuoting bool - if tag != "" && node.Style&TaggedStyle == 0 { - if node.Kind == ScalarNode { - if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 { - tag = "" - } else { - rtag, _ := resolve("", node.Value) - if rtag == stag { - tag = "" - } else if stag == strTag { - tag = "" - forceQuoting = true - } - } - } else { - var rtag string - switch node.Kind { - case MappingNode: - rtag = mapTag - case SequenceNode: - rtag = seqTag - } - if rtag == stag { - tag = "" - } - } - } - - switch node.Kind { - case DocumentNode: - yaml_document_start_event_initialize(&e.event, nil, nil, true) - e.event.head_comment = []byte(node.HeadComment) - e.emit() - for _, node := range node.Content { - e.node(node, "") - } - yaml_document_end_event_initialize(&e.event, true) - e.event.foot_comment = []byte(node.FootComment) - e.emit() - - case SequenceNode: - style := yaml_BLOCK_SEQUENCE_STYLE - if node.Style&FlowStyle != 0 { - style = yaml_FLOW_SEQUENCE_STYLE - } - e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)) - e.event.head_comment = []byte(node.HeadComment) - e.emit() - for _, node := range node.Content { - e.node(node, "") - } - e.must(yaml_sequence_end_event_initialize(&e.event)) - e.event.line_comment = []byte(node.LineComment) - e.event.foot_comment = []byte(node.FootComment) - e.emit() - - case MappingNode: - style := yaml_BLOCK_MAPPING_STYLE - if node.Style&FlowStyle != 0 { - style = yaml_FLOW_MAPPING_STYLE - } - yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style) - e.event.tail_comment = []byte(tail) - e.event.head_comment = []byte(node.HeadComment) - e.emit() - - // The tail logic below moves the foot comment of prior keys to the following key, - // since the value for each key may be a nested structure and the foot needs to be - // processed only the entirety of the value is streamed. The last tail is processed - // with the mapping end event. - var tail string - for i := 0; i+1 < len(node.Content); i += 2 { - k := node.Content[i] - foot := k.FootComment - if foot != "" { - kopy := *k - kopy.FootComment = "" - k = &kopy - } - e.node(k, tail) - tail = foot - - v := node.Content[i+1] - e.node(v, "") - } - - yaml_mapping_end_event_initialize(&e.event) - e.event.tail_comment = []byte(tail) - e.event.line_comment = []byte(node.LineComment) - e.event.foot_comment = []byte(node.FootComment) - e.emit() - - case AliasNode: - yaml_alias_event_initialize(&e.event, []byte(node.Value)) - e.event.head_comment = []byte(node.HeadComment) - e.event.line_comment = []byte(node.LineComment) - e.event.foot_comment = []byte(node.FootComment) - e.emit() - - case ScalarNode: - value := node.Value - if !utf8.ValidString(value) { - if stag == binaryTag { - failf("explicitly tagged !!binary data must be base64-encoded") - } - if stag != "" { - failf("cannot marshal invalid UTF-8 data as %s", stag) - } - // It can't be encoded directly as YAML so use a binary tag - // and encode it as base64. 
- tag = binaryTag - value = encodeBase64(value) - } - - style := yaml_PLAIN_SCALAR_STYLE - switch { - case node.Style&DoubleQuotedStyle != 0: - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - case node.Style&SingleQuotedStyle != 0: - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - case node.Style&LiteralStyle != 0: - style = yaml_LITERAL_SCALAR_STYLE - case node.Style&FoldedStyle != 0: - style = yaml_FOLDED_SCALAR_STYLE - case strings.Contains(value, "\n"): - style = yaml_LITERAL_SCALAR_STYLE - case forceQuoting: - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) - default: - failf("cannot encode node with unknown kind %d", node.Kind) - } -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/parserc.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/parserc.go deleted file mode 100644 index ac66fccc0..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/parserc.go +++ /dev/null @@ -1,1249 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -import ( - "bytes" -) - -// The parser implements the following grammar: -// -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// implicit_document ::= block_node DOCUMENT-END* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// block_node_or_indentless_sequence ::= -// ALIAS -// | properties (block_content | indentless_block_sequence)? -// | block_content -// | indentless_block_sequence -// block_node ::= ALIAS -// | properties block_content? -// | block_content -// flow_node ::= ALIAS -// | properties flow_content? -// | flow_content -// properties ::= TAG ANCHOR? | ANCHOR TAG? -// block_content ::= block_collection | flow_collection | SCALAR -// flow_content ::= flow_collection | SCALAR -// block_collection ::= block_sequence | block_mapping -// flow_collection ::= flow_sequence | flow_mapping -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// block_mapping ::= BLOCK-MAPPING_START -// ((KEY block_node_or_indentless_sequence?)? 
-// (VALUE block_node_or_indentless_sequence?)?)* -// BLOCK-END -// flow_sequence ::= FLOW-SEQUENCE-START -// (flow_sequence_entry FLOW-ENTRY)* -// flow_sequence_entry? -// FLOW-SEQUENCE-END -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// flow_mapping ::= FLOW-MAPPING-START -// (flow_mapping_entry FLOW-ENTRY)* -// flow_mapping_entry? -// FLOW-MAPPING-END -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - -// Peek the next token in the token queue. -func peek_token(parser *yaml_parser_t) *yaml_token_t { - if parser.token_available || yaml_parser_fetch_more_tokens(parser) { - token := &parser.tokens[parser.tokens_head] - yaml_parser_unfold_comments(parser, token) - return token - } - return nil -} - -// yaml_parser_unfold_comments walks through the comments queue and joins all -// comments behind the position of the provided token into the respective -// top-level comment slices in the parser. -func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) { - for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index { - comment := &parser.comments[parser.comments_head] - if len(comment.head) > 0 { - if token.typ == yaml_BLOCK_END_TOKEN { - // No heads on ends, so keep comment.head for a follow up token. - break - } - if len(parser.head_comment) > 0 { - parser.head_comment = append(parser.head_comment, '\n') - } - parser.head_comment = append(parser.head_comment, comment.head...) - } - if len(comment.foot) > 0 { - if len(parser.foot_comment) > 0 { - parser.foot_comment = append(parser.foot_comment, '\n') - } - parser.foot_comment = append(parser.foot_comment, comment.foot...) - } - if len(comment.line) > 0 { - if len(parser.line_comment) > 0 { - parser.line_comment = append(parser.line_comment, '\n') - } - parser.line_comment = append(parser.line_comment, comment.line...) - } - *comment = yaml_comment_t{} - parser.comments_head++ - } -} - -// Remove the next token from the queue (must be called after peek_token). -func skip_token(parser *yaml_parser_t) { - parser.token_available = false - parser.tokens_parsed++ - parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN - parser.tokens_head++ -} - -// Get the next event. -func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { - // Erase the event object. - *event = yaml_event_t{} - - // No events after the end of the stream or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { - return true - } - - // Generate the next event. - return yaml_parser_state_machine(parser, event) -} - -// Set parser error. -func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -// State dispatcher. 
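The head/line/foot comment bookkeeping above (yaml_parser_unfold_comments and friends) is what allows comments to survive a decode/encode round trip through the exported Node type. A rough sketch, again assuming the upstream gopkg.in/yaml.v3 package:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	src := []byte("# head comment\nkey: value # line comment\n")

	// Decoding into a yaml.Node keeps the comments gathered by the parser.
	var n yaml.Node
	if err := yaml.Unmarshal(src, &n); err != nil {
		panic(err)
	}

	// Re-encoding the node tree writes them back out.
	out, err := yaml.Marshal(&n)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // the head and line comments are preserved
}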
-func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { - //trace("yaml_parser_state_machine", "state:", parser.state.String()) - - switch parser.state { - case yaml_PARSE_STREAM_START_STATE: - return yaml_parser_parse_stream_start(parser, event) - - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, true) - - case yaml_PARSE_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, false) - - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return yaml_parser_parse_document_content(parser, event) - - case yaml_PARSE_DOCUMENT_END_STATE: - return yaml_parser_parse_document_end(parser, event) - - case yaml_PARSE_BLOCK_NODE_STATE: - return yaml_parser_parse_node(parser, event, true, false) - - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return yaml_parser_parse_node(parser, event, true, true) - - case yaml_PARSE_FLOW_NODE_STATE: - return yaml_parser_parse_node(parser, event, false, false) - - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, true) - - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, false) - - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_indentless_sequence_entry(parser, event) - - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, true) - - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, false) - - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return yaml_parser_parse_block_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, true) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, false) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) - - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, true) - - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, true) - - default: - panic("invalid parser state") - } -} - -// Parse the production: -// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END -// ************ -func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_STREAM_START_TOKEN { - return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) - } - parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - encoding: token.encoding, - } - skip_token(parser) - return true -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// * -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// ************************* -func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - // Parse extra document end indicators. - if !implicit { - for token.typ == yaml_DOCUMENT_END_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && - token.typ != yaml_TAG_DIRECTIVE_TOKEN && - token.typ != yaml_DOCUMENT_START_TOKEN && - token.typ != yaml_STREAM_END_TOKEN { - // Parse an implicit document. - if !yaml_parser_process_directives(parser, nil, nil) { - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_BLOCK_NODE_STATE - - var head_comment []byte - if len(parser.head_comment) > 0 { - // [Go] Scan the header comment backwards, and if an empty line is found, break - // the header so the part before the last empty line goes into the - // document header, while the bottom of it goes into a follow up event. - for i := len(parser.head_comment) - 1; i > 0; i-- { - if parser.head_comment[i] == '\n' { - if i == len(parser.head_comment)-1 { - head_comment = parser.head_comment[:i] - parser.head_comment = parser.head_comment[i+1:] - break - } else if parser.head_comment[i-1] == '\n' { - head_comment = parser.head_comment[:i-1] - parser.head_comment = parser.head_comment[i+1:] - break - } - } - } - } - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - - head_comment: head_comment, - } - - } else if token.typ != yaml_STREAM_END_TOKEN { - // Parse an explicit document. - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - start_mark := token.start_mark - if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { - return false - } - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_DOCUMENT_START_TOKEN { - yaml_parser_set_parser_error(parser, - "did not find expected ", token.start_mark) - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE - end_mark := token.end_mark - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: false, - } - skip_token(parser) - - } else { - // Parse the stream end. 
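yaml_parser_parse_document_start handles both the implicit first document and explicit "---" documents, so one stream may carry several documents. A small sketch of consuming such a stream through the exported Decoder (assuming the upstream gopkg.in/yaml.v3 package):

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"

	"gopkg.in/yaml.v3"
)

func main() {
	// The first document starts implicitly; the second starts with the
	// explicit DOCUMENT-START token ("---").
	stream := "a: 1\n---\nb: 2\n"

	dec := yaml.NewDecoder(strings.NewReader(stream))
	for {
		var doc map[string]int
		err := dec.Decode(&doc)
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(doc) // map[a:1], then map[b:2]
	}
}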
- parser.state = yaml_PARSE_END_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - } - - return true -} - -// Parse the productions: -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// *********** -// -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || - token.typ == yaml_TAG_DIRECTIVE_TOKEN || - token.typ == yaml_DOCUMENT_START_TOKEN || - token.typ == yaml_DOCUMENT_END_TOKEN || - token.typ == yaml_STREAM_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - return yaml_parser_parse_node(parser, event, true, false) -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - start_mark := token.start_mark - end_mark := token.start_mark - - implicit := true - if token.typ == yaml_DOCUMENT_END_TOKEN { - end_mark = token.end_mark - skip_token(parser) - implicit = false - } - - parser.tag_directives = parser.tag_directives[:0] - - parser.state = yaml_PARSE_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - start_mark: start_mark, - end_mark: end_mark, - implicit: implicit, - } - yaml_parser_set_event_comments(parser, event) - if len(event.head_comment) > 0 && len(event.foot_comment) == 0 { - event.foot_comment = event.head_comment - event.head_comment = nil - } - return true -} - -func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) { - event.head_comment = parser.head_comment - event.line_comment = parser.line_comment - event.foot_comment = parser.foot_comment - parser.head_comment = nil - parser.line_comment = nil - parser.foot_comment = nil - parser.tail_comment = nil - parser.stem_comment = nil -} - -// Parse the productions: -// block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * -// block_node ::= ALIAS -// ***** -// | properties block_content? -// ********** * -// | block_content -// * -// flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * -// properties ::= TAG ANCHOR? | ANCHOR TAG? 
-// ************************* -// block_content ::= block_collection | flow_collection | SCALAR -// ****** -// flow_content ::= flow_collection | SCALAR -// ****** -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { - //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_ALIAS_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - *event = yaml_event_t{ - typ: yaml_ALIAS_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - anchor: token.value, - } - yaml_parser_set_event_comments(parser, event) - skip_token(parser) - return true - } - - start_mark := token.start_mark - end_mark := token.start_mark - - var tag_token bool - var tag_handle, tag_suffix, anchor []byte - var tag_mark yaml_mark_t - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - start_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } else if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - start_mark = token.start_mark - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - var tag []byte - if tag_token { - if len(tag_handle) == 0 { - tag = tag_suffix - tag_suffix = nil - } else { - for i := range parser.tag_directives { - if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { - tag = append([]byte(nil), parser.tag_directives[i].prefix...) - tag = append(tag, tag_suffix...) 
- break - } - } - if len(tag) == 0 { - yaml_parser_set_parser_error_context(parser, - "while parsing a node", start_mark, - "found undefined tag handle", tag_mark) - return false - } - } - } - - implicit := len(tag) == 0 - if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_SCALAR_TOKEN { - var plain_implicit, quoted_implicit bool - end_mark = token.end_mark - if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { - plain_implicit = true - } else if len(tag) == 0 { - quoted_implicit = true - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - value: token.value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(token.style), - } - yaml_parser_set_event_comments(parser, event) - skip_token(parser) - return true - } - if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { - // [Go] Some of the events below can be merged as they differ only on style. - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), - } - yaml_parser_set_event_comments(parser, event) - return true - } - if token.typ == yaml_FLOW_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - yaml_parser_set_event_comments(parser, event) - return true - } - if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - if parser.stem_comment != nil { - event.head_comment = parser.stem_comment - parser.stem_comment = nil - } - return true - } - if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), - } - if parser.stem_comment != nil { - event.head_comment = parser.stem_comment - parser.stem_comment = nil - } - return true - } - if len(anchor) > 0 || len(tag) > 0 { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - quoted_implicit: 
false, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true - } - - context := "while parsing a flow node" - if block { - context = "while parsing a block node" - } - yaml_parser_set_parser_error_context(parser, context, start_mark, - "did not find expected node content", token.start_mark) - return false -} - -// Parse the productions: -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* -// -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - prior_head_len := len(parser.head_comment) - skip_token(parser) - yaml_parser_split_stem_comment(parser, prior_head_len) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } - if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block collection", context_mark, - "did not find expected '-' indicator", token.start_mark) -} - -// Parse the productions: -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - prior_head_len := len(parser.head_comment) - skip_token(parser) - yaml_parser_split_stem_comment(parser, prior_head_len) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && - token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? - } - return true -} - -// Split stem comment from head comment. 
-// -// When a sequence or map is found under a sequence entry, the former head comment -// is assigned to the underlying sequence or map as a whole, not the individual -// sequence or map entry as would be expected otherwise. To handle this case the -// previous head comment is moved aside as the stem comment. -func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { - if stem_len == 0 { - return - } - - token := peek_token(parser) - if token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { - return - } - - parser.stem_comment = parser.head_comment[:stem_len] - if len(parser.head_comment) == stem_len { - parser.head_comment = nil - } else { - // Copy suffix to prevent very strange bugs if someone ever appends - // further bytes to the prefix in the stem_comment slice above. - parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...) - } -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? -// *** * -// (VALUE block_node_or_indentless_sequence?)?)* -// -// BLOCK-END -// ********* -// -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - // [Go] A tail comment was left from the prior mapping value processed. Emit an event - // as it needs to be processed with that value and not the following key. - if len(parser.tail_comment) > 0 { - *event = yaml_event_t{ - typ: yaml_TAIL_COMMENT_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - foot_comment: parser.tail_comment, - } - parser.tail_comment = nil - return true - } - - if token.typ == yaml_KEY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - yaml_parser_set_event_comments(parser, event) - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block mapping", context_mark, - "did not find expected key", token.start_mark) -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// -// ((KEY block_node_or_indentless_sequence?)? 
-// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// -// -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * -// -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow sequence", context_mark, - "did not find expected ',' or ']'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - implicit: true, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - skip_token(parser) - return true - } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - yaml_parser_set_event_comments(parser, event) - - skip_token(parser) - return true -} - -// -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// *** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - mark := token.end_mark - skip_token(parser) - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * -// -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? - } - return true -} - -// Parse the productions: -// flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * *** * -// -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow mapping", context_mark, - "did not find expected ',' or '}'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - yaml_parser_set_event_comments(parser, event) - skip_token(parser) - return true -} - -// Parse the productions: -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * ***** * -// -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { - token := peek_token(parser) - if token == nil { - return false - } - if empty { - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Generate an empty scalar event. -func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: mark, - end_mark: mark, - value: nil, // Empty - implicit: true, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true -} - -var default_tag_directives = []yaml_tag_directive_t{ - {[]byte("!"), []byte("!")}, - {[]byte("!!"), []byte("tag:yaml.org,2002:")}, -} - -// Parse directives. 
-func yaml_parser_process_directives(parser *yaml_parser_t, - version_directive_ref **yaml_version_directive_t, - tag_directives_ref *[]yaml_tag_directive_t) bool { - - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - token := peek_token(parser) - if token == nil { - return false - } - - for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { - if version_directive != nil { - yaml_parser_set_parser_error(parser, - "found duplicate %YAML directive", token.start_mark) - return false - } - if token.major != 1 || token.minor != 1 { - yaml_parser_set_parser_error(parser, - "found incompatible YAML document", token.start_mark) - return false - } - version_directive = &yaml_version_directive_t{ - major: token.major, - minor: token.minor, - } - } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { - value := yaml_tag_directive_t{ - handle: token.value, - prefix: token.prefix, - } - if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { - return false - } - tag_directives = append(tag_directives, value) - } - - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - - for i := range default_tag_directives { - if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { - return false - } - } - - if version_directive_ref != nil { - *version_directive_ref = version_directive - } - if tag_directives_ref != nil { - *tag_directives_ref = tag_directives - } - return true -} - -// Append a tag directive to the directives stack. -func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { - for i := range parser.tag_directives { - if bytes.Equal(value.handle, parser.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) - } - } - - // [Go] I suspect the copy is unnecessary. This was likely done - // because there was no way to track ownership of the data. - value_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(value_copy.handle, value.handle) - copy(value_copy.prefix, value.prefix) - parser.tag_directives = append(parser.tag_directives, value_copy) - return true -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/readerc.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/readerc.go deleted file mode 100644 index b7de0a89c..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/readerc.go +++ /dev/null @@ -1,434 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -import ( - "io" -) - -// Set the reader error and return 0. -func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { - parser.error = yaml_READER_ERROR - parser.problem = problem - parser.problem_offset = offset - parser.problem_value = value - return false -} - -// Byte order marks. -const ( - bom_UTF8 = "\xef\xbb\xbf" - bom_UTF16LE = "\xff\xfe" - bom_UTF16BE = "\xfe\xff" -) - -// Determine the input stream encoding by checking the BOM symbol. If no BOM is -// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. -func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { - // Ensure that we had enough bytes in the raw buffer. - for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { - if !yaml_parser_update_raw_buffer(parser) { - return false - } - } - - // Determine the encoding. - buf := parser.raw_buffer - pos := parser.raw_buffer_pos - avail := len(buf) - pos - if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { - parser.encoding = yaml_UTF16LE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { - parser.encoding = yaml_UTF16BE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { - parser.encoding = yaml_UTF8_ENCODING - parser.raw_buffer_pos += 3 - parser.offset += 3 - } else { - parser.encoding = yaml_UTF8_ENCODING - } - return true -} - -// Update the raw buffer. -func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { - size_read := 0 - - // Return if the raw buffer is full. - if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { - return true - } - - // Return on EOF. - if parser.eof { - return true - } - - // Move the remaining bytes in the raw buffer to the beginning. - if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { - copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) - } - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] - parser.raw_buffer_pos = 0 - - // Call the read handler to fill the buffer. - size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] - if err == io.EOF { - parser.eof = true - } else if err != nil { - return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) - } - return true -} - -// Ensure that the buffer contains at least `length` characters. -// Return true on success, false on failure. -// -// The length is supposed to be significantly less that the buffer size. 
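Because yaml_parser_determine_encoding inspects the BOM before any scanning, the reader accepts UTF-16 input transparently. A small sketch that hand-builds a UTF-16LE buffer (assuming the upstream gopkg.in/yaml.v3 package and ASCII-only content):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// utf16le encodes an ASCII-only string as UTF-16LE prefixed with the
// "\xff\xfe" BOM handled by the reader above.
func utf16le(s string) []byte {
	buf := []byte{0xFF, 0xFE}
	for _, c := range []byte(s) {
		buf = append(buf, c, 0x00)
	}
	return buf
}

func main() {
	var v map[string]int
	if err := yaml.Unmarshal(utf16le("a: 1\n"), &v); err != nil {
		panic(err)
	}
	fmt.Println(v) // map[a:1]
}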
-func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { - if parser.read_handler == nil { - panic("read handler must be set") - } - - // [Go] This function was changed to guarantee the requested length size at EOF. - // The fact we need to do this is pretty awful, but the description above implies - // for that to be the case, and there are tests - - // If the EOF flag is set and the raw buffer is empty, do nothing. - if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - // [Go] ACTUALLY! Read the documentation of this function above. - // This is just broken. To return true, we need to have the - // given length in the buffer. Not doing that means every single - // check that calls this function to make sure the buffer has a - // given length is Go) panicking; or C) accessing invalid memory. - //return true - } - - // Return if the buffer contains enough characters. - if parser.unread >= length { - return true - } - - // Determine the input encoding if it is not known yet. - if parser.encoding == yaml_ANY_ENCODING { - if !yaml_parser_determine_encoding(parser) { - return false - } - } - - // Move the unread characters to the beginning of the buffer. - buffer_len := len(parser.buffer) - if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { - copy(parser.buffer, parser.buffer[parser.buffer_pos:]) - buffer_len -= parser.buffer_pos - parser.buffer_pos = 0 - } else if parser.buffer_pos == buffer_len { - buffer_len = 0 - parser.buffer_pos = 0 - } - - // Open the whole buffer for writing, and cut it before returning. - parser.buffer = parser.buffer[:cap(parser.buffer)] - - // Fill the buffer until it has enough characters. - first := true - for parser.unread < length { - - // Fill the raw buffer if necessary. - if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { - if !yaml_parser_update_raw_buffer(parser) { - parser.buffer = parser.buffer[:buffer_len] - return false - } - } - first = false - - // Decode the raw buffer. - inner: - for parser.raw_buffer_pos != len(parser.raw_buffer) { - var value rune - var width int - - raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos - - // Decode the next character. - switch parser.encoding { - case yaml_UTF8_ENCODING: - // Decode a UTF-8 character. Check RFC 3629 - // (http://www.ietf.org/rfc/rfc3629.txt) for more details. - // - // The following table (taken from the RFC) is used for - // decoding. - // - // Char. number range | UTF-8 octet sequence - // (hexadecimal) | (binary) - // --------------------+------------------------------------ - // 0000 0000-0000 007F | 0xxxxxxx - // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx - // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx - // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - // - // Additionally, the characters in the range 0xD800-0xDFFF - // are prohibited as they are reserved for use with UTF-16 - // surrogate pairs. - - // Determine the length of the UTF-8 sequence. - octet := parser.raw_buffer[parser.raw_buffer_pos] - switch { - case octet&0x80 == 0x00: - width = 1 - case octet&0xE0 == 0xC0: - width = 2 - case octet&0xF0 == 0xE0: - width = 3 - case octet&0xF8 == 0xF0: - width = 4 - default: - // The leading octet is invalid. - return yaml_parser_set_reader_error(parser, - "invalid leading UTF-8 octet", - parser.offset, int(octet)) - } - - // Check if the raw buffer contains an incomplete character. 
- if width > raw_unread { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-8 octet sequence", - parser.offset, -1) - } - break inner - } - - // Decode the leading octet. - switch { - case octet&0x80 == 0x00: - value = rune(octet & 0x7F) - case octet&0xE0 == 0xC0: - value = rune(octet & 0x1F) - case octet&0xF0 == 0xE0: - value = rune(octet & 0x0F) - case octet&0xF8 == 0xF0: - value = rune(octet & 0x07) - default: - value = 0 - } - - // Check and decode the trailing octets. - for k := 1; k < width; k++ { - octet = parser.raw_buffer[parser.raw_buffer_pos+k] - - // Check if the octet is valid. - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_reader_error(parser, - "invalid trailing UTF-8 octet", - parser.offset+k, int(octet)) - } - - // Decode the octet. - value = (value << 6) + rune(octet&0x3F) - } - - // Check the length of the sequence against the value. - switch { - case width == 1: - case width == 2 && value >= 0x80: - case width == 3 && value >= 0x800: - case width == 4 && value >= 0x10000: - default: - return yaml_parser_set_reader_error(parser, - "invalid length of a UTF-8 sequence", - parser.offset, -1) - } - - // Check the range of the value. - if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { - return yaml_parser_set_reader_error(parser, - "invalid Unicode character", - parser.offset, int(value)) - } - - case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: - var low, high int - if parser.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - low, high = 1, 0 - } - - // The UTF-16 encoding is not as simple as one might - // naively think. Check RFC 2781 - // (http://www.ietf.org/rfc/rfc2781.txt). - // - // Normally, two subsequent bytes describe a Unicode - // character. However a special technique (called a - // surrogate pair) is used for specifying character - // values larger than 0xFFFF. - // - // A surrogate pair consists of two pseudo-characters: - // high surrogate area (0xD800-0xDBFF) - // low surrogate area (0xDC00-0xDFFF) - // - // The following formulas are used for decoding - // and encoding characters using surrogate pairs: - // - // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) - // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) - // W1 = 110110yyyyyyyyyy - // W2 = 110111xxxxxxxxxx - // - // where U is the character value, W1 is the high surrogate - // area, W2 is the low surrogate area. - - // Check for incomplete UTF-16 character. - if raw_unread < 2 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 character", - parser.offset, -1) - } - break inner - } - - // Get the character. - value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) - - // Check for unexpected low surrogate area. - if value&0xFC00 == 0xDC00 { - return yaml_parser_set_reader_error(parser, - "unexpected low surrogate area", - parser.offset, int(value)) - } - - // Check for a high surrogate area. - if value&0xFC00 == 0xD800 { - width = 4 - - // Check for incomplete surrogate pair. - if raw_unread < 4 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 surrogate pair", - parser.offset, -1) - } - break inner - } - - // Get the next character. - value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) - - // Check for a low surrogate area. 
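The surrogate-pair arithmetic used above, U = 0x10000 + ((W1 & 0x3FF) << 10) + (W2 & 0x3FF), can be sanity-checked in isolation; a self-contained sketch with no YAML involved:

package main

import "fmt"

func main() {
	// U+1F600 lies above 0xFFFF, so UTF-16 stores it as the pair
	// W1 = 0xD83D (high surrogate) and W2 = 0xDE00 (low surrogate).
	const w1, w2 = 0xD83D, 0xDE00

	// Same decoding formula as the reader above.
	u := 0x10000 + ((w1 & 0x3FF) << 10) + (w2 & 0x3FF)
	fmt.Printf("U+%X %c\n", u, rune(u)) // U+1F600

	// And the inverse mapping described in RFC 2781.
	up := u - 0x10000
	fmt.Printf("W1=%X W2=%X\n", 0xD800+(up>>10), 0xDC00+(up&0x3FF)) // W1=D83D W2=DE00
}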
- if value2&0xFC00 != 0xDC00 { - return yaml_parser_set_reader_error(parser, - "expected low surrogate area", - parser.offset+2, int(value2)) - } - - // Generate the value of the surrogate pair. - value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) - } else { - width = 2 - } - - default: - panic("impossible") - } - - // Check if the character is in the allowed range: - // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) - // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) - // | [#x10000-#x10FFFF] (32 bit) - switch { - case value == 0x09: - case value == 0x0A: - case value == 0x0D: - case value >= 0x20 && value <= 0x7E: - case value == 0x85: - case value >= 0xA0 && value <= 0xD7FF: - case value >= 0xE000 && value <= 0xFFFD: - case value >= 0x10000 && value <= 0x10FFFF: - default: - return yaml_parser_set_reader_error(parser, - "control characters are not allowed", - parser.offset, int(value)) - } - - // Move the raw pointers. - parser.raw_buffer_pos += width - parser.offset += width - - // Finally put the character into the buffer. - if value <= 0x7F { - // 0000 0000-0000 007F . 0xxxxxxx - parser.buffer[buffer_len+0] = byte(value) - buffer_len += 1 - } else if value <= 0x7FF { - // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) - parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) - buffer_len += 2 - } else if value <= 0xFFFF { - // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) - buffer_len += 3 - } else { - // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) - buffer_len += 4 - } - - parser.unread++ - } - - // On EOF, put NUL into the buffer and return. - if parser.eof { - parser.buffer[buffer_len] = 0 - buffer_len++ - parser.unread++ - break - } - } - // [Go] Read the documentation of this function above. To return true, - // we need to have the given length in the buffer. Not doing that means - // every single check that calls this function to make sure the buffer - // has a given length is Go) panicking; or C) accessing invalid memory. - // This happens here due to the EOF above breaking early. - for buffer_len < length { - parser.buffer[buffer_len] = 0 - buffer_len++ - } - parser.buffer = parser.buffer[:buffer_len] - return true -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/resolve.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/resolve.go deleted file mode 100644 index 64ae88805..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/resolve.go +++ /dev/null @@ -1,326 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package yaml - -import ( - "encoding/base64" - "math" - "regexp" - "strconv" - "strings" - "time" -) - -type resolveMapItem struct { - value interface{} - tag string -} - -var resolveTable = make([]byte, 256) -var resolveMap = make(map[string]resolveMapItem) - -func init() { - t := resolveTable - t[int('+')] = 'S' // Sign - t[int('-')] = 'S' - for _, c := range "0123456789" { - t[int(c)] = 'D' // Digit - } - for _, c := range "yYnNtTfFoO~" { - t[int(c)] = 'M' // In map - } - t[int('.')] = '.' // Float (potentially in map) - - var resolveMapList = []struct { - v interface{} - tag string - l []string - }{ - {true, boolTag, []string{"true", "True", "TRUE"}}, - {false, boolTag, []string{"false", "False", "FALSE"}}, - {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, - {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, - {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, - {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, - {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, - {"<<", mergeTag, []string{"<<"}}, - } - - m := resolveMap - for _, item := range resolveMapList { - for _, s := range item.l { - m[s] = resolveMapItem{item.v, item.tag} - } - } -} - -const ( - nullTag = "!!null" - boolTag = "!!bool" - strTag = "!!str" - intTag = "!!int" - floatTag = "!!float" - timestampTag = "!!timestamp" - seqTag = "!!seq" - mapTag = "!!map" - binaryTag = "!!binary" - mergeTag = "!!merge" -) - -var longTags = make(map[string]string) -var shortTags = make(map[string]string) - -func init() { - for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { - ltag := longTag(stag) - longTags[stag] = ltag - shortTags[ltag] = stag - } -} - -const longTagPrefix = "tag:yaml.org,2002:" - -func shortTag(tag string) string { - if strings.HasPrefix(tag, longTagPrefix) { - if stag, ok := shortTags[tag]; ok { - return stag - } - return "!!" + tag[len(longTagPrefix):] - } - return tag -} - -func longTag(tag string) string { - if strings.HasPrefix(tag, "!!") { - if ltag, ok := longTags[tag]; ok { - return ltag - } - return longTagPrefix + tag[2:] - } - return tag -} - -func resolvableTag(tag string) bool { - switch tag { - case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag: - return true - } - return false -} - -var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) - -func resolve(tag string, in string) (rtag string, out interface{}) { - tag = shortTag(tag) - if !resolvableTag(tag) { - return tag, in - } - - defer func() { - switch tag { - case "", rtag, strTag, binaryTag: - return - case floatTag: - if rtag == intTag { - switch v := out.(type) { - case int64: - rtag = floatTag - out = float64(v) - return - case int: - rtag = floatTag - out = float64(v) - return - } - } - } - failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) - }() - - // Any data is accepted as a !!str or !!binary. - // Otherwise, the prefix is enough of a hint about what it might be. 
- hint := byte('N') - if in != "" { - hint = resolveTable[in[0]] - } - if hint != 0 && tag != strTag && tag != binaryTag { - // Handle things we can lookup in a map. - if item, ok := resolveMap[in]; ok { - return item.tag, item.value - } - - // Base 60 floats are a bad idea, were dropped in YAML 1.2, and - // are purposefully unsupported here. They're still quoted on - // the way out for compatibility with other parser, though. - - switch hint { - case 'M': - // We've already checked the map above. - - case '.': - // Not in the map, so maybe a normal float. - floatv, err := strconv.ParseFloat(in, 64) - if err == nil { - return floatTag, floatv - } - - case 'D', 'S': - // Int, float, or timestamp. - // Only try values as a timestamp if the value is unquoted or there's an explicit - // !!timestamp tag. - if tag == "" || tag == timestampTag { - t, ok := parseTimestamp(in) - if ok { - return timestampTag, t - } - } - - plain := strings.Replace(in, "_", "", -1) - intv, err := strconv.ParseInt(plain, 0, 64) - if err == nil { - if intv == int64(int(intv)) { - return intTag, int(intv) - } else { - return intTag, intv - } - } - uintv, err := strconv.ParseUint(plain, 0, 64) - if err == nil { - return intTag, uintv - } - if yamlStyleFloat.MatchString(plain) { - floatv, err := strconv.ParseFloat(plain, 64) - if err == nil { - return floatTag, floatv - } - } - if strings.HasPrefix(plain, "0b") { - intv, err := strconv.ParseInt(plain[2:], 2, 64) - if err == nil { - if intv == int64(int(intv)) { - return intTag, int(intv) - } else { - return intTag, intv - } - } - uintv, err := strconv.ParseUint(plain[2:], 2, 64) - if err == nil { - return intTag, uintv - } - } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) - if err == nil { - if true || intv == int64(int(intv)) { - return intTag, int(intv) - } else { - return intTag, intv - } - } - } - // Octals as introduced in version 1.2 of the spec. - // Octals from the 1.1 spec, spelled as 0777, are still - // decoded by default in v3 as well for compatibility. - // May be dropped in v4 depending on how usage evolves. - if strings.HasPrefix(plain, "0o") { - intv, err := strconv.ParseInt(plain[2:], 8, 64) - if err == nil { - if intv == int64(int(intv)) { - return intTag, int(intv) - } else { - return intTag, intv - } - } - uintv, err := strconv.ParseUint(plain[2:], 8, 64) - if err == nil { - return intTag, uintv - } - } else if strings.HasPrefix(plain, "-0o") { - intv, err := strconv.ParseInt("-"+plain[3:], 8, 64) - if err == nil { - if true || intv == int64(int(intv)) { - return intTag, int(intv) - } else { - return intTag, intv - } - } - } - default: - panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")") - } - } - return strTag, in -} - -// encodeBase64 encodes s as base64 that is broken up into multiple lines -// as appropriate for the resulting length. -func encodeBase64(s string) string { - const lineLen = 70 - encLen := base64.StdEncoding.EncodedLen(len(s)) - lines := encLen/lineLen + 1 - buf := make([]byte, encLen*2+lines) - in := buf[0:encLen] - out := buf[encLen:] - base64.StdEncoding.Encode(in, []byte(s)) - k := 0 - for i := 0; i < len(in); i += lineLen { - j := i + lineLen - if j > len(in) { - j = len(in) - } - k += copy(out[k:], in[i:j]) - if lines > 1 { - out[k] = '\n' - k++ - } - } - return string(out[:k]) -} - -// This is a subset of the formats allowed by the regular expression -// defined at http://yaml.org/type/timestamp.html. 
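resolve() above is what turns an untagged plain scalar into !!null, !!bool, !!int, !!float, or falls back to !!str. A rough standalone approximation of that decision, assuming illustrative names and omitting the merge-key, !!binary, and base-2/base-8 handling present in the removed code:

package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"
)

// resolvePlain loosely mirrors the removed resolve(): check the fixed word
// map first, then integers (base-prefix aware via ParseInt base 0), then
// floats, else fall back to !!str. Illustrative sketch only.
func resolvePlain(in string) (tag string, out interface{}) {
	switch in {
	case "", "~", "null", "Null", "NULL":
		return "!!null", nil
	case "true", "True", "TRUE":
		return "!!bool", true
	case "false", "False", "FALSE":
		return "!!bool", false
	case ".inf", ".Inf", "+.inf", "+.Inf":
		return "!!float", math.Inf(+1)
	case "-.inf", "-.Inf":
		return "!!float", math.Inf(-1)
	}
	plain := strings.ReplaceAll(in, "_", "")
	if i, err := strconv.ParseInt(plain, 0, 64); err == nil {
		return "!!int", i
	}
	if f, err := strconv.ParseFloat(plain, 64); err == nil {
		return "!!float", f
	}
	return "!!str", in
}

func main() {
	for _, s := range []string{"true", "0x1F", "3.14", "-.inf", "hello"} {
		tag, v := resolvePlain(s)
		fmt.Printf("%-8q -> %-8s %v\n", s, tag, v)
	}
}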
-var allowedTimestampFormats = []string{ - "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. - "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". - "2006-1-2 15:4:5.999999999", // space separated with no time zone - "2006-1-2", // date only - // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" - // from the set of examples. -} - -// parseTimestamp parses s as a timestamp string and -// returns the timestamp and reports whether it succeeded. -// Timestamp formats are defined at http://yaml.org/type/timestamp.html -func parseTimestamp(s string) (time.Time, bool) { - // TODO write code to check all the formats supported by - // http://yaml.org/type/timestamp.html instead of using time.Parse. - - // Quick check: all date formats start with YYYY-. - i := 0 - for ; i < len(s); i++ { - if c := s[i]; c < '0' || c > '9' { - break - } - } - if i != 4 || i == len(s) || s[i] != '-' { - return time.Time{}, false - } - for _, format := range allowedTimestampFormats { - if t, err := time.Parse(format, s); err == nil { - return t, true - } - } - return time.Time{}, false -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/scannerc.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/scannerc.go deleted file mode 100644 index ca0070108..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/scannerc.go +++ /dev/null @@ -1,3038 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -import ( - "bytes" - "fmt" -) - -// Introduction -// ************ -// -// The following notes assume that you are familiar with the YAML specification -// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in -// some cases we are less restrictive that it requires. -// -// The process of transforming a YAML stream into a sequence of events is -// divided on two steps: Scanning and Parsing. -// -// The Scanner transforms the input stream into a sequence of tokens, while the -// parser transform the sequence of tokens produced by the Scanner into a -// sequence of parsing events. -// -// The Scanner is rather clever and complicated. The Parser, on the contrary, -// is a straightforward implementation of a recursive-descendant parser (or, -// LL(1) parser, as it is usually called). 
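parseTimestamp in the resolver removed just above leans on time.Parse layouts with single-digit reference fields, so one layout accepts both one- and two-digit months and days. A quick standalone check of that behavior (illustrative only, not part of the patch):

package main

import (
	"fmt"
	"time"
)

// The removed parseTimestamp tries a small set of layouts in order; the
// short reference fields ("1", "2", "4", "5") make each layout tolerant of
// both padded and unpadded values.
func main() {
	layouts := []string{
		"2006-1-2T15:4:5.999999999Z07:00",
		"2006-1-2 15:4:5.999999999",
		"2006-1-2",
	}
	inputs := []string{
		"2001-12-14",
		"2001-12-14 21:59:43.1",
		"2001-12-14T21:59:43.10-05:00",
	}
	for _, s := range inputs {
		for _, l := range layouts {
			if t, err := time.Parse(l, s); err == nil {
				fmt.Println(s, "->", t.UTC())
				break
			}
		}
	}
}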
-// -// Actually there are two issues of Scanning that might be called "clever", the -// rest is quite straightforward. The issues are "block collection start" and -// "simple keys". Both issues are explained below in details. -// -// Here the Scanning step is explained and implemented. We start with the list -// of all the tokens produced by the Scanner together with short descriptions. -// -// Now, tokens: -// -// STREAM-START(encoding) # The stream start. -// STREAM-END # The stream end. -// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. -// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. -// DOCUMENT-START # '---' -// DOCUMENT-END # '...' -// BLOCK-SEQUENCE-START # Indentation increase denoting a block -// BLOCK-MAPPING-START # sequence or a block mapping. -// BLOCK-END # Indentation decrease. -// FLOW-SEQUENCE-START # '[' -// FLOW-SEQUENCE-END # ']' -// BLOCK-SEQUENCE-START # '{' -// BLOCK-SEQUENCE-END # '}' -// BLOCK-ENTRY # '-' -// FLOW-ENTRY # ',' -// KEY # '?' or nothing (simple keys). -// VALUE # ':' -// ALIAS(anchor) # '*anchor' -// ANCHOR(anchor) # '&anchor' -// TAG(handle,suffix) # '!handle!suffix' -// SCALAR(value,style) # A scalar. -// -// The following two tokens are "virtual" tokens denoting the beginning and the -// end of the stream: -// -// STREAM-START(encoding) -// STREAM-END -// -// We pass the information about the input stream encoding with the -// STREAM-START token. -// -// The next two tokens are responsible for tags: -// -// VERSION-DIRECTIVE(major,minor) -// TAG-DIRECTIVE(handle,prefix) -// -// Example: -// -// %YAML 1.1 -// %TAG ! !foo -// %TAG !yaml! tag:yaml.org,2002: -// --- -// -// The correspoding sequence of tokens: -// -// STREAM-START(utf-8) -// VERSION-DIRECTIVE(1,1) -// TAG-DIRECTIVE("!","!foo") -// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") -// DOCUMENT-START -// STREAM-END -// -// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole -// line. -// -// The document start and end indicators are represented by: -// -// DOCUMENT-START -// DOCUMENT-END -// -// Note that if a YAML stream contains an implicit document (without '---' -// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be -// produced. -// -// In the following examples, we present whole documents together with the -// produced tokens. -// -// 1. An implicit document: -// -// 'a scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// STREAM-END -// -// 2. An explicit document: -// -// --- -// 'a scalar' -// ... -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// SCALAR("a scalar",single-quoted) -// DOCUMENT-END -// STREAM-END -// -// 3. Several documents in a stream: -// -// 'a scalar' -// --- -// 'another scalar' -// --- -// 'yet another scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// DOCUMENT-START -// SCALAR("another scalar",single-quoted) -// DOCUMENT-START -// SCALAR("yet another scalar",single-quoted) -// STREAM-END -// -// We have already introduced the SCALAR token above. The following tokens are -// used to describe aliases, anchors, tag, and scalars: -// -// ALIAS(anchor) -// ANCHOR(anchor) -// TAG(handle,suffix) -// SCALAR(value,style) -// -// The following series of examples illustrate the usage of these tokens: -// -// 1. A recursive sequence: -// -// &A [ *A ] -// -// Tokens: -// -// STREAM-START(utf-8) -// ANCHOR("A") -// FLOW-SEQUENCE-START -// ALIAS("A") -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. 
A tagged scalar: -// -// !!float "3.14" # A good approximation. -// -// Tokens: -// -// STREAM-START(utf-8) -// TAG("!!","float") -// SCALAR("3.14",double-quoted) -// STREAM-END -// -// 3. Various scalar styles: -// -// --- # Implicit empty plain scalars do not produce tokens. -// --- a plain scalar -// --- 'a single-quoted scalar' -// --- "a double-quoted scalar" -// --- |- -// a literal scalar -// --- >- -// a folded -// scalar -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// DOCUMENT-START -// SCALAR("a plain scalar",plain) -// DOCUMENT-START -// SCALAR("a single-quoted scalar",single-quoted) -// DOCUMENT-START -// SCALAR("a double-quoted scalar",double-quoted) -// DOCUMENT-START -// SCALAR("a literal scalar",literal) -// DOCUMENT-START -// SCALAR("a folded scalar",folded) -// STREAM-END -// -// Now it's time to review collection-related tokens. We will start with -// flow collections: -// -// FLOW-SEQUENCE-START -// FLOW-SEQUENCE-END -// FLOW-MAPPING-START -// FLOW-MAPPING-END -// FLOW-ENTRY -// KEY -// VALUE -// -// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and -// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' -// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the -// indicators '?' and ':', which are used for denoting mapping keys and values, -// are represented by the KEY and VALUE tokens. -// -// The following examples show flow collections: -// -// 1. A flow sequence: -// -// [item 1, item 2, item 3] -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-SEQUENCE-START -// SCALAR("item 1",plain) -// FLOW-ENTRY -// SCALAR("item 2",plain) -// FLOW-ENTRY -// SCALAR("item 3",plain) -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A flow mapping: -// -// { -// a simple key: a value, # Note that the KEY token is produced. -// ? a complex key: another value, -// } -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// FLOW-ENTRY -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// FLOW-ENTRY -// FLOW-MAPPING-END -// STREAM-END -// -// A simple key is a key which is not denoted by the '?' indicator. Note that -// the Scanner still produce the KEY token whenever it encounters a simple key. -// -// For scanning block collections, the following tokens are used (note that we -// repeat KEY and VALUE here): -// -// BLOCK-SEQUENCE-START -// BLOCK-MAPPING-START -// BLOCK-END -// BLOCK-ENTRY -// KEY -// VALUE -// -// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation -// increase that precedes a block collection (cf. the INDENT token in Python). -// The token BLOCK-END denote indentation decrease that ends a block collection -// (cf. the DEDENT token in Python). However YAML has some syntax pecularities -// that makes detections of these tokens more complex. -// -// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators -// '-', '?', and ':' correspondingly. -// -// The following examples show how the tokens BLOCK-SEQUENCE-START, -// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: -// -// 1. 
Block sequences: -// -// - item 1 -// - item 2 -// - -// - item 3.1 -// - item 3.2 -// - -// key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 3.1",plain) -// BLOCK-ENTRY -// SCALAR("item 3.2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Block mappings: -// -// a simple key: a value # The KEY token is produced here. -// ? a complex key -// : another value -// a mapping: -// key 1: value 1 -// key 2: value 2 -// a sequence: -// - item 1 -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// KEY -// SCALAR("a mapping",plain) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML does not always require to start a new block collection from a new -// line. If the current line contains only '-', '?', and ':' indicators, a new -// block collection may start at the current line. The following examples -// illustrate this case: -// -// 1. Collections in a sequence: -// -// - - item 1 -// - item 2 -// - key 1: value 1 -// key 2: value 2 -// - ? complex key -// : complex value -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("complex key") -// VALUE -// SCALAR("complex value") -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Collections in a mapping: -// -// ? a sequence -// : - item 1 -// - item 2 -// ? a mapping -// : key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// KEY -// SCALAR("a mapping",plain) -// VALUE -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML also permits non-indented sequences if they are included into a block -// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: -// -// key: -// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
-// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key",plain) -// VALUE -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// - -// Ensure that the buffer contains the required number of characters. -// Return true on success, false on failure (reader error or memory error). -func cache(parser *yaml_parser_t, length int) bool { - // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) - return parser.unread >= length || yaml_parser_update_buffer(parser, length) -} - -// Advance the buffer pointer. -func skip(parser *yaml_parser_t) { - if !is_blank(parser.buffer, parser.buffer_pos) { - parser.newlines = 0 - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) -} - -func skip_line(parser *yaml_parser_t) { - if is_crlf(parser.buffer, parser.buffer_pos) { - parser.mark.index += 2 - parser.mark.column = 0 - parser.mark.line++ - parser.unread -= 2 - parser.buffer_pos += 2 - parser.newlines++ - } else if is_break(parser.buffer, parser.buffer_pos) { - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) - parser.newlines++ - } -} - -// Copy a character to a string buffer and advance pointers. -func read(parser *yaml_parser_t, s []byte) []byte { - if !is_blank(parser.buffer, parser.buffer_pos) { - parser.newlines = 0 - } - w := width(parser.buffer[parser.buffer_pos]) - if w == 0 { - panic("invalid character sequence") - } - if len(s) == 0 { - s = make([]byte, 0, 32) - } - if w == 1 && len(s)+w <= cap(s) { - s = s[:len(s)+1] - s[len(s)-1] = parser.buffer[parser.buffer_pos] - parser.buffer_pos++ - } else { - s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) - parser.buffer_pos += w - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - return s -} - -// Copy a line break character to a string buffer and advance pointers. -func read_line(parser *yaml_parser_t, s []byte) []byte { - buf := parser.buffer - pos := parser.buffer_pos - switch { - case buf[pos] == '\r' && buf[pos+1] == '\n': - // CR LF . LF - s = append(s, '\n') - parser.buffer_pos += 2 - parser.mark.index++ - parser.unread-- - case buf[pos] == '\r' || buf[pos] == '\n': - // CR|LF . LF - s = append(s, '\n') - parser.buffer_pos += 1 - case buf[pos] == '\xC2' && buf[pos+1] == '\x85': - // NEL . LF - s = append(s, '\n') - parser.buffer_pos += 2 - case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): - // LS|PS . LS|PS - s = append(s, buf[parser.buffer_pos:pos+3]...) - parser.buffer_pos += 3 - default: - return s - } - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.newlines++ - return s -} - -// Get the next token. -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { - // Erase the token object. - *token = yaml_token_t{} // [Go] Is this necessary? - - // No tokens after STREAM-END or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR { - return true - } - - // Ensure that the tokens queue contains enough tokens. - if !parser.token_available { - if !yaml_parser_fetch_more_tokens(parser) { - return false - } - } - - // Fetch the next token from the queue. 
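The token walkthrough above is internal detail of the scanner; at the API level this vendored fork behaves like the public gopkg.in/yaml.v3 package it tracks, where flow and block spellings of the same collection tokenize differently but decode identically. A small usage sketch, assuming the public yaml.v3 module is available:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	var block, flow map[string]interface{}

	// Block style: BLOCK-SEQUENCE-START / BLOCK-ENTRY / BLOCK-END tokens.
	if err := yaml.Unmarshal([]byte("items:\n  - a\n  - b\n"), &block); err != nil {
		panic(err)
	}
	// Flow style: FLOW-MAPPING-START / FLOW-SEQUENCE-START / FLOW-ENTRY tokens.
	if err := yaml.Unmarshal([]byte("{items: [a, b]}"), &flow); err != nil {
		panic(err)
	}

	fmt.Println(block) // map[items:[a b]]
	fmt.Println(flow)  // map[items:[a b]]
}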
- *token = parser.tokens[parser.tokens_head] - parser.tokens_head++ - parser.tokens_parsed++ - parser.token_available = false - - if token.typ == yaml_STREAM_END_TOKEN { - parser.stream_end_produced = true - } - return true -} - -// Set the scanner error and return false. -func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { - parser.error = yaml_SCANNER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = parser.mark - return false -} - -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { - context := "while parsing a tag" - if directive { - context = "while parsing a %TAG directive" - } - return yaml_parser_set_scanner_error(parser, context, context_mark, problem) -} - -func trace(args ...interface{}) func() { - pargs := append([]interface{}{"+++"}, args...) - fmt.Println(pargs...) - pargs = append([]interface{}{"---"}, args...) - return func() { fmt.Println(pargs...) } -} - -// Ensure that the tokens queue contains at least one token which can be -// returned to the Parser. -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { - // While we need more tokens to fetch, do it. - for { - // [Go] The comment parsing logic requires a lookahead of two tokens - // so that foot comments may be parsed in time of associating them - // with the tokens that are parsed before them, and also for line - // comments to be transformed into head comments in some edge cases. - if parser.tokens_head < len(parser.tokens)-2 { - // If a potential simple key is at the head position, we need to fetch - // the next token to disambiguate it. - head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] - if !ok { - break - } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { - return false - } else if !valid { - break - } - } - // Fetch the next token. - if !yaml_parser_fetch_next_token(parser) { - return false - } - } - - parser.token_available = true - return true -} - -// The dispatcher for token fetchers. -func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { - // Ensure that the buffer is initialized. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we just started scanning. Fetch STREAM-START then. - if !parser.stream_start_produced { - return yaml_parser_fetch_stream_start(parser) - } - - scan_mark := parser.mark - - // Eat whitespaces and comments until we reach the next token. - if !yaml_parser_scan_to_next_token(parser) { - return false - } - - // [Go] While unrolling indents, transform the head comments of prior - // indentation levels observed after scan_start into foot comments at - // the respective indexes. - - // Check the indentation level against the current column. - if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) { - return false - } - - // Ensure that the buffer contains at least 4 characters. 4 is the length - // of the longest indicators ('--- ' and '... '). - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - // Is it the end of the stream? - if is_z(parser.buffer, parser.buffer_pos) { - return yaml_parser_fetch_stream_end(parser) - } - - // Is it a directive? 
- if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { - return yaml_parser_fetch_directive(parser) - } - - buf := parser.buffer - pos := parser.buffer_pos - - // Is it the document start indicator? - if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) - } - - // Is it the document end indicator? - if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) - } - - comment_mark := parser.mark - if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { - // Associate any following comments with the prior token. - comment_mark = parser.tokens[len(parser.tokens)-1].start_mark - } - defer func() { - if !ok { - return - } - if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN { - // Sequence indicators alone have no line comments. It becomes - // a head comment for whatever follows. - return - } - if !yaml_parser_scan_line_comment(parser, comment_mark) { - ok = false - return - } - }() - - // Is it the flow sequence start indicator? - if buf[pos] == '[' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) - } - - // Is it the flow mapping start indicator? - if parser.buffer[parser.buffer_pos] == '{' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) - } - - // Is it the flow sequence end indicator? - if parser.buffer[parser.buffer_pos] == ']' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_SEQUENCE_END_TOKEN) - } - - // Is it the flow mapping end indicator? - if parser.buffer[parser.buffer_pos] == '}' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_MAPPING_END_TOKEN) - } - - // Is it the flow entry indicator? - if parser.buffer[parser.buffer_pos] == ',' { - return yaml_parser_fetch_flow_entry(parser) - } - - // Is it the block entry indicator? - if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { - return yaml_parser_fetch_block_entry(parser) - } - - // Is it the key indicator? - if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_key(parser) - } - - // Is it the value indicator? - if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_value(parser) - } - - // Is it an alias? - if parser.buffer[parser.buffer_pos] == '*' { - return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) - } - - // Is it an anchor? - if parser.buffer[parser.buffer_pos] == '&' { - return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) - } - - // Is it a tag? - if parser.buffer[parser.buffer_pos] == '!' { - return yaml_parser_fetch_tag(parser) - } - - // Is it a literal scalar? - if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, true) - } - - // Is it a folded scalar? - if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, false) - } - - // Is it a single-quoted scalar? 
- if parser.buffer[parser.buffer_pos] == '\'' { - return yaml_parser_fetch_flow_scalar(parser, true) - } - - // Is it a double-quoted scalar? - if parser.buffer[parser.buffer_pos] == '"' { - return yaml_parser_fetch_flow_scalar(parser, false) - } - - // Is it a plain scalar? - // - // A plain scalar may start with any non-blank characters except - // - // '-', '?', ':', ',', '[', ']', '{', '}', - // '#', '&', '*', '!', '|', '>', '\'', '\"', - // '%', '@', '`'. - // - // In the block context (and, for the '-' indicator, in the flow context - // too), it may also start with the characters - // - // '-', '?', ':' - // - // if it is followed by a non-space character. - // - // The last rule is more restrictive than the specification requires. - // [Go] TODO Make this logic more reasonable. - //switch parser.buffer[parser.buffer_pos] { - //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': - //} - if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || - parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || - parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || - (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level == 0 && - (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && - !is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_plain_scalar(parser) - } - - // If we don't determine the token type so far, it is an error. - return yaml_parser_set_scanner_error(parser, - "while scanning for the next token", parser.mark, - "found character that cannot start any token") -} - -func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { - if !simple_key.possible { - return false, true - } - - // The 1.2 specification says: - // - // "If the ? indicator is omitted, parsing needs to see past the - // implicit key to recognize it as such. To limit the amount of - // lookahead required, the “:†indicator must appear at most 1024 - // Unicode characters beyond the start of the key. In addition, the key - // is restricted to a single line." - // - if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { - // Check if the potential simple key to be removed is required. - if simple_key.required { - return false, yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - simple_key.possible = false - return false, true - } - return true, true -} - -// Check if a simple key may start at the current position and add it if -// needed. 
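yaml_simple_key_is_valid above enforces the YAML 1.2 limit it quotes: an implicit key must be terminated within 1024 characters and on the line where it started. A standalone sketch of just that rule, with illustrative field names that are not part of the vendored code:

package main

import "fmt"

// mark mimics the index/line bookkeeping the removed scanner keeps per position.
type mark struct {
	index, line int
}

// simpleKeyStillValid reports whether a potential implicit key that began at
// start can still be terminated by a ':' seen at cur, per the 1024-character,
// single-line rule cited in the removed scanner.
func simpleKeyStillValid(start, cur mark) bool {
	return start.line == cur.line && cur.index-start.index <= 1024
}

func main() {
	fmt.Println(simpleKeyStillValid(mark{0, 1}, mark{40, 1}))   // true
	fmt.Println(simpleKeyStillValid(mark{0, 1}, mark{40, 2}))   // false: key spilled onto a new line
	fmt.Println(simpleKeyStillValid(mark{0, 1}, mark{2048, 1})) // false: ':' appears too far away
}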
-func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { - // A simple key is required at the current position if the scanner is in - // the block context and the current column coincides with the indentation - // level. - - required := parser.flow_level == 0 && parser.indent == parser.mark.column - - // - // If the current position may start a simple key, save it. - // - if parser.simple_key_allowed { - simple_key := yaml_simple_key_t{ - possible: true, - required: required, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - mark: parser.mark, - } - - if !yaml_parser_remove_simple_key(parser) { - return false - } - parser.simple_keys[len(parser.simple_keys)-1] = simple_key - parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 - } - return true -} - -// Remove a potential simple key at the current flow level. -func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { - i := len(parser.simple_keys) - 1 - if parser.simple_keys[i].possible { - // If the key is required, it is an error. - if parser.simple_keys[i].required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", parser.simple_keys[i].mark, - "could not find expected ':'") - } - // Remove the key from the stack. - parser.simple_keys[i].possible = false - delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) - } - return true -} - -// max_flow_level limits the flow_level -const max_flow_level = 10000 - -// Increase the flow level and resize the simple key list if needed. -func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { - // Reset the simple key on the next level. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ - possible: false, - required: false, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - mark: parser.mark, - }) - - // Increase the flow level. - parser.flow_level++ - if parser.flow_level > max_flow_level { - return yaml_parser_set_scanner_error(parser, - "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, - fmt.Sprintf("exceeded max depth of %d", max_flow_level)) - } - return true -} - -// Decrease the flow level. -func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { - if parser.flow_level > 0 { - parser.flow_level-- - last := len(parser.simple_keys) - 1 - delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) - parser.simple_keys = parser.simple_keys[:last] - } - return true -} - -// max_indents limits the indents stack size -const max_indents = 10000 - -// Push the current indentation level to the stack and set the new level -// the current column is greater than the indentation level. In this case, -// append or insert the specified token into the token queue. -func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - if parser.indent < column { - // Push the current indentation level to the stack and set the new - // indentation level. - parser.indents = append(parser.indents, parser.indent) - parser.indent = column - if len(parser.indents) > max_indents { - return yaml_parser_set_scanner_error(parser, - "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, - fmt.Sprintf("exceeded max depth of %d", max_indents)) - } - - // Create a token and insert it into the queue. 
- token := yaml_token_t{ - typ: typ, - start_mark: mark, - end_mark: mark, - } - if number > -1 { - number -= parser.tokens_parsed - } - yaml_insert_token(parser, number, &token) - } - return true -} - -// Pop indentation levels from the indents stack until the current level -// becomes less or equal to the column. For each indentation level, append -// the BLOCK-END token. -func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - block_mark := scan_mark - block_mark.index-- - - // Loop through the indentation levels in the stack. - for parser.indent > column { - - // [Go] Reposition the end token before potential following - // foot comments of parent blocks. For that, search - // backwards for recent comments that were at the same - // indent as the block that is ending now. - stop_index := block_mark.index - for i := len(parser.comments) - 1; i >= 0; i-- { - comment := &parser.comments[i] - - if comment.end_mark.index < stop_index { - // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. - // If requested indent column is < 0, then the document is over and everything else - // is a foot anyway. - break - } - if comment.start_mark.column == parser.indent+1 { - // This is a good match. But maybe there's a former comment - // at that same indent level, so keep searching. - block_mark = comment.start_mark - } - - // While the end of the former comment matches with - // the start of the following one, we know there's - // nothing in between and scanning is still safe. - stop_index = comment.scan_mark.index - } - - // Create a token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_END_TOKEN, - start_mark: block_mark, - end_mark: block_mark, - } - yaml_insert_token(parser, -1, &token) - - // Pop the indentation level. - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - } - return true -} - -// Initialize the scanner and produce the STREAM-START token. -func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { - - // Set the initial indentation. - parser.indent = -1 - - // Initialize the simple key stack. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - parser.simple_keys_by_tok = make(map[int]int) - - // A simple key is allowed at the beginning of the stream. - parser.simple_key_allowed = true - - // We have started. - parser.stream_start_produced = true - - // Create the STREAM-START token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_START_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - encoding: parser.encoding, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the STREAM-END token and shut down the scanner. -func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { - - // Force new line. - if parser.mark.column != 0 { - parser.mark.column = 0 - parser.mark.line++ - } - - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1, parser.mark) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the STREAM-END token and append it to the queue. 
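roll_indent/unroll_indent above maintain a stack of indentation columns: entering a deeper column pushes the old level and emits a BLOCK-*-START token, and returning to a shallower column pops levels, emitting one BLOCK-END per pop. A minimal sketch of that stack discipline (illustrative names; it ignores the flow context, where the removed code does nothing, and token insertion itself):

package main

import "fmt"

// indentStack mimics parser.indent/parser.indents from the removed scanner.
type indentStack struct {
	indent  int
	indents []int
}

// roll pushes the current level when column is deeper and reports whether a
// BLOCK-*-START token would be emitted.
func (s *indentStack) roll(column int) bool {
	if s.indent < column {
		s.indents = append(s.indents, s.indent)
		s.indent = column
		return true
	}
	return false
}

// unroll pops levels deeper than column and returns how many BLOCK-END
// tokens that produces.
func (s *indentStack) unroll(column int) int {
	ends := 0
	for s.indent > column {
		s.indent = s.indents[len(s.indents)-1]
		s.indents = s.indents[:len(s.indents)-1]
		ends++
	}
	return ends
}

func main() {
	s := &indentStack{indent: -1}
	s.roll(0)                 // top-level mapping starts
	s.roll(2)                 // nested block starts at column 2
	fmt.Println(s.unroll(-1)) // 2: end of document closes both blocks
}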
- token := yaml_token_t{ - typ: yaml_STREAM_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. -func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1, parser.mark) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. - token := yaml_token_t{} - if !yaml_parser_scan_directive(parser, &token) { - return false - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the DOCUMENT-START or DOCUMENT-END token. -func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1, parser.mark) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Consume the token. - start_mark := parser.mark - - skip(parser) - skip(parser) - skip(parser) - - end_mark := parser.mark - - // Create the DOCUMENT-START or DOCUMENT-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. -func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { - - // The indicators '[' and '{' may start a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // Increase the flow level. - if !yaml_parser_increase_flow_level(parser) { - return false - } - - // A simple key may follow the indicators '[' and '{'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. -func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset any potential simple key on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Decrease the flow level. - if !yaml_parser_decrease_flow_level(parser) { - return false - } - - // No simple keys after the indicators ']' and '}'. - parser.simple_key_allowed = false - - // Consume the token. - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-ENTRY token. -func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after ','. - parser.simple_key_allowed = true - - // Consume the token. 
- start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_FLOW_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the BLOCK-ENTRY token. -func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { - // Check if the scanner is in the block context. - if parser.flow_level == 0 { - // Check if we are allowed to start a new entry. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "block sequence entries are not allowed in this context") - } - // Add the BLOCK-SEQUENCE-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { - return false - } - } else { - // It is an error for the '-' indicator to occur in the flow context, - // but we let the Parser detect and report about it because the Parser - // is able to point to the context. - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '-'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the BLOCK-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the KEY token. -func yaml_parser_fetch_key(parser *yaml_parser_t) bool { - - // In the block context, additional checks are required. - if parser.flow_level == 0 { - // Check if we are allowed to start a new key (not nessesary simple). - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping keys are not allowed in this context") - } - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '?' in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the KEY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the VALUE token. -func yaml_parser_fetch_value(parser *yaml_parser_t) bool { - - simple_key := &parser.simple_keys[len(parser.simple_keys)-1] - - // Have we found a simple key? - if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { - return false - - } else if valid { - - // Create the KEY token and insert it into the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: simple_key.mark, - end_mark: simple_key.mark, - } - yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) - - // In the block context, we may need to add the BLOCK-MAPPING-START token. 
- if !yaml_parser_roll_indent(parser, simple_key.mark.column, - simple_key.token_number, - yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { - return false - } - - // Remove the simple key. - simple_key.possible = false - delete(parser.simple_keys_by_tok, simple_key.token_number) - - // A simple key cannot follow another simple key. - parser.simple_key_allowed = false - - } else { - // The ':' indicator follows a complex key. - - // In the block context, extra checks are required. - if parser.flow_level == 0 { - - // Check if we are allowed to start a complex value. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping values are not allowed in this context") - } - - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Simple keys after ':' are allowed in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - } - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the VALUE token and append it to the queue. - token := yaml_token_t{ - typ: yaml_VALUE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the ALIAS or ANCHOR token. -func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // An anchor or an alias could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow an anchor or an alias. - parser.simple_key_allowed = false - - // Create the ALIAS or ANCHOR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_anchor(parser, &token, typ) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the TAG token. -func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { - // A tag could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a tag. - parser.simple_key_allowed = false - - // Create the TAG token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_tag(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { - // Remove any potential simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // A simple key may follow a block scalar. - parser.simple_key_allowed = true - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_block_scalar(parser, &token, literal) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_flow_scalar(parser, &token, single) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,plain) token. 
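The scalar fetchers above are selected purely by the first significant character of the token: '|' and '>' give block scalars, quotes give flow scalars, and anything else that can start a token falls through to a plain scalar. A toy dispatcher capturing that mapping (names are illustrative, and the real plain-scalar check is more involved, as the removed code notes):

package main

import "fmt"

// scalarKind reports which scalar fetcher the removed scanner would pick for
// a token starting with c outside of flow context. Illustrative only.
func scalarKind(c byte) string {
	switch c {
	case '|':
		return "block scalar (literal)"
	case '>':
		return "block scalar (folded)"
	case '\'':
		return "flow scalar (single-quoted)"
	case '"':
		return "flow scalar (double-quoted)"
	default:
		return "plain scalar (subject to further checks)"
	}
}

func main() {
	for _, c := range []byte{'|', '>', '\'', '"', 'a'} {
		fmt.Printf("%q -> %s\n", c, scalarKind(c))
	}
}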
-func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_plain_scalar(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Eat whitespaces and comments until the next token is found. -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { - - scan_mark := parser.mark - - // Until the next token is not found. - for { - // Allow the BOM mark to start a line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { - skip(parser) - } - - // Eat whitespaces. - // Tabs are allowed: - // - in the flow context - // - in the block context, but not at the beginning of the line or - // after '-', '?', or ':' (complex value). - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if we just had a line comment under a sequence entry that - // looks more like a header to the following content. Similar to this: - // - // - # The comment - // - Some data - // - // If so, transform the line comment to a head comment and reposition. - if len(parser.comments) > 0 && len(parser.tokens) > 1 { - tokenA := parser.tokens[len(parser.tokens)-2] - tokenB := parser.tokens[len(parser.tokens)-1] - comment := &parser.comments[len(parser.comments)-1] - if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) { - // If it was in the prior line, reposition so it becomes a - // header of the follow up token. Otherwise, keep it in place - // so it becomes a header of the former. - comment.head = comment.line - comment.line = nil - if comment.start_mark.line == parser.mark.line-1 { - comment.token_mark = parser.mark - } - } - } - - // Eat a comment until a line break. - if parser.buffer[parser.buffer_pos] == '#' { - if !yaml_parser_scan_comments(parser, scan_mark) { - return false - } - } - - // If it is a line break, eat it. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - - // In the block context, a new line may start a simple key. - if parser.flow_level == 0 { - parser.simple_key_allowed = true - } - } else { - break // We have found a token. - } - } - - return true -} - -// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { - // Eat '%'. - start_mark := parser.mark - skip(parser) - - // Scan the directive name. - var name []byte - if !yaml_parser_scan_directive_name(parser, start_mark, &name) { - return false - } - - // Is it a YAML directive? 
- if bytes.Equal(name, []byte("YAML")) { - // Scan the VERSION directive value. - var major, minor int8 - if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { - return false - } - end_mark := parser.mark - - // Create a VERSION-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_VERSION_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - major: major, - minor: minor, - } - - // Is it a TAG directive? - } else if bytes.Equal(name, []byte("TAG")) { - // Scan the TAG directive value. - var handle, prefix []byte - if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { - return false - } - end_mark := parser.mark - - // Create a TAG-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_TAG_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - prefix: prefix, - } - - // Unknown directive. - } else { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unknown directive name") - return false - } - - // Eat the rest of the line including any comments. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - if parser.buffer[parser.buffer_pos] == '#' { - // [Go] Discard this inline comment for the time being. - //if !yaml_parser_scan_line_comment(parser, start_mark) { - // return false - //} - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - return true -} - -// Scan the directive name. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^ -// -func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { - // Consume the directive name. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - var s []byte - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the name is empty. - if len(s) == 0 { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "could not find expected directive name") - return false - } - - // Check for an blank character after the name. - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unexpected non-alphabetical character") - return false - } - *name = s - return true -} - -// Scan the value of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^ -func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { - // Eat whitespaces. 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the major version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { - return false - } - - // Eat '.'. - if parser.buffer[parser.buffer_pos] != '.' { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected digit or '.' character") - } - - skip(parser) - - // Consume the minor version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { - return false - } - return true -} - -const max_number_length = 2 - -// Scan the version number of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ -func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { - - // Repeat while the next character is digit. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var value, length int8 - for is_digit(parser.buffer, parser.buffer_pos) { - // Check if the number is too long. - length++ - if length > max_number_length { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "found extremely long version number") - } - value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the number was present. - if length == 0 { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected version number") - } - *number = value - return true -} - -// Scan the value of a TAG-DIRECTIVE token. -// -// Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { - var handle_value, prefix_value []byte - - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a handle. - if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { - return false - } - - // Expect a whitespace. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blank(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace") - return false - } - - // Eat whitespaces. - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a prefix. - if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { - return false - } - - // Expect a whitespace or line break. 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace or line break") - return false - } - - *handle = handle_value - *prefix = prefix_value - return true -} - -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { - var s []byte - - // Eat the indicator character. - start_mark := parser.mark - skip(parser) - - // Consume the value. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - end_mark := parser.mark - - /* - * Check if length of the anchor is greater than 0 and it is followed by - * a whitespace character or one of the indicators: - * - * '?', ':', ',', ']', '}', '%', '@', '`'. - */ - - if len(s) == 0 || - !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || - parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '`') { - context := "while scanning an alias" - if typ == yaml_ANCHOR_TOKEN { - context = "while scanning an anchor" - } - yaml_parser_set_scanner_error(parser, context, start_mark, - "did not find expected alphabetic or numeric character") - return false - } - - // Create a token. - *token = yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - value: s, - } - - return true -} - -/* - * Scan a TAG token. - */ - -func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { - var handle, suffix []byte - - start_mark := parser.mark - - // Check if the tag is in the canonical form. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - if parser.buffer[parser.buffer_pos+1] == '<' { - // Keep the handle as '' - - // Eat '!<' - skip(parser) - skip(parser) - - // Consume the tag value. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - - // Check for '>' and eat it. - if parser.buffer[parser.buffer_pos] != '>' { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find the expected '>'") - return false - } - - skip(parser) - } else { - // The tag has either the '!suffix' or the '!handle!suffix' form. - - // First, try to scan a handle. - if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { - return false - } - - // Check if it is, indeed, handle. - if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { - // Scan the suffix now. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - } else { - // It wasn't a handle after all. Scan the rest of the tag. - if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { - return false - } - - // Set the handle to '!'. - handle = []byte{'!'} - - // A special case: the '!' tag. Set the handle to '' and the - // suffix to '!'. - if len(suffix) == 0 { - handle, suffix = suffix, handle - } - } - } - - // Check the character which ends the tag. 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find expected whitespace or line break") - return false - } - - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_TAG_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - suffix: suffix, - } - return true -} - -// Scan a tag handle. -func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { - // Check the initial '!' character. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] != '!' { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - - var s []byte - - // Copy the '!' character. - s = read(parser, s) - - // Copy all subsequent alphabetical and numerical characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the trailing character is '!' and copy it. - if parser.buffer[parser.buffer_pos] == '!' { - s = read(parser, s) - } else { - // It's either the '!' tag or not really a tag handle. If it's a %TAG - // directive, it's an error. If it's a tag token, it must be a part of URI. - if directive && string(s) != "!" { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - } - - *handle = s - return true -} - -// Scan a tag. -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { - //size_t length = head ? strlen((char *)head) : 0 - var s []byte - hasTag := len(head) > 0 - - // Copy the head if needed. - // - // Note that we don't copy the leading '!' character. - if len(head) > 1 { - s = append(s, head[1:]...) - } - - // Scan the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // The set of characters that may appear in URI is as follows: - // - // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', - // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', - // '%'. - // [Go] TODO Convert this into more reasonable logic. - for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || - parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || - parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || - parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || - parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || - parser.buffer[parser.buffer_pos] == '%' { - // Check if it is a URI-escape sequence. 
- if parser.buffer[parser.buffer_pos] == '%' { - if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { - return false - } - } else { - s = read(parser, s) - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - hasTag = true - } - - if !hasTag { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected tag URI") - return false - } - *uri = s - return true -} - -// Decode an URI-escape sequence corresponding to a single UTF-8 character. -func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { - - // Decode the required number of characters. - w := 1024 - for w > 0 { - // Check for a URI-escaped octet. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - - if !(parser.buffer[parser.buffer_pos] == '%' && - is_hex(parser.buffer, parser.buffer_pos+1) && - is_hex(parser.buffer, parser.buffer_pos+2)) { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find URI escaped octet") - } - - // Get the octet. - octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) - - // If it is the leading octet, determine the length of the UTF-8 sequence. - if w == 1024 { - w = width(octet) - if w == 0 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect leading UTF-8 octet") - } - } else { - // Check if the trailing octet is correct. - if octet&0xC0 != 0x80 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect trailing UTF-8 octet") - } - } - - // Copy the octet and move the pointers. - *s = append(*s, octet) - skip(parser) - skip(parser) - skip(parser) - w-- - } - return true -} - -// Scan a block scalar. -func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { - // Eat the indicator '|' or '>'. - start_mark := parser.mark - skip(parser) - - // Scan the additional block scalar indicators. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check for a chomping indicator. - var chomping, increment int - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - // Set the chomping method and eat the indicator. - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - - // Check for an indentation indicator. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_digit(parser.buffer, parser.buffer_pos) { - // Check that the indentation is greater than 0. - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - - // Get the indentation level and eat the indicator. - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - } - - } else if is_digit(parser.buffer, parser.buffer_pos) { - // Do the same as above, but in the opposite order. 
- - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - } - } - - // Eat whitespaces and comments to the end of the line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.buffer[parser.buffer_pos] == '#' { - if !yaml_parser_scan_line_comment(parser, start_mark) { - return false - } - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - end_mark := parser.mark - - // Set the indentation level if it was specified. - var indent int - if increment > 0 { - if parser.indent >= 0 { - indent = parser.indent + increment - } else { - indent = increment - } - } - - // Scan the leading line breaks and determine the indentation level if needed. - var s, leading_break, trailing_breaks []byte - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - - // Scan the block scalar content. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var leading_blank, trailing_blank bool - for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { - // We are at the beginning of a non-empty line. - - // Is it a trailing whitespace? - trailing_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Check if we need to fold the leading line break. - if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { - // Do we need to join the lines by space? - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } - } else { - s = append(s, leading_break...) - } - leading_break = leading_break[:0] - - // Append the remaining line breaks. - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - - // Is it a leading whitespace? - leading_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Consume the current line. - for !is_breakz(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - leading_break = read_line(parser, leading_break) - - // Eat the following indentation spaces and line breaks. 
- if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - } - - // Chomp the tail. - if chomping != -1 { - s = append(s, leading_break...) - } - if chomping == 1 { - s = append(s, trailing_breaks...) - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_LITERAL_SCALAR_STYLE, - } - if !literal { - token.style = yaml_FOLDED_SCALAR_STYLE - } - return true -} - -// Scan indentation spaces and line breaks for a block scalar. Determine the -// indentation level if needed. -func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { - *end_mark = parser.mark - - // Eat the indentation spaces and line breaks. - max_indent := 0 - for { - // Eat the indentation spaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.mark.column > max_indent { - max_indent = parser.mark.column - } - - // Check for a tab character messing the indentation. - if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { - return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found a tab character where an indentation space is expected") - } - - // Have we found a non-empty line? - if !is_break(parser.buffer, parser.buffer_pos) { - break - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - // [Go] Should really be returning breaks instead. - *breaks = read_line(parser, *breaks) - *end_mark = parser.mark - } - - // Determine the indentation level if needed. - if *indent == 0 { - *indent = max_indent - if *indent < parser.indent+1 { - *indent = parser.indent + 1 - } - if *indent < 1 { - *indent = 1 - } - } - return true -} - -// Scan a quoted scalar. -func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { - // Eat the left quote. - start_mark := parser.mark - skip(parser) - - // Consume the content of the quoted scalar. - var s, leading_break, trailing_breaks, whitespaces []byte - for { - // Check that there are no document indicators at the beginning of the line. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected document indicator") - return false - } - - // Check for EOF. - if is_z(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected end of stream") - return false - } - - // Consume non-blank characters. 
- leading_blanks := false - for !is_blankz(parser.buffer, parser.buffer_pos) { - if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { - // Is is an escaped single quote. - s = append(s, '\'') - skip(parser) - skip(parser) - - } else if single && parser.buffer[parser.buffer_pos] == '\'' { - // It is a right single quote. - break - } else if !single && parser.buffer[parser.buffer_pos] == '"' { - // It is a right double quote. - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { - // It is an escaped line break. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - skip(parser) - skip_line(parser) - leading_blanks = true - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' { - // It is an escape sequence. - code_length := 0 - - // Check the escape character. - switch parser.buffer[parser.buffer_pos+1] { - case '0': - s = append(s, 0) - case 'a': - s = append(s, '\x07') - case 'b': - s = append(s, '\x08') - case 't', '\t': - s = append(s, '\x09') - case 'n': - s = append(s, '\x0A') - case 'v': - s = append(s, '\x0B') - case 'f': - s = append(s, '\x0C') - case 'r': - s = append(s, '\x0D') - case 'e': - s = append(s, '\x1B') - case ' ': - s = append(s, '\x20') - case '"': - s = append(s, '"') - case '\'': - s = append(s, '\'') - case '\\': - s = append(s, '\\') - case 'N': // NEL (#x85) - s = append(s, '\xC2') - s = append(s, '\x85') - case '_': // #xA0 - s = append(s, '\xC2') - s = append(s, '\xA0') - case 'L': // LS (#x2028) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA8') - case 'P': // PS (#x2029) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA9') - case 'x': - code_length = 2 - case 'u': - code_length = 4 - case 'U': - code_length = 8 - default: - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found unknown escape character") - return false - } - - skip(parser) - skip(parser) - - // Consume an arbitrary escape code. - if code_length > 0 { - var value int - - // Scan the character value. - if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { - return false - } - for k := 0; k < code_length; k++ { - if !is_hex(parser.buffer, parser.buffer_pos+k) { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "did not find expected hexdecimal number") - return false - } - value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) - } - - // Check the value and write the character. - if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found invalid Unicode character escape code") - return false - } - if value <= 0x7F { - s = append(s, byte(value)) - } else if value <= 0x7FF { - s = append(s, byte(0xC0+(value>>6))) - s = append(s, byte(0x80+(value&0x3F))) - } else if value <= 0xFFFF { - s = append(s, byte(0xE0+(value>>12))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } else { - s = append(s, byte(0xF0+(value>>18))) - s = append(s, byte(0x80+((value>>12)&0x3F))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } - - // Advance the pointer. - for k := 0; k < code_length; k++ { - skip(parser) - } - } - } else { - // It is a non-escaped non-blank character. 
- s = read(parser, s) - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we are at the end of the scalar. - if single { - if parser.buffer[parser.buffer_pos] == '\'' { - break - } - } else { - if parser.buffer[parser.buffer_pos] == '"' { - break - } - } - - // Consume blank characters. - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Join the whitespaces or fold line breaks. - if leading_blanks { - // Do we need to fold line breaks? - if len(leading_break) > 0 && leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Eat the right quote. - skip(parser) - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_SINGLE_QUOTED_SCALAR_STYLE, - } - if !single { - token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - return true -} - -// Scan a plain scalar. -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { - - var s, leading_break, trailing_breaks, whitespaces []byte - var leading_blanks bool - var indent = parser.indent + 1 - - start_mark := parser.mark - end_mark := parser.mark - - // Consume the content of the plain scalar. - for { - // Check for a document indicator. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - break - } - - // Check for a comment. - if parser.buffer[parser.buffer_pos] == '#' { - break - } - - // Consume non-blank characters. - for !is_blankz(parser.buffer, parser.buffer_pos) { - - // Check for indicators that may end a plain scalar. - if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level > 0 && - (parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}')) { - break - } - - // Check if we need to join whitespaces and breaks. 
- if leading_blanks || len(whitespaces) > 0 { - if leading_blanks { - // Do we need to fold line breaks? - if leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - leading_blanks = false - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Copy the character. - s = read(parser, s) - - end_mark = parser.mark - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - // Is it the end? - if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { - break - } - - // Consume blank characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - - // Check for tab characters that abuse indentation. - if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violates indentation") - return false - } - - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check indentation level. - if parser.flow_level == 0 && parser.mark.column < indent { - break - } - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_PLAIN_SCALAR_STYLE, - } - - // Note that we change the 'simple_key_allowed' flag. 
- if leading_blanks { - parser.simple_key_allowed = true - } - return true -} - -func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool { - if parser.newlines > 0 { - return true - } - - var start_mark yaml_mark_t - var text []byte - - for peek := 0; peek < 512; peek++ { - if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { - break - } - if is_blank(parser.buffer, parser.buffer_pos+peek) { - continue - } - if parser.buffer[parser.buffer_pos+peek] == '#' { - seen := parser.mark.index+peek - for { - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_breakz(parser.buffer, parser.buffer_pos) { - if parser.mark.index >= seen { - break - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } else if parser.mark.index >= seen { - if len(text) == 0 { - start_mark = parser.mark - } - text = read(parser, text) - } else { - skip(parser) - } - } - } - break - } - if len(text) > 0 { - parser.comments = append(parser.comments, yaml_comment_t{ - token_mark: token_mark, - start_mark: start_mark, - line: text, - }) - } - return true -} - -func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool { - token := parser.tokens[len(parser.tokens)-1] - - if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { - token = parser.tokens[len(parser.tokens)-2] - } - - var token_mark = token.start_mark - var start_mark yaml_mark_t - var next_indent = parser.indent - if next_indent < 0 { - next_indent = 0 - } - - var recent_empty = false - var first_empty = parser.newlines <= 1 - - var line = parser.mark.line - var column = parser.mark.column - - var text []byte - - // The foot line is the place where a comment must start to - // still be considered as a foot of the prior content. - // If there's some content in the currently parsed line, then - // the foot is the line below it. - var foot_line = -1 - if scan_mark.line > 0 { - foot_line = parser.mark.line-parser.newlines+1 - if parser.newlines == 0 && parser.mark.column > 1 { - foot_line++ - } - } - - var peek = 0 - for ; peek < 512; peek++ { - if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { - break - } - column++ - if is_blank(parser.buffer, parser.buffer_pos+peek) { - continue - } - c := parser.buffer[parser.buffer_pos+peek] - var close_flow = parser.flow_level > 0 && (c == ']' || c == '}') - if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) { - // Got line break or terminator. - if close_flow || !recent_empty { - if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) { - // This is the first empty line and there were no empty lines before, - // so this initial part of the comment is a foot of the prior token - // instead of being a head for the following one. Split it up. - // Alternatively, this might also be the last comment inside a flow - // scope, so it must be a footer. - if len(text) > 0 { - if start_mark.column-1 < next_indent { - // If dedented it's unrelated to the prior token. 
- token_mark = start_mark - } - parser.comments = append(parser.comments, yaml_comment_t{ - scan_mark: scan_mark, - token_mark: token_mark, - start_mark: start_mark, - end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, - foot: text, - }) - scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} - token_mark = scan_mark - text = nil - } - } else { - if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { - text = append(text, '\n') - } - } - } - if !is_break(parser.buffer, parser.buffer_pos+peek) { - break - } - first_empty = false - recent_empty = true - column = 0 - line++ - continue - } - - if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) { - // The comment at the different indentation is a foot of the - // preceding data rather than a head of the upcoming one. - parser.comments = append(parser.comments, yaml_comment_t{ - scan_mark: scan_mark, - token_mark: token_mark, - start_mark: start_mark, - end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, - foot: text, - }) - scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} - token_mark = scan_mark - text = nil - } - - if parser.buffer[parser.buffer_pos+peek] != '#' { - break - } - - if len(text) == 0 { - start_mark = yaml_mark_t{parser.mark.index + peek, line, column} - } else { - text = append(text, '\n') - } - - recent_empty = false - - // Consume until after the consumed comment line. - seen := parser.mark.index+peek - for { - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_breakz(parser.buffer, parser.buffer_pos) { - if parser.mark.index >= seen { - break - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } else if parser.mark.index >= seen { - text = read(parser, text) - } else { - skip(parser) - } - } - - peek = 0 - column = 0 - line = parser.mark.line - next_indent = parser.indent - if next_indent < 0 { - next_indent = 0 - } - } - - if len(text) > 0 { - parser.comments = append(parser.comments, yaml_comment_t{ - scan_mark: scan_mark, - token_mark: start_mark, - start_mark: start_mark, - end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column}, - head: text, - }) - } - return true -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/sorter.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/sorter.go deleted file mode 100644 index 9210ece7e..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/sorter.go +++ /dev/null @@ -1,134 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package yaml - -import ( - "reflect" - "unicode" -) - -type keyList []reflect.Value - -func (l keyList) Len() int { return len(l) } -func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l keyList) Less(i, j int) bool { - a := l[i] - b := l[j] - ak := a.Kind() - bk := b.Kind() - for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { - a = a.Elem() - ak = a.Kind() - } - for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { - b = b.Elem() - bk = b.Kind() - } - af, aok := keyFloat(a) - bf, bok := keyFloat(b) - if aok && bok { - if af != bf { - return af < bf - } - if ak != bk { - return ak < bk - } - return numLess(a, b) - } - if ak != reflect.String || bk != reflect.String { - return ak < bk - } - ar, br := []rune(a.String()), []rune(b.String()) - digits := false - for i := 0; i < len(ar) && i < len(br); i++ { - if ar[i] == br[i] { - digits = unicode.IsDigit(ar[i]) - continue - } - al := unicode.IsLetter(ar[i]) - bl := unicode.IsLetter(br[i]) - if al && bl { - return ar[i] < br[i] - } - if al || bl { - if digits { - return al - } else { - return bl - } - } - var ai, bi int - var an, bn int64 - if ar[i] == '0' || br[i] == '0' { - for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { - if ar[j] != '0' { - an = 1 - bn = 1 - break - } - } - } - for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { - an = an*10 + int64(ar[ai]-'0') - } - for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { - bn = bn*10 + int64(br[bi]-'0') - } - if an != bn { - return an < bn - } - if ai != bi { - return ai < bi - } - return ar[i] < br[i] - } - return len(ar) < len(br) -} - -// keyFloat returns a float value for v if it is a number/bool -// and whether it is a number/bool or not. -func keyFloat(v reflect.Value) (f float64, ok bool) { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return float64(v.Int()), true - case reflect.Float32, reflect.Float64: - return v.Float(), true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return float64(v.Uint()), true - case reflect.Bool: - if v.Bool() { - return 1, true - } - return 0, true - } - return 0, false -} - -// numLess returns whether a < b. -// a and b must necessarily have the same kind. 
-func numLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return a.Int() < b.Int() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Bool: - return !a.Bool() && b.Bool() - } - panic("not a number") -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/writerc.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/writerc.go deleted file mode 100644 index b8a116bf9..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/writerc.go +++ /dev/null @@ -1,48 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -// Set the writer error and return false. -func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_WRITER_ERROR - emitter.problem = problem - return false -} - -// Flush the output buffer. -func yaml_emitter_flush(emitter *yaml_emitter_t) bool { - if emitter.write_handler == nil { - panic("write handler not set") - } - - // Check if the buffer is empty. - if emitter.buffer_pos == 0 { - return true - } - - if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yaml.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yaml.go deleted file mode 100644 index bb6418dba..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yaml.go +++ /dev/null @@ -1,708 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package yaml implements YAML support for the Go language. -// -// Source code and other details for the project are available at GitHub: -// -// https://github.com/go-yaml/yaml -// -package yaml - -import ( - "errors" - "fmt" - "io" - "reflect" - "strings" - "sync" - "unicode/utf8" -) - -// The Unmarshaler interface may be implemented by types to customize their -// behavior when being unmarshaled from a YAML document. -type Unmarshaler interface { - UnmarshalYAML(value *Node) error -} - -type obsoleteUnmarshaler interface { - UnmarshalYAML(unmarshal func(interface{}) error) error -} - -// The Marshaler interface may be implemented by types to customize their -// behavior when being marshaled into a YAML document. The returned value -// is marshaled in place of the original value implementing Marshaler. -// -// If an error is returned by MarshalYAML, the marshaling procedure stops -// and returns with the provided error. -type Marshaler interface { - MarshalYAML() (interface{}, error) -} - -// Unmarshal decodes the first document found within the in byte slice -// and assigns decoded values into the out value. -// -// Maps and pointers (to a struct, string, int, etc) are accepted as out -// values. If an internal pointer within a struct is not initialized, -// the yaml package will initialize it if necessary for unmarshalling -// the provided data. The out parameter must not be nil. -// -// The type of the decoded values should be compatible with the respective -// values in out. If one or more values cannot be decoded due to a type -// mismatches, decoding continues partially until the end of the YAML -// content, and a *yaml.TypeError is returned with details for all -// missed values. -// -// Struct fields are only unmarshalled if they are exported (have an -// upper case first letter), and are unmarshalled using the field name -// lowercased as the default key. Custom keys may be defined via the -// "yaml" name in the field tag: the content preceding the first comma -// is used as the key, and the following comma-separated options are -// used to tweak the marshalling process (see Marshal). -// Conflicting names result in a runtime error. -// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// var t T -// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) -// -// See the documentation of Marshal for the format of tags and a list of -// supported tag options. -// -func Unmarshal(in []byte, out interface{}) (err error) { - return unmarshal(in, out, false) -} - -// A Decoder reads and decodes YAML values from an input stream. -type Decoder struct { - parser *parser - knownFields bool -} - -// NewDecoder returns a new decoder that reads from r. -// -// The decoder introduces its own buffering and may read -// data from r beyond the YAML values requested. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ - parser: newParserFromReader(r), - } -} - -// KnownFields ensures that the keys in decoded mappings to -// exist as fields in the struct being decoded into. 
-func (dec *Decoder) KnownFields(enable bool) { - dec.knownFields = enable -} - -// Decode reads the next YAML-encoded value from its input -// and stores it in the value pointed to by v. -// -// See the documentation for Unmarshal for details about the -// conversion of YAML into a Go value. -func (dec *Decoder) Decode(v interface{}) (err error) { - d := newDecoder() - d.knownFields = dec.knownFields - defer handleErr(&err) - node := dec.parser.parse() - if node == nil { - return io.EOF - } - out := reflect.ValueOf(v) - if out.Kind() == reflect.Ptr && !out.IsNil() { - out = out.Elem() - } - d.unmarshal(node, out) - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -// Decode decodes the node and stores its data into the value pointed to by v. -// -// See the documentation for Unmarshal for details about the -// conversion of YAML into a Go value. -func (n *Node) Decode(v interface{}) (err error) { - d := newDecoder() - defer handleErr(&err) - out := reflect.ValueOf(v) - if out.Kind() == reflect.Ptr && !out.IsNil() { - out = out.Elem() - } - d.unmarshal(n, out) - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -func unmarshal(in []byte, out interface{}, strict bool) (err error) { - defer handleErr(&err) - d := newDecoder() - p := newParser(in) - defer p.destroy() - node := p.parse() - if node != nil { - v := reflect.ValueOf(out) - if v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - d.unmarshal(node, v) - } - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -// Marshal serializes the value provided into a YAML document. The structure -// of the generated document will reflect the structure of the value itself. -// Maps and pointers (to struct, string, int, etc) are accepted as the in value. -// -// Struct fields are only marshalled if they are exported (have an upper case -// first letter), and are marshalled using the field name lowercased as the -// default key. Custom keys may be defined via the "yaml" name in the field -// tag: the content preceding the first comma is used as the key, and the -// following comma-separated options are used to tweak the marshalling process. -// Conflicting names result in a runtime error. -// -// The field tag format accepted is: -// -// `(...) yaml:"[][,[,]]" (...)` -// -// The following flags are currently supported: -// -// omitempty Only include the field if it's not set to the zero -// value for the type or to empty slices or maps. -// Zero valued structs will be omitted if all their public -// fields are zero, unless they implement an IsZero -// method (see the IsZeroer interface type), in which -// case the field will be excluded if IsZero returns true. -// -// flow Marshal using a flow style (useful for structs, -// sequences and maps). -// -// inline Inline the field, which must be a struct or a map, -// causing all of its fields or keys to be processed as if -// they were part of the outer struct. For maps, keys must -// not conflict with the yaml keys of other struct fields. -// -// In addition, if the key is "-", the field is ignored. 
-// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" -// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" -// -func Marshal(in interface{}) (out []byte, err error) { - defer handleErr(&err) - e := newEncoder() - defer e.destroy() - e.marshalDoc("", reflect.ValueOf(in)) - e.finish() - out = e.out - return -} - -// An Encoder writes YAML values to an output stream. -type Encoder struct { - encoder *encoder -} - -// NewEncoder returns a new encoder that writes to w. -// The Encoder should be closed after use to flush all data -// to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - encoder: newEncoderWithWriter(w), - } -} - -// Encode writes the YAML encoding of v to the stream. -// If multiple items are encoded to the stream, the -// second and subsequent document will be preceded -// with a "---" document separator, but the first will not. -// -// See the documentation for Marshal for details about the conversion of Go -// values to YAML. -func (e *Encoder) Encode(v interface{}) (err error) { - defer handleErr(&err) - e.encoder.marshalDoc("", reflect.ValueOf(v)) - return nil -} - -// Encode encodes value v and stores its representation in n. -// -// See the documentation for Marshal for details about the -// conversion of Go values into YAML. -func (n *Node) Encode(v interface{}) (err error) { - defer handleErr(&err) - e := newEncoder() - defer e.destroy() - e.marshalDoc("", reflect.ValueOf(v)) - e.finish() - p := newParser(e.out) - p.textless = true - defer p.destroy() - doc := p.parse() - *n = *doc.Content[0] - return nil -} - -// SetIndent changes the used indentation used when encoding. -func (e *Encoder) SetIndent(spaces int) { - if spaces < 0 { - panic("yaml: cannot indent to a negative number of spaces") - } - e.encoder.indent = spaces -} - -// CompactSeqIndent makes it so that '- ' is considered part of the indentation. -func (e *Encoder) CompactSeqIndent() { - e.encoder.emitter.compact_sequence_indent = true -} - -// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation. -func (e *Encoder) DefaultSeqIndent() { - e.encoder.emitter.compact_sequence_indent = false -} - -// Close closes the encoder by writing any remaining data. -// It does not write a stream terminating string "...". -func (e *Encoder) Close() (err error) { - defer handleErr(&err) - e.encoder.finish() - return nil -} - -func handleErr(err *error) { - if v := recover(); v != nil { - if e, ok := v.(yamlError); ok { - *err = e.err - } else { - panic(v) - } - } -} - -type yamlError struct { - err error -} - -func fail(err error) { - panic(yamlError{err}) -} - -func failf(format string, args ...interface{}) { - panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) -} - -// A TypeError is returned by Unmarshal when one or more fields in -// the YAML document cannot be properly decoded into the requested -// types. When this error is returned, the value is still -// unmarshaled partially. -type TypeError struct { - Errors []string -} - -func (e *TypeError) Error() string { - return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) -} - -type Kind uint32 - -const ( - DocumentNode Kind = 1 << iota - SequenceNode - MappingNode - ScalarNode - AliasNode -) - -type Style uint32 - -const ( - TaggedStyle Style = 1 << iota - DoubleQuotedStyle - SingleQuotedStyle - LiteralStyle - FoldedStyle - FlowStyle -) - -// Node represents an element in the YAML document hierarchy. 
While documents -// are typically encoded and decoded into higher level types, such as structs -// and maps, Node is an intermediate representation that allows detailed -// control over the content being decoded or encoded. -// -// It's worth noting that although Node offers access into details such as -// line numbers, colums, and comments, the content when re-encoded will not -// have its original textual representation preserved. An effort is made to -// render the data plesantly, and to preserve comments near the data they -// describe, though. -// -// Values that make use of the Node type interact with the yaml package in the -// same way any other type would do, by encoding and decoding yaml data -// directly or indirectly into them. -// -// For example: -// -// var person struct { -// Name string -// Address yaml.Node -// } -// err := yaml.Unmarshal(data, &person) -// -// Or by itself: -// -// var person Node -// err := yaml.Unmarshal(data, &person) -// -type Node struct { - // Kind defines whether the node is a document, a mapping, a sequence, - // a scalar value, or an alias to another node. The specific data type of - // scalar nodes may be obtained via the ShortTag and LongTag methods. - Kind Kind - - // Style allows customizing the apperance of the node in the tree. - Style Style - - // Tag holds the YAML tag defining the data type for the value. - // When decoding, this field will always be set to the resolved tag, - // even when it wasn't explicitly provided in the YAML content. - // When encoding, if this field is unset the value type will be - // implied from the node properties, and if it is set, it will only - // be serialized into the representation if TaggedStyle is used or - // the implicit tag diverges from the provided one. - Tag string - - // Value holds the unescaped and unquoted represenation of the value. - Value string - - // Anchor holds the anchor name for this node, which allows aliases to point to it. - Anchor string - - // Alias holds the node that this alias points to. Only valid when Kind is AliasNode. - Alias *Node - - // Content holds contained nodes for documents, mappings, and sequences. - Content []*Node - - // HeadComment holds any comments in the lines preceding the node and - // not separated by an empty line. - HeadComment string - - // LineComment holds any comments at the end of the line where the node is in. - LineComment string - - // FootComment holds any comments following the node and before empty lines. - FootComment string - - // Line and Column hold the node position in the decoded YAML text. - // These fields are not respected when encoding the node. - Line int - Column int -} - -// IsZero returns whether the node has all of its fields unset. -func (n *Node) IsZero() bool { - return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil && - n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0 -} - - -// LongTag returns the long form of the tag that indicates the data type for -// the node. If the Tag field isn't explicitly defined, one will be computed -// based on the node properties. -func (n *Node) LongTag() string { - return longTag(n.ShortTag()) -} - -// ShortTag returns the short form of the YAML tag that indicates data type for -// the node. If the Tag field isn't explicitly defined, one will be computed -// based on the node properties. 
-func (n *Node) ShortTag() string { - if n.indicatedString() { - return strTag - } - if n.Tag == "" || n.Tag == "!" { - switch n.Kind { - case MappingNode: - return mapTag - case SequenceNode: - return seqTag - case AliasNode: - if n.Alias != nil { - return n.Alias.ShortTag() - } - case ScalarNode: - tag, _ := resolve("", n.Value) - return tag - case 0: - // Special case to make the zero value convenient. - if n.IsZero() { - return nullTag - } - } - return "" - } - return shortTag(n.Tag) -} - -func (n *Node) indicatedString() bool { - return n.Kind == ScalarNode && - (shortTag(n.Tag) == strTag || - (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0) -} - -// SetString is a convenience function that sets the node to a string value -// and defines its style in a pleasant way depending on its content. -func (n *Node) SetString(s string) { - n.Kind = ScalarNode - if utf8.ValidString(s) { - n.Value = s - n.Tag = strTag - } else { - n.Value = encodeBase64(s) - n.Tag = binaryTag - } - if strings.Contains(n.Value, "\n") { - n.Style = LiteralStyle - } -} - -// -------------------------------------------------------------------------- -// Maintain a mapping of keys to structure field indexes - -// The code in this section was copied from mgo/bson. - -// structInfo holds details for the serialization of fields of -// a given struct. -type structInfo struct { - FieldsMap map[string]fieldInfo - FieldsList []fieldInfo - - // InlineMap is the number of the field in the struct that - // contains an ,inline map, or -1 if there's none. - InlineMap int - - // InlineUnmarshalers holds indexes to inlined fields that - // contain unmarshaler values. - InlineUnmarshalers [][]int -} - -type fieldInfo struct { - Key string - Num int - OmitEmpty bool - Flow bool - // Id holds the unique field identifier, so we can cheaply - // check for field duplicates without maintaining an extra map. - Id int - - // Inline holds the field index if the field is part of an inlined struct. 
- Inline []int -} - -var structMap = make(map[reflect.Type]*structInfo) -var fieldMapMutex sync.RWMutex -var unmarshalerType reflect.Type - -func init() { - var v Unmarshaler - unmarshalerType = reflect.ValueOf(&v).Elem().Type() -} - -func getStructInfo(st reflect.Type) (*structInfo, error) { - fieldMapMutex.RLock() - sinfo, found := structMap[st] - fieldMapMutex.RUnlock() - if found { - return sinfo, nil - } - - n := st.NumField() - fieldsMap := make(map[string]fieldInfo) - fieldsList := make([]fieldInfo, 0, n) - inlineMap := -1 - inlineUnmarshalers := [][]int(nil) - for i := 0; i != n; i++ { - field := st.Field(i) - if field.PkgPath != "" && !field.Anonymous { - continue // Private field - } - - info := fieldInfo{Num: i} - - tag := field.Tag.Get("yaml") - if tag == "" && strings.Index(string(field.Tag), ":") < 0 { - tag = string(field.Tag) - } - if tag == "-" { - continue - } - - inline := false - fields := strings.Split(tag, ",") - if len(fields) > 1 { - for _, flag := range fields[1:] { - switch flag { - case "omitempty": - info.OmitEmpty = true - case "flow": - info.Flow = true - case "inline": - inline = true - default: - return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) - } - } - tag = fields[0] - } - - if inline { - switch field.Type.Kind() { - case reflect.Map: - if inlineMap >= 0 { - return nil, errors.New("multiple ,inline maps in struct " + st.String()) - } - if field.Type.Key() != reflect.TypeOf("") { - return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) - } - inlineMap = info.Num - case reflect.Struct, reflect.Ptr: - ftype := field.Type - for ftype.Kind() == reflect.Ptr { - ftype = ftype.Elem() - } - if ftype.Kind() != reflect.Struct { - return nil, errors.New("option ,inline may only be used on a struct or map field") - } - if reflect.PtrTo(ftype).Implements(unmarshalerType) { - inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) - } else { - sinfo, err := getStructInfo(ftype) - if err != nil { - return nil, err - } - for _, index := range sinfo.InlineUnmarshalers { - inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) - } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) - } - finfo.Id = len(fieldsList) - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) - } - } - default: - return nil, errors.New("option ,inline may only be used on a struct or map field") - } - continue - } - - if tag != "" { - info.Key = tag - } else { - info.Key = strings.ToLower(field.Name) - } - - if _, found = fieldsMap[info.Key]; found { - msg := "duplicated key '" + info.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - - info.Id = len(fieldsList) - fieldsList = append(fieldsList, info) - fieldsMap[info.Key] = info - } - - sinfo = &structInfo{ - FieldsMap: fieldsMap, - FieldsList: fieldsList, - InlineMap: inlineMap, - InlineUnmarshalers: inlineUnmarshalers, - } - - fieldMapMutex.Lock() - structMap[st] = sinfo - fieldMapMutex.Unlock() - return sinfo, nil -} - -// IsZeroer is used to check whether an object is zero to -// determine whether it should be omitted when marshaling -// with the omitempty flag. One notable implementation -// is time.Time. 
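The tag parsing in getStructInfo above is what gives the `yaml:"..."` struct tags their meaning: an optional key name followed by the flags omitempty, flow, and inline, with untagged fields falling back to the lowercased field name. A small sketch of those flags through the public yaml.v3 Marshal API (the type and field names are made up for illustration); the omitempty behaviour defers to the IsZeroer check defined next:

package main

import (
	"fmt"
	"time"

	"gopkg.in/yaml.v3"
)

type Meta struct {
	Owner string `yaml:"owner"`
}

type Config struct {
	Name  string    `yaml:"name"`
	Ports []int     `yaml:"ports,flow"`   // emitted as a flow sequence: [80, 443]
	At    time.Time `yaml:"at,omitempty"` // time.Time implements IsZero, so the zero time is dropped
	Meta  Meta      `yaml:",inline"`      // Meta's fields are promoted into the parent mapping
	Note  string    // untagged: the key defaults to the lowercased field name, "note"
}

func main() {
	out, err := yaml.Marshal(Config{
		Name:  "demo",
		Ports: []int{80, 443},
		Meta:  Meta{Owner: "platform"},
		Note:  "hello",
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// name: demo
	// ports: [80, 443]
	// owner: platform
	// note: hello
}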
-type IsZeroer interface { - IsZero() bool -} - -func isZero(v reflect.Value) bool { - kind := v.Kind() - if z, ok := v.Interface().(IsZeroer); ok { - if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { - return true - } - return z.IsZero() - } - switch kind { - case reflect.String: - return len(v.String()) == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflect.Slice: - return v.Len() == 0 - case reflect.Map: - return v.Len() == 0 - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Struct: - vt := v.Type() - for i := v.NumField() - 1; i >= 0; i-- { - if vt.Field(i).PkgPath != "" { - continue // Private field - } - if !isZero(v.Field(i)) { - return false - } - } - return true - } - return false -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yamlh.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yamlh.go deleted file mode 100644 index 40c74de49..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yamlh.go +++ /dev/null @@ -1,809 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -import ( - "fmt" - "io" -) - -// The version directive data. -type yaml_version_directive_t struct { - major int8 // The major version number. - minor int8 // The minor version number. -} - -// The tag directive data. -type yaml_tag_directive_t struct { - handle []byte // The tag handle. - prefix []byte // The tag prefix. -} - -type yaml_encoding_t int - -// The stream encoding. -const ( - // Let the parser choose the encoding. - yaml_ANY_ENCODING yaml_encoding_t = iota - - yaml_UTF8_ENCODING // The default UTF-8 encoding. - yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. - yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. -) - -type yaml_break_t int - -// Line break types. -const ( - // Let the parser choose the break type. - yaml_ANY_BREAK yaml_break_t = iota - - yaml_CR_BREAK // Use CR for line breaks (Mac style). - yaml_LN_BREAK // Use LN for line breaks (Unix style). 
- yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). -) - -type yaml_error_type_t int - -// Many bad things could happen with the parser and emitter. -const ( - // No error is produced. - yaml_NO_ERROR yaml_error_type_t = iota - - yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. - yaml_READER_ERROR // Cannot read or decode the input stream. - yaml_SCANNER_ERROR // Cannot scan the input stream. - yaml_PARSER_ERROR // Cannot parse the input stream. - yaml_COMPOSER_ERROR // Cannot compose a YAML document. - yaml_WRITER_ERROR // Cannot write to the output stream. - yaml_EMITTER_ERROR // Cannot emit a YAML stream. -) - -// The pointer position. -type yaml_mark_t struct { - index int // The position index. - line int // The position line. - column int // The position column. -} - -// Node Styles - -type yaml_style_t int8 - -type yaml_scalar_style_t yaml_style_t - -// Scalar styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0 - - yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style. - yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. - yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. - yaml_LITERAL_SCALAR_STYLE // The literal scalar style. - yaml_FOLDED_SCALAR_STYLE // The folded scalar style. -) - -type yaml_sequence_style_t yaml_style_t - -// Sequence styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota - - yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. - yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. -) - -type yaml_mapping_style_t yaml_style_t - -// Mapping styles. -const ( - // Let the emitter choose the style. - yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota - - yaml_BLOCK_MAPPING_STYLE // The block mapping style. - yaml_FLOW_MAPPING_STYLE // The flow mapping style. -) - -// Tokens - -type yaml_token_type_t int - -// Token types. -const ( - // An empty token. - yaml_NO_TOKEN yaml_token_type_t = iota - - yaml_STREAM_START_TOKEN // A STREAM-START token. - yaml_STREAM_END_TOKEN // A STREAM-END token. - - yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. - yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. - yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. - yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. - - yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. - yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. - yaml_BLOCK_END_TOKEN // A BLOCK-END token. - - yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. - yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. - yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. - yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. - - yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. - yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. - yaml_KEY_TOKEN // A KEY token. - yaml_VALUE_TOKEN // A VALUE token. - - yaml_ALIAS_TOKEN // An ALIAS token. - yaml_ANCHOR_TOKEN // An ANCHOR token. - yaml_TAG_TOKEN // A TAG token. - yaml_SCALAR_TOKEN // A SCALAR token. 
-) - -func (tt yaml_token_type_t) String() string { - switch tt { - case yaml_NO_TOKEN: - return "yaml_NO_TOKEN" - case yaml_STREAM_START_TOKEN: - return "yaml_STREAM_START_TOKEN" - case yaml_STREAM_END_TOKEN: - return "yaml_STREAM_END_TOKEN" - case yaml_VERSION_DIRECTIVE_TOKEN: - return "yaml_VERSION_DIRECTIVE_TOKEN" - case yaml_TAG_DIRECTIVE_TOKEN: - return "yaml_TAG_DIRECTIVE_TOKEN" - case yaml_DOCUMENT_START_TOKEN: - return "yaml_DOCUMENT_START_TOKEN" - case yaml_DOCUMENT_END_TOKEN: - return "yaml_DOCUMENT_END_TOKEN" - case yaml_BLOCK_SEQUENCE_START_TOKEN: - return "yaml_BLOCK_SEQUENCE_START_TOKEN" - case yaml_BLOCK_MAPPING_START_TOKEN: - return "yaml_BLOCK_MAPPING_START_TOKEN" - case yaml_BLOCK_END_TOKEN: - return "yaml_BLOCK_END_TOKEN" - case yaml_FLOW_SEQUENCE_START_TOKEN: - return "yaml_FLOW_SEQUENCE_START_TOKEN" - case yaml_FLOW_SEQUENCE_END_TOKEN: - return "yaml_FLOW_SEQUENCE_END_TOKEN" - case yaml_FLOW_MAPPING_START_TOKEN: - return "yaml_FLOW_MAPPING_START_TOKEN" - case yaml_FLOW_MAPPING_END_TOKEN: - return "yaml_FLOW_MAPPING_END_TOKEN" - case yaml_BLOCK_ENTRY_TOKEN: - return "yaml_BLOCK_ENTRY_TOKEN" - case yaml_FLOW_ENTRY_TOKEN: - return "yaml_FLOW_ENTRY_TOKEN" - case yaml_KEY_TOKEN: - return "yaml_KEY_TOKEN" - case yaml_VALUE_TOKEN: - return "yaml_VALUE_TOKEN" - case yaml_ALIAS_TOKEN: - return "yaml_ALIAS_TOKEN" - case yaml_ANCHOR_TOKEN: - return "yaml_ANCHOR_TOKEN" - case yaml_TAG_TOKEN: - return "yaml_TAG_TOKEN" - case yaml_SCALAR_TOKEN: - return "yaml_SCALAR_TOKEN" - } - return "" -} - -// The token structure. -type yaml_token_t struct { - // The token type. - typ yaml_token_type_t - - // The start/end of the token. - start_mark, end_mark yaml_mark_t - - // The stream encoding (for yaml_STREAM_START_TOKEN). - encoding yaml_encoding_t - - // The alias/anchor/scalar value or tag/tag directive handle - // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). - value []byte - - // The tag suffix (for yaml_TAG_TOKEN). - suffix []byte - - // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). - prefix []byte - - // The scalar style (for yaml_SCALAR_TOKEN). - style yaml_scalar_style_t - - // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). - major, minor int8 -} - -// Events - -type yaml_event_type_t int8 - -// Event types. -const ( - // An empty event. - yaml_NO_EVENT yaml_event_type_t = iota - - yaml_STREAM_START_EVENT // A STREAM-START event. - yaml_STREAM_END_EVENT // A STREAM-END event. - yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. - yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. - yaml_ALIAS_EVENT // An ALIAS event. - yaml_SCALAR_EVENT // A SCALAR event. - yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. - yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. - yaml_MAPPING_START_EVENT // A MAPPING-START event. - yaml_MAPPING_END_EVENT // A MAPPING-END event. 
- yaml_TAIL_COMMENT_EVENT -) - -var eventStrings = []string{ - yaml_NO_EVENT: "none", - yaml_STREAM_START_EVENT: "stream start", - yaml_STREAM_END_EVENT: "stream end", - yaml_DOCUMENT_START_EVENT: "document start", - yaml_DOCUMENT_END_EVENT: "document end", - yaml_ALIAS_EVENT: "alias", - yaml_SCALAR_EVENT: "scalar", - yaml_SEQUENCE_START_EVENT: "sequence start", - yaml_SEQUENCE_END_EVENT: "sequence end", - yaml_MAPPING_START_EVENT: "mapping start", - yaml_MAPPING_END_EVENT: "mapping end", - yaml_TAIL_COMMENT_EVENT: "tail comment", -} - -func (e yaml_event_type_t) String() string { - if e < 0 || int(e) >= len(eventStrings) { - return fmt.Sprintf("unknown event %d", e) - } - return eventStrings[e] -} - -// The event structure. -type yaml_event_t struct { - - // The event type. - typ yaml_event_type_t - - // The start and end of the event. - start_mark, end_mark yaml_mark_t - - // The document encoding (for yaml_STREAM_START_EVENT). - encoding yaml_encoding_t - - // The version directive (for yaml_DOCUMENT_START_EVENT). - version_directive *yaml_version_directive_t - - // The list of tag directives (for yaml_DOCUMENT_START_EVENT). - tag_directives []yaml_tag_directive_t - - // The comments - head_comment []byte - line_comment []byte - foot_comment []byte - tail_comment []byte - - // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). - anchor []byte - - // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - tag []byte - - // The scalar value (for yaml_SCALAR_EVENT). - value []byte - - // Is the document start/end indicator implicit, or the tag optional? - // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). - implicit bool - - // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). - quoted_implicit bool - - // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - style yaml_style_t -} - -func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } -func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } -func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } - -// Nodes - -const ( - yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. - yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. - yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. - yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. - yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. - yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. - - yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. - yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. - - // Not in original libyaml. - yaml_BINARY_TAG = "tag:yaml.org,2002:binary" - yaml_MERGE_TAG = "tag:yaml.org,2002:merge" - - yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. - yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. - yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. -) - -type yaml_node_type_t int - -// Node types. -const ( - // An empty node. 
- yaml_NO_NODE yaml_node_type_t = iota - - yaml_SCALAR_NODE // A scalar node. - yaml_SEQUENCE_NODE // A sequence node. - yaml_MAPPING_NODE // A mapping node. -) - -// An element of a sequence node. -type yaml_node_item_t int - -// An element of a mapping node. -type yaml_node_pair_t struct { - key int // The key of the element. - value int // The value of the element. -} - -// The node structure. -type yaml_node_t struct { - typ yaml_node_type_t // The node type. - tag []byte // The node tag. - - // The node data. - - // The scalar parameters (for yaml_SCALAR_NODE). - scalar struct { - value []byte // The scalar value. - length int // The length of the scalar value. - style yaml_scalar_style_t // The scalar style. - } - - // The sequence parameters (for YAML_SEQUENCE_NODE). - sequence struct { - items_data []yaml_node_item_t // The stack of sequence items. - style yaml_sequence_style_t // The sequence style. - } - - // The mapping parameters (for yaml_MAPPING_NODE). - mapping struct { - pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). - pairs_start *yaml_node_pair_t // The beginning of the stack. - pairs_end *yaml_node_pair_t // The end of the stack. - pairs_top *yaml_node_pair_t // The top of the stack. - style yaml_mapping_style_t // The mapping style. - } - - start_mark yaml_mark_t // The beginning of the node. - end_mark yaml_mark_t // The end of the node. - -} - -// The document structure. -type yaml_document_t struct { - - // The document nodes. - nodes []yaml_node_t - - // The version directive. - version_directive *yaml_version_directive_t - - // The list of tag directives. - tag_directives_data []yaml_tag_directive_t - tag_directives_start int // The beginning of the tag directives list. - tag_directives_end int // The end of the tag directives list. - - start_implicit int // Is the document start indicator implicit? - end_implicit int // Is the document end indicator implicit? - - // The start/end of the document. - start_mark, end_mark yaml_mark_t -} - -// The prototype of a read handler. -// -// The read handler is called when the parser needs to read more bytes from the -// source. The handler should write not more than size bytes to the buffer. -// The number of written bytes should be set to the size_read variable. -// -// [in,out] data A pointer to an application data specified by -// yaml_parser_set_input(). -// [out] buffer The buffer to write the data from the source. -// [in] size The size of the buffer. -// [out] size_read The actual number of bytes read from the source. -// -// On success, the handler should return 1. If the handler failed, -// the returned value should be 0. On EOF, the handler should set the -// size_read to 0 and return 1. -type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) - -// This structure holds information about a potential simple key. -type yaml_simple_key_t struct { - possible bool // Is a simple key possible? - required bool // Is a simple key required? - token_number int // The number of the token. - mark yaml_mark_t // The position mark. -} - -// The states of the parser. -type yaml_parser_state_t int - -const ( - yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota - - yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. - yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. - yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. 
- yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. - yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. - yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. - yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. - yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. - yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. - yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. - yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. - yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. - yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. - yaml_PARSE_END_STATE // Expect nothing. -) - -func (ps yaml_parser_state_t) String() string { - switch ps { - case yaml_PARSE_STREAM_START_STATE: - return "yaml_PARSE_STREAM_START_STATE" - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_START_STATE: - return "yaml_PARSE_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return "yaml_PARSE_DOCUMENT_CONTENT_STATE" - case yaml_PARSE_DOCUMENT_END_STATE: - return "yaml_PARSE_DOCUMENT_END_STATE" - case yaml_PARSE_BLOCK_NODE_STATE: - return "yaml_PARSE_BLOCK_NODE_STATE" - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" - case yaml_PARSE_FLOW_NODE_STATE: - return "yaml_PARSE_FLOW_NODE_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" - case 
yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" - case yaml_PARSE_END_STATE: - return "yaml_PARSE_END_STATE" - } - return "" -} - -// This structure holds aliases data. -type yaml_alias_data_t struct { - anchor []byte // The anchor. - index int // The node id. - mark yaml_mark_t // The anchor mark. -} - -// The parser structure. -// -// All members are internal. Manage the structure using the -// yaml_parser_ family of functions. -type yaml_parser_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - - problem string // Error description. - - // The byte about which the problem occurred. - problem_offset int - problem_value int - problem_mark yaml_mark_t - - // The error context. - context string - context_mark yaml_mark_t - - // Reader stuff - - read_handler yaml_read_handler_t // Read handler. - - input_reader io.Reader // File input data. - input []byte // String input data. - input_pos int - - eof bool // EOF flag - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - unread int // The number of unread characters in the buffer. - - newlines int // The number of line breaks since last non-break/non-blank character - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The input encoding. - - offset int // The offset of the current position (in bytes). - mark yaml_mark_t // The mark of the current position. - - // Comments - - head_comment []byte // The current head comments - line_comment []byte // The current line comments - foot_comment []byte // The current foot comments - tail_comment []byte // Foot comment that happens at the end of a block. - stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc) - - comments []yaml_comment_t // The folded comments for all parsed tokens - comments_head int - - // Scanner stuff - - stream_start_produced bool // Have we started to scan the input stream? - stream_end_produced bool // Have we reached the end of the input stream? - - flow_level int // The number of unclosed '[' and '{' indicators. - - tokens []yaml_token_t // The tokens queue. - tokens_head int // The head of the tokens queue. - tokens_parsed int // The number of tokens fetched from the queue. - token_available bool // Does the tokens queue contain a token ready for dequeueing. - - indent int // The current indentation level. - indents []int // The indentation levels stack. - - simple_key_allowed bool // May a simple key occur at the current position? - simple_keys []yaml_simple_key_t // The stack of simple keys. - simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number - - // Parser stuff - - state yaml_parser_state_t // The current parser state. - states []yaml_parser_state_t // The parser states stack. - marks []yaml_mark_t // The stack of marks. - tag_directives []yaml_tag_directive_t // The list of TAG directives. - - // Dumper stuff - - aliases []yaml_alias_data_t // The alias data. - - document *yaml_document_t // The currently parsed document. 
-} - -type yaml_comment_t struct { - - scan_mark yaml_mark_t // Position where scanning for comments started - token_mark yaml_mark_t // Position after which tokens will be associated with this comment - start_mark yaml_mark_t // Position of '#' comment mark - end_mark yaml_mark_t // Position where comment terminated - - head []byte - line []byte - foot []byte -} - -// Emitter Definitions - -// The prototype of a write handler. -// -// The write handler is called when the emitter needs to flush the accumulated -// characters to the output. The handler should write @a size bytes of the -// @a buffer to the output. -// -// @param[in,out] data A pointer to an application data specified by -// yaml_emitter_set_output(). -// @param[in] buffer The buffer with bytes to be written. -// @param[in] size The size of the buffer. -// -// @returns On success, the handler should return @c 1. If the handler failed, -// the returned value should be @c 0. -// -type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error - -type yaml_emitter_state_t int - -// The emitter states. -const ( - // Expect STREAM-START. - yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota - - yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. - yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out - yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. - yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out - yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. - yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. - yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. - yaml_EMIT_END_STATE // Expect nothing. -) - -// The emitter structure. -// -// All members are internal. Manage the structure using the @c yaml_emitter_ -// family of functions. -type yaml_emitter_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - problem string // Error description. - - // Writer stuff - - write_handler yaml_write_handler_t // Write handler. - - output_buffer *[]byte // String output data. - output_writer io.Writer // File output data. - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The stream encoding. - - // Emitter stuff - - canonical bool // If the output is in the canonical style? 
- best_indent int // The number of indentation spaces. - best_width int // The preferred width of the output lines. - unicode bool // Allow unescaped non-ASCII characters? - line_break yaml_break_t // The preferred line break. - - state yaml_emitter_state_t // The current emitter state. - states []yaml_emitter_state_t // The stack of states. - - events []yaml_event_t // The event queue. - events_head int // The head of the event queue. - - indents []int // The stack of indentation levels. - - tag_directives []yaml_tag_directive_t // The list of tag directives. - - indent int // The current indentation level. - - compact_sequence_indent bool // Is '- ' is considered part of the indentation for sequence elements? - - flow_level int // The current flow level. - - root_context bool // Is it the document root context? - sequence_context bool // Is it a sequence context? - mapping_context bool // Is it a mapping context? - simple_key_context bool // Is it a simple mapping key context? - - line int // The current line. - column int // The current column. - whitespace bool // If the last character was a whitespace? - indention bool // If the last character was an indentation character (' ', '-', '?', ':')? - open_ended bool // If an explicit document end is required? - - space_above bool // Is there's an empty line above? - foot_indent int // The indent used to write the foot comment above, or -1 if none. - - // Anchor analysis. - anchor_data struct { - anchor []byte // The anchor value. - alias bool // Is it an alias? - } - - // Tag analysis. - tag_data struct { - handle []byte // The tag handle. - suffix []byte // The tag suffix. - } - - // Scalar analysis. - scalar_data struct { - value []byte // The scalar value. - multiline bool // Does the scalar contain line breaks? - flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? - block_plain_allowed bool // Can the scalar be expressed in the block plain style? - single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? - block_allowed bool // Can the scalar be expressed in the literal or folded styles? - style yaml_scalar_style_t // The output style. - } - - // Comments - head_comment []byte - line_comment []byte - foot_comment []byte - tail_comment []byte - - key_line_comment []byte - - // Dumper stuff - - opened bool // If the stream was already opened? - closed bool // If the stream was already closed? - - // The information associated with the document nodes. - anchors *struct { - references int // The number of references. - anchor int // The anchor id. - serialized bool // If the node has been emitted? - } - - last_anchor_id int // The last assigned anchor id. - - document *yaml_document_t // The currently emitted document. 
-} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yamlprivateh.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yamlprivateh.go deleted file mode 100644 index e88f9c54a..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml/yamlprivateh.go +++ /dev/null @@ -1,198 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -const ( - // The size of the input raw buffer. - input_raw_buffer_size = 512 - - // The size of the input buffer. - // It should be possible to decode the whole raw buffer. - input_buffer_size = input_raw_buffer_size * 3 - - // The size of the output buffer. - output_buffer_size = 128 - - // The size of the output raw buffer. - // It should be possible to encode the whole output buffer. - output_raw_buffer_size = (output_buffer_size*2 + 2) - - // The size of other stacks and queues. - initial_stack_size = 16 - initial_queue_size = 16 - initial_string_size = 16 -) - -// Check if the character at the specified position is an alphabetical -// character, a digit, '_', or '-'. -func is_alpha(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' -} - -// Check if the character at the specified position is a digit. -func is_digit(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' -} - -// Get the value of a digit. -func as_digit(b []byte, i int) int { - return int(b[i]) - '0' -} - -// Check if the character at the specified position is a hex-digit. -func is_hex(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' -} - -// Get the value of a hex-digit. -func as_hex(b []byte, i int) int { - bi := b[i] - if bi >= 'A' && bi <= 'F' { - return int(bi) - 'A' + 10 - } - if bi >= 'a' && bi <= 'f' { - return int(bi) - 'a' + 10 - } - return int(bi) - '0' -} - -// Check if the character is ASCII. -func is_ascii(b []byte, i int) bool { - return b[i] <= 0x7F -} - -// Check if the character at the start of the buffer can be printed unescaped. -func is_printable(b []byte, i int) bool { - return ((b[i] == 0x0A) || // . == #x0A - (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E - (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . 
<= #xD7FF - (b[i] > 0xC2 && b[i] < 0xED) || - (b[i] == 0xED && b[i+1] < 0xA0) || - (b[i] == 0xEE) || - (b[i] == 0xEF && // #xE000 <= . <= #xFFFD - !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF - !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) -} - -// Check if the character at the specified position is NUL. -func is_z(b []byte, i int) bool { - return b[i] == 0x00 -} - -// Check if the beginning of the buffer is a BOM. -func is_bom(b []byte, i int) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -// Check if the character at the specified position is space. -func is_space(b []byte, i int) bool { - return b[i] == ' ' -} - -// Check if the character at the specified position is tab. -func is_tab(b []byte, i int) bool { - return b[i] == '\t' -} - -// Check if the character at the specified position is blank (space or tab). -func is_blank(b []byte, i int) bool { - //return is_space(b, i) || is_tab(b, i) - return b[i] == ' ' || b[i] == '\t' -} - -// Check if the character at the specified position is a line break. -func is_break(b []byte, i int) bool { - return (b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) -} - -func is_crlf(b []byte, i int) bool { - return b[i] == '\r' && b[i+1] == '\n' -} - -// Check if the character is a line break or NUL. -func is_breakz(b []byte, i int) bool { - //return is_break(b, i) || is_z(b, i) - return ( - // is_break: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - // is_z: - b[i] == 0) -} - -// Check if the character is a line break, space, or NUL. -func is_spacez(b []byte, i int) bool { - //return is_space(b, i) || is_breakz(b, i) - return ( - // is_space: - b[i] == ' ' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Check if the character is a line break, space, tab, or NUL. -func is_blankz(b []byte, i int) bool { - //return is_blank(b, i) || is_breakz(b, i) - return ( - // is_blank: - b[i] == ' ' || b[i] == '\t' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Determine the width of the character. -func width(b byte) int { - // Don't replace these by a switch without first - // confirming that it is being inlined. 
- if b&0x80 == 0x00 { - return 1 - } - if b&0xE0 == 0xC0 { - return 2 - } - if b&0xF0 == 0xE0 { - return 3 - } - if b&0xF8 == 0xF0 { - return 4 - } - return 0 - -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/LICENSE b/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/LICENSE deleted file mode 100644 index 31f292dce..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2018 QRI, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/doc.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/doc.go deleted file mode 100644 index 035b47921..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/doc.go +++ /dev/null @@ -1,25 +0,0 @@ -// The MIT License (MIT) - -// Copyright (c) 2018 QRI, Inc. - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package util is forked from https://github.com/qri-io/starlib in order to prune -// excessive transitive dependencies from pulling in that library. 
-package util diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/util.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/util.go deleted file mode 100644 index 96b4a9aea..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/qri-io/starlib/util/util.go +++ /dev/null @@ -1,273 +0,0 @@ -// The MIT License (MIT) - -// Copyright (c) 2018 QRI, Inc. - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package util - -import ( - "fmt" - - "github.com/pkg/errors" - "go.starlark.net/starlark" - "go.starlark.net/starlarkstruct" -) - -// // asString unquotes a starlark string value -// func asString(x starlark.Value) (string, error) { -// return strconv.Unquote(x.String()) -// } - -// IsEmptyString checks if a starlark string is empty ("" for a go string) -// starlark.String.String performs repr-style quotation, which is necessary -// for the starlark.Value contract but a frequent source of errors in API -// clients.
This helper method makes sure it'll work properly -func IsEmptyString(s starlark.String) bool { - return s.String() == `""` -} - -// Unmarshal decodes a starlark.Value into it's golang counterpart -//nolint:nakedret -func Unmarshal(x starlark.Value) (val interface{}, err error) { - switch v := x.(type) { - case starlark.NoneType: - val = nil - case starlark.Bool: - val = v.Truth() == starlark.True - case starlark.Int: - val, err = starlark.AsInt32(x) - case starlark.Float: - if f, ok := starlark.AsFloat(x); !ok { - err = fmt.Errorf("couldn't parse float") - } else { - val = f - } - case starlark.String: - val = v.GoString() - // case starlibtime.Time: - // val = time.Time(v) - case *starlark.Dict: - var ( - dictVal starlark.Value - pval interface{} - kval interface{} - keys []interface{} - vals []interface{} - // key as interface if found one key is not a string - ki bool - ) - - for _, k := range v.Keys() { - dictVal, _, err = v.Get(k) - if err != nil { - return - } - - pval, err = Unmarshal(dictVal) - if err != nil { - err = fmt.Errorf("unmarshaling starlark value: %w", err) - return - } - - kval, err = Unmarshal(k) - if err != nil { - err = fmt.Errorf("unmarshaling starlark key: %w", err) - return - } - - if _, ok := kval.(string); !ok { - // found key as not a string - ki = true - } - - keys = append(keys, kval) - vals = append(vals, pval) - } - - // prepare result - - rs := map[string]interface{}{} - ri := map[interface{}]interface{}{} - - for i, key := range keys { - // key as interface - if ki { - ri[key] = vals[i] - } else { - rs[key.(string)] = vals[i] - } - } - - if ki { - val = ri // map[interface{}]interface{} - } else { - val = rs // map[string]interface{} - } - case *starlark.List: - var ( - i int - listVal starlark.Value - iter = v.Iterate() - value = make([]interface{}, v.Len()) - ) - - defer iter.Done() - for iter.Next(&listVal) { - value[i], err = Unmarshal(listVal) - if err != nil { - return - } - i++ - } - val = value - case starlark.Tuple: - var ( - i int - tupleVal starlark.Value - iter = v.Iterate() - value = make([]interface{}, v.Len()) - ) - - defer iter.Done() - for iter.Next(&tupleVal) { - value[i], err = Unmarshal(tupleVal) - if err != nil { - return - } - i++ - } - val = value - case *starlark.Set: - fmt.Println("errnotdone: SET") - err = fmt.Errorf("sets aren't yet supported") - case *starlarkstruct.Struct: - if _var, ok := v.Constructor().(Unmarshaler); ok { - err = _var.UnmarshalStarlark(x) - if err != nil { - err = errors.Wrapf(err, "failed marshal %q to Starlark object", v.Constructor().Type()) - return - } - val = _var - } else { - err = fmt.Errorf("constructor object from *starlarkstruct.Struct not supported Marshaler to starlark object: %s", v.Constructor().Type()) - } - default: - fmt.Println("errbadtype:", x.Type()) - err = fmt.Errorf("unrecognized starlark type: %s", x.Type()) - } - return -} - -// Marshal turns go values into starlark types -//nolint:nakedret -func Marshal(data interface{}) (v starlark.Value, err error) { - switch x := data.(type) { - case nil: - v = starlark.None - case bool: - v = starlark.Bool(x) - case string: - v = starlark.String(x) - case int: - v = starlark.MakeInt(x) - case int8: - v = starlark.MakeInt(int(x)) - case int16: - v = starlark.MakeInt(int(x)) - case int32: - v = starlark.MakeInt(int(x)) - case int64: - v = starlark.MakeInt64(x) - case uint: - v = starlark.MakeUint(x) - case uint8: - v = starlark.MakeUint(uint(x)) - case uint16: - v = starlark.MakeUint(uint(x)) - case uint32: - v = starlark.MakeUint(uint(x)) - 
case uint64: - v = starlark.MakeUint64(x) - case float32: - v = starlark.Float(float64(x)) - case float64: - v = starlark.Float(x) - // case time.Time: - // v = starlibtime.Time(x) - case []interface{}: - var elems = make([]starlark.Value, len(x)) - for i, val := range x { - elems[i], err = Marshal(val) - if err != nil { - return - } - } - v = starlark.NewList(elems) - case map[interface{}]interface{}: - dict := &starlark.Dict{} - var elem starlark.Value - for ki, val := range x { - var key starlark.Value - key, err = Marshal(ki) - if err != nil { - return - } - - elem, err = Marshal(val) - if err != nil { - return - } - if err = dict.SetKey(key, elem); err != nil { - return - } - } - v = dict - case map[string]interface{}: - dict := &starlark.Dict{} - var elem starlark.Value - for key, val := range x { - elem, err = Marshal(val) - if err != nil { - return - } - if err = dict.SetKey(starlark.String(key), elem); err != nil { - return - } - } - v = dict - case Marshaler: - v, err = x.MarshalStarlark() - default: - return starlark.None, fmt.Errorf("unrecognized type: %#v", x) - } - return -} - -// Unmarshaler is the interface used to unmarshal starlark custom types. -type Unmarshaler interface { - // UnmarshalStarlark unmarshals a starlark object to a custom type. - UnmarshalStarlark(starlark.Value) error -} - -// Marshaler is the interface used to marshal starlark custom types. -type Marshaler interface { - // MarshalStarlark marshals a custom type to a starlark object. - MarshalStarlark() (starlark.Value, error) -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/byteio_reader.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/byteio_reader.go deleted file mode 100644 index 230ab891b..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/byteio_reader.go +++ /dev/null @@ -1,349 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package kio - -import ( - "bytes" - "fmt" - "io" - "regexp" - "sort" - "strings" - - "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/kio/kioutil" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -const ( - ResourceListKind = "ResourceList" - ResourceListAPIVersion = "config.kubernetes.io/v1" -) - -// ByteReadWriter reads from an input and writes to an output. -type ByteReadWriter struct { - // Reader is where ResourceNodes are decoded from. - Reader io.Reader - - // Writer is where ResourceNodes are encoded. - Writer io.Writer - - // OmitReaderAnnotations will configure Read to skip setting the config.kubernetes.io/index - // annotation on Resources as they are Read. - OmitReaderAnnotations bool - - // KeepReaderAnnotations if set will keep the Reader specific annotations when writing - // the Resources, otherwise they will be cleared. - KeepReaderAnnotations bool - - // PreserveSeqIndent if true adds kioutil.SeqIndentAnnotation to each resource - PreserveSeqIndent bool - - // Style is a style that is set on the Resource Node Document.
- Style yaml.Style - - // WrapBareSeqNode wraps the bare sequence node document with map node, - // kyaml uses reader annotations to track resources, it is not possible to - // add them to bare sequence nodes, this option enables wrapping such bare - // sequence nodes into map node with key yaml.BareSeqNodeWrappingKey - // note that this wrapping is different and not related to ResourceList wrapping - WrapBareSeqNode bool - - FunctionConfig *yaml.RNode - - Results *yaml.RNode - - NoWrap bool - WrappingAPIVersion string - WrappingKind string -} - -func (rw *ByteReadWriter) Read() ([]*yaml.RNode, error) { - b := &ByteReader{ - Reader: rw.Reader, - OmitReaderAnnotations: rw.OmitReaderAnnotations, - PreserveSeqIndent: rw.PreserveSeqIndent, - WrapBareSeqNode: rw.WrapBareSeqNode, - } - val, err := b.Read() - rw.Results = b.Results - - if rw.FunctionConfig == nil { - rw.FunctionConfig = b.FunctionConfig - } - if !rw.NoWrap && rw.WrappingKind == "" { - rw.WrappingAPIVersion = b.WrappingAPIVersion - rw.WrappingKind = b.WrappingKind - } - return val, errors.Wrap(err) -} - -func (rw *ByteReadWriter) Write(nodes []*yaml.RNode) error { - w := ByteWriter{ - Writer: rw.Writer, - KeepReaderAnnotations: rw.KeepReaderAnnotations, - Style: rw.Style, - FunctionConfig: rw.FunctionConfig, - Results: rw.Results, - } - if !rw.NoWrap { - w.WrappingAPIVersion = rw.WrappingAPIVersion - w.WrappingKind = rw.WrappingKind - } - return w.Write(nodes) -} - -// ParseAll reads all of the inputs into resources -func ParseAll(inputs ...string) ([]*yaml.RNode, error) { - return (&ByteReader{ - Reader: bytes.NewBufferString(strings.Join(inputs, "\n---\n")), - }).Read() -} - -// FromBytes reads from a byte slice. -func FromBytes(bs []byte) ([]*yaml.RNode, error) { - return (&ByteReader{ - OmitReaderAnnotations: true, - AnchorsAweigh: true, - Reader: bytes.NewBuffer(bs), - }).Read() -} - -// StringAll writes all of the resources to a string -func StringAll(resources []*yaml.RNode) (string, error) { - var b bytes.Buffer - err := (&ByteWriter{Writer: &b}).Write(resources) - return b.String(), err -} - -// ByteReader decodes ResourceNodes from bytes. -// By default, Read will set the config.kubernetes.io/index annotation on each RNode as it -// is read so they can be written back in the same order. -type ByteReader struct { - // Reader is where ResourceNodes are decoded from. - Reader io.Reader - - // OmitReaderAnnotations will configures Read to skip setting the config.kubernetes.io/index - // and internal.config.kubernetes.io/seqindent annotations on Resources as they are Read. - OmitReaderAnnotations bool - - // PreserveSeqIndent if true adds kioutil.SeqIndentAnnotation to each resource - PreserveSeqIndent bool - - // SetAnnotations is a map of caller specified annotations to set on resources as they are read - // These are independent of the annotations controlled by OmitReaderAnnotations - SetAnnotations map[string]string - - FunctionConfig *yaml.RNode - - Results *yaml.RNode - - // DisableUnwrapping prevents Resources in Lists and ResourceLists from being unwrapped - DisableUnwrapping bool - - // WrappingAPIVersion is set by Read(), and is the apiVersion of the object that - // the read objects were originally wrapped in. - WrappingAPIVersion string - - // WrappingKind is set by Read(), and is the kind of the object that - // the read objects were originally wrapped in. 
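The package-level helpers above (ParseAll, FromBytes, StringAll) are thin wrappers over ByteReader and ByteWriter. A minimal round-trip sketch, assuming the public sigs.k8s.io/kustomize/kyaml/kio API (the resource contents here are invented for illustration, and this snippet is not part of the patch):

package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/kyaml/kio"
)

func main() {
	// ParseAll joins the inputs with "---" separators and decodes each
	// document into an RNode via ByteReader.
	nodes, err := kio.ParseAll(
		"apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: cm-a\n",
		"apiVersion: v1\nkind: Service\nmetadata:\n  name: svc-b\n",
	)
	if err != nil {
		panic(err)
	}
	for _, n := range nodes {
		fmt.Println(n.GetKind(), n.GetName())
	}

	// StringAll writes the resources back out through ByteWriter,
	// separated by "---".
	out, err := kio.StringAll(nodes)
	if err != nil {
		panic(err)
	}
	fmt.Print(out)
}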
- WrappingKind string - - // WrapBareSeqNode wraps the bare sequence node document with map node, - // kyaml uses reader annotations to track resources, it is not possible to - // add them to bare sequence nodes, this option enables wrapping such bare - // sequence nodes into map node with key yaml.BareSeqNodeWrappingKey - // note that this wrapping is different and not related to ResourceList wrapping - WrapBareSeqNode bool - - // AnchorsAweigh set to true attempts to replace all YAML anchor aliases - // with their definitions (anchor values) immediately after the read. - AnchorsAweigh bool -} - -var _ Reader = &ByteReader{} - -// splitDocuments returns a slice of all documents contained in a YAML string. Multiple documents can be divided by the -// YAML document separator (---). It allows for white space and comments to be after the separator on the same line, -// but will return an error if anything else is on the line. -func splitDocuments(s string) ([]string, error) { - docs := make([]string, 0) - if len(s) > 0 { - // The YAML document separator is any line that starts with --- - yamlSeparatorRegexp := regexp.MustCompile(`\n---.*\n`) - - // Find all separators, check them for invalid content, and append each document to docs - separatorLocations := yamlSeparatorRegexp.FindAllStringIndex(s, -1) - prev := 0 - for i := range separatorLocations { - loc := separatorLocations[i] - separator := s[loc[0]:loc[1]] - - // If the next non-whitespace character on the line following the separator is not a comment, return an error - trimmedContentAfterSeparator := strings.TrimSpace(separator[4:]) - if len(trimmedContentAfterSeparator) > 0 && trimmedContentAfterSeparator[0] != '#' { - return nil, errors.Errorf("invalid document separator: %s", strings.TrimSpace(separator)) - } - - docs = append(docs, s[prev:loc[0]]) - prev = loc[1] - } - docs = append(docs, s[prev:]) - } - - return docs, nil -} - -func (r *ByteReader) Read() ([]*yaml.RNode, error) { - if r.PreserveSeqIndent && r.OmitReaderAnnotations { - return nil, errors.Errorf(`"PreserveSeqIndent" option adds a reader annotation, please set "OmitReaderAnnotations" to false`) - } - - output := ResourceNodeSlice{} - - // by manually splitting resources -- otherwise the decoder will get the Resource - // boundaries wrong for header comments. - input := &bytes.Buffer{} - _, err := io.Copy(input, r.Reader) - if err != nil { - return nil, errors.Wrap(err) - } - - // Replace the ending \r\n (line ending used in windows) with \n and then split it into multiple YAML documents - // if it contains document separators (---) - values, err := splitDocuments(strings.ReplaceAll(input.String(), "\r\n", "\n")) - if err != nil { - return nil, errors.Wrap(err) - } - - index := 0 - for i := range values { - // the Split used above will eat the tail '\n' from each resource. This may affect the - // literal string value since '\n' is meaningful in it. 
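The separator handling in splitDocuments above is the subtle part: a document break is any line starting with ---, and only whitespace or a comment may trail it on that line. A standalone restatement of that rule, small enough to experiment with (this is a sketch, not the vendored function itself):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// splitDocs mirrors the rule described above: lines beginning with "---"
// split documents, and only whitespace or a "#" comment may follow the
// separator on the same line.
func splitDocs(s string) ([]string, error) {
	sep := regexp.MustCompile(`\n---.*\n`)
	var docs []string
	prev := 0
	for _, loc := range sep.FindAllStringIndex(s, -1) {
		line := s[loc[0]:loc[1]]
		rest := strings.TrimSpace(line[4:]) // text after "\n---"
		if rest != "" && rest[0] != '#' {
			return nil, fmt.Errorf("invalid document separator: %s", strings.TrimSpace(line))
		}
		docs = append(docs, s[prev:loc[0]])
		prev = loc[1]
	}
	return append(docs, s[prev:]), nil
}

func main() {
	docs, err := splitDocs("a: 1\n--- # second document\nb: 2\n")
	fmt.Println(len(docs), err) // 2 <nil>

	_, err = splitDocs("a: 1\n--- not allowed\nb: 2\n")
	fmt.Println(err) // invalid document separator: --- not allowed
}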
- if i != len(values)-1 { - values[i] += "\n" - } - decoder := yaml.NewDecoder(bytes.NewBufferString(values[i])) - node, err := r.decode(values[i], index, decoder) - if err == io.EOF { - continue - } - - if err != nil { - return nil, errors.Wrap(err) - } - if yaml.IsMissingOrNull(node) { - // empty value - continue - } - - // ok if no metadata -- assume not an InputList - meta, err := node.GetMeta() - if err != yaml.ErrMissingMetadata && err != nil { - return nil, errors.WrapPrefixf(err, "[%d]", i) - } - - // the elements are wrapped in an InputList, unwrap them - // don't check apiVersion, we haven't standardized on the domain - if !r.DisableUnwrapping && - len(values) == 1 && // Only unwrap if there is only 1 value - (meta.Kind == ResourceListKind || meta.Kind == "List") && - (node.Field("items") != nil || node.Field("functionConfig") != nil) { - r.WrappingKind = meta.Kind - r.WrappingAPIVersion = meta.APIVersion - - // unwrap the list - if fc := node.Field("functionConfig"); fc != nil { - r.FunctionConfig = fc.Value - } - if res := node.Field("results"); res != nil { - r.Results = res.Value - } - - items := node.Field("items") - if items != nil { - for i := range items.Value.Content() { - // add items - output = append(output, yaml.NewRNode(items.Value.Content()[i])) - } - } - continue - } - - // add the node to the list - output = append(output, node) - - // increment the index annotation value - index++ - } - if r.AnchorsAweigh { - for _, n := range output { - if err = n.DeAnchor(); err != nil { - return nil, err - } - } - } - return output, nil -} - -func (r *ByteReader) decode(originalYAML string, index int, decoder *yaml.Decoder) (*yaml.RNode, error) { - node := &yaml.Node{} - err := decoder.Decode(node) - if err == io.EOF { - return nil, io.EOF - } - if err != nil { - return nil, errors.WrapPrefixf(err, "MalformedYAMLError") - } - - if yaml.IsYNodeEmptyDoc(node) { - return nil, nil - } - - // set annotations on the read Resources - // sort the annotations by key so the output Resources is consistent (otherwise the - // annotations will be in a random order) - n := yaml.NewRNode(node) - // check if it is a bare sequence node and wrap it with a yaml.BareSeqNodeWrappingKey - if r.WrapBareSeqNode && node.Kind == yaml.DocumentNode && len(node.Content) > 0 && - node.Content[0] != nil && node.Content[0].Kind == yaml.SequenceNode { - wrappedNode := yaml.NewRNode(&yaml.Node{ - Kind: yaml.MappingNode, - }) - wrappedNode.PipeE(yaml.SetField(yaml.BareSeqNodeWrappingKey, n)) - n = wrappedNode - } - - if r.SetAnnotations == nil { - r.SetAnnotations = map[string]string{} - } - if !r.OmitReaderAnnotations { - err := kioutil.CopyLegacyAnnotations(n) - if err != nil { - return nil, err - } - r.SetAnnotations[kioutil.IndexAnnotation] = fmt.Sprintf("%d", index) - r.SetAnnotations[kioutil.LegacyIndexAnnotation] = fmt.Sprintf("%d", index) - - if r.PreserveSeqIndent { - // derive and add the seqindent annotation - seqIndentStyle := yaml.DeriveSeqIndentStyle(originalYAML) - if seqIndentStyle != "" { - r.SetAnnotations[kioutil.SeqIndentAnnotation] = seqIndentStyle - } - } - } - var keys []string - for k := range r.SetAnnotations { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - _, err = n.Pipe(yaml.SetAnnotation(k, r.SetAnnotations[k])) - if err != nil { - return nil, errors.Wrap(err) - } - } - return n, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/byteio_writer.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/byteio_writer.go deleted file mode 100644 index 
7785c9d44..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/byteio_writer.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package kio - -import ( - "encoding/json" - "io" - "path/filepath" - - "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/kio/kioutil" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// ByteWriter writes ResourceNodes to bytes. Generally YAML encoding will be used but in the special -// case of writing a single, bare yaml.RNode that has a kioutil.PathAnnotation indicating that the -// target is a JSON file JSON encoding is used. See shouldJSONEncodeSingleBareNode below for more -// information. -type ByteWriter struct { - // Writer is where ResourceNodes are encoded. - Writer io.Writer - - // KeepReaderAnnotations if set will keep the Reader specific annotations when writing - // the Resources, otherwise they will be cleared. - KeepReaderAnnotations bool - - // ClearAnnotations is a list of annotations to clear when writing the Resources. - ClearAnnotations []string - - // Style is a style that is set on the Resource Node Document. - Style yaml.Style - - // FunctionConfig is the function config for an ResourceList. If non-nil - // wrap the results in an ResourceList. - FunctionConfig *yaml.RNode - - Results *yaml.RNode - - // WrappingKind if set will cause ByteWriter to wrap the Resources in - // an 'items' field in this kind. e.g. if WrappingKind is 'List', - // ByteWriter will wrap the Resources in a List .items field. - WrappingKind string - - // WrappingAPIVersion is the apiVersion for WrappingKind - WrappingAPIVersion string - - // Sort if set, will cause ByteWriter to sort the the nodes before writing them. - Sort bool -} - -var _ Writer = ByteWriter{} - -func (w ByteWriter) Write(inputNodes []*yaml.RNode) error { - // Copy the nodes to prevent writer from mutating the original nodes. 
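A minimal, illustrative sketch (not taken from the patch) of the wrapping behaviour controlled by the WrappingKind and WrappingAPIVersion fields above, assuming the kyaml module is available:

package main

import (
	"os"

	"sigs.k8s.io/kustomize/kyaml/kio"
	"sigs.k8s.io/kustomize/kyaml/yaml"
)

func main() {
	cm, _ := yaml.Parse("apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: a\n")
	svc, _ := yaml.Parse("apiVersion: v1\nkind: Service\nmetadata:\n  name: b\n")
	// With WrappingKind set, both resources are emitted under a single
	// v1/List object's items field instead of as separate YAML documents.
	w := kio.ByteWriter{
		Writer:             os.Stdout,
		WrappingAPIVersion: "v1",
		WrappingKind:       "List",
	}
	if err := w.Write([]*yaml.RNode{cm, svc}); err != nil {
		panic(err)
	}
}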
- nodes := copyRNodes(inputNodes) - if w.Sort { - if err := kioutil.SortNodes(nodes); err != nil { - return errors.Wrap(err) - } - } - - // Even though we use the this value further down we must check this before removing annotations - jsonEncodeSingleBareNode := w.shouldJSONEncodeSingleBareNode(nodes) - - // store seqindent annotation value for each node in order to set the encoder indentation - var seqIndentsForNodes []string - for i := range nodes { - seqIndentsForNodes = append(seqIndentsForNodes, nodes[i].GetAnnotations()[kioutil.SeqIndentAnnotation]) - } - - for i := range nodes { - // clean resources by removing annotations set by the Reader - if !w.KeepReaderAnnotations { - _, err := nodes[i].Pipe(yaml.ClearAnnotation(kioutil.IndexAnnotation)) - if err != nil { - return errors.Wrap(err) - } - _, err = nodes[i].Pipe(yaml.ClearAnnotation(kioutil.LegacyIndexAnnotation)) - if err != nil { - return errors.Wrap(err) - } - - _, err = nodes[i].Pipe(yaml.ClearAnnotation(kioutil.SeqIndentAnnotation)) - if err != nil { - return errors.Wrap(err) - } - } - for _, a := range w.ClearAnnotations { - _, err := nodes[i].Pipe(yaml.ClearAnnotation(a)) - if err != nil { - return errors.Wrap(err) - } - } - - if err := yaml.ClearEmptyAnnotations(nodes[i]); err != nil { - return err - } - - if w.Style != 0 { - nodes[i].YNode().Style = w.Style - } - } - - if jsonEncodeSingleBareNode { - encoder := json.NewEncoder(w.Writer) - encoder.SetIndent("", " ") - return errors.Wrap(encoder.Encode(nodes[0])) - } - - encoder := yaml.NewEncoder(w.Writer) - defer encoder.Close() - // don't wrap the elements - if w.WrappingKind == "" { - for i := range nodes { - if seqIndentsForNodes[i] == string(yaml.WideSequenceStyle) { - encoder.DefaultSeqIndent() - } else { - encoder.CompactSeqIndent() - } - if err := encoder.Encode(upWrapBareSequenceNode(nodes[i].Document())); err != nil { - return errors.Wrap(err) - } - } - return nil - } - // wrap the elements in a list - items := &yaml.Node{Kind: yaml.SequenceNode} - list := &yaml.Node{ - Kind: yaml.MappingNode, - Style: w.Style, - Content: []*yaml.Node{ - {Kind: yaml.ScalarNode, Value: "apiVersion"}, - {Kind: yaml.ScalarNode, Value: w.WrappingAPIVersion}, - {Kind: yaml.ScalarNode, Value: "kind"}, - {Kind: yaml.ScalarNode, Value: w.WrappingKind}, - {Kind: yaml.ScalarNode, Value: "items"}, items, - }} - if w.FunctionConfig != nil { - list.Content = append(list.Content, - &yaml.Node{Kind: yaml.ScalarNode, Value: "functionConfig"}, - w.FunctionConfig.YNode()) - } - if w.Results != nil { - list.Content = append(list.Content, - &yaml.Node{Kind: yaml.ScalarNode, Value: "results"}, - w.Results.YNode()) - } - doc := &yaml.Node{ - Kind: yaml.DocumentNode, - Content: []*yaml.Node{list}} - for i := range nodes { - items.Content = append(items.Content, nodes[i].YNode()) - } - return encoder.Encode(doc) -} - -func copyRNodes(in []*yaml.RNode) []*yaml.RNode { - out := make([]*yaml.RNode, len(in)) - for i := range in { - out[i] = in[i].Copy() - } - return out -} - -// shouldJSONEncodeSingleBareNode determines if nodes contain a single node that should not be -// wrapped and has a JSON file extension, which in turn means that the node should be JSON encoded. -// Note 1: this must be checked before any annotations to avoid losing information about the target -// filename extension. -// Note 2: JSON encoding should only be used for single, unwrapped nodes because multiple unwrapped -// nodes cannot be represented in JSON (no multi doc support). 
Furthermore, the typical use -// cases for wrapping nodes would likely not include later writing the whole wrapper to a -// .json file, i.e. there is no point risking any edge case information loss e.g. comments -// disappearing, that could come from JSON encoding the whole wrapper just to ensure that -// one (or all nodes) can be read as JSON. -func (w ByteWriter) shouldJSONEncodeSingleBareNode(nodes []*yaml.RNode) bool { - if w.WrappingKind == "" && len(nodes) == 1 { - if path, _, _ := kioutil.GetFileAnnotations(nodes[0]); path != "" { - filename := filepath.Base(path) - for _, glob := range JSONMatch { - if match, _ := filepath.Match(glob, filename); match { - return true - } - } - } - } - return false -} - -// upWrapBareSequenceNode unwraps the bare sequence nodes wrapped by yaml.BareSeqNodeWrappingKey -func upWrapBareSequenceNode(node *yaml.Node) *yaml.Node { - rNode := yaml.NewRNode(node) - seqNode, err := rNode.Pipe(yaml.Lookup(yaml.BareSeqNodeWrappingKey)) - if err == nil && !seqNode.IsNilOrEmpty() { - return seqNode.YNode() - } - return node -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/doc.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/doc.go deleted file mode 100644 index 9c11a1463..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/doc.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package kio contains libraries for reading and writing collections of Resources. -// -// Reading Resources -// -// Resources are Read using a kio.Reader function. Examples: -// [kio.LocalPackageReader{}, kio.ByteReader{}] -// -// Resources read using a LocalPackageReader will have annotations applied so they can be -// written back to the files they were read from. -// -// Modifying Resources -// -// Resources are modified using a kio.Filter. The kio.Filter accepts a collection of -// Resources as input, and returns a new collection as output. -// It is recommended to use the yaml package for manipulating individual Resources in -// the collection. -// -// Writing Resources -// -// Resources are Read using a kio.Reader function. Examples: -// [kio.LocalPackageWriter{}, kio.ByteWriter{}] -// -// ReadWriters -// -// It is preferred to use a ReadWriter when reading and writing from / to the same source. -// -// Building Pipelines -// -// The preferred way to transforms a collection of Resources is to use kio.Pipeline to Read, -// Modify and Write the collection of Resources. Pipeline will automatically sequentially -// invoke the Read, Modify, Write steps, returning and error immediately on any failure. -package kio diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/filters.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/filters.go deleted file mode 100644 index 8d7968b3c..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/filters.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package filters - -import ( - "fmt" - "sort" - "strings" - - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/kio/kioutil" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// Filters are the list of known filters for unmarshalling a filter into a concrete -// implementation. 
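The package documentation above describes the Read/Modify/Write flow; as an illustrative sketch (the annotation key is made up for the example), a kio.Pipeline wired from stdin to stdout could look like this:

package main

import (
	"os"

	"sigs.k8s.io/kustomize/kyaml/kio"
	"sigs.k8s.io/kustomize/kyaml/yaml"
)

func main() {
	// Read resources, set an annotation on each one, and write them back out.
	err := kio.Pipeline{
		Inputs:  []kio.Reader{&kio.ByteReader{Reader: os.Stdin}},
		Filters: []kio.Filter{kio.FilterAll(yaml.SetAnnotation("example.com/processed", "true"))},
		Outputs: []kio.Writer{kio.ByteWriter{Writer: os.Stdout}},
	}.Execute()
	if err != nil {
		os.Exit(1)
	}
}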
-var Filters = map[string]func() kio.Filter{ - "FileSetter": func() kio.Filter { return &FileSetter{} }, - "FormatFilter": func() kio.Filter { return &FormatFilter{} }, - "GrepFilter": func() kio.Filter { return GrepFilter{} }, - "MatchModifier": func() kio.Filter { return &MatchModifyFilter{} }, - "Modifier": func() kio.Filter { return &Modifier{} }, -} - -// filter wraps a kio.filter so that it can be unmarshalled from yaml. -type KFilter struct { - kio.Filter -} - -func (t KFilter) MarshalYAML() (interface{}, error) { - return t.Filter, nil -} - -func (t *KFilter) UnmarshalYAML(unmarshal func(interface{}) error) error { - i := map[string]interface{}{} - if err := unmarshal(i); err != nil { - return err - } - meta := &yaml.ResourceMeta{} - if err := unmarshal(meta); err != nil { - return err - } - filter, found := Filters[meta.Kind] - if !found { - var knownFilters []string - for k := range Filters { - knownFilters = append(knownFilters, k) - } - sort.Strings(knownFilters) - return fmt.Errorf("unsupported filter Kind %v: may be one of: [%s]", - meta, strings.Join(knownFilters, ",")) - } - t.Filter = filter() - - return unmarshal(t.Filter) -} - -// Modifier modifies the input Resources by invoking the provided pipeline. -// Modifier will return any Resources for which the pipeline does not return an error. -type Modifier struct { - Kind string `yaml:"kind,omitempty"` - - Filters yaml.YFilters `yaml:"pipeline,omitempty"` -} - -var _ kio.Filter = &Modifier{} - -func (f Modifier) Filter(input []*yaml.RNode) ([]*yaml.RNode, error) { - for i := range input { - if _, err := input[i].Pipe(f.Filters.Filters()...); err != nil { - return nil, err - } - } - return input, nil -} - -type MatchModifyFilter struct { - Kind string `yaml:"kind,omitempty"` - - MatchFilters []yaml.YFilters `yaml:"match,omitempty"` - - ModifyFilters yaml.YFilters `yaml:"modify,omitempty"` -} - -var _ kio.Filter = &MatchModifyFilter{} - -func (f MatchModifyFilter) Filter(input []*yaml.RNode) ([]*yaml.RNode, error) { - var matches = input - var err error - for _, filter := range f.MatchFilters { - matches, err = MatchFilter{Filters: filter}.Filter(matches) - if err != nil { - return nil, err - } - } - _, err = Modifier{Filters: f.ModifyFilters}.Filter(matches) - if err != nil { - return nil, err - } - return input, nil -} - -type MatchFilter struct { - Kind string `yaml:"kind,omitempty"` - - Filters yaml.YFilters `yaml:"pipeline,omitempty"` -} - -var _ kio.Filter = &MatchFilter{} - -func (f MatchFilter) Filter(input []*yaml.RNode) ([]*yaml.RNode, error) { - var output []*yaml.RNode - for i := range input { - if v, err := input[i].Pipe(f.Filters.Filters()...); err != nil { - return nil, err - } else if v == nil { - continue - } - output = append(output, input[i]) - } - return output, nil -} - -type FilenameFmtVerb string - -const ( - // KindFmt substitutes kind - KindFmt FilenameFmtVerb = "%k" - - // NameFmt substitutes metadata.name - NameFmt FilenameFmtVerb = "%n" - - // NamespaceFmt substitutes metdata.namespace - NamespaceFmt FilenameFmtVerb = "%s" -) - -// FileSetter sets the file name and mode annotations on Resources. -type FileSetter struct { - Kind string `yaml:"kind,omitempty"` - - // FilenamePattern is the pattern to use for generating filenames. FilenameFmtVerb - // FielnameFmtVerbs may be specified to substitute Resource metadata into the filename. - FilenamePattern string `yaml:"filenamePattern,omitempty"` - - // Mode is the filemode to write. 
- Mode string `yaml:"mode,omitempty"` - - // Override will override the existing filename if it is set on the pattern. - // Otherwise the existing filename is kept. - Override bool `yaml:"override,omitempty"` -} - -var _ kio.Filter = &FileSetter{} - -const DefaultFilenamePattern = "%n_%k.yaml" - -func (f *FileSetter) Filter(input []*yaml.RNode) ([]*yaml.RNode, error) { - if f.Mode == "" { - f.Mode = fmt.Sprintf("%d", 0600) - } - if f.FilenamePattern == "" { - f.FilenamePattern = DefaultFilenamePattern - } - - resources := map[string][]*yaml.RNode{} - for i := range input { - if err := kioutil.CopyLegacyAnnotations(input[i]); err != nil { - return nil, err - } - - m, err := input[i].GetMeta() - if err != nil { - return nil, err - } - file := f.FilenamePattern - file = strings.ReplaceAll(file, string(KindFmt), strings.ToLower(m.Kind)) - file = strings.ReplaceAll(file, string(NameFmt), strings.ToLower(m.Name)) - file = strings.ReplaceAll(file, string(NamespaceFmt), strings.ToLower(m.Namespace)) - - if _, found := m.Annotations[kioutil.PathAnnotation]; !found || f.Override { - if _, err := input[i].Pipe(yaml.SetAnnotation(kioutil.PathAnnotation, file)); err != nil { - return nil, err - } - if _, err := input[i].Pipe(yaml.SetAnnotation(kioutil.LegacyPathAnnotation, file)); err != nil { - return nil, err - } - } - resources[file] = append(resources[file], input[i]) - } - - var output []*yaml.RNode - for i := range resources { - if err := kioutil.SortNodes(resources[i]); err != nil { - return nil, err - } - for j := range resources[i] { - if _, err := resources[i][j].Pipe( - yaml.SetAnnotation(kioutil.IndexAnnotation, fmt.Sprintf("%d", j))); err != nil { - return nil, err - } - if _, err := resources[i][j].Pipe( - yaml.SetAnnotation(kioutil.LegacyIndexAnnotation, fmt.Sprintf("%d", j))); err != nil { - return nil, err - } - output = append(output, resources[i][j]) - } - } - return output, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/fmtr.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/fmtr.go deleted file mode 100644 index 7f2acbda4..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/fmtr.go +++ /dev/null @@ -1,314 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package yamlfmt contains libraries for formatting yaml files containing -// Kubernetes Resource configuration. -// -// Yaml files are formatted by: -// - Sorting fields and map values -// - Sorting unordered lists for whitelisted types -// - Applying a canonical yaml Style -// -// Fields are ordered using a relative ordering applied to commonly -// encountered Resource fields. All Resources, including non-builtin -// Resources such as CRDs, share the same field precedence. -// -// Fields that do not appear in the explicit ordering are ordered -// lexicographically. -// -// A subset of well known known unordered lists are sorted by element field -// values. -package filters - -import ( - "bytes" - "fmt" - "io" - "sort" - - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/openapi" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -type FormattingStrategy = string - -const ( - // NoFmtAnnotation determines if the resource should be formatted. - FmtAnnotation string = "config.kubernetes.io/formatting" - - // FmtStrategyStandard means the resource will be formatted according - // to the default rules. - FmtStrategyStandard FormattingStrategy = "standard" - - // FmtStrategyNone means the resource will not be formatted. 
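An illustrative sketch (not part of the vendored code) of the FileSetter filter defined above, using the %n and %k verbs to derive a path annotation; the resource content is made up for the example:

package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/kyaml/kio/filters"
	"sigs.k8s.io/kustomize/kyaml/kio/kioutil"
	"sigs.k8s.io/kustomize/kyaml/yaml"
)

func main() {
	dep, err := yaml.Parse("apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: web\n")
	if err != nil {
		panic(err)
	}
	// "%n_%k.yaml" substitutes the lowercased name and kind into the filename.
	out, err := (&filters.FileSetter{FilenamePattern: "%n_%k.yaml"}).Filter([]*yaml.RNode{dep})
	if err != nil {
		panic(err)
	}
	path, _, _ := kioutil.GetFileAnnotations(out[0])
	fmt.Println(path) // expected: web_deployment.yaml
}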
- FmtStrategyNone FormattingStrategy = "none" -) - -// FormatInput returns the formatted input. -func FormatInput(input io.Reader) (*bytes.Buffer, error) { - buff := &bytes.Buffer{} - err := kio.Pipeline{ - Inputs: []kio.Reader{&kio.ByteReader{Reader: input}}, - Filters: []kio.Filter{FormatFilter{}}, - Outputs: []kio.Writer{kio.ByteWriter{Writer: buff}}, - }.Execute() - - return buff, err -} - -// FormatFileOrDirectory reads the file or directory and formats each file's -// contents by writing it back to the file. -func FormatFileOrDirectory(path string) error { - return kio.Pipeline{ - Inputs: []kio.Reader{kio.LocalPackageReader{ - PackagePath: path, - }}, - Filters: []kio.Filter{FormatFilter{}}, - Outputs: []kio.Writer{kio.LocalPackageWriter{PackagePath: path}}, - }.Execute() -} - -type FormatFilter struct { - Process func(n *yaml.Node) error - UseSchema bool -} - -var _ kio.Filter = FormatFilter{} - -func (f FormatFilter) Filter(slice []*yaml.RNode) ([]*yaml.RNode, error) { - for i := range slice { - fmtStrategy, err := getFormattingStrategy(slice[i]) - if err != nil { - return nil, err - } - - if fmtStrategy == FmtStrategyNone { - continue - } - - kindNode, err := slice[i].Pipe(yaml.Get("kind")) - if err != nil { - return nil, err - } - if kindNode == nil { - continue - } - apiVersionNode, err := slice[i].Pipe(yaml.Get("apiVersion")) - if err != nil { - return nil, err - } - if apiVersionNode == nil { - continue - } - kind, apiVersion := kindNode.YNode().Value, apiVersionNode.YNode().Value - var s *openapi.ResourceSchema - if f.UseSchema { - s = openapi.SchemaForResourceType(yaml.TypeMeta{APIVersion: apiVersion, Kind: kind}) - } else { - s = nil - } - err = (&formatter{apiVersion: apiVersion, kind: kind, process: f.Process}). - fmtNode(slice[i].YNode(), "", s) - if err != nil { - return nil, err - } - } - return slice, nil -} - -// getFormattingStrategy looks for the formatting annotation to determine -// which strategy should be used for formatting. The default is standard -// if no annotation is found. -func getFormattingStrategy(node *yaml.RNode) (FormattingStrategy, error) { - value, err := node.Pipe(yaml.GetAnnotation(FmtAnnotation)) - if err != nil || value == nil { - return FmtStrategyStandard, err - } - - fmtStrategy := value.YNode().Value - - switch fmtStrategy { - case FmtStrategyStandard: - return FmtStrategyStandard, nil - case FmtStrategyNone: - return FmtStrategyNone, nil - default: - return "", fmt.Errorf( - "formatting annotation has illegal value %s", fmtStrategy) - } -} - -type formatter struct { - apiVersion string - kind string - process func(n *yaml.Node) error -} - -// fmtNode recursively formats the Document Contents. -// See: https://godoc.org/gopkg.in/yaml.v3#Node -func (f *formatter) fmtNode(n *yaml.Node, path string, schema *openapi.ResourceSchema) error { - if n.Kind == yaml.ScalarNode && schema != nil && schema.Schema != nil { - // ensure values that are interpreted as non-string values (e.g. 
"true") - // are properly quoted - yaml.FormatNonStringStyle(n, *schema.Schema) - } - - // sort the order of mapping fields - if n.Kind == yaml.MappingNode { - sort.Sort(sortedMapContents(*n)) - } - - // sort the order of sequence elements if it is whitelisted - if n.Kind == yaml.SequenceNode { - if yaml.WhitelistedListSortKinds.Has(f.kind) && - yaml.WhitelistedListSortApis.Has(f.apiVersion) { - if sortField, found := yaml.WhitelistedListSortFields[path]; found { - sort.Sort(sortedSeqContents{Node: *n, sortField: sortField}) - } - } - } - - // format the Content - for i := range n.Content { - // MappingNode are structured as having their fields as Content, - // with the field-key and field-value alternating. e.g. Even elements - // are the keys and odd elements are the values - isFieldKey := n.Kind == yaml.MappingNode && i%2 == 0 - isFieldValue := n.Kind == yaml.MappingNode && i%2 == 1 - isElement := n.Kind == yaml.SequenceNode - - // run the process callback on the node if it has been set - // don't process keys: their format should be fixed - if f.process != nil && !isFieldKey { - if err := f.process(n.Content[i]); err != nil { - return err - } - } - - // get the schema for this Node - p := path - var s *openapi.ResourceSchema - switch { - case isFieldValue: - // if the node is a field, lookup the schema using the field name - p = fmt.Sprintf("%s.%s", path, n.Content[i-1].Value) - if schema != nil { - s = schema.Field(n.Content[i-1].Value) - } - case isElement: - // if the node is a list element, lookup the schema for the array items - if schema != nil { - s = schema.Elements() - } - } - // format the node using the schema - err := f.fmtNode(n.Content[i], p, s) - if err != nil { - return err - } - } - return nil -} - -// sortedMapContents sorts the Contents field of a MappingNode by the field names using a statically -// defined field precedence, and falling back on lexicographical sorting -type sortedMapContents yaml.Node - -func (s sortedMapContents) Len() int { - return len(s.Content) / 2 -} -func (s sortedMapContents) Swap(i, j int) { - // yaml MappingNode Contents are a list of field names followed by - // field values, rather than a list of field pairs. - // increment. - // - // e.g. ["field1Name", "field1Value", "field2Name", "field2Value"] - iFieldNameIndex := i * 2 - jFieldNameIndex := j * 2 - iFieldValueIndex := iFieldNameIndex + 1 - jFieldValueIndex := jFieldNameIndex + 1 - - // swap field names - s.Content[iFieldNameIndex], s.Content[jFieldNameIndex] = - s.Content[jFieldNameIndex], s.Content[iFieldNameIndex] - - // swap field values - s.Content[iFieldValueIndex], s.Content[jFieldValueIndex] = s. - Content[jFieldValueIndex], s.Content[iFieldValueIndex] -} - -func (s sortedMapContents) Less(i, j int) bool { - iFieldNameIndex := i * 2 - jFieldNameIndex := j * 2 - iFieldName := s.Content[iFieldNameIndex].Value - jFieldName := s.Content[jFieldNameIndex].Value - - // order by their precedence values looked up from the index - iOrder, foundI := yaml.FieldOrder[iFieldName] - jOrder, foundJ := yaml.FieldOrder[jFieldName] - if foundI && foundJ { - return iOrder < jOrder - } - - // known fields come before unknown fields - if foundI { - return true - } - if foundJ { - return false - } - - // neither field is known, sort them lexicographically - return iFieldName < jFieldName -} - -// sortedSeqContents sorts the Contents field of a SequenceNode by the value of -// the elements sortField. -// e.g. 
it will sort spec.template.spec.containers by the value of the container `name` field -type sortedSeqContents struct { - yaml.Node - sortField string -} - -func (s sortedSeqContents) Len() int { - return len(s.Content) -} -func (s sortedSeqContents) Swap(i, j int) { - s.Content[i], s.Content[j] = s.Content[j], s.Content[i] -} -func (s sortedSeqContents) Less(i, j int) bool { - // primitive lists -- sort by the element's primitive values - if s.sortField == "" { - iValue := s.Content[i].Value - jValue := s.Content[j].Value - return iValue < jValue - } - - // map lists -- sort by the element's sortField values - var iValue, jValue string - for a := range s.Content[i].Content { - if a%2 != 0 { - continue // not a fieldNameIndex - } - // locate the index of the sortField field - if s.Content[i].Content[a].Value == s.sortField { - // a is the yaml node for the field key, a+1 is the node for the field value - iValue = s.Content[i].Content[a+1].Value - } - } - for a := range s.Content[j].Content { - if a%2 != 0 { - continue // not a fieldNameIndex - } - - // locate the index of the sortField field - if s.Content[j].Content[a].Value == s.sortField { - // a is the yaml node for the field key, a+1 is the node for the field value - jValue = s.Content[j].Content[a+1].Value - } - } - - // compare the field values - return iValue < jValue -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/grep.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/grep.go deleted file mode 100644 index 2eb8a8886..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/grep.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package filters - -import ( - "regexp" - "strings" - - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -type GrepType int - -const ( - Regexp GrepType = 1 << iota - GreaterThanEq - GreaterThan - LessThan - LessThanEq -) - -// GrepFilter filters RNodes with a matching field -type GrepFilter struct { - Path []string `yaml:"path,omitempty"` - Value string `yaml:"value,omitempty"` - MatchType GrepType `yaml:"matchType,omitempty"` - InvertMatch bool `yaml:"invertMatch,omitempty"` - Compare func(a, b string) (int, error) -} - -var _ kio.Filter = GrepFilter{} - -func (f GrepFilter) Filter(input []*yaml.RNode) ([]*yaml.RNode, error) { - // compile the regular expression 1 time if we are matching using regex - var reg *regexp.Regexp - var err error - if f.MatchType == Regexp || f.MatchType == 0 { - reg, err = regexp.Compile(f.Value) - if err != nil { - return nil, err - } - } - - var output kio.ResourceNodeSlice - for i := range input { - node := input[i] - val, err := node.Pipe(&yaml.PathMatcher{Path: f.Path}) - if err != nil { - return nil, err - } - if val == nil || len(val.Content()) == 0 { - if f.InvertMatch { - output = append(output, input[i]) - } - continue - } - found := false - err = val.VisitElements(func(elem *yaml.RNode) error { - // get the value - var str string - if f.MatchType == Regexp { - style := elem.YNode().Style - defer func() { elem.YNode().Style = style }() - elem.YNode().Style = yaml.FlowStyle - str, err = elem.String() - if err != nil { - return err - } - str = strings.TrimSpace(strings.ReplaceAll(str, `"`, "")) - } else { - // if not regexp, then it needs to parse into a quantity and comments will - // break that - str = elem.YNode().Value - if str == "" { - return nil - } - } - - if f.MatchType == Regexp || f.MatchType == 0 { - if reg.MatchString(str) { - 
found = true - } - return nil - } - - comp, err := f.Compare(str, f.Value) - if err != nil { - return err - } - - if f.MatchType == GreaterThan && comp > 0 { - found = true - } - if f.MatchType == GreaterThanEq && comp >= 0 { - found = true - } - if f.MatchType == LessThan && comp < 0 { - found = true - } - if f.MatchType == LessThanEq && comp <= 0 { - found = true - } - return nil - }) - if err != nil { - return nil, err - } - if found == f.InvertMatch { - continue - } - - output = append(output, input[i]) - } - return output, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/local.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/local.go deleted file mode 100644 index bdac1a28f..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/local.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package filters - -import ( - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -const LocalConfigAnnotation = "config.kubernetes.io/local-config" - -// IsLocalConfig filters Resources using the config.kubernetes.io/local-config annotation -type IsLocalConfig struct { - // IncludeLocalConfig will include local-config if set to true - IncludeLocalConfig bool `yaml:"includeLocalConfig,omitempty"` - - // ExcludeNonLocalConfig will exclude non local-config if set to true - ExcludeNonLocalConfig bool `yaml:"excludeNonLocalConfig,omitempty"` -} - -// Filter implements kio.Filter -func (c *IsLocalConfig) Filter(inputs []*yaml.RNode) ([]*yaml.RNode, error) { - var out []*yaml.RNode - for i := range inputs { - meta, err := inputs[i].GetMeta() - if err != nil { - return nil, err - } - _, local := meta.Annotations[LocalConfigAnnotation] - - if local && c.IncludeLocalConfig { - out = append(out, inputs[i]) - } else if !local && !c.ExcludeNonLocalConfig { - out = append(out, inputs[i]) - } - } - return out, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/merge.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/merge.go deleted file mode 100644 index 5159052cc..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/merge.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package merge contains libraries for merging Resources and Patches -package filters - -import ( - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/yaml" - "sigs.k8s.io/kustomize/kyaml/yaml/merge2" -) - -// MergeFilter merges Resources with the Group/Version/Kind/Namespace/Name together using -// a 2-way merge strategy. -// -// - Fields set to null in the source will be cleared from the destination -// - Fields with matching keys will be merged recursively -// - Lists with an associative key (e.g. 
name) will have their elements merged using the key -// - List without an associative key will have the dest list replaced by the source list -type MergeFilter struct { - Reverse bool -} - -var _ kio.Filter = MergeFilter{} - -type mergeKey struct { - apiVersion string - kind string - namespace string - name string -} - -// MergeFilter implements kio.Filter by merging Resources with the same G/V/K/NS/N -func (c MergeFilter) Filter(input []*yaml.RNode) ([]*yaml.RNode, error) { - // invert the merge precedence - if c.Reverse { - for i, j := 0, len(input)-1; i < j; i, j = i+1, j-1 { - input[i], input[j] = input[j], input[i] - } - } - - // index the Resources by G/V/K/NS/N - index := map[mergeKey][]*yaml.RNode{} - // retain the original ordering - var order []mergeKey - for i := range input { - meta, err := input[i].GetMeta() - if err != nil { - return nil, err - } - key := mergeKey{ - apiVersion: meta.APIVersion, - kind: meta.Kind, - namespace: meta.Namespace, - name: meta.Name, - } - if _, found := index[key]; !found { - order = append(order, key) - } - index[key] = append(index[key], input[i]) - } - - // merge each of the G/V/K/NS/N lists - var output []*yaml.RNode - var err error - for _, k := range order { - var merged *yaml.RNode - resources := index[k] - for i := range resources { - patch := resources[i] - if merged == nil { - // first resources, don't merge it - merged = resources[i] - } else { - merged, err = merge2.Merge(patch, merged, yaml.MergeOptions{ - ListIncreaseDirection: yaml.MergeOptionsListPrepend, - }) - if err != nil { - return nil, err - } - } - } - output = append(output, merged) - } - return output, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/merge3.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/merge3.go deleted file mode 100644 index de8bf6f67..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/merge3.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package filters - -import ( - "fmt" - - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/kio/kioutil" - "sigs.k8s.io/kustomize/kyaml/yaml" - "sigs.k8s.io/kustomize/kyaml/yaml/merge3" -) - -const ( - mergeSourceAnnotation = "config.kubernetes.io/merge-source" - mergeSourceOriginal = "original" - mergeSourceUpdated = "updated" - mergeSourceDest = "dest" -) - -// ResourceMatcher interface is used to match two resources based on IsSameResource implementation -// This is the way to group same logical resources in upstream, local and origin for merge -// The default way to group them is using GVKNN similar to how kubernetes server identifies resources -// Users of this library might have their own interpretation of grouping similar resources -// for e.g. if consumer adds a name-prefix to local resource, it should not be treated as new resource -// for updates etc. -// Hence, the callers of this library may pass different implementation for IsSameResource -type ResourceMatcher interface { - IsSameResource(node1, node2 *yaml.RNode) bool -} - -// ResourceMergeStrategy is the return type from the Handle function in the -// ResourceHandler interface. It determines which version of a resource should -// be included in the output (if any). -type ResourceMergeStrategy int - -const ( - // Merge means the output to dest should be the 3-way merge of original, - // updated and dest. - Merge ResourceMergeStrategy = iota - // KeepDest means the version of the resource in dest should be the output. 
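A hedged sketch (not from the patch) of the 2-way MergeFilter described above: two documents sharing apiVersion/kind/namespace/name collapse into one, with later inputs applied as patches over earlier ones.

package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/kyaml/kio"
	"sigs.k8s.io/kustomize/kyaml/kio/filters"
)

func main() {
	base := "apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: cm\ndata:\n  a: \"1\"\n"
	patch := "apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: cm\ndata:\n  b: \"2\"\n"
	nodes, err := kio.ParseAll(base, patch)
	if err != nil {
		panic(err)
	}
	// Both documents share the same G/V/K/NS/N key, so they are merged into one.
	merged, err := filters.MergeFilter{}.Filter(nodes)
	if err != nil {
		panic(err)
	}
	s, _ := kio.StringAll(merged)
	fmt.Print(s) // expected: a single ConfigMap carrying both data keys
}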
- KeepDest - // KeepUpdated means the version of the resource in updated should be the - // output. - KeepUpdated - // KeepOriginal means the version of the resource in original should be the - // output. - KeepOriginal - // Skip means the resource should not be included in the output. - Skip -) - -// ResourceHandler interface is used to determine what should be done for a -// resource once the versions in original, updated and dest has been -// identified based on the ResourceMatcher. This allows users to customize -// what should be the result in dest if a resource has been deleted from -// upstream. -type ResourceHandler interface { - Handle(original, updated, dest *yaml.RNode) (ResourceMergeStrategy, error) -} - -// Merge3 performs a 3-way merge on the original, updated, and destination packages. -type Merge3 struct { - OriginalPath string - UpdatedPath string - DestPath string - MatchFilesGlob []string - Matcher ResourceMatcher - Handler ResourceHandler -} - -func (m Merge3) Merge() error { - // Read the destination package. The ReadWriter will take take of deleting files - // for removed resources. - var inputs []kio.Reader - dest := &kio.LocalPackageReadWriter{ - PackagePath: m.DestPath, - MatchFilesGlob: m.MatchFilesGlob, - SetAnnotations: map[string]string{mergeSourceAnnotation: mergeSourceDest}, - } - inputs = append(inputs, dest) - - // Read the original package - inputs = append(inputs, kio.LocalPackageReader{ - PackagePath: m.OriginalPath, - MatchFilesGlob: m.MatchFilesGlob, - SetAnnotations: map[string]string{mergeSourceAnnotation: mergeSourceOriginal}, - }) - - // Read the updated package - inputs = append(inputs, kio.LocalPackageReader{ - PackagePath: m.UpdatedPath, - MatchFilesGlob: m.MatchFilesGlob, - SetAnnotations: map[string]string{mergeSourceAnnotation: mergeSourceUpdated}, - }) - - return kio.Pipeline{ - Inputs: inputs, - Filters: []kio.Filter{m}, - Outputs: []kio.Writer{dest}, - }.Execute() -} - -// Filter combines Resources with the same GVK + N + NS into tuples, and then merges them -func (m Merge3) Filter(nodes []*yaml.RNode) ([]*yaml.RNode, error) { - // index the nodes by their identity - matcher := m.Matcher - if matcher == nil { - matcher = &DefaultGVKNNMatcher{MergeOnPath: true} - } - handler := m.Handler - if handler == nil { - handler = &DefaultResourceHandler{} - } - - tl := tuples{matcher: matcher} - for i := range nodes { - if err := tl.add(nodes[i]); err != nil { - return nil, err - } - } - - // iterate over the inputs, merging as needed - var output []*yaml.RNode - for i := range tl.list { - t := tl.list[i] - strategy, err := handler.Handle(t.original, t.updated, t.dest) - if err != nil { - return nil, err - } - switch strategy { - case Merge: - node, err := t.merge() - if err != nil { - return nil, err - } - if node != nil { - output = append(output, node) - } - case KeepDest: - output = append(output, t.dest) - case KeepUpdated: - output = append(output, t.updated) - case KeepOriginal: - output = append(output, t.original) - case Skip: - // do nothing - } - } - return output, nil -} - -// tuples combines nodes with the same GVK + N + NS -type tuples struct { - list []*tuple - - // matcher matches the resources for merge - matcher ResourceMatcher -} - -// DefaultGVKNNMatcher holds the default matching of resources implementation based on -// Group, Version, Kind, Name and Namespace of the resource -type DefaultGVKNNMatcher struct { - // MergeOnPath will use the relative filepath as part of the merge key. 
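A sketch of driving the Merge3 filter above; the original/updated/dest directory names are hypothetical, and the dest package is rewritten in place with the 3-way result.

package main

import (
	"log"

	"sigs.k8s.io/kustomize/kyaml/kio/filters"
)

func main() {
	err := filters.Merge3{
		OriginalPath:   "pkg-original", // common ancestor of the package
		UpdatedPath:    "pkg-updated",  // upstream copy with new changes
		DestPath:       "pkg-dest",     // local copy, receives the merged result
		MatchFilesGlob: []string{"*.yaml"},
	}.Merge()
	if err != nil {
		log.Fatal(err)
	}
}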
- // This may be necessary if the directory contains multiple copies of - // the same resource, or resources patches. - MergeOnPath bool -} - -// IsSameResource returns true if metadata of node1 and metadata of node2 belongs to same logical resource -func (dm *DefaultGVKNNMatcher) IsSameResource(node1, node2 *yaml.RNode) bool { - if node1 == nil || node2 == nil { - return false - } - if err := kioutil.CopyLegacyAnnotations(node1); err != nil { - return false - } - if err := kioutil.CopyLegacyAnnotations(node2); err != nil { - return false - } - - meta1, err := node1.GetMeta() - if err != nil { - return false - } - - meta2, err := node2.GetMeta() - if err != nil { - return false - } - - if meta1.Name != meta2.Name { - return false - } - if meta1.Namespace != meta2.Namespace { - return false - } - if meta1.APIVersion != meta2.APIVersion { - return false - } - if meta1.Kind != meta2.Kind { - return false - } - if dm.MergeOnPath { - // directories may contain multiple copies of a resource with the same - // name, namespace, apiVersion and kind -- e.g. kustomize patches, or - // multiple environments - // mergeOnPath configures the merge logic to use the path as part of the - // resource key - if meta1.Annotations[kioutil.PathAnnotation] != meta2.Annotations[kioutil.PathAnnotation] { - return false - } - } - return true -} - -// add adds a node to the list, combining it with an existing matching Resource if found -func (ts *tuples) add(node *yaml.RNode) error { - for i := range ts.list { - t := ts.list[i] - if ts.matcher.IsSameResource(addedNode(t), node) { - return t.add(node) - } - } - t := &tuple{} - if err := t.add(node); err != nil { - return err - } - ts.list = append(ts.list, t) - return nil -} - -// addedNode returns one on the existing added nodes in the tuple -func addedNode(t *tuple) *yaml.RNode { - if t.updated != nil { - return t.updated - } - if t.original != nil { - return t.original - } - return t.dest -} - -// tuple wraps an original, updated, and dest tuple for a given Resource -type tuple struct { - original *yaml.RNode - updated *yaml.RNode - dest *yaml.RNode -} - -// add sets the corresponding tuple field for the node -func (t *tuple) add(node *yaml.RNode) error { - meta, err := node.GetMeta() - if err != nil { - return err - } - switch meta.Annotations[mergeSourceAnnotation] { - case mergeSourceDest: - if t.dest != nil { - return duplicateError("local", meta.Annotations[kioutil.PathAnnotation]) - } - t.dest = node - case mergeSourceOriginal: - if t.original != nil { - return duplicateError("original upstream", meta.Annotations[kioutil.PathAnnotation]) - } - t.original = node - case mergeSourceUpdated: - if t.updated != nil { - return duplicateError("updated upstream", meta.Annotations[kioutil.PathAnnotation]) - } - t.updated = node - default: - return fmt.Errorf("no source annotation for Resource") - } - return nil -} - -// merge performs a 3-way merge on the tuple -func (t *tuple) merge() (*yaml.RNode, error) { - return merge3.Merge(t.dest, t.original, t.updated) -} - -// duplicateError returns duplicate resources error -func duplicateError(source, filePath string) error { - return fmt.Errorf(`found duplicate %q resources in file %q, please refer to "update" documentation for the fix`, source, filePath) -} - -// DefaultResourceHandler is the default implementation of the ResourceHandler -// interface. It uses the following rules: -// * Keep dest if resource only exists in dest. -// * Keep updated if resource added in updated. -// * Delete dest if updated has been deleted. 
-// * Don't add the resource back if removed from dest. -// * Otherwise merge. -type DefaultResourceHandler struct{} - -func (*DefaultResourceHandler) Handle(original, updated, dest *yaml.RNode) (ResourceMergeStrategy, error) { - switch { - case original == nil && updated == nil && dest != nil: - // added locally -- keep dest - return KeepDest, nil - case updated != nil && dest == nil: - // added in the update -- add update - return KeepUpdated, nil - case original != nil && updated == nil: - // deleted in the update - return Skip, nil - case original != nil && dest == nil: - // deleted locally - return Skip, nil - default: - // dest and updated are non-nil -- merge them - return Merge, nil - } -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/modify.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/modify.go deleted file mode 100644 index b1090302a..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/modify.go +++ /dev/null @@ -1,4 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package filters diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/stripcomments.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/stripcomments.go deleted file mode 100644 index 8e9ecb3f2..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/filters/stripcomments.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package filters - -import ( - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -type StripCommentsFilter struct{} - -var _ kio.Filter = StripCommentsFilter{} - -func (f StripCommentsFilter) Filter(slice []*yaml.RNode) ([]*yaml.RNode, error) { - for i := range slice { - stripComments(slice[i].YNode()) - } - return slice, nil -} - -func stripComments(node *yaml.Node) { - if node == nil { - return - } - node.HeadComment = "" - node.LineComment = "" - node.FootComment = "" - for i := range node.Content { - stripComments(node.Content[i]) - } -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/ignorefilesmatcher.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/ignorefilesmatcher.go deleted file mode 100644 index 0ba3d8382..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/ignorefilesmatcher.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package kio - -import ( - "errors" - "os" - "path/filepath" - "strings" - - gitignore "github.com/monochromegane/go-gitignore" - "sigs.k8s.io/kustomize/kyaml/ext" - "sigs.k8s.io/kustomize/kyaml/filesys" -) - -// ignoreFilesMatcher handles `.krmignore` files, which allows for ignoring -// files or folders in a package. The format of this file is a subset of the -// gitignore format, with recursive patterns (like a/**/c) not supported. If a -// file or folder matches any of the patterns in the .krmignore file for the -// package, it will be excluded. -// -// It works as follows: -// -// * It will look for .krmignore file in the top folder and on the top level -// of any subpackages. Subpackages are defined by the presence of a Krmfile -// in the folder. -// * `.krmignore` files only cover files and folders for the package in which -// it is defined. So ignore patterns defined in a parent package does not -// affect which files are ignored from a subpackage. -// * An ignore pattern can not ignore a subpackage. 
So even if the parent -// package contains a pattern that ignores the directory foo, if foo is a -// subpackage, it will still be included if the IncludeSubpackages property -// is set to true -type ignoreFilesMatcher struct { - matchers []matcher - fs filesys.FileSystemOrOnDisk -} - -// readIgnoreFile checks whether there is a .krmignore file in the path, and -// if it is, reads it in and turns it into a matcher. If we can't find a file, -// we just add a matcher that match nothing. -func (i *ignoreFilesMatcher) readIgnoreFile(path string) error { - i.verifyPath(path) - f, err := i.fs.Open(filepath.Join(path, ext.IgnoreFileName())) - if err != nil { - if errors.Is(err, os.ErrNotExist) { - i.matchers = append(i.matchers, matcher{ - matcher: gitignore.DummyIgnoreMatcher(false), - basePath: path, - }) - return nil - } - return err - } - defer f.Close() - - i.matchers = append(i.matchers, matcher{ - matcher: gitignore.NewGitIgnoreFromReader(path, f), - basePath: path, - }) - return nil -} - -// verifyPath checks whether the top matcher on the stack -// is correct for the provided filepath. Matchers are removed once -// we encounter a filepath that is not a subpath of the basepath for -// the matcher. -func (i *ignoreFilesMatcher) verifyPath(path string) { - for j := len(i.matchers) - 1; j >= 0; j-- { - matcher := i.matchers[j] - if strings.HasPrefix(path, matcher.basePath) || path == matcher.basePath { - i.matchers = i.matchers[:j+1] - return - } - } -} - -// matchFile checks whether the file given by the provided path matches -// any of the patterns in the .krmignore file for the package. -func (i *ignoreFilesMatcher) matchFile(path string) bool { - if len(i.matchers) == 0 { - return false - } - i.verifyPath(filepath.Dir(path)) - return i.matchers[len(i.matchers)-1].matcher.Match(path, false) -} - -// matchFile checks whether the directory given by the provided path matches -// any of the patterns in the .krmignore file for the package. -func (i *ignoreFilesMatcher) matchDir(path string) bool { - if len(i.matchers) == 0 { - return false - } - i.verifyPath(path) - return i.matchers[len(i.matchers)-1].matcher.Match(path, true) -} - -// matcher wraps the gitignore matcher and the path to the folder -// where the file was found. -type matcher struct { - matcher gitignore.IgnoreMatcher - - basePath string -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/kio.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/kio.go deleted file mode 100644 index 9e00509eb..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/kio.go +++ /dev/null @@ -1,447 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package kio contains low-level libraries for reading, modifying and writing -// Resource Configuration and packages. -package kio - -import ( - "fmt" - "strconv" - - "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/kio/kioutil" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// Reader reads ResourceNodes. Analogous to io.Reader. -type Reader interface { - Read() ([]*yaml.RNode, error) -} - -// ResourceNodeSlice is a collection of ResourceNodes. -// While ResourceNodeSlice has no inherent constraints on ordering or uniqueness, specific -// Readers, Filters or Writers may have constraints. -type ResourceNodeSlice []*yaml.RNode - -var _ Reader = ResourceNodeSlice{} - -func (o ResourceNodeSlice) Read() ([]*yaml.RNode, error) { - return o, nil -} - -// Writer writes ResourceNodes. Analogous to io.Writer. 
-type Writer interface { - Write([]*yaml.RNode) error -} - -// WriterFunc implements a Writer as a function. -type WriterFunc func([]*yaml.RNode) error - -func (fn WriterFunc) Write(o []*yaml.RNode) error { - return fn(o) -} - -// ReaderWriter implements both Reader and Writer interfaces -type ReaderWriter interface { - Reader - Writer -} - -// Filter modifies a collection of Resource Configuration by returning the modified slice. -// When possible, Filters should be serializable to yaml so that they can be described -// as either data or code. -// -// Analogous to http://www.linfo.org/filters.html -type Filter interface { - Filter([]*yaml.RNode) ([]*yaml.RNode, error) -} - -// TrackableFilter is an extension of Filter which is also capable of tracking -// which fields were mutated by the filter. -type TrackableFilter interface { - Filter - WithMutationTracker(func(key, value, tag string, node *yaml.RNode)) -} - -// FilterFunc implements a Filter as a function. -type FilterFunc func([]*yaml.RNode) ([]*yaml.RNode, error) - -func (fn FilterFunc) Filter(o []*yaml.RNode) ([]*yaml.RNode, error) { - return fn(o) -} - -// Pipeline reads Resource Configuration from a set of Inputs, applies some -// transformation filters, and writes the results to a set of Outputs. -// -// Analogous to http://www.linfo.org/pipes.html -type Pipeline struct { - // Inputs provide sources for Resource Configuration to be read. - Inputs []Reader `yaml:"inputs,omitempty"` - - // Filters are transformations applied to the Resource Configuration. - // They are applied in the order they are specified. - // Analogous to http://www.linfo.org/filters.html - Filters []Filter `yaml:"filters,omitempty"` - - // Outputs are where the transformed Resource Configuration is written. - Outputs []Writer `yaml:"outputs,omitempty"` - - // ContinueOnEmptyResult configures what happens when a filter in the pipeline - // returns an empty result. - // If it is false (default), subsequent filters will be skipped and the result - // will be returned immediately. This is useful as an optimization when you - // know that subsequent filters will not alter the empty result. - // If it is true, the empty result will be provided as input to the next - // filter in the list. This is useful when subsequent functions in the - // pipeline may generate new resources. - ContinueOnEmptyResult bool `yaml:"continueOnEmptyResult,omitempty"` -} - -// Execute executes each step in the sequence, returning immediately after encountering -// any error as part of the Pipeline. -func (p Pipeline) Execute() error { - return p.ExecuteWithCallback(nil) -} - -// PipelineExecuteCallbackFunc defines a callback function that will be called each time a step in the pipeline succeeds. -type PipelineExecuteCallbackFunc = func(op Filter) - -// ExecuteWithCallback executes each step in the sequence, returning immediately after encountering -// any error as part of the Pipeline. The callback will be called each time a step succeeds. -func (p Pipeline) ExecuteWithCallback(callback PipelineExecuteCallbackFunc) error { - var result []*yaml.RNode - - // read from the inputs - for _, i := range p.Inputs { - nodes, err := i.Read() - if err != nil { - return errors.Wrap(err) - } - result = append(result, nodes...) - } - - // apply operations - for i := range p.Filters { - // Not all RNodes passed through kio.Pipeline have metadata nor should - // they all be required to. 
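A hedged sketch of the FilterFunc adapter defined above, used to plug an ad-hoc filter into a Pipeline; the Secret-dropping logic is an arbitrary example, not anything prescribed by the patch.

package main

import (
	"os"

	"sigs.k8s.io/kustomize/kyaml/kio"
	"sigs.k8s.io/kustomize/kyaml/yaml"
)

// dropSecrets removes Secret resources from the stream.
var dropSecrets = kio.FilterFunc(func(nodes []*yaml.RNode) ([]*yaml.RNode, error) {
	var out []*yaml.RNode
	for _, n := range nodes {
		meta, err := n.GetMeta()
		if err != nil {
			return nil, err
		}
		if meta.Kind != "Secret" {
			out = append(out, n)
		}
	}
	return out, nil
})

func main() {
	err := kio.Pipeline{
		Inputs:  []kio.Reader{&kio.ByteReader{Reader: os.Stdin}},
		Filters: []kio.Filter{dropSecrets},
		Outputs: []kio.Writer{kio.ByteWriter{Writer: os.Stdout}},
	}.Execute()
	if err != nil {
		os.Exit(1)
	}
}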
- nodeAnnos, err := PreprocessResourcesForInternalAnnotationMigration(result) - if err != nil { - return err - } - - op := p.Filters[i] - if callback != nil { - callback(op) - } - result, err = op.Filter(result) - // TODO (issue 2872): This len(result) == 0 should be removed and empty result list should be - // handled by outputs. However currently some writer like LocalPackageReadWriter - // will clear the output directory and which will cause unpredictable results - if len(result) == 0 && !p.ContinueOnEmptyResult || err != nil { - return errors.Wrap(err) - } - - // If either the internal annotations for path, index, and id OR the legacy - // annotations for path, index, and id are changed, we have to update the other. - err = ReconcileInternalAnnotations(result, nodeAnnos) - if err != nil { - return err - } - } - - // write to the outputs - for _, o := range p.Outputs { - if err := o.Write(result); err != nil { - return errors.Wrap(err) - } - } - return nil -} - -// FilterAll runs the yaml.Filter against all inputs -func FilterAll(filter yaml.Filter) Filter { - return FilterFunc(func(nodes []*yaml.RNode) ([]*yaml.RNode, error) { - for i := range nodes { - _, err := filter.Filter(nodes[i]) - if err != nil { - return nil, errors.Wrap(err) - } - } - return nodes, nil - }) -} - -// PreprocessResourcesForInternalAnnotationMigration returns a mapping from id to all -// internal annotations, so that we can use it to reconcile the annotations -// later. This is necessary because currently both internal-prefixed annotations -// and legacy annotations are currently supported, and a change to one must be -// reflected in the other if needed. -func PreprocessResourcesForInternalAnnotationMigration(result []*yaml.RNode) (map[string]map[string]string, error) { - idToAnnosMap := make(map[string]map[string]string) - for i := range result { - idStr := strconv.Itoa(i) - err := result[i].PipeE(yaml.SetAnnotation(kioutil.InternalAnnotationsMigrationResourceIDAnnotation, idStr)) - if err != nil { - return nil, err - } - idToAnnosMap[idStr] = kioutil.GetInternalAnnotations(result[i]) - if err = kioutil.CopyLegacyAnnotations(result[i]); err != nil { - return nil, err - } - meta, _ := result[i].GetMeta() - if err = checkMismatchedAnnos(meta.Annotations); err != nil { - return nil, err - } - } - return idToAnnosMap, nil -} - -func checkMismatchedAnnos(annotations map[string]string) error { - path := annotations[kioutil.PathAnnotation] - index := annotations[kioutil.IndexAnnotation] - id := annotations[kioutil.IdAnnotation] - - legacyPath := annotations[kioutil.LegacyPathAnnotation] - legacyIndex := annotations[kioutil.LegacyIndexAnnotation] - legacyId := annotations[kioutil.LegacyIdAnnotation] - - // if prior to running the functions, the legacy and internal annotations differ, - // throw an error as we cannot infer the user's intent. - if path != "" && legacyPath != "" && path != legacyPath { - return fmt.Errorf("resource input to function has mismatched legacy and internal path annotations") - } - if index != "" && legacyIndex != "" && index != legacyIndex { - return fmt.Errorf("resource input to function has mismatched legacy and internal index annotations") - } - if id != "" && legacyId != "" && id != legacyId { - return fmt.Errorf("resource input to function has mismatched legacy and internal id annotations") - } - return nil -} - -type nodeAnnotations struct { - path string - index string - id string -} - -// ReconcileInternalAnnotations reconciles the annotation format for path, index and id annotations. 
-// It will ensure the output annotation format matches the format in the input. e.g. if the input -// format uses the legacy format and the output will be converted to the legacy format if it's not. -func ReconcileInternalAnnotations(result []*yaml.RNode, nodeAnnosMap map[string]map[string]string) error { - useInternal, useLegacy, err := determineAnnotationsFormat(nodeAnnosMap) - if err != nil { - return err - } - - for i := range result { - // if only one annotation is set, set the other. - err = missingInternalOrLegacyAnnotations(result[i]) - if err != nil { - return err - } - // we must check to see if the function changed either the new internal annotations - // or the old legacy annotations. If one is changed, the change must be reflected - // in the other. - err = checkAnnotationsAltered(result[i], nodeAnnosMap) - if err != nil { - return err - } - // We invoke determineAnnotationsFormat to find out if the original annotations - // use the internal or (and) the legacy format. We format the resources to - // make them consistent with original format. - err = formatInternalAnnotations(result[i], useInternal, useLegacy) - if err != nil { - return err - } - // if the annotations are still somehow out of sync, throw an error - meta, _ := result[i].GetMeta() - err = checkMismatchedAnnos(meta.Annotations) - if err != nil { - return err - } - - if _, err = result[i].Pipe(yaml.ClearAnnotation(kioutil.InternalAnnotationsMigrationResourceIDAnnotation)); err != nil { - return err - } - } - return nil -} - -// determineAnnotationsFormat determines if the resources are using one of the internal and legacy annotation format or both of them. -func determineAnnotationsFormat(nodeAnnosMap map[string]map[string]string) (bool, bool, error) { - var useInternal, useLegacy bool - var err error - - if len(nodeAnnosMap) == 0 { - return true, true, nil - } - - var internal, legacy *bool - for _, annos := range nodeAnnosMap { - _, foundPath := annos[kioutil.PathAnnotation] - _, foundIndex := annos[kioutil.IndexAnnotation] - _, foundId := annos[kioutil.IdAnnotation] - _, foundLegacyPath := annos[kioutil.LegacyPathAnnotation] - _, foundLegacyIndex := annos[kioutil.LegacyIndexAnnotation] - _, foundLegacyId := annos[kioutil.LegacyIdAnnotation] - - if !(foundPath || foundIndex || foundId || foundLegacyPath || foundLegacyIndex || foundLegacyId) { - continue - } - - foundOneOf := foundPath || foundIndex || foundId - if internal == nil { - f := foundOneOf - internal = &f - } - if (foundOneOf && !*internal) || (!foundOneOf && *internal) { - err = fmt.Errorf("the annotation formatting in the input resources is not consistent") - return useInternal, useLegacy, err - } - - foundOneOf = foundLegacyPath || foundLegacyIndex || foundLegacyId - if legacy == nil { - f := foundOneOf - legacy = &f - } - if (foundOneOf && !*legacy) || (!foundOneOf && *legacy) { - err = fmt.Errorf("the annotation formatting in the input resources is not consistent") - return useInternal, useLegacy, err - } - } - if internal != nil { - useInternal = *internal - } - if legacy != nil { - useLegacy = *legacy - } - return useInternal, useLegacy, err -} - -func missingInternalOrLegacyAnnotations(rn *yaml.RNode) error { - if err := missingInternalOrLegacyAnnotation(rn, kioutil.PathAnnotation, kioutil.LegacyPathAnnotation); err != nil { - return err - } - if err := missingInternalOrLegacyAnnotation(rn, kioutil.IndexAnnotation, kioutil.LegacyIndexAnnotation); err != nil { - return err - } - if err := missingInternalOrLegacyAnnotation(rn, 
kioutil.IdAnnotation, kioutil.LegacyIdAnnotation); err != nil { - return err - } - return nil -} - -func missingInternalOrLegacyAnnotation(rn *yaml.RNode, newKey string, legacyKey string) error { - meta, _ := rn.GetMeta() - annotations := meta.Annotations - value := annotations[newKey] - legacyValue := annotations[legacyKey] - - if value == "" && legacyValue == "" { - // do nothing - return nil - } - - if value == "" { - // new key is not set, copy from legacy key - if err := rn.PipeE(yaml.SetAnnotation(newKey, legacyValue)); err != nil { - return err - } - } else if legacyValue == "" { - // legacy key is not set, copy from new key - if err := rn.PipeE(yaml.SetAnnotation(legacyKey, value)); err != nil { - return err - } - } - return nil -} - -func checkAnnotationsAltered(rn *yaml.RNode, nodeAnnosMap map[string]map[string]string) error { - meta, _ := rn.GetMeta() - annotations := meta.Annotations - // get the resource's current path, index, and ids from the new annotations - internal := nodeAnnotations{ - path: annotations[kioutil.PathAnnotation], - index: annotations[kioutil.IndexAnnotation], - id: annotations[kioutil.IdAnnotation], - } - - // get the resource's current path, index, and ids from the legacy annotations - legacy := nodeAnnotations{ - path: annotations[kioutil.LegacyPathAnnotation], - index: annotations[kioutil.LegacyIndexAnnotation], - id: annotations[kioutil.LegacyIdAnnotation], - } - - rid := annotations[kioutil.InternalAnnotationsMigrationResourceIDAnnotation] - originalAnnotations, found := nodeAnnosMap[rid] - if !found { - return nil - } - originalPath, found := originalAnnotations[kioutil.PathAnnotation] - if !found { - originalPath = originalAnnotations[kioutil.LegacyPathAnnotation] - } - if originalPath != "" { - switch { - case originalPath != internal.path && originalPath != legacy.path && internal.path != legacy.path: - return fmt.Errorf("resource input to function has mismatched legacy and internal path annotations") - case originalPath != internal.path: - if _, err := rn.Pipe(yaml.SetAnnotation(kioutil.LegacyPathAnnotation, internal.path)); err != nil { - return err - } - case originalPath != legacy.path: - if _, err := rn.Pipe(yaml.SetAnnotation(kioutil.PathAnnotation, legacy.path)); err != nil { - return err - } - } - } - - originalIndex, found := originalAnnotations[kioutil.IndexAnnotation] - if !found { - originalIndex = originalAnnotations[kioutil.LegacyIndexAnnotation] - } - if originalIndex != "" { - switch { - case originalIndex != internal.index && originalIndex != legacy.index && internal.index != legacy.index: - return fmt.Errorf("resource input to function has mismatched legacy and internal index annotations") - case originalIndex != internal.index: - if _, err := rn.Pipe(yaml.SetAnnotation(kioutil.LegacyIndexAnnotation, internal.index)); err != nil { - return err - } - case originalIndex != legacy.index: - if _, err := rn.Pipe(yaml.SetAnnotation(kioutil.IndexAnnotation, legacy.index)); err != nil { - return err - } - } - } - return nil -} - -func formatInternalAnnotations(rn *yaml.RNode, useInternal, useLegacy bool) error { - if !useInternal { - if err := rn.PipeE(yaml.ClearAnnotation(kioutil.IdAnnotation)); err != nil { - return err - } - if err := rn.PipeE(yaml.ClearAnnotation(kioutil.PathAnnotation)); err != nil { - return err - } - if err := rn.PipeE(yaml.ClearAnnotation(kioutil.IndexAnnotation)); err != nil { - return err - } - } - if !useLegacy { - if err := rn.PipeE(yaml.ClearAnnotation(kioutil.LegacyIdAnnotation)); err != nil { - return err 
- } - if err := rn.PipeE(yaml.ClearAnnotation(kioutil.LegacyPathAnnotation)); err != nil { - return err - } - if err := rn.PipeE(yaml.ClearAnnotation(kioutil.LegacyIndexAnnotation)); err != nil { - return err - } - } - return nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/kioutil/kioutil.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/kioutil/kioutil.go deleted file mode 100644 index 510ecae18..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/kioutil/kioutil.go +++ /dev/null @@ -1,420 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package kioutil - -import ( - "fmt" - "path" - "sort" - "strconv" - "strings" - - "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -type AnnotationKey = string - -const ( - // internalPrefix is the prefix given to internal annotations that are used - // internally by the orchestrator - internalPrefix string = "internal.config.kubernetes.io/" - - // IndexAnnotation records the index of a specific resource in a file or input stream. - IndexAnnotation AnnotationKey = internalPrefix + "index" - - // PathAnnotation records the path to the file the Resource was read from - PathAnnotation AnnotationKey = internalPrefix + "path" - - // SeqIndentAnnotation records the sequence nodes indentation of the input resource - SeqIndentAnnotation AnnotationKey = internalPrefix + "seqindent" - - // IdAnnotation records the id of the resource to map inputs to outputs - IdAnnotation AnnotationKey = internalPrefix + "id" - - // Deprecated: Use IndexAnnotation instead. - LegacyIndexAnnotation AnnotationKey = "config.kubernetes.io/index" - - // Deprecated: use PathAnnotation instead. - LegacyPathAnnotation AnnotationKey = "config.kubernetes.io/path" - - // Deprecated: use IdAnnotation instead. - LegacyIdAnnotation = "config.k8s.io/id" - - // InternalAnnotationsMigrationResourceIDAnnotation is used to uniquely identify - // resources during round trip to and from a function execution. We will use it - // to track the internal annotations and reconcile them if needed. 
- InternalAnnotationsMigrationResourceIDAnnotation = internalPrefix + "annotations-migration-resource-id" -) - -func GetFileAnnotations(rn *yaml.RNode) (string, string, error) { - rm, _ := rn.GetMeta() - annotations := rm.Annotations - path, found := annotations[PathAnnotation] - if !found { - path = annotations[LegacyPathAnnotation] - } - index, found := annotations[IndexAnnotation] - if !found { - index = annotations[LegacyIndexAnnotation] - } - return path, index, nil -} - -func GetIdAnnotation(rn *yaml.RNode) string { - rm, _ := rn.GetMeta() - annotations := rm.Annotations - id, found := annotations[IdAnnotation] - if !found { - id = annotations[LegacyIdAnnotation] - } - return id -} - -func CopyLegacyAnnotations(rn *yaml.RNode) error { - meta, err := rn.GetMeta() - if err != nil { - if err == yaml.ErrMissingMetadata { - // resource has no metadata, this should be a no-op - return nil - } - return err - } - if err := copyAnnotations(meta, rn, LegacyPathAnnotation, PathAnnotation); err != nil { - return err - } - if err := copyAnnotations(meta, rn, LegacyIndexAnnotation, IndexAnnotation); err != nil { - return err - } - if err := copyAnnotations(meta, rn, LegacyIdAnnotation, IdAnnotation); err != nil { - return err - } - return nil -} - -func copyAnnotations(meta yaml.ResourceMeta, rn *yaml.RNode, legacyKey string, newKey string) error { - newValue := meta.Annotations[newKey] - legacyValue := meta.Annotations[legacyKey] - if newValue != "" { - if legacyValue == "" { - if err := rn.PipeE(yaml.SetAnnotation(legacyKey, newValue)); err != nil { - return err - } - } - } else { - if legacyValue != "" { - if err := rn.PipeE(yaml.SetAnnotation(newKey, legacyValue)); err != nil { - return err - } - } - } - return nil -} - -// ErrorIfMissingAnnotation validates the provided annotations are present on the given resources -func ErrorIfMissingAnnotation(nodes []*yaml.RNode, keys ...AnnotationKey) error { - for _, key := range keys { - for _, node := range nodes { - val, err := node.Pipe(yaml.GetAnnotation(key)) - if err != nil { - return errors.Wrap(err) - } - if val == nil { - return errors.Errorf("missing annotation %s", key) - } - } - } - return nil -} - -// CreatePathAnnotationValue creates a default path annotation value for a Resource. -// The path prefix will be dir. 
-func CreatePathAnnotationValue(dir string, m yaml.ResourceMeta) string { - filename := fmt.Sprintf("%s_%s.yaml", strings.ToLower(m.Kind), m.Name) - return path.Join(dir, m.Namespace, filename) -} - -// DefaultPathAndIndexAnnotation sets a default path or index value on any nodes missing the -// annotation -func DefaultPathAndIndexAnnotation(dir string, nodes []*yaml.RNode) error { - counts := map[string]int{} - - // check each node for the path annotation - for i := range nodes { - if err := CopyLegacyAnnotations(nodes[i]); err != nil { - return err - } - m, err := nodes[i].GetMeta() - if err != nil { - return err - } - - // calculate the max index in each file in case we are appending - if p, found := m.Annotations[PathAnnotation]; found { - // record the max indexes into each file - if i, found := m.Annotations[IndexAnnotation]; found { - index, _ := strconv.Atoi(i) - if index > counts[p] { - counts[p] = index - } - } - - // has the path annotation already -- do nothing - continue - } - - // set a path annotation on the Resource - path := CreatePathAnnotationValue(dir, m) - if err := nodes[i].PipeE(yaml.SetAnnotation(PathAnnotation, path)); err != nil { - return err - } - if err := nodes[i].PipeE(yaml.SetAnnotation(LegacyPathAnnotation, path)); err != nil { - return err - } - } - - // set the index annotations - for i := range nodes { - m, err := nodes[i].GetMeta() - if err != nil { - return err - } - - if _, found := m.Annotations[IndexAnnotation]; found { - continue - } - - p := m.Annotations[PathAnnotation] - - // set an index annotation on the Resource - c := counts[p] - counts[p] = c + 1 - if err := nodes[i].PipeE( - yaml.SetAnnotation(IndexAnnotation, fmt.Sprintf("%d", c))); err != nil { - return err - } - if err := nodes[i].PipeE( - yaml.SetAnnotation(LegacyIndexAnnotation, fmt.Sprintf("%d", c))); err != nil { - return err - } - } - return nil -} - -// DefaultPathAnnotation sets a default path annotation on any Reources -// missing it. -func DefaultPathAnnotation(dir string, nodes []*yaml.RNode) error { - // check each node for the path annotation - for i := range nodes { - if err := CopyLegacyAnnotations(nodes[i]); err != nil { - return err - } - m, err := nodes[i].GetMeta() - if err != nil { - return err - } - - if _, found := m.Annotations[PathAnnotation]; found { - // has the path annotation already -- do nothing - continue - } - - // set a path annotation on the Resource - path := CreatePathAnnotationValue(dir, m) - if err := nodes[i].PipeE(yaml.SetAnnotation(PathAnnotation, path)); err != nil { - return err - } - if err := nodes[i].PipeE(yaml.SetAnnotation(LegacyPathAnnotation, path)); err != nil { - return err - } - } - return nil -} - -// Map invokes fn for each element in nodes. 
-func Map(nodes []*yaml.RNode, fn func(*yaml.RNode) (*yaml.RNode, error)) ([]*yaml.RNode, error) { - var returnNodes []*yaml.RNode - for i := range nodes { - n, err := fn(nodes[i]) - if err != nil { - return nil, errors.Wrap(err) - } - if n != nil { - returnNodes = append(returnNodes, n) - } - } - return returnNodes, nil -} - -func MapMeta(nodes []*yaml.RNode, fn func(*yaml.RNode, yaml.ResourceMeta) (*yaml.RNode, error)) ( - []*yaml.RNode, error) { - var returnNodes []*yaml.RNode - for i := range nodes { - meta, err := nodes[i].GetMeta() - if err != nil { - return nil, errors.Wrap(err) - } - n, err := fn(nodes[i], meta) - if err != nil { - return nil, errors.Wrap(err) - } - if n != nil { - returnNodes = append(returnNodes, n) - } - } - return returnNodes, nil -} - -// SortNodes sorts nodes in place: -// - by PathAnnotation annotation -// - by IndexAnnotation annotation -func SortNodes(nodes []*yaml.RNode) error { - var err error - // use stable sort to keep ordering of equal elements - sort.SliceStable(nodes, func(i, j int) bool { - if err != nil { - return false - } - if err := CopyLegacyAnnotations(nodes[i]); err != nil { - return false - } - if err := CopyLegacyAnnotations(nodes[j]); err != nil { - return false - } - var iMeta, jMeta yaml.ResourceMeta - if iMeta, _ = nodes[i].GetMeta(); err != nil { - return false - } - if jMeta, _ = nodes[j].GetMeta(); err != nil { - return false - } - - iValue := iMeta.Annotations[PathAnnotation] - jValue := jMeta.Annotations[PathAnnotation] - if iValue != jValue { - return iValue < jValue - } - - iValue = iMeta.Annotations[IndexAnnotation] - jValue = jMeta.Annotations[IndexAnnotation] - - // put resource config without an index first - if iValue == jValue { - return false - } - if iValue == "" { - return true - } - if jValue == "" { - return false - } - - // sort by index - var iIndex, jIndex int - iIndex, err = strconv.Atoi(iValue) - if err != nil { - err = fmt.Errorf("unable to parse config.kubernetes.io/index %s :%v", iValue, err) - return false - } - jIndex, err = strconv.Atoi(jValue) - if err != nil { - err = fmt.Errorf("unable to parse config.kubernetes.io/index %s :%v", jValue, err) - return false - } - if iIndex != jIndex { - return iIndex < jIndex - } - - // elements are equal - return false - }) - return errors.Wrap(err) -} - -// CopyInternalAnnotations copies the annotations that begin with the prefix -// `internal.config.kubernetes.io` from the source RNode to the destination RNode. -// It takes a parameter exclusions, which is a list of annotation keys to ignore. -func CopyInternalAnnotations(src *yaml.RNode, dst *yaml.RNode, exclusions ...AnnotationKey) error { - srcAnnotations := GetInternalAnnotations(src) - for k, v := range srcAnnotations { - if stringSliceContains(exclusions, k) { - continue - } - if err := dst.PipeE(yaml.SetAnnotation(k, v)); err != nil { - return err - } - } - return nil -} - -// ConfirmInternalAnnotationUnchanged compares the annotations of the RNodes that begin with the prefix -// `internal.config.kubernetes.io`, throwing an error if they differ. It takes a parameter exclusions, -// which is a list of annotation keys to ignore. 
-func ConfirmInternalAnnotationUnchanged(r1 *yaml.RNode, r2 *yaml.RNode, exclusions ...AnnotationKey) error { - r1Annotations := GetInternalAnnotations(r1) - r2Annotations := GetInternalAnnotations(r2) - - // this is a map to prevent duplicates - diffAnnos := make(map[string]bool) - - for k, v1 := range r1Annotations { - if stringSliceContains(exclusions, k) { - continue - } - if v2, ok := r2Annotations[k]; !ok || v1 != v2 { - diffAnnos[k] = true - } - } - - for k, v2 := range r2Annotations { - if stringSliceContains(exclusions, k) { - continue - } - if v1, ok := r1Annotations[k]; !ok || v2 != v1 { - diffAnnos[k] = true - } - } - - if len(diffAnnos) > 0 { - keys := make([]string, 0, len(diffAnnos)) - for k := range diffAnnos { - keys = append(keys, k) - } - sort.Strings(keys) - - errorString := "internal annotations differ: " - for _, key := range keys { - errorString = errorString + key + ", " - } - return errors.Errorf(errorString[0 : len(errorString)-2]) - } - - return nil -} - -// GetInternalAnnotations returns a map of all the annotations of the provided -// RNode that satisfies one of the following: 1) begin with the prefix -// `internal.config.kubernetes.io` 2) is one of `config.kubernetes.io/path`, -// `config.kubernetes.io/index` and `config.k8s.io/id`. -func GetInternalAnnotations(rn *yaml.RNode) map[string]string { - meta, _ := rn.GetMeta() - annotations := meta.Annotations - result := make(map[string]string) - for k, v := range annotations { - if strings.HasPrefix(k, internalPrefix) || k == LegacyPathAnnotation || k == LegacyIndexAnnotation || k == LegacyIdAnnotation { - result[k] = v - } - } - return result -} - -// stringSliceContains returns true if the slice has the string. -func stringSliceContains(slice []string, str string) bool { - for _, s := range slice { - if s == str { - return true - } - } - return false -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/pkgio_reader.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/pkgio_reader.go deleted file mode 100644 index 609a791f3..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/pkgio_reader.go +++ /dev/null @@ -1,360 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package kio - -import ( - "fmt" - "os" - "path/filepath" - - "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/filesys" - "sigs.k8s.io/kustomize/kyaml/kio/kioutil" - "sigs.k8s.io/kustomize/kyaml/sets" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// requiredResourcePackageAnnotations are annotations that are required to write resources back to -// files. -var requiredResourcePackageAnnotations = []string{kioutil.IndexAnnotation, kioutil.PathAnnotation} - -// PackageBuffer implements Reader and Writer, storing Resources in a local field. -type PackageBuffer struct { - Nodes []*yaml.RNode -} - -func (r *PackageBuffer) Read() ([]*yaml.RNode, error) { - return r.Nodes, nil -} - -func (r *PackageBuffer) Write(nodes []*yaml.RNode) error { - r.Nodes = nodes - return nil -} - -// LocalPackageReadWriter reads and writes Resources from / to a local directory. -// When writing, LocalPackageReadWriter will delete files if all of the Resources from -// that file have been removed from the output. 
-type LocalPackageReadWriter struct { - Kind string `yaml:"kind,omitempty"` - - KeepReaderAnnotations bool `yaml:"keepReaderAnnotations,omitempty"` - - // PreserveSeqIndent if true adds kioutil.SeqIndentAnnotation to each resource - PreserveSeqIndent bool - - // PackagePath is the path to the package directory. - PackagePath string `yaml:"path,omitempty"` - - // PackageFileName is the name of file containing package metadata. - // It will be used to identify package. - PackageFileName string `yaml:"packageFileName,omitempty"` - - // MatchFilesGlob configures Read to only read Resources from files matching any of the - // provided patterns. - // Defaults to ["*.yaml", "*.yml"] if empty. To match all files specify ["*"]. - MatchFilesGlob []string `yaml:"matchFilesGlob,omitempty"` - - // IncludeSubpackages will configure Read to read Resources from subpackages. - // Subpackages are identified by presence of PackageFileName. - IncludeSubpackages bool `yaml:"includeSubpackages,omitempty"` - - // ErrorIfNonResources will configure Read to throw an error if yaml missing missing - // apiVersion or kind is read. - ErrorIfNonResources bool `yaml:"errorIfNonResources,omitempty"` - - // OmitReaderAnnotations will cause the reader to skip annotating Resources with the file - // path and mode. - OmitReaderAnnotations bool `yaml:"omitReaderAnnotations,omitempty"` - - // SetAnnotations are annotations to set on the Resources as they are read. - SetAnnotations map[string]string `yaml:"setAnnotations,omitempty"` - - // NoDeleteFiles if set to true, LocalPackageReadWriter won't delete any files - NoDeleteFiles bool `yaml:"noDeleteFiles,omitempty"` - - files sets.String - - // FileSkipFunc is a function which returns true if reader should ignore - // the file - FileSkipFunc LocalPackageSkipFileFunc - - // FileSystem can be used to mock the disk file system. 
- FileSystem filesys.FileSystemOrOnDisk - - // WrapBareSeqNode wraps the bare sequence node document with map node, - // kyaml uses reader annotations to track resources, it is not possible to - // add them to bare sequence nodes, this option enables wrapping such bare - // sequence nodes into map node with key yaml.BareSeqNodeWrappingKey - // note that this wrapping is different and not related to ResourceList wrapping - WrapBareSeqNode bool -} - -func (r *LocalPackageReadWriter) Read() ([]*yaml.RNode, error) { - nodes, err := LocalPackageReader{ - PackagePath: r.PackagePath, - MatchFilesGlob: r.MatchFilesGlob, - IncludeSubpackages: r.IncludeSubpackages, - ErrorIfNonResources: r.ErrorIfNonResources, - SetAnnotations: r.SetAnnotations, - PackageFileName: r.PackageFileName, - FileSkipFunc: r.FileSkipFunc, - PreserveSeqIndent: r.PreserveSeqIndent, - FileSystem: r.FileSystem, - WrapBareSeqNode: r.WrapBareSeqNode, - }.Read() - if err != nil { - return nil, errors.Wrap(err) - } - // keep track of all the files - if !r.NoDeleteFiles { - r.files, err = r.getFiles(nodes) - if err != nil { - return nil, errors.Wrap(err) - } - } - return nodes, nil -} - -func (r *LocalPackageReadWriter) Write(nodes []*yaml.RNode) error { - newFiles, err := r.getFiles(nodes) - if err != nil { - return errors.Wrap(err) - } - var clear []string - for k := range r.SetAnnotations { - clear = append(clear, k) - } - err = LocalPackageWriter{ - PackagePath: r.PackagePath, - ClearAnnotations: clear, - KeepReaderAnnotations: r.KeepReaderAnnotations, - FileSystem: r.FileSystem, - }.Write(nodes) - if err != nil { - return errors.Wrap(err) - } - deleteFiles := r.files.Difference(newFiles) - for f := range deleteFiles { - if err = r.FileSystem.RemoveAll(filepath.Join(r.PackagePath, f)); err != nil { - return errors.Wrap(err) - } - } - return nil -} - -func (r *LocalPackageReadWriter) getFiles(nodes []*yaml.RNode) (sets.String, error) { - val := sets.String{} - for _, n := range nodes { - path, _, err := kioutil.GetFileAnnotations(n) - if err != nil { - return nil, errors.Wrap(err) - } - val.Insert(path) - } - return val, nil -} - -// LocalPackageSkipFileFunc is a function which returns true if the file -// in the package should be ignored by reader. -// relPath is an OS specific relative path -type LocalPackageSkipFileFunc func(relPath string) bool - -// LocalPackageReader reads ResourceNodes from a local package. -type LocalPackageReader struct { - Kind string `yaml:"kind,omitempty"` - - // PackagePath is the path to the package directory. - PackagePath string `yaml:"path,omitempty"` - - // PackageFileName is the name of file containing package metadata. - // It will be used to identify package. - PackageFileName string `yaml:"packageFileName,omitempty"` - - // MatchFilesGlob configures Read to only read Resources from files matching any of the - // provided patterns. - // Defaults to ["*.yaml", "*.yml"] if empty. To match all files specify ["*"]. - MatchFilesGlob []string `yaml:"matchFilesGlob,omitempty"` - - // IncludeSubpackages will configure Read to read Resources from subpackages. - // Subpackages are identified by presence of PackageFileName. - IncludeSubpackages bool `yaml:"includeSubpackages,omitempty"` - - // ErrorIfNonResources will configure Read to throw an error if yaml missing missing - // apiVersion or kind is read. - ErrorIfNonResources bool `yaml:"errorIfNonResources,omitempty"` - - // OmitReaderAnnotations will cause the reader to skip annotating Resources with the file - // path and mode. 
- OmitReaderAnnotations bool `yaml:"omitReaderAnnotations,omitempty"` - - // SetAnnotations are annotations to set on the Resources as they are read. - SetAnnotations map[string]string `yaml:"setAnnotations,omitempty"` - - // FileSkipFunc is a function which returns true if reader should ignore - // the file - FileSkipFunc LocalPackageSkipFileFunc - - // PreserveSeqIndent if true adds kioutil.SeqIndentAnnotation to each resource - PreserveSeqIndent bool - - // FileSystem can be used to mock the disk file system. - FileSystem filesys.FileSystemOrOnDisk - - // WrapBareSeqNode wraps the bare sequence node document with map node, - // kyaml uses reader annotations to track resources, it is not possible to - // add them to bare sequence nodes, this option enables wrapping such bare - // sequence nodes into map node with key yaml.BareSeqNodeWrappingKey - // note that this wrapping is different and not related to ResourceList wrapping - WrapBareSeqNode bool -} - -var _ Reader = LocalPackageReader{} - -var DefaultMatch = []string{"*.yaml", "*.yml"} -var JSONMatch = []string{"*.json"} -var MatchAll = append(DefaultMatch, JSONMatch...) - -// Read reads the Resources. -func (r LocalPackageReader) Read() ([]*yaml.RNode, error) { - if r.PackagePath == "" { - return nil, fmt.Errorf("must specify package path") - } - - // use slash for path - r.PackagePath = filepath.ToSlash(r.PackagePath) - if len(r.MatchFilesGlob) == 0 { - r.MatchFilesGlob = DefaultMatch - } - - var operand ResourceNodeSlice - var pathRelativeTo string - var err error - ignoreFilesMatcher := &ignoreFilesMatcher{ - fs: r.FileSystem, - } - dir, file, err := r.FileSystem.CleanedAbs(r.PackagePath) - if err != nil { - return nil, errors.Wrap(err) - } - r.PackagePath = filepath.Join(string(dir), file) - err = r.FileSystem.Walk(r.PackagePath, func( - path string, info os.FileInfo, err error) error { - if err != nil { - return errors.Wrap(err) - } - - // is this the user specified path? - if path == r.PackagePath { - if info.IsDir() { - // skip the root package directory, but check for a - // .krmignore file first. - pathRelativeTo = r.PackagePath - return ignoreFilesMatcher.readIgnoreFile(path) - } - - // user specified path is a file rather than a directory. - // make its path relative to its parent so it can be written to another file. - pathRelativeTo = filepath.Dir(r.PackagePath) - } - - // check if we should skip the directory or file - if info.IsDir() { - return r.shouldSkipDir(path, ignoreFilesMatcher) - } - - // get the relative path to file within the package so we can write the files back out - // to another location. - relPath, err := filepath.Rel(pathRelativeTo, path) - if err != nil { - return errors.WrapPrefixf(err, pathRelativeTo) - } - if match, err := r.shouldSkipFile(path, relPath, ignoreFilesMatcher); err != nil { - return err - } else if match { - // skip this file - return nil - } - - r.initReaderAnnotations(relPath, info) - nodes, err := r.readFile(path, info) - if err != nil { - return errors.WrapPrefixf(err, path) - } - operand = append(operand, nodes...) 
- return nil - }) - return operand, err -} - -// readFile reads the ResourceNodes from a file -func (r *LocalPackageReader) readFile(path string, _ os.FileInfo) ([]*yaml.RNode, error) { - f, err := r.FileSystem.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - rr := &ByteReader{ - DisableUnwrapping: true, - Reader: f, - OmitReaderAnnotations: r.OmitReaderAnnotations, - SetAnnotations: r.SetAnnotations, - PreserveSeqIndent: r.PreserveSeqIndent, - WrapBareSeqNode: r.WrapBareSeqNode, - } - return rr.Read() -} - -// shouldSkipFile returns true if the file should be skipped -func (r *LocalPackageReader) shouldSkipFile(path, relPath string, matcher *ignoreFilesMatcher) (bool, error) { - // check if the file is covered by a .krmignore file. - if matcher.matchFile(path) { - return true, nil - } - - if r.FileSkipFunc != nil && r.FileSkipFunc(relPath) { - return true, nil - } - - // check if the files are in scope - for _, g := range r.MatchFilesGlob { - if match, err := filepath.Match(g, filepath.Base(path)); err != nil { - return true, errors.Wrap(err) - } else if match { - return false, nil - } - } - return true, nil -} - -// initReaderAnnotations adds the LocalPackageReader Annotations to r.SetAnnotations -func (r *LocalPackageReader) initReaderAnnotations(path string, _ os.FileInfo) { - if r.SetAnnotations == nil { - r.SetAnnotations = map[string]string{} - } - if !r.OmitReaderAnnotations { - r.SetAnnotations[kioutil.PathAnnotation] = path - r.SetAnnotations[kioutil.LegacyPathAnnotation] = path - } -} - -// shouldSkipDir returns a filepath.SkipDir if the directory should be skipped -func (r *LocalPackageReader) shouldSkipDir(path string, matcher *ignoreFilesMatcher) error { - if matcher.matchDir(path) { - return filepath.SkipDir - } - - if r.PackageFileName == "" { - return nil - } - // check if this is a subpackage - if !r.FileSystem.Exists(filepath.Join(path, r.PackageFileName)) { - return nil - } - if !r.IncludeSubpackages { - return filepath.SkipDir - } - return matcher.readIgnoreFile(path) -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/pkgio_writer.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/pkgio_writer.go deleted file mode 100644 index ce6fa45a5..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/pkgio_writer.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package kio - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "strings" - - "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/filesys" - "sigs.k8s.io/kustomize/kyaml/kio/kioutil" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// LocalPackageWriter writes ResourceNodes to a filesystem -type LocalPackageWriter struct { - Kind string `yaml:"kind,omitempty"` - - // PackagePath is the path to the package directory. - PackagePath string `yaml:"path,omitempty"` - - // KeepReaderAnnotations if set will retain the annotations set by LocalPackageReader - KeepReaderAnnotations bool `yaml:"keepReaderAnnotations,omitempty"` - - // ClearAnnotations will clear annotations before writing the resources - ClearAnnotations []string `yaml:"clearAnnotations,omitempty"` - - // FileSystem can be used to mock the disk file system. 
- FileSystem filesys.FileSystemOrOnDisk -} - -var _ Writer = LocalPackageWriter{} - -func (r LocalPackageWriter) Write(nodes []*yaml.RNode) error { - // set the path and index annotations if they are missing - if err := kioutil.DefaultPathAndIndexAnnotation("", nodes); err != nil { - return err - } - - if !r.FileSystem.Exists(r.PackagePath) { - return errors.WrapPrefixf(os.ErrNotExist, "could not write to %q", r.PackagePath) - } - if !r.FileSystem.IsDir(r.PackagePath) { - // if the user specified input isn't a directory, the package is the directory of the - // target - r.PackagePath = filepath.Dir(r.PackagePath) - } - - // setup indexes for writing Resources back to files - if err := r.errorIfMissingRequiredAnnotation(nodes); err != nil { - return err - } - outputFiles, err := r.indexByFilePath(nodes) - if err != nil { - return err - } - for k := range outputFiles { - if err = kioutil.SortNodes(outputFiles[k]); err != nil { - return errors.Wrap(err) - } - } - - if !r.KeepReaderAnnotations { - r.ClearAnnotations = append(r.ClearAnnotations, kioutil.PathAnnotation) - r.ClearAnnotations = append(r.ClearAnnotations, kioutil.LegacyPathAnnotation) - } - - // validate outputs before writing any - for path := range outputFiles { - outputPath := filepath.Join(r.PackagePath, path) - if r.FileSystem.IsDir(outputPath) { - return fmt.Errorf("config.kubernetes.io/path cannot be a directory: %s", path) - } - - err = r.FileSystem.MkdirAll(filepath.Dir(outputPath)) - if err != nil { - return errors.Wrap(err) - } - } - - // write files - buf := bytes.NewBuffer(nil) - for path := range outputFiles { - outputPath := filepath.Join(r.PackagePath, path) - err = r.FileSystem.MkdirAll(filepath.Dir(filepath.Join(r.PackagePath, path))) - if err != nil { - return errors.Wrap(err) - } - - buf.Reset() - w := ByteWriter{ - Writer: buf, - KeepReaderAnnotations: r.KeepReaderAnnotations, - ClearAnnotations: r.ClearAnnotations, - } - if err = w.Write(outputFiles[path]); err != nil { - return errors.Wrap(err) - } - - if err := r.FileSystem.WriteFile(outputPath, buf.Bytes()); err != nil { - return errors.Wrap(err) - } - } - - return nil -} - -func (r LocalPackageWriter) errorIfMissingRequiredAnnotation(nodes []*yaml.RNode) error { - for i := range nodes { - for _, s := range requiredResourcePackageAnnotations { - key, err := nodes[i].Pipe(yaml.GetAnnotation(s)) - if err != nil { - return errors.Wrap(err) - } - if key == nil || key.YNode() == nil || key.YNode().Value == "" { - return errors.Errorf( - "resources must be annotated with %s to be written to files", s) - } - } - } - return nil -} - -func (r LocalPackageWriter) indexByFilePath(nodes []*yaml.RNode) (map[string][]*yaml.RNode, error) { - outputFiles := map[string][]*yaml.RNode{} - for i := range nodes { - // parse the file write path - node := nodes[i] - value, err := node.Pipe(yaml.GetAnnotation(kioutil.PathAnnotation)) - if err != nil { - // this should never happen if errorIfMissingRequiredAnnotation was run - return nil, errors.Wrap(err) - } - path := value.YNode().Value - outputFiles[path] = append(outputFiles[path], node) - - if filepath.IsAbs(path) { - return nil, errors.Errorf("package paths may not be absolute paths") - } - if strings.Contains(filepath.Clean(path), "..") { - return nil, fmt.Errorf("resource must be written under package %s: %s", - r.PackagePath, filepath.Clean(path)) - } - } - return outputFiles, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/tree.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/tree.go deleted file mode 100644 
index a14181578..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/kio/tree.go +++ /dev/null @@ -1,519 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package kio - -import ( - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strings" - - "github.com/xlab/treeprint" - "sigs.k8s.io/kustomize/kyaml/kio/kioutil" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -type TreeStructure string - -const ( - // TreeStructurePackage configures TreeWriter to generate the tree structure off of the - // Resources packages. - TreeStructurePackage TreeStructure = "directory" - - // TreeStructureOwners configures TreeWriter to generate the tree structure off of the - // Resource owners. - TreeStructureGraph TreeStructure = "owners" -) - -var GraphStructures = []string{string(TreeStructureGraph), string(TreeStructurePackage)} - -// TreeWriter prints the package structured as a tree. -// TODO(pwittrock): test this package better. it is lower-risk since it is only -// used for printing rather than updating or editing. -type TreeWriter struct { - Writer io.Writer - Root string - Fields []TreeWriterField - Structure TreeStructure - OpenAPIFileName string -} - -// TreeWriterField configures a Resource field to be included in the tree -type TreeWriterField struct { - yaml.PathMatcher - Name string - SubName string -} - -func (p TreeWriter) packageStructure(nodes []*yaml.RNode) error { - for i := range nodes { - if err := kioutil.CopyLegacyAnnotations(nodes[i]); err != nil { - return err - } - } - indexByPackage := p.index(nodes) - - // create the new tree - tree := treeprint.New() - tree.SetValue(p.Root) - - // add each package to the tree - treeIndex := map[string]treeprint.Tree{} - keys := p.sort(indexByPackage) - for _, pkg := range keys { - // create a branch for this package -- search for the parent package and create - // the branch under it -- requires that the keys are sorted - branch := tree - for parent, subTree := range treeIndex { - if strings.HasPrefix(pkg, parent) { - // found a package whose path is a prefix to our own, use this - // package if a closer one isn't found - branch = subTree - // don't break, continue searching for more closely related ancestors - } - } - - // create a new branch for the package - createOk := pkg != "." 
// special edge case logic for tree on current working dir - if createOk { - branch = branch.AddBranch(branchName(p.Root, pkg, p.OpenAPIFileName)) - } - - // cache the branch for this package - treeIndex[pkg] = branch - - // print each resource in the package - for i := range indexByPackage[pkg] { - var err error - if _, err = p.doResource(indexByPackage[pkg][i], "", branch); err != nil { - return err - } - } - } - - _, err := io.WriteString(p.Writer, tree.String()) - return err -} - -// branchName takes the root directory and relative path to the directory -// and returns the branch name -func branchName(root, dirRelPath, openAPIFileName string) string { - name := filepath.Base(dirRelPath) - _, err := os.Stat(filepath.Join(root, dirRelPath, openAPIFileName)) - if !os.IsNotExist(err) { - // add Pkg: prefix indicating that it is a separate package as it has - // openAPIFile - return fmt.Sprintf("Pkg: %s", name) - } - return name -} - -// Write writes the ascii tree to p.Writer -func (p TreeWriter) Write(nodes []*yaml.RNode) error { - switch p.Structure { - case TreeStructurePackage: - return p.packageStructure(nodes) - case TreeStructureGraph: - return p.graphStructure(nodes) - } - - // If any resource has an owner reference, default to the graph structure. Otherwise, use package structure. - for _, node := range nodes { - if owners, _ := node.Pipe(yaml.Lookup("metadata", "ownerReferences")); owners != nil { - return p.graphStructure(nodes) - } - } - return p.packageStructure(nodes) -} - -// node wraps a tree node, and any children nodes -type node struct { - p TreeWriter - *yaml.RNode - children []*node -} - -func (a node) Len() int { return len(a.children) } -func (a node) Swap(i, j int) { a.children[i], a.children[j] = a.children[j], a.children[i] } -func (a node) Less(i, j int) bool { - return compareNodes(a.children[i].RNode, a.children[j].RNode) -} - -// Tree adds this node to the root -func (a node) Tree(root treeprint.Tree) error { - sort.Sort(a) - branch := root - var err error - - // generate a node for the Resource - if a.RNode != nil { - branch, err = a.p.doResource(a.RNode, "Resource", root) - if err != nil { - return err - } - } - - // attach children to the branch - for _, n := range a.children { - if err := n.Tree(branch); err != nil { - return err - } - } - return nil -} - -// graphStructure writes the tree using owners for structure -func (p TreeWriter) graphStructure(nodes []*yaml.RNode) error { - resourceToOwner := map[string]*node{} - root := &node{} - // index each of the nodes by their owner - for _, n := range nodes { - ownerVal, err := ownerToString(n) - if err != nil { - return err - } - var owner *node - if ownerVal == "" { - // no owner -- attach to the root - owner = root - } else { - // owner found -- attach to the owner - var found bool - owner, found = resourceToOwner[ownerVal] - if !found { - // initialize the owner if not found - resourceToOwner[ownerVal] = &node{p: p} - owner = resourceToOwner[ownerVal] - } - } - - nodeVal, err := nodeToString(n) - if err != nil { - return err - } - val, found := resourceToOwner[nodeVal] - if !found { - // initialize the node if not found -- may have already been initialized if it - // is the owner of another node - resourceToOwner[nodeVal] = &node{p: p} - val = resourceToOwner[nodeVal] - } - val.RNode = n - owner.children = append(owner.children, val) - } - - for k, v := range resourceToOwner { - if v.RNode == nil { - return fmt.Errorf( - "owner '%s' not found in input, but found as an owner of input objects", k) - } - } 
- - // print the tree - tree := treeprint.New() - if err := root.Tree(tree); err != nil { - return err - } - - _, err := io.WriteString(p.Writer, tree.String()) - return err -} - -// nodeToString generates a string to identify the node -- matches ownerToString format -func nodeToString(node *yaml.RNode) (string, error) { - meta, err := node.GetMeta() - if err != nil { - return "", err - } - - return fmt.Sprintf("%s %s/%s", meta.Kind, meta.Namespace, meta.Name), nil -} - -// ownerToString generate a string to identify the owner -- matches nodeToString format -func ownerToString(node *yaml.RNode) (string, error) { - meta, err := node.GetMeta() - if err != nil { - return "", err - } - namespace := meta.Namespace - - owners, err := node.Pipe(yaml.Lookup("metadata", "ownerReferences")) - if err != nil { - return "", err - } - if owners == nil { - return "", nil - } - - elements, err := owners.Elements() - if err != nil { - return "", err - } - if len(elements) == 0 { - return "", err - } - owner := elements[0] - var kind, name string - - if value := owner.Field("kind"); !value.IsNilOrEmpty() { - kind = value.Value.YNode().Value - } - if value := owner.Field("name"); !value.IsNilOrEmpty() { - name = value.Value.YNode().Value - } - - return fmt.Sprintf("%s %s/%s", kind, namespace, name), nil -} - -// index indexes the Resources by their package -func (p TreeWriter) index(nodes []*yaml.RNode) map[string][]*yaml.RNode { - // index the ResourceNodes by package - indexByPackage := map[string][]*yaml.RNode{} - for i := range nodes { - meta, err := nodes[i].GetMeta() - if err != nil || meta.Kind == "" { - // not a resource - continue - } - pkg := filepath.Dir(meta.Annotations[kioutil.PathAnnotation]) - indexByPackage[pkg] = append(indexByPackage[pkg], nodes[i]) - } - return indexByPackage -} - -func compareNodes(i, j *yaml.RNode) bool { - metai, _ := i.GetMeta() - metaj, _ := j.GetMeta() - pi := metai.Annotations[kioutil.PathAnnotation] - pj := metaj.Annotations[kioutil.PathAnnotation] - - // compare file names - if filepath.Base(pi) != filepath.Base(pj) { - return filepath.Base(pi) < filepath.Base(pj) - } - - // compare namespace - if metai.Namespace != metaj.Namespace { - return metai.Namespace < metaj.Namespace - } - - // compare name - if metai.Name != metaj.Name { - return metai.Name < metaj.Name - } - - // compare kind - if metai.Kind != metaj.Kind { - return metai.Kind < metaj.Kind - } - - // compare apiVersion - if metai.APIVersion != metaj.APIVersion { - return metai.APIVersion < metaj.APIVersion - } - return true -} - -// sort sorts the Resources in the index in display order and returns the ordered -// keys for the index -// -// Packages are sorted by package name -// Resources within a package are sorted by: [filename, namespace, name, kind, apiVersion] -func (p TreeWriter) sort(indexByPackage map[string][]*yaml.RNode) []string { - var keys []string - for k := range indexByPackage { - pkgNodes := indexByPackage[k] - sort.Slice(pkgNodes, func(i, j int) bool { return compareNodes(pkgNodes[i], pkgNodes[j]) }) - keys = append(keys, k) - } - - // return the package names sorted lexicographically - sort.Strings(keys) - return keys -} - -func (p TreeWriter) doResource(leaf *yaml.RNode, metaString string, branch treeprint.Tree) (treeprint.Tree, error) { - meta, _ := leaf.GetMeta() - if metaString == "" { - path := meta.Annotations[kioutil.PathAnnotation] - path = filepath.Base(path) - metaString = path - } - - value := fmt.Sprintf("%s %s", meta.Kind, meta.Name) - if len(meta.Namespace) > 0 { - 
value = fmt.Sprintf("%s %s/%s", meta.Kind, meta.Namespace, meta.Name) - } - - fields, err := p.getFields(leaf) - if err != nil { - return nil, err - } - - n := branch.AddMetaBranch(metaString, value) - for i := range fields { - field := fields[i] - - // do leaf node - if len(field.matchingElementsAndFields) == 0 { - n.AddNode(fmt.Sprintf("%s: %s", field.name, field.value)) - continue - } - - // do nested nodes - b := n.AddBranch(field.name) - for j := range field.matchingElementsAndFields { - elem := field.matchingElementsAndFields[j] - b := b.AddBranch(elem.name) - for k := range elem.matchingElementsAndFields { - field := elem.matchingElementsAndFields[k] - b.AddNode(fmt.Sprintf("%s: %s", field.name, field.value)) - } - } - } - - return n, nil -} - -// getFields looks up p.Fields from leaf and structures them into treeFields. -// TODO(pwittrock): simplify this function -func (p TreeWriter) getFields(leaf *yaml.RNode) (treeFields, error) { - fieldsByName := map[string]*treeField{} - - // index nested and non-nested fields - for i := range p.Fields { - f := p.Fields[i] - seq, err := leaf.Pipe(&f) - if err != nil { - return nil, err - } - if seq == nil { - continue - } - - if fieldsByName[f.Name] == nil { - fieldsByName[f.Name] = &treeField{name: f.Name} - } - - // non-nested field -- add directly to the treeFields list - if f.SubName == "" { - // non-nested field -- only 1 element - val, err := yaml.String(seq.Content()[0], yaml.Trim, yaml.Flow) - if err != nil { - return nil, err - } - fieldsByName[f.Name].value = val - continue - } - - // nested-field -- create a parent elem, and index by the 'match' value - if fieldsByName[f.Name].subFieldByMatch == nil { - fieldsByName[f.Name].subFieldByMatch = map[string]treeFields{} - } - index := fieldsByName[f.Name].subFieldByMatch - for j := range seq.Content() { - elem := seq.Content()[j] - matches := f.Matches[elem] - str, err := yaml.String(elem, yaml.Trim, yaml.Flow) - if err != nil { - return nil, err - } - - // map the field by the name of the element - // index the subfields by the matching element so we can put all the fields for the - // same element under the same branch - matchKey := strings.Join(matches, "/") - index[matchKey] = append(index[matchKey], &treeField{name: f.SubName, value: str}) - } - } - - // iterate over collection of all queried fields in the Resource - for _, field := range fieldsByName { - // iterate over collection of elements under the field -- indexed by element name - for match, subFields := range field.subFieldByMatch { - // create a new element for this collection of fields - // note: we will convert name to an index later, but keep the match for sorting - elem := &treeField{name: match} - field.matchingElementsAndFields = append(field.matchingElementsAndFields, elem) - - // iterate over collection of queried fields for the element - for i := range subFields { - // add to the list of fields for this element - elem.matchingElementsAndFields = append(elem.matchingElementsAndFields, subFields[i]) - } - } - // clear this cached data - field.subFieldByMatch = nil - } - - // put the fields in a list so they are ordered - fieldList := treeFields{} - for _, v := range fieldsByName { - fieldList = append(fieldList, v) - } - - // sort the fields - sort.Sort(fieldList) - for i := range fieldList { - field := fieldList[i] - // sort the elements under this field - sort.Sort(field.matchingElementsAndFields) - - for i := range field.matchingElementsAndFields { - element := field.matchingElementsAndFields[i] - // sort the 
elements under a list field by their name - sort.Sort(element.matchingElementsAndFields) - // set the name of the element to its index - element.name = fmt.Sprintf("%d", i) - } - } - - return fieldList, nil -} - -// treeField wraps a field node -type treeField struct { - // name is the name of the node - name string - - // value is the value of the node -- may be empty - value string - - // matchingElementsAndFields is a slice of fields that go under this as a branch - matchingElementsAndFields treeFields - - // subFieldByMatch caches matchingElementsAndFields indexed by the name of the matching elem - subFieldByMatch map[string]treeFields -} - -// treeFields wraps a slice of treeField so they can be sorted -type treeFields []*treeField - -func (nodes treeFields) Len() int { return len(nodes) } - -func (nodes treeFields) Less(i, j int) bool { - iIndex, iFound := yaml.FieldOrder[nodes[i].name] - jIndex, jFound := yaml.FieldOrder[nodes[j].name] - if iFound && jFound { - return iIndex < jIndex - } - if iFound { - return true - } - if jFound { - return false - } - - if nodes[i].name != nodes[j].name { - return nodes[i].name < nodes[j].name - } - if nodes[i].value != nodes[j].value { - return nodes[i].value < nodes[j].value - } - return false -} - -func (nodes treeFields) Swap(i, j int) { nodes[i], nodes[j] = nodes[j], nodes[i] } diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/Makefile b/src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/Makefile deleted file mode 100644 index 8de66aa29..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/Makefile +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2020 The Kubernetes Authors. -# SPDX-License-Identifier: Apache-2.0 - -MYGOBIN = $(shell go env GOBIN) -ifeq ($(MYGOBIN),) -MYGOBIN = $(shell go env GOPATH)/bin -endif -API_VERSION := "v1.21.2" -KIND_VERSION := "v0.11.1" - -.PHONY: all -all: \ - kustomizationapi/swagger.go \ - kubernetesapi/swagger.go \ - kubernetesapi/openapiinfo.go - -.PHONY: clean -clean: - rm kustomizationapi/swagger.go - rm kubernetesapi/openapiinfo.go - -# This will remove all currently built-in schema, -# so think twice before deleting. -# To replace what this will delete typically requires the ability -# to contact a live kubernetes API server. -.PHONY: nuke -nuke: clean - rm -r kubernetesapi/* - -$(MYGOBIN)/go-bindata: - go install github.com/go-bindata/go-bindata/v3/go-bindata - -$(MYGOBIN)/kind: - ( \ - set -e; \ - d=$(shell mktemp -d); cd $$d; \ - wget -O ./kind https://github.com/kubernetes-sigs/kind/releases/download/$(KIND_VERSION)/kind-$(shell uname)-amd64; \ - chmod +x ./kind; \ - mv ./kind $(MYGOBIN); \ - rm -rf $$d; \ - ) - -kustomizationapi/swagger.go: $(MYGOBIN)/go-bindata kustomizationapi/swagger.json - $(MYGOBIN)/go-bindata \ - --pkg kustomizationapi \ - -o kustomizationapi/swagger.go \ - kustomizationapi/swagger.json - -.PHONY: kubernetesapi/openapiinfo.go -kubernetesapi/openapiinfo.go: - ./scripts/makeOpenApiInfoDotGo.sh - -.PHONY: kubernetesapi/swagger.json -kubernetesapi/swagger.json: $(MYGOBIN)/kind $(MYGOBIN)/kustomize - ./scripts/fetchSchemaFromCluster.sh $(API_VERSION) - -.PHONY: kubernetesapi/swagger.go -kubernetesapi/swagger.go: $(MYGOBIN)/go-bindata kubernetesapi/swagger.json - ./scripts/generateSwaggerDotGo.sh $(API_VERSION) - -$(MYGOBIN)/kustomize: - $(shell cd ../.. 
&& MYGOBIN=$(MYGOBIN) make $(MYGOBIN)/kustomize) diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/README.md b/src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/README.md deleted file mode 100644 index fbe8103c3..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/README.md +++ /dev/null @@ -1,94 +0,0 @@ -# Sampling New OpenAPI Data - -[OpenAPI schema]: ./kubernetesapi/ -[Kustomization schema]: ./kustomizationapi/ -[kind]: https://hub.docker.com/r/kindest/node/tags - -This document describes how to fetch OpenAPI data from a -live kubernetes API server. -The scripts used will create a clean [kind] instance for this purpose. - -## Replacing the default openapi schema version - -### Delete all currently built-in schema - -This will remove both the Kustomization and Kubernetes schemas: - -``` -make nuke -``` - -### Choose the new version to use - -The compiled-in schema version should maximize API availability with respect to all actively supported Kubernetes versions. For example, while 1.20, 1.21 and 1.22 are the actively supported versions, 1.21 is the best choice. This is because 1.21 introduces at least one new API and does not remove any, while 1.22 removes a large set of long-deprecated APIs that are still supported in 1.20/1.21. - -### Update the built-in schema to a new version - -In the Makefile in this directory, update the `API_VERSION` to your desired version. - -You may need to update the version of Kind these scripts use by changing `KIND_VERSION` in the Makefile in this directory. You can find compatibility information in the [kind release notes](https://github.com/kubernetes-sigs/kind/releases). - -In this directory, fetch the openapi schema and generate the -corresponding swagger.go for the kubernetes api: - -``` -make all -``` - -The above command will update the [OpenAPI schema] and the [Kustomization schema]. It will -create a directory kubernetesapi/v1212 and store the resulting -swagger.json and swagger.go files there. - -#### Precomputations - -To avoid expensive schema lookups, some functions have precomputed results based on the schema. Unit tests -ensure these are kept in sync with the schema; if these tests fail you will need to follow the suggested diff -to update the precomputed results. - -### Run all tests - -At the top of the repository, run the tests. - -``` -make prow-presubmit-check >& /tmp/k.txt; echo $? -``` - -The exit code should be zero; if not, examine `/tmp/k.txt`. - -## Generating additional schemas - -Instead of replacing the default version, you can specify a desired version as part of the make invocation: - -``` -rm kubernetesapi/swagger.go -make kubernetesapi/swagger.go API_VERSION=v1.21.2 -``` - -While the above commands generate the swagger.go files, they -do not make them available for use nor do they update the -info field reported by `kustomize openapi info`. To make the -newly fetched schema and swagger.go available: - -``` -rm kubernetesapi/openapiinfo.go -make kubernetesapi/openapiinfo.go -``` - -## Partial regeneration - -You can also regenerate the kubernetes api schemas specifically with: - -``` -rm kubernetesapi/swagger.go -make kubernetesapi/swagger.go -``` - -To fetch the schema without generating the swagger.go, you can -run: - -``` -rm kubernetesapi/swagger.json -make kubernetesapi/swagger.json -``` - -Note that generating the swagger.go will re-fetch the schema. 
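A minimal Go sketch of how the embedded schema is consumed, assuming the go-bindata asset name matches the generator's source path (`kubernetesapi/v1212/swagger.pb`); the `OpenAPIMustAsset`, `DefaultOpenAPI`, and `Info` identifiers come from the generated `openapiinfo.go` shown below, and the returned bytes are the gzip-decompressed `swagger.pb` contents per the generated reader:

```
package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi"
)

func main() {
	// Look up the go-bindata accessor for the compiled-in default schema
	// version (DefaultOpenAPI names the generated version package, e.g. "v1212").
	mustAsset := kubernetesapi.OpenAPIMustAsset[kubernetesapi.DefaultOpenAPI]

	// Assumed asset name; MustAsset panics on an unknown name and otherwise
	// returns the decompressed OpenAPI document bytes.
	data := mustAsset("kubernetesapi/v1212/swagger.pb")

	fmt.Printf("%s: %d bytes of OpenAPI data\n", kubernetesapi.Info, len(data))
}
```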
diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/openapiinfo.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/openapiinfo.go deleted file mode 100644 index c4e1c287a..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/openapiinfo.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by ./scripts/makeOpenApiInfoDotGo.sh; DO NOT EDIT. - -package kubernetesapi - -import ( - "sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1212" -) - -const Info = "{title:Kubernetes,version:v1.21.2}" - -var OpenAPIMustAsset = map[string]func(string) []byte{ - "v1212": v1212.MustAsset, -} - -const DefaultOpenAPI = "v1212" diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1212/swagger.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1212/swagger.go deleted file mode 100644 index 681dc97fa..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1212/swagger.go +++ /dev/null @@ -1,249 +0,0 @@ -// Code generated by go-bindata. (@generated) DO NOT EDIT. - - //Package v1212 generated by go-bindata.// sources: -// kubernetesapi/v1212/swagger.pb -package v1212 - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" -) - -func bindataRead(data []byte, name string) ([]byte, error) { - gz, err := gzip.NewReader(bytes.NewBuffer(data)) - if err != nil { - return nil, fmt.Errorf("read %q: %v", name, err) - } - - var buf bytes.Buffer - _, err = io.Copy(&buf, gz) - clErr := gz.Close() - - if err != nil { - return nil, fmt.Errorf("read %q: %v", name, err) - } - if clErr != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -type asset struct { - bytes []byte - info os.FileInfo -} - -type bindataFileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time -} - -// Name return file name -func (fi bindataFileInfo) Name() string { - return fi.name -} - -// Size return file size -func (fi bindataFileInfo) Size() int64 { - return fi.size -} - -// Mode return file mode -func (fi bindataFileInfo) Mode() os.FileMode { - return fi.mode -} - -// ModTime return file modify time -func (fi bindataFileInfo) ModTime() time.Time { - return fi.modTime -} - -// IsDir return file whether a directory -func (fi bindataFileInfo) IsDir() bool { - return fi.mode&os.ModeDir != 0 -} - -// Sys return file is sys mode -func (fi bindataFileInfo) Sys() interface{} { - return nil -} - -var _kubernetesapiV1212SwaggerPb = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\xfd\x7b\x7c\x24\xd7\x59\xe0\x8d\x7f\x5a\xd2\x78\x66\x9e\x4c\x12\xfb\xd8\x8e\xe3\xf6\xad\xdc\x63\x8f\xa4\x1e\xa9\x35\xad\xb9\x6b\xae\x6a\x69\x66\xac\x1e\x79\x46\x1e\xd9\xb2\x63\xc6\x1e\x97\xba\x8e\x5a\x35\xd3\xaa\xea\x54\x55\x6b\xac\x2c\xfe\xed\x8f\x84\x6c\x92\x4f\x96\xdd\x2c\x97\x6c\x16\xf6\x0d\x10\x76\xb3\xc9\x12\x12\xc2\x2d\x2c\x81\x6c\x16\xde\x80\x21\x60\x08\x09\x6f\x58\xb2\x04\x02\x01\xc2\x35\x01\xc2\x06\x78\x61\xf3\x7e\xce\xa5\x2e\xdd\x5d\xd5\xd5\x6a\x75\xb7\x5a\xa3\xe7\x9f\xb1\xdc\x55\x75\xea\xd4\xb9\x3c\xe7\x39\xdf\xe7\x39\xcf\x03\xfd\xe3\x99\x03\xe4\x6e\x80\x8b\x95\x45\x6a\x19\xd4\xa1\x36\xd9\xb9\x9a\xcd\x8c\x67\x33\xe3\xb9\xdf\xfd\xca\xfb\x12\xe4\x5d\x69\x78\x62\x4c\x2d\xeb\xf6\x98\xa6\xdb\x05\x73\x95\x5a\x6b\x99\x1b\xc7\xec\x8c\x6e\x8e\xad\x66\x17\xa9\xa3\x66\xc7\x6e\xaa\x4e\x61\x79\xcc\x50\x57\xa8\x5d\x56\x0b\xd4\x1e\xfb\x17\xde\xdf\x2f\x8d\x51\x43\x2b\x9b\xba\xe1\xd8\x25\xdd\xbb\xf4\x12\xf9\x99\x61\xf2\xae\x1d\x70\x87\x57\xe4\x35\x59\x56\xf2\x27\x12\xbc\x34\xa5\xb0\xac\x1a\x45\x6a\x2b\x8e\xa9\xa8\x86\x62\x2e\x5e\xa7\x05\x47\x31\x97\x94\x1b\xba\xa1\x29\xe7\x64\x99\xf3\xac\xcc\x8c\xa2\xd1\xb2\x45\x0b\xaa\x43\xb5\x09\xa5\x62\x53\xc5\x59\xa6\xca\x20\x2f\x66\x50\x29\xab\x96\xba\x42\x1d\x6a\x29\x37\x75\x67\x59\x51\x95\x92\x6e\x3b\x8a\x59\xa6\x96\xea\xe8\xa6\xa1\xe8\x86\xed\x50\x55\x1b\x51\x96\xf4\x92\x43\x2d\xaa\xf1\x37\x2a\xb6\x6e\x14\x4b\x54\xd1\x1d\xba\x22\x1e\xe4\x65\x2e\xe9\xb4\xa4\xcd\xd3\x12\x2d\x38\xa6\x15\x28\x3b\x93\x1e\xe1\xaf\x9b\x76\x3f\x67\x41\x7c\xcd\x25\xb7\x19\xb4\xaa\x1a\x8f\xdf\xae\x96\xcb\x25\xbd\xc0\x6b\x30\x76\xdd\x36\x8d\xea\x5f\xd6\xd4\x95\xd2\xf8\xde\xe0\x2f\xab\x86\x96\xb9\xe1\xf5\x50\xa6\x6c\x99\x8e\xb9\x58\x59\x1a\x7f\xa0\xb6\xa0\x13\xb6\x63\x51\x75\xe5\x14\xaf\xce\xf8\x81\x26\xca\xa8\x7a\x62\xa2\x3f\x3d\x96\xce\xdf\x80\x39\xe8\x1f\x3f\x70\x80\xcc\xc0\x05\xe8\xbb\x7c\x91\x4c\xc2\x19\x38\xb5\x77\x4c\xa3\x4b\xba\xa1\xb3\xc2\xec\x31\xdd\xe4\xa3\x40\x2d\xeb\x2b\x6a\x61\x59\x37\xd8\xb0\x28\xdf\x28\xb2\x1f\xec\xcc\x0a\x75\xd4\xcc\x6a\x36\xf3\x34\x2b\xf3\xdc\x2a\x35\x1c\xb8\x07\xfa\x0f\x1d\xc8\x92\xdb\xe1\x35\xb0\xe7\x29\x43\xad\x38\xcb\xa6\xa5\xbf\x89\x6a\x57\x76\x2c\x3b\x4e\xd9\xbe\xfe\x10\xdc\xf9\xe2\xa8\x5f\xbd\x51\xb5\xc0\xde\x43\x76\x91\xdb\x78\xc5\xe0\xfa\x22\x3c\x54\x75\x43\xd1\x32\x2b\xe5\xd1\x55\x6a\xd9\xba\x69\x8c\xb2\x61\x41\xce\x90\x53\xfc\xd7\x09\xa5\x76\xa8\x02\xbb\x3e\x51\x3d\x6e\x40\x3e\x3b\xa1\xc8\xb1\x07\xf9\x37\x0f\xc0\x37\xfb\xc9\x3f\xf5\x27\xff\xa1\x9f\xec\x78\x63\x85\x5a\x6b\xc9\x57\xfa\xd5\x52\xc9\xbc\xc9\xbf\x24\x67\x9a\x37\x56\x54\xeb\x86\xad\x58\xf4\x8d\x15\x6a\x3b\xb6\x22\x06\x2b\x65\x9f\x68\xcb\x81\xb2\x56\xa6\x4a\x2a\x77\xf9\xf2\xc5\xc7\x27\xaf\x5c\x4c\x65\x94\x79\x6a\xb1\x37\x29\xce\xb2\xea\x28\x9a\xa9\x18\xa6\xa3\xe8\x2b\xe5\x12\x5d\xa1\x86\xa3\x2c\x7a\x65\xae\xa8\x6b\x8a\x5e\x34\x4c\x8b\x0d\x60\xdd\x56\x96\x4a\x6a\x51\x51\x0d\x2d\x70\x8b\x6a\x51\xc5\x66\x4f\xa9\x0e\x1f\x90\x36\x2f\x7a\xd0\xe6\xdf\x6b\x51\xd6\x62\x19\x65\xaa\xa4\xf3\xda\xd8\xcb\x66\xa5\xa4\xf1\xd7\xa9\xb6\x5d\x59\xa1\x35\x05\x59\xd4\xa9\x58\x06\xd5\x58\x61\xaa\xb1\xa6\xd8\x65\x5a\xd0\x97\xf4\x82\xa2\x1b\x0e\xb5\x56\xd5\xd2\x88\x62\x98\x16\xaf\x96\xb3\x4c\xd7\xdc\x42\xfc\xf7\x2a\x37\xf5\x52\x89\xd5\x47\xe3\xcf\xbb\x9f\x2c\x5a\x43\xd1\x2a\x96\x6e\x14\xd9\x54\xa2\xb6\xcd\x2b\x36\xb3\x24\x3e\x4c\xb7\x45\xa5\x44\xe3\x8d\xc8\xaf\x65\x53\x8b\x5d\x12\x4d\xa0\xc9\xdb\xa9\xb2\x44\x55\xa7\x62\x51\xa5\xa8\x3a\x54\xa9\xe9\x06\x59\x12\x35\xd4\xc5\x12\xd5\x14\xdd\x50\xd8\xf0\xe3\x95\x8b\x2a\x36\x75\x67\x48\x7f\x8e\xef\x5c\x34\xc
d\x12\x55\x8d\x1f\x48\x24\xf2\x5f\xdd\x0d\x7f\xb1\x9b\xfc\xe9\xee\xe4\x1f\xef\x76\x07\xc1\x2f\xed\x7e\x72\x99\x2a\x05\xd3\x70\x74\xa3\x42\x15\xb3\xcc\x45\x87\x6c\xe0\x45\xd6\x1e\x8e\x72\x73\x99\x1a\xac\x4d\x2d\x9d\xae\xb2\x0f\x5f\x31\x79\x1b\xdb\x95\x92\x63\x2b\x4b\x96\xb9\x12\x68\xb9\x8c\x32\xaf\x1b\x05\xd9\xcf\xab\x6a\xa9\x42\x59\x15\x65\xab\xf2\x59\x46\xb5\x11\xa5\x20\x3b\x92\xf5\x80\x69\x94\xd6\x3c\xd9\xe6\x55\x44\x3c\xca\x0b\x57\x95\xb2\x45\x57\x75\xb3\x62\x2b\xbc\xce\xf2\xd5\x62\x4c\xea\x1a\x35\x1c\xbd\xa0\x96\xe4\x35\x4f\x70\xd9\xca\x10\x7d\xb1\x40\xcb\x8e\xb2\x64\x5a\xbc\x6c\x51\xa4\xb9\xe4\xbd\x64\x98\x8f\xc1\x40\xaf\xb3\xea\x58\x94\xcb\x63\xb5\xb6\x2a\x3a\x1b\xe1\x54\xf4\x8a\x45\x0b\x66\xd1\xd0\xdf\x44\xbd\xae\x94\x23\x8c\x6a\x75\x8f\xb1\x27\x94\x92\x69\x14\xa9\xc5\x7e\xd2\x35\xd6\x9c\xce\x32\x6b\x8d\x0a\x65\x12\x99\xbe\x58\xd6\xa5\xc4\x1e\x2a\x52\x83\x5a\x6a\xa9\xb4\xa6\x2c\xe9\xab\xfc\xea\x92\xbe\xe4\x50\x6a\x28\x2b\xba\x51\x71\xa8\x3d\xac\x98\x96\xa8\xdb\x92\x5e\xac\xc8\xc7\xc4\x82\xa2\x98\x46\xe0\x5b\x46\xea\x46\xb3\x45\xed\xb2\x69\x68\xee\x62\x71\x28\x7b\x40\xb9\x42\x6d\xb3\x62\x15\xe8\x39\x56\x05\xaa\x29\xd4\xb2\x58\x5b\x99\x45\x51\x41\x79\xa7\xf7\x45\x8e\x79\x83\x1a\xde\x27\x8b\x3e\x54\x0c\x4a\x35\x5b\xdc\x65\xeb\xb6\xc3\x7e\x62\xeb\xd0\x08\x6b\xb0\x95\x8a\xcd\x1a\xcb\x76\x54\x8b\xcf\x6a\xdd\x12\x6b\x14\x2b\xd8\xac\x38\xd5\x3d\xce\xc7\x73\x46\xb9\xcc\x5e\x7d\x53\xb7\xe9\x48\xf0\x2d\xac\x67\xe4\x74\x34\x79\xdd\x78\x39\x52\x56\xf9\xcb\x18\xaf\x21\xeb\x1e\xaa\xaf\x52\xcd\xff\x9d\x7d\x2c\xff\xb8\xd8\x66\xe1\xe5\xf2\x0a\xb3\x91\xee\x0d\x6e\x83\xbe\xe8\x28\x37\xe8\xda\x88\xb2\x58\x71\xfc\x9f\x4b\xaa\xc3\xde\x6f\x1b\x6a\xd9\x5e\x36\x9d\x11\xe5\xe6\xb2\x5e\x58\xe6\x93\xd2\x08\x34\x88\x77\xbf\x37\x90\x65\xf5\xc5\x14\x1a\x95\x4a\x80\x14\xa4\x4c\x82\x15\x2c\xca\x16\xfd\x11\x65\xc5\xd4\xf8\xb8\x1a\x61\x1d\xaf\xd1\x12\x75\x98\x58\x5b\x62\xab\x3e\x17\x21\xba\x65\x3b\xb5\x8d\x51\x2a\xb1\x99\xab\x1b\x85\x52\x45\x13\xb2\x83\xdd\x2a\x3e\x94\xb5\xab\x6a\xf3\x01\xc9\xfe\x2b\x3a\xe5\x06\x5d\x13\x82\xd3\x2f\x38\xe5\x7e\x71\x2a\x03\xf0\x64\x95\xbc\x61\x33\xc0\xae\x94\xcb\xa6\xc5\xea\xc2\x85\x83\x58\x2b\x74\x5b\x71\xac\x0a\xf5\xe5\x34\xef\x35\xde\xf9\x52\x22\x06\x5b\x4e\x34\x00\x1f\x7f\x0b\x62\xb9\x92\x53\xc6\x93\xde\x8b\x6b\xc1\xde\x62\x73\x95\xbd\x7a\x45\xb7\x6d\x2e\x95\x45\xd3\x08\x15\xc0\xce\xa4\x76\xb9\x23\x69\xfc\x36\xdb\x61\x02\x9a\x49\xbc\xb7\x25\xe0\xdb\x13\xe4\xdb\x12\xc9\x7f\xe9\x0a\xbc\xab\x93\x8a\x2d\xd5\x1c\x36\xbf\xd8\xe8\xb4\xf4\x82\x18\x8b\x42\x81\x5a\xf2\x6b\xe0\xf6\x8b\xa8\x89\x6e\x89\x46\xb0\x33\xca\x34\x5d\x52\x79\xdf\xb1\x09\xcc\x56\x63\x67\x59\x37\x8a\x99\xd4\xab\xab\xf4\xa8\x4e\xd6\xa4\xa4\x2e\xd2\x52\xa3\x9a\xf0\x1b\xc2\x6a\xf2\x8f\x00\xdf\x00\xf2\x75\x48\xfe\x35\xb8\x55\xf9\x1c\x94\xf4\x15\xdd\x61\x3d\xa8\x2a\x2b\xea\x8b\xfa\x4a\x65\x45\x31\x2a\x2b\x8b\xd4\x12\xb5\x10\x43\xc7\x16\xf5\x64\x55\xe2\x52\x55\x4e\x97\x82\x5a\x2a\x71\xb1\xc0\x17\x06\xa6\x58\xda\x0a\x7d\x91\x0b\x81\xfa\x25\x55\x7c\xdf\x0b\x6e\x67\xbd\x20\xc7\x95\x14\x5e\xbc\x3c\xa6\x63\x69\xaa\xa3\x0a\x7d\x55\x0c\x0a\x3e\x33\x0a\xaa\xc1\x46\x76\xc5\x0e\xce\x6d\x5b\x5d\x61\x83\x5d\x77\x74\x6f\x11\x10\xb5\x64\xcb\x15\xf5\x67\x2f\x7b\xb5\xf8\x16\xd6\x5c\x4c\x79\x71\x1c\xb1\x8c\x8b\x6f\x17\xc2\x5f\x7c\x1b\xbd\xc9\xa7\x81\xea\xce\x1c\x3e\xb3\xd8\xc4\x5b\x31\x2b\x06\x2f\x46\x7c\xe5\x50\xa5\xcc\x5e\xf6\x26\x6a\x99\xe2\x97\x61\x77\xb6\x09\x4d\x41\xe5\x02\xc6\x7d\xda\xed\x43\x36\xd3\x3c\x9d\x9c\xc9\x41\x36\xb4\x0b\xd5\xba\x4d\xd5\xaa\x58\xb6\xa8\x4d\xd9\xaa\x6a\x2e\x85\xc8\x4c\x56\x03\x8d\x2d\x79\x2b\xba\x41\xbd\xd5\xa5\x
6a\x91\xe6\x73\x7b\x55\xd5\x4b\x4c\x9b\xf0\xf5\x36\xf6\xc9\x85\x65\xd3\xb4\x29\x9f\x59\x8e\xe9\xce\x6b\xd9\x17\xac\x59\x54\xab\x58\xe1\xea\x9c\xca\x85\x24\xff\x20\xde\x48\xec\xdb\x64\x7d\xbc\xa2\xfd\xd6\x9d\x59\x52\xbc\x21\xe5\x2f\x8e\xee\x7a\x5b\xf3\x01\xba\xad\xd0\x95\xb2\xb3\x56\xad\x18\x78\x5a\x99\xea\xb0\x35\xb4\xd1\x07\x35\x12\x50\xfa\x52\x8d\x78\x62\xf2\xcc\x1b\x95\xc5\x8a\x6a\xa9\x86\x43\xa9\x14\xbe\xac\x76\x6e\x3f\x79\x93\x8f\xcb\xb8\x0a\xdb\x38\xf9\x35\xf7\xc4\xac\xa7\x80\x38\xa6\xa2\xdb\x76\x45\xaa\x86\x62\x97\xe5\xcd\x0f\x6f\xcd\x73\x87\xdb\xa8\x78\x9f\x6e\x33\x5d\xd4\x7b\xe5\xfa\xe4\xbe\x2b\xf2\xb9\xe6\x69\x57\xa8\x16\x2a\xfc\xb9\x06\x5c\x59\xb4\xd9\xcd\x86\xe3\x7d\x81\xe6\xa9\xfa\xb2\xfd\x58\x4f\x99\x2b\xd4\xd1\x57\x28\xfb\xf6\x25\x6a\xb9\x7b\xc6\x9a\xf5\xdd\x5f\xee\x58\x7f\x52\xc3\xae\x58\x6e\xf3\xa9\xee\x82\x2d\x3f\x4e\xb6\x9a\xf8\x64\x3e\x2d\xf9\xd2\xac\xd8\x2b\x6a\xa9\x44\x2d\xa5\xb0\x5c\x31\x6e\xd8\x6c\x1c\xa9\x0a\x13\x5e\x4a\x49\xb5\x8a\x6e\x37\xf3\xf9\x2e\x8a\x17\x7a\xba\x4d\x29\x1f\x76\x65\xd3\xb6\x75\x36\xdc\x64\xb3\xf1\xe1\x16\x9c\x5d\x95\xb2\xc6\x9a\xd1\xd7\xd4\xf9\x7b\xa8\x26\xfa\x83\x2b\x83\x72\xc9\x91\x23\x58\xee\xc2\x79\xad\x6f\xaa\xb6\x9c\x71\xde\x6e\x84\xb5\x49\xfd\x7a\x2b\xb4\x50\xd5\x66\x1d\x5c\xa8\x94\xf8\x1b\x75\x7f\xd8\x64\x52\x3b\xf8\x77\x8f\xef\x64\xdb\x8e\x22\xb5\x98\xf0\x9d\x80\x63\xe4\x48\xea\xd0\xae\x04\x19\x28\xab\xce\x72\xf2\x5e\x83\x89\x2f\x59\x8b\xaa\x4d\x5c\x6a\x80\x5d\x4a\x4b\xc1\xfd\x9e\x44\x22\xff\x02\x3c\x4f\xae\xa6\x9e\xf5\x9e\x9d\x90\xb5\xe6\x45\xb0\xae\x60\x7b\x4f\xc5\x2e\x98\x65\x3a\xa2\xd8\x95\xc2\x32\xeb\x39\xae\xfd\x52\x75\xc5\xe6\x77\x94\x2d\x93\xb7\x52\x6a\xb7\xc7\x31\x82\x6f\xb8\x0c\x8f\x93\x8b\xc9\x19\x77\x59\x18\x9d\x59\x52\x06\xd9\x9c\x19\xe4\x92\x5c\x48\x36\xb3\xe2\x94\x2b\xbc\x6f\xcb\x16\x75\x9c\x35\xa5\x6c\xb1\xef\xd3\x32\xa9\xdb\xc4\x0f\xc1\xb5\xe6\xff\x4d\xc0\xdf\x27\xc8\xdf\x25\x92\x7f\x93\x70\x0b\xfd\xd5\x44\xed\xb2\x6f\x53\xc7\x1d\x61\x8e\xa5\xea\x4c\xc0\x1a\xca\x4d\xd6\x15\xee\x9d\x6e\x77\xb1\xdb\xdc\x41\xcf\x44\xc4\xa2\x9c\xc7\x1a\x57\x2c\x98\x60\xa3\x0a\xdf\x74\x4f\x8c\x8d\x05\x80\x80\x6e\x8e\x69\x66\xc1\x1e\xe3\x83\x9a\x09\xd2\x31\x3e\x2c\x47\xd5\xb2\x3e\xa6\x96\xf5\xd1\x82\x69\xb0\x9d\x82\x3d\xb6\xd7\x7d\xe1\xa8\xf7\xc2\x25\x3e\xff\x1c\x55\x2f\xd9\x19\x80\xe0\x6a\x5b\x31\x6c\xea\xa4\x5e\x5b\xf3\x35\xc1\xaf\xff\x9d\x3e\xf8\x7c\x1f\xf9\x5c\x5f\xf2\x33\x7d\xee\xd7\xbf\xbf\xaf\xe6\xfe\xc7\xb9\x6c\xf2\xe4\xb7\xad\x2c\x9b\x37\xeb\x14\x23\xb6\x2e\x97\xcb\x25\x5d\xcc\x47\x4f\xa8\xb0\x81\xcf\x7b\x62\x59\x2f\x2e\x97\xd6\xf8\xbe\x64\x65\x85\x1a\x6c\xe2\x3b\xc1\xf6\xab\x7a\x97\xdc\xd5\xb1\x0f\xf3\x4b\x62\x32\x4e\xc8\xd7\xda\x17\xb3\x7b\x37\xa1\x5d\xef\x0a\xab\x7b\xb0\x71\xdf\x9b\x80\xef\x4b\x90\xf7\x24\x92\xdf\xe3\x0d\xad\xf2\x93\xfa\x0a\x65\x42\xd6\xdd\xf2\xb1\xef\x1b\x93\xd4\x8d\x2b\x29\x5c\xd0\xf1\x59\xc9\x75\x5f\x26\x20\xc4\x1e\xca\x5d\x59\xd5\x52\x69\x44\xb1\x68\x51\xb5\xb4\x12\xb5\x85\x64\x32\xd6\x14\xb5\xe0\xe8\xab\xba\xb3\xc6\xa4\xb1\x6e\xb8\xff\x97\x49\xbd\xc6\x11\x6f\x9c\xa7\x05\xd3\xd0\xec\xaa\xa9\xfe\xd1\x04\x7c\x38\x41\x7e\x38\x91\xfc\x80\x57\xc1\x77\x26\x9e\x16\x6a\xb0\x69\x05\x31\x20\xaf\x09\xb5\x0b\x96\xbe\xc8\xc5\xb2\xf8\x70\x31\x63\xe5\x6a\xeb\x2c\xd3\x15\x21\x88\x05\xd4\xe2\x15\xd3\xb4\x11\x29\xea\x46\xe4\xbd\x2b\xe6\x2a\x5f\xcb\x03\x8a\xb1\x32\xcf\x97\xdf\xb5\xda\xbe\xcd\xa4\x76\x08\x94\x16\x20\x04\xe4\x43\xc3\x30\x25\x90\xe8\x52\xc9\xbc\xc9\x96\x0a\xcb\x2c\x65\x3c\xf4\x10\x8e\x47\xd9\xad\x76\x61\x99\xae\xa8\x1e\x04\xfd\xf2\x10\xf9\xfe\x1d\x70\x7f\xa0\x90\x49\xb7\x0c\x8f\x87\x7e\xb4\x19\x1e\x7a\xbe\x64\xde\
x9c\xe7\x65\xf7\x04\x0c\x4d\xf3\xd7\x9d\x0f\xf9\x2c\xc9\x45\xfd\xfa\x22\x0a\x6d\x01\x85\xde\x88\x47\xa1\x8f\x91\xf3\x12\x85\x36\x1a\xa2\x12\x8b\xfa\xdd\x81\x4c\x14\x99\x28\x32\x51\x64\xa2\xc8\x44\x91\x89\x22\x13\x45\x26\x8a\x4c\x14\x99\x28\x32\x51\x64\xa2\xc8\x44\x37\x9d\x89\x1e\x85\xc3\xe4\x60\x2a\xeb\x71\xcd\xd7\x05\x99\xa8\xbf\x83\xab\x07\xa2\x88\x2b\x11\x57\x22\xae\xdc\xc6\xb8\xf2\xb3\x43\x30\x2a\x70\xa5\xed\x98\x96\x5a\xa4\x3e\xa0\x94\x6c\xb2\x60\xeb\x86\xa9\xf9\xde\x99\x3f\x38\x44\x7e\x6f\x00\x40\xde\x7e\x6d\x35\x9b\xfc\x70\x33\x18\x72\x6a\x7e\xe6\x92\xa9\xf5\x86\x43\xe6\xdd\xfc\x75\xf3\xe2\x0b\x16\xb2\xb2\x6a\x88\x1b\x5b\xc0\x8d\x4f\xc5\xe3\xc6\x71\x72\x40\xe2\xc6\xea\x21\x26\x01\xa3\x6c\xfd\x20\x5d\x44\xb0\x88\x60\x11\xc1\x22\x82\x45\x04\x8b\x08\x16\x11\x2c\x22\x58\x44\xb0\x88\x60\x11\xc1\x22\x82\x45\x04\x8b\xdd\x06\x8b\x87\x60\x9c\x1c\x48\x65\x3c\xb0\x78\x67\x10\x2c\xca\x9d\x1b\x52\x45\xa4\x8a\x48\x15\x91\x2a\x06\xa8\xe2\xc7\x5e\x05\x27\x04\x55\x74\x91\x12\x2f\xa5\xd6\xf9\xd1\xa6\xa5\x25\xbb\xc2\x65\x99\x5a\x28\x50\xdb\x66\xfb\x0d\x7a\xd3\x26\xdf\x80\xd4\x9f\xee\x80\xbb\xab\x1e\xf6\xbc\x1e\xe5\xaa\xa3\xa8\xca\x3c\x2d\x2d\xcd\x8b\xe7\x27\xf9\xf3\x57\xf8\xf3\xe9\xac\xb8\x63\x32\xf8\xb8\xf4\x2e\x8c\x78\xa4\x6d\xec\x8f\xa3\xba\xdc\x55\x78\x16\x9e\x21\x03\x8b\xa6\xb6\x96\xe4\xff\x2a\x89\xf4\x63\x70\x3e\x8a\xd5\x65\xaa\x5b\x49\x7e\x68\x26\xa2\xae\xf9\x9f\xec\x83\x37\x08\x12\x78\x05\xe6\x38\x09\xcc\x43\xdb\x4a\x87\xe7\x59\xd1\x59\xf2\x34\x3c\x05\x3b\xa7\xc4\xfa\xde\xd6\xf2\xaf\xb1\xf2\xc7\xc9\x33\xb0\x00\xbb\xd8\x85\x72\xbb\x5f\x10\xc7\x34\x1f\x0c\x67\x9a\x3b\xc9\x8e\xb2\x69\x3b\x70\xbd\x1c\x8f\x34\x1f\x27\x17\x25\xd2\x0c\x1b\xdf\x12\x6c\x46\xd5\xaf\xde\x8d\xf2\xbd\x7d\xf0\x7d\x7d\xe4\x3d\x7d\xc9\xef\xf1\x04\xfc\xdf\x27\x9e\x66\x2b\xa5\x5c\xd8\x47\x14\xdd\xd0\xd8\xc8\x73\x15\x97\xaa\xcd\x64\x10\x30\x2e\x52\xa5\xcc\xca\x67\x1a\x7c\x46\x99\x34\x14\xdd\x10\x18\xc7\xb4\x94\x8a\xe1\x91\x20\x4d\xd1\xac\xb5\x2b\x15\x43\xd1\x74\x8b\x32\x31\x44\x3d\xbc\xc0\xb4\x04\xae\x86\x49\xbe\xe2\xee\xa6\xe4\x8e\x56\x59\xaa\x58\x5c\x63\x2f\x5b\x26\xfb\x28\xa6\xb0\x48\xa9\x27\x97\xd4\x8c\xb2\xc0\xdf\xc8\x77\x42\x5c\xc1\x99\x50\x46\x95\xc9\x52\x69\x82\x6b\x43\x9a\xb5\xa6\x58\x15\x83\xed\xb4\x99\xdc\x72\x55\x40\x59\x1c\xd5\x52\xb7\x89\xaa\x05\x85\xf4\xbb\xfb\xe0\xbb\xfa\xc8\xbb\xfa\x92\xdf\xe1\x35\xd0\x9f\x25\xb8\x02\xfd\xb8\x6a\xa8\x45\x6a\x89\xad\xa0\x38\xc6\x61\xdb\x66\x41\xe7\xda\x8d\xb7\xe9\x52\xf9\xae\xd5\xb4\x14\xa6\xfc\x3a\x6b\x9e\xba\xb7\xa2\xde\x60\xf5\x77\x96\xa9\x4d\x5d\x49\xca\xa4\xbb\x8b\xc3\x38\x11\x5a\xa4\x0a\x97\xe1\x7c\x83\x65\x5a\x4a\x76\xfc\x18\xbb\xd7\x52\x0b\x9c\xdb\x95\x4c\xa3\x28\xe4\x26\xdf\x04\x31\x7d\x55\xd5\x0d\xa1\xce\xf0\x4d\x86\x7f\x2f\x67\x19\x92\x2a\xb2\xad\xb1\xbb\x16\x16\xcd\x92\x6a\x14\x33\xa6\x55\x1c\x2b\xdf\x28\x8e\x55\x0c\xbd\x60\x6a\x74\x6c\xef\x8c\x3d\xc7\x4a\xc9\xa4\xf6\x04\xbf\x35\xd8\x30\xed\xd6\xb4\xc8\xd7\x06\x21\x25\x24\xf7\x22\xb7\xfe\x54\xfb\xa9\x17\x2c\xd3\xb8\x6e\x2e\xda\xe4\x97\x07\xc9\xcf\x0d\xc0\xab\xf9\x3d\x9e\x60\xb6\xe5\xc6\xc5\xd0\xf4\x55\x5d\xab\xa8\xa5\x2a\x8b\x90\x07\x09\xa6\x2c\xd3\xc8\x9b\x8b\x6d\xb1\x04\xb9\xc1\x35\x72\xec\x1f\x29\xe2\x65\xf9\xb3\xba\xed\x9c\x37\xad\xc9\x52\xc9\x8b\xb4\x61\xf7\xbe\x89\x27\x4a\x78\xb5\xdf\xf6\xe3\x8a\xc3\x27\xe2\xc5\x5d\x86\x8c\x48\x71\xc7\x3b\xdc\x35\xdc\x88\x76\xae\x97\x67\xd7\xf7\x86\x4b\xd8\x3d\x04\xf8\xb7\xb2\xae\x44\x13\x0f\x9a\x78\xd0\xc4\x83\x26\x1e\x34\xf1\xa0\x89\x07\x4d\x3c\x68\xe2\x41\x13\x0f\x9a\x78\xd0\xc4\x83\x26\x1e\x34\xf1\x74\xdb\xc4\x83\xc6\x1a\x34\xd6\xa0\xb1\x66\x1b\x1b\x6b\xde\x3f\x04\x07\x9b\x0a
\xe2\x5b\x1d\xad\x97\xfc\xd5\x20\xf9\x83\x81\xb0\x30\xbd\x6b\xcd\x71\xc0\xf6\x07\xea\xcd\xa4\x8f\x84\x86\xda\xad\x7a\xd3\xd6\xe4\x82\xdd\x77\xfd\xee\x42\x4c\x5d\x04\x85\x08\x0a\x11\x14\x22\x28\x44\x50\x88\xa0\x10\x41\x21\x82\x42\x04\x85\x08\x0a\x11\x14\x22\x28\x44\x50\x88\xa0\x10\x41\x21\x82\x42\x04\x85\x3d\x05\x0a\xff\x39\x01\xb7\x8f\x99\x65\x6a\xe8\xda\xd8\x6a\x76\xec\xfa\xcd\x1b\xf6\x18\xf9\x4a\x82\xfc\x51\x02\x6e\x13\x3f\x27\xaf\x16\xa9\xc3\x67\x9d\x5e\xa0\x8a\x5a\x28\x70\x9d\x83\x0b\x78\x4b\xb9\x5c\xa6\xc6\xcc\xb4\x92\x9f\xbf\x7c\x49\x79\x9a\x2e\x2a\x17\xe9\x1a\xd3\x64\x94\x21\xe9\x32\x69\x2b\xe5\xca\x62\x49\x2f\xc8\xad\xd0\x2a\xb5\xbc\x8a\x72\x35\x7f\x38\xbd\xb7\x48\x9d\x79\x51\xf8\xa4\x28\x7b\x86\x17\x2d\x4a\xbe\x48\xd7\x6c\xea\x8c\xbf\xbe\x8a\xc0\xdd\xbc\x31\x6a\x53\x67\xff\x75\xdb\x34\xf2\x47\xe1\x7e\x41\xd1\xee\x86\x3b\x39\x45\x7b\x35\xbc\xea\xc7\x12\xbb\x40\x8e\x9b\x38\x22\x46\x7e\x7f\x10\x1e\x94\x5e\xed\xe5\xb2\xed\x47\xc8\xd0\x54\xba\xc2\x74\x4c\xc7\x26\x3f\x37\x48\x3e\x3a\x00\x3b\xd9\xf5\x6b\xab\xd9\x64\xa5\x39\x12\x3a\xcd\x0b\x98\xa7\x4e\x9b\x28\xe8\x10\x7f\x60\xb2\x5c\xb6\x17\xb2\x5e\xd9\xc8\x3d\x9b\xe3\x9e\x8f\xc7\x73\xcf\x34\x19\x72\xdd\xbf\xcb\x65\x5b\xb2\x4e\xaf\xa1\xab\x22\x59\x20\xe2\x44\xc4\x89\x88\x13\x11\x27\x22\x4e\x44\x9c\x88\x38\x11\x71\x22\xe2\x44\xc4\x89\x88\x13\x11\x27\x22\x4e\x44\x9c\x88\x38\x11\x71\x22\xe2\xec\x29\xc4\xf9\xf6\x61\x38\x2c\x10\x5f\xd9\x2c\xe9\x85\xb5\x1a\x0f\xc8\xb2\xa9\xd9\xb4\x50\xb1\x74\x67\x8d\x5f\xd7\xfd\xb0\xb8\xaf\x0c\x91\x37\xef\x80\xd7\x88\xc7\x3c\x57\xc8\x9f\x6e\x26\x34\xee\x9c\xa9\xcd\xcb\x52\xe7\xf8\xe3\x3d\x11\x24\x77\x2f\x7f\x9d\xa8\x90\xf4\xa3\xac\xab\x28\xf2\xc3\x7a\x7e\x78\x35\x9e\x1f\x1e\x27\x47\x25\x3f\x14\xc3\x45\x12\xc4\xba\xe6\x0d\xf1\x98\x8c\x0b\xc8\x8b\x28\x11\x51\x22\xa2\x44\x44\x89\x88\x12\x11\x25\x22\x4a\x44\x94\x88\x28\x11\x51\x22\xa2\x44\x44\x89\x88\x12\xbb\x8c\x12\x4f\xc3\x49\x32\x91\x3a\xe6\x45\xce\x7d\x20\x18\x39\xb7\x6e\xa7\x87\x31\x74\x11\x45\x22\x8a\x44\x14\x19\x40\x91\xff\x6b\x3f\x5c\x08\x45\x91\x86\xe7\xbe\x27\xd8\x23\xff\xfb\xa5\xb1\xb2\xa9\x69\xba\x6d\x55\xf8\xc6\x7e\xb1\xa2\x15\xa9\xe3\xc1\xc9\x77\xed\x27\xff\xa5\xbf\x0e\x4e\xee\xb3\xa8\xaa\xd5\xec\x6a\xe7\x4c\x6d\xda\x2b\x25\xc7\x4b\x49\x67\xd8\x7d\x55\x20\xd0\xf3\x20\xd4\x42\xee\x6f\x6f\x2c\xdd\xbc\x0e\x97\x04\xc3\xbb\x00\xe7\x38\xc3\x3b\x03\xa7\xe0\x44\x64\xac\x58\xf1\x8d\x5e\x90\xd8\x90\xfa\xc5\x12\xbc\xe7\xe3\x09\xde\x09\x72\xbc\x8e\xcd\x45\x20\xbd\xba\xd7\x5f\x7f\x20\x9c\xe1\xdd\x46\x06\xd8\xe5\xe4\xbb\x76\xd7\xf5\xd4\xb0\x45\xcb\x25\xb5\x40\x9b\xe8\xac\xac\xbc\x75\xb3\xfa\x2b\xf7\x04\xb0\xa5\xab\x3a\xf6\xf1\xc6\x3a\x2c\x87\xf1\x71\x1b\xc7\xc7\xcd\x61\x7c\xdc\x88\xf8\xb8\x9f\x48\xb4\x5d\x7a\xcc\x8b\xf8\xd8\xb3\x90\xf7\xe3\x63\x77\x5c\x24\x45\x8b\x8c\x72\xc5\x81\x26\x25\x56\xb3\x02\xaa\x4e\xb2\xa5\x7f\xec\xf6\x3a\x91\x74\xbf\xd8\xab\x28\x6a\xa8\x14\x3a\x20\xae\x6e\x9a\x10\x7a\x12\xae\xc0\x5c\x50\x08\xa5\x73\x70\xb6\x05\xbb\xcf\x34\xff\x8c\xcb\xbc\xa2\x36\xca\xa1\x38\x39\xf4\xe5\x3e\xf8\x52\x1f\xf9\x62\x5f\xf2\x0b\x5e\x03\x7d\xb4\xef\xc9\xa0\x72\xa7\xb3\x3d\x06\x57\xd0\x94\x45\xba\x24\x0c\x45\xde\xae\xcc\xb7\x46\xc8\x8d\x30\xaf\x62\x40\xce\x18\xa6\x31\x6a\xd0\xa2\xca\x1b\x42\xea\x77\x41\x79\x24\xf8\x8c\xd7\x05\x72\x88\xea\x2b\x2b\x54\x63\x12\xaf\xb4\xe6\x5b\x70\x7c\x6a\xaf\x97\x46\xa4\xda\xc7\x15\x5f\xa5\x68\xb1\x95\xb6\x4c\x2d\xdd\xd4\x3c\xf5\xd5\x5f\x74\xb9\x71\xcc\x6d\x8d\x8a\xcd\x2a\x19\x54\x99\x55\xf6\xa4\xfb\x41\xf2\x25\x4b\x82\x54\xb8\x45\x64\x44\x3d\x57\xa8\x6a\x84\xd6\x31\x45\x78\x15\xe6\x78\x0d\x42\xd4\xd9\xdc\x67\xfa\xe0\x95\x3e\xf2\xe9\xb
e\xe4\xcb\x5e\x33\xff\x40\xdf\x74\xc0\x64\x5c\x2e\x51\xd5\xa6\x1e\x56\x9a\xb3\xcc\xb2\x5a\xe4\x1d\x20\x66\x64\x95\x65\xc9\xfd\x16\xdf\xe6\xcc\x7a\x29\x9b\x39\x9a\x51\xe6\x45\x87\x88\xd6\x29\xb3\xdd\x0c\xdb\x11\xba\x84\x92\x2a\xa6\x55\x5e\x56\x0d\xd7\xd2\x65\x55\xe8\xd8\x92\x5a\x72\x89\x7e\x4a\x5c\x4d\x29\x4b\xba\xa1\x96\xf4\x37\xb9\x68\x70\x91\x32\x75\x99\xef\x9f\xc6\x84\x9a\xac\xf9\xb8\x58\x14\x3e\x68\xfb\x0f\x09\x86\x9e\x51\xce\xe9\x7c\x38\x07\x2a\x6e\x5a\xf5\x5f\xe6\xef\x45\x1d\x81\xf0\xf9\x64\x33\x9d\xe5\x4c\xea\x76\x51\x9f\x69\xf7\x43\xaa\x4c\x66\xb9\xb7\x0f\xc0\x5b\x07\xc8\x9b\x07\x92\xdf\xf4\xec\xa6\x9f\xef\x7f\x5a\x32\x36\x36\xa5\xd8\x66\xb0\xa8\x5a\x8b\x6a\x91\x2a\x05\xb3\x54\xa2\x5c\x04\xfb\xf3\x82\x5a\x4b\xa6\xb5\xc2\xda\x22\xb4\xa6\x97\x6b\x5e\x1e\x5d\x51\x3e\x9e\xdd\xc1\x28\xa4\x2e\x1b\xa7\x1a\x2d\xe8\x9a\x0f\xcb\x39\x77\xe5\xa6\x0b\xaf\x75\xd9\xf6\x50\x52\x49\x97\xac\x66\x02\xcd\xe8\x32\x39\x6f\x03\xe8\x59\x4b\xab\x5f\x96\x51\x44\x5a\x06\xbe\x38\x07\x85\xc3\xa0\xf8\x86\x41\x65\x54\x76\x7c\xf5\xc0\xb0\x4f\x28\x83\x39\xb5\x70\x83\x2d\x33\x86\xc6\xee\xe2\x66\x4a\x7e\x53\x4d\xc3\x09\x10\x2e\x87\x7e\x75\x21\xee\x17\x2c\x7a\x25\x9d\x50\x06\xcf\x9b\x16\x0d\x14\xab\x14\x54\xbb\xa0\x6a\xec\xeb\x65\xfb\x08\xc3\x34\x2f\xcf\x16\xd2\xab\xae\xc0\x25\xaf\x8c\x4c\xea\x8e\x72\xed\xb8\x09\x2a\x0d\x1f\x4f\xc0\xac\x50\x1a\xce\xc1\x14\x57\x1a\x4e\xc1\x09\x38\xde\xc2\xf2\x31\xef\xa8\x4e\xc5\x86\x2b\x22\xe5\xc5\x45\x98\x09\xa4\xbc\xd8\x60\x99\x71\x1a\x83\x12\xae\x31\xec\x26\x3b\x45\x33\x75\x5c\x69\xc8\xbd\x7f\x4f\x9d\xd2\x30\x5e\x56\x2d\x47\xe7\x76\x4f\xb1\x59\x6e\x62\x43\x33\x56\xae\xf3\x43\xe9\x92\x26\x71\x5f\x6d\x41\xa3\xbc\x2a\xfc\x30\xe1\xc4\xfd\xc1\x8b\x2b\xd4\x2a\xd2\xe0\xd5\x7d\xc1\xab\xb6\x63\xa9\x0e\x2d\xea\x85\xd1\xba\xfb\xaa\x4a\x61\x7f\xaf\xc9\xab\xac\x96\xb9\x4b\x30\x0b\xf9\x9a\xad\xd4\x04\x1c\x6b\x61\xd0\xcc\x71\x5f\x01\xd4\x5f\x62\xf4\x97\x8f\xf4\xc3\x87\xfa\xc9\x07\xfb\x93\xef\xf7\x16\x81\xb7\xf4\x6f\x9f\x7d\x54\x8d\x79\x85\xb5\x33\x77\x17\xe0\x26\x40\x36\x38\x7d\xa7\xa1\xa1\x88\x81\x3b\xcc\xd7\x32\xe1\x5d\xa2\x96\xf8\x93\x4c\x69\x13\x4f\xf3\x3b\xb8\x02\x65\x2b\x43\x79\xdb\x34\xe6\x84\xf7\xcc\xe3\x6c\x5a\xc8\xbf\xe7\xdd\xc9\xe2\xff\x38\x1c\xbd\xc1\xcb\xbd\x92\x80\x4f\x27\xc8\xcb\x89\xe4\xa7\x3c\xb2\xf7\x81\xc4\x79\xd3\x2a\x70\xdd\xae\x68\xf2\x66\x37\x95\xd4\x12\xfb\x29\xa5\x4c\x56\x7d\x05\x67\xb6\x42\x0d\xab\xd8\xbe\x9f\xc0\xa8\x5a\xe0\x1f\xce\xbd\x2f\x4a\x7a\x41\x2e\xb3\xb4\xa4\xd9\x8a\x79\x53\xb6\xa9\xf0\x4e\x28\x53\xb3\x5c\xa2\x19\x45\xbc\x91\xbb\x3b\xb9\x7d\xc9\xd9\x69\x68\x03\x78\xaf\x4f\xed\xe0\xd5\xaa\x72\xdd\xe9\x3e\xee\x8a\x76\x29\x2b\x8b\x1c\xef\x1d\x5e\x28\xf2\x67\xe1\x34\x39\x99\x9a\xf0\x4c\x25\x0f\xd5\x98\x4a\x6a\x8b\xa8\x37\x96\xbc\x00\xcf\x93\xab\xa9\x67\xbd\x12\x26\xa4\xfe\x2d\x26\xaa\xa1\xf1\x34\x4d\x8a\x5d\x30\xcb\x74\x44\xb1\x2b\x85\x65\x36\x41\xb8\x6a\x4f\xd5\x15\xa1\x19\x95\x2d\x93\xab\xb5\xa9\xdd\x1e\x53\xed\xa4\x39\x86\xfc\xd2\x20\x8c\xc6\x44\xdd\xac\x89\xb7\xf9\x3d\x83\xe4\x1d\xa1\xf1\x36\xf7\x0b\x27\x02\x4b\x9a\x3f\x5d\xfd\xdc\x75\x2f\xad\x0a\xc2\x98\xce\xb2\x9b\x1b\x06\xc8\xdc\x6a\x87\xc4\xaf\xc3\x65\x31\x63\x1e\x83\xf3\x7c\xc6\x9c\x85\xd3\x70\x32\x72\xc6\xf8\xcd\xed\x4e\x9a\xba\xf0\xa0\x1b\x48\x21\xc6\xcf\x73\x37\x19\x3b\xb3\xa9\x18\x99\x51\x01\x36\xd1\xd1\x13\x1d\x3d\xd1\xd1\x13\x1d\x3d\xd1\xd1\x13\x1d\x3d\xd1\xd1\x13\x1d\x3d\xd1\xd1\x13\x1d\x3d\xd1\xd1\x13\x1d\x3d\xd1\xd1\x13\xcf\x8c\xa3\xa3\x26\x3a\x6a\xa2\xa3\x66\xb7\x1c\x35\x3f\x32\x08\xc3\x82\xe4\x19\xd4\xb9\x69\x5a\x37\x98\xb6\x56\x83\xf2\x74\xa3\x68\x51\xdb\xa6\x36\xf9\xb6\x41\xf2\x77\xfd\x40\xfc\x
5b\x3d\x8c\xf7\x68\x63\x8c\x37\x23\x8a\x48\x8f\xb0\xdb\x2e\x79\x8f\x4b\x82\x27\xaf\x6e\x35\x76\x47\xa3\x13\x5e\xe7\x05\xd4\x9b\x82\x49\x0e\xf5\x4e\xc0\x71\x38\x1a\x09\xf5\x02\x2d\xef\x52\x3d\xd9\x22\xb3\xba\xed\x34\x8b\xed\x9e\x8b\xc7\x76\x13\xe4\x98\x24\x72\x75\x9d\x2d\x99\xb7\x7c\x6f\x08\xe7\x46\x62\x87\xc4\x0e\x89\x1d\x12\x3b\x24\x76\x48\xec\x90\xd8\x21\xb1\x43\x62\x87\xc4\x0e\x89\x1d\x12\x3b\x24\x76\x48\xec\x90\xd8\x21\xb1\x43\x62\xd7\x25\x62\xf7\x5b\xfb\x21\x27\x88\x9d\xb5\xa8\x16\x32\x2e\x74\xe2\x45\xd5\x92\xbb\x42\xa9\x62\x3b\x6c\xdf\x52\xa2\x8b\xba\xa1\xe9\x46\xd1\x3b\x55\xfd\x6f\xf6\x93\x4f\xf6\xc3\xbd\xac\x8c\xc9\x60\x11\x3e\xd1\x0b\x39\x60\x3d\x25\xca\xbb\x62\x96\x68\x4e\x94\x27\xce\x57\x5f\xa9\x2d\x45\x82\xbd\xfa\xfb\xdb\x7c\xbe\x7a\x09\x2e\x0a\xd2\x36\x0d\x39\x4e\xda\x4e\x42\x03\x1f\xf3\x0c\x6f\x31\x97\xb1\xd5\x57\x6e\x03\x27\x19\xf9\xd9\xe8\x72\x3c\x82\x7b\x9c\x5c\x14\xa4\x2d\xe4\xed\x51\xee\x73\x91\xfd\x0c\xc9\x1f\xda\xdd\xa8\x0b\x87\xc2\x4f\x5e\x87\xf4\xa2\x7b\xf0\x7a\xb3\x3a\x72\xbd\xa7\x05\xe2\x7a\x12\x4f\x0b\xe0\xa9\xeb\x16\x4f\x5d\xff\x74\xa2\xbd\x32\x65\x4e\x1c\xb9\x9e\x81\x0b\xfe\x91\xeb\xcd\x92\x52\xfc\xbc\x75\xd7\xa5\x54\xfa\x95\xdb\x1b\x49\xa9\xfb\xbc\xc3\xd8\x21\x82\x49\x9e\xc5\xde\x34\xb9\x84\x67\xb1\xf1\x2c\x36\x9e\xc5\xc6\xb3\xd8\x78\x16\x1b\xcf\x62\xe3\x59\xec\xf0\xb3\xd8\xcd\x29\x14\xb1\x6a\x42\xf3\x1a\x47\xee\xbf\xef\x69\xa4\x50\x64\x63\x0e\x6a\x87\xa8\x19\xe2\x9c\xf6\x66\x68\x19\x78\x4e\x7b\xbb\xe9\x36\x78\x4e\x1b\xcf\x69\x6f\xf2\x39\xed\x2e\x63\xb3\xd8\x43\xda\x5d\x5f\x41\xf2\x67\xe0\x14\x39\x91\x3a\xee\x1d\xba\x7e\x30\x78\x6c\xbb\xbe\x84\xce\x87\xb8\x25\x7f\xd1\x07\x77\x09\xae\xbb\xc8\xd3\xf5\xb8\x0c\x97\x7c\xb6\x8f\xfc\x46\x1f\xbc\x9a\xff\xea\xad\x71\xf7\x14\xa9\x53\x6d\x36\x15\x40\x3a\x7d\x5f\x91\x3a\x39\x76\xab\x5c\xbb\x26\xe7\x66\x5c\x2f\x9d\xf6\x39\x54\x4e\xd4\x15\x34\x51\x57\xd0\x44\x33\x05\xe5\xcb\xf0\x94\x18\x89\x6c\x19\x62\x23\xf1\x3c\x4c\x43\xae\x85\xc5\x27\xf0\x9d\xcd\x9c\x82\x26\x3f\x3e\x0c\x47\xab\xf3\xa1\x47\x84\x26\xb5\x28\xff\x0a\x3b\x10\x91\xf4\x1b\x43\xe4\x4f\xfa\xfc\x44\xe9\x0f\x84\x90\xf2\x2b\xe2\xa9\x79\xea\xa4\x1f\x64\x97\x45\x3e\x73\x3f\xf6\x8b\x7f\xbd\xcd\x44\xfc\x0d\x70\x5a\x34\xe8\x51\x38\xcc\x1b\x74\x0c\x46\x61\x7f\xe4\xd4\x66\xdf\xc0\x9a\xcf\xaf\xcf\x46\x21\xf8\xa5\xf8\xb9\xbc\x9f\x0c\xd7\xa7\x20\x0f\xd4\x20\x98\x83\x3c\xf9\xa3\xbb\xfc\x96\x56\xc2\x81\x76\xa0\xb1\x1f\x96\x77\x74\xab\xbd\x73\x3c\x29\x53\x8d\xfa\xb4\xbe\x06\x47\x8d\x09\x59\x75\x8b\xac\xfa\xbd\x89\x68\x17\xf4\x8d\x8a\x81\x29\xc1\xad\x99\x66\xe0\x71\xeb\xf5\x16\xe2\x8a\x8c\x36\xcb\x84\x18\xc2\x9d\x7e\xf3\xed\xbe\xc8\xb8\xd3\xa3\xcb\x01\x29\x21\x77\xf8\x5d\x13\x12\x48\x91\x91\x22\x23\x45\x46\x8a\x8c\x14\x19\x29\x72\x2b\x14\x39\x72\x91\xef\x79\xbc\xdc\x29\x0d\x20\x96\x4a\xe7\xfe\xf8\x55\xbe\x12\x30\x1c\x43\x84\x03\xaa\xc1\x43\x1c\x49\x74\x43\x33\x40\xf2\xbb\xdd\xf4\x11\x24\xbf\x48\x7e\x37\x99\xfc\x76\x1e\x0f\xb5\x5b\xd2\xc7\xc1\xe3\xfc\x51\x38\x4c\x0e\xa6\xb2\x1e\xca\x7d\x5d\x10\xe5\xfa\xa5\xde\x12\x81\x37\x7f\x36\x0d\x0b\x92\x5a\x56\x1c\xd3\x2e\xa8\x25\xdd\x28\x8e\xad\x8e\x2f\x52\x47\x1d\x97\x79\xde\x23\x38\x26\xef\x26\x36\xab\x4b\x65\x53\x73\x9f\xa6\x96\x07\x35\xbf\x34\x4c\x7e\x64\x07\xdc\x19\x28\xf7\x9a\x2c\x37\xf9\x89\x66\x12\xc1\x3f\xe6\x95\x3f\x67\x6a\x93\x5e\xf9\x3d\x91\x0e\xfe\x18\x7f\xdd\xa4\xff\x69\x0b\xe2\xcb\xfc\x05\x3e\xa2\xf6\xbd\x1e\x82\x60\x33\x72\xc4\xc7\x65\x71\xbf\x5e\x8c\x97\x00\xd3\x24\xe7\x4f\x72\xd1\x17\xae\x1f\x61\x60\x00\x4a\xc9\x10\xd1\x37\x18\x93\x00\x63\x12\x60\x4c\x02\x8c\x49\x80\x31\x09\x30\x26\x01\xc6\x24\xc0\x98\x04\x18\x93\x00\x63\x12\
x60\x4c\x02\x8c\x49\x80\x31\x09\xba\x1d\x93\x60\x1a\x72\xe4\x6c\xea\xb4\x07\x52\xf6\x06\x09\x4c\xc4\xee\xed\x56\xc0\x31\x18\x3b\x01\x63\x27\x60\xec\x84\x2d\x14\x3b\xe1\xef\x86\xe0\xb0\x8c\x76\x6a\x6a\xb4\x36\x5a\x82\xc0\xa7\x56\xc5\x60\x5f\x5d\x28\xa9\xb6\x4d\x3d\x3a\xfa\xf1\x21\xf2\xf5\x01\xd8\xc3\x1e\xf3\x7c\x70\x7f\xbc\x19\x2c\x7a\x45\x14\x37\xc5\x8a\xeb\x09\x16\x7a\x3f\x7f\xdd\x25\x53\xa3\xd2\x43\x38\x58\x43\xe4\x9d\x2d\xf0\xce\xab\xf1\xbc\xf3\x38\x39\x2a\xad\x1c\x81\xc6\x8e\x3c\x4a\x1d\x18\x9c\x08\x39\x11\x72\x22\xe4\x44\xc8\x89\x90\x13\x21\x27\x42\x4e\x84\x9c\x08\x39\x11\x72\x22\xe4\x44\xc8\x89\x90\xb3\xdb\x90\xf3\x38\x1c\x25\x87\x53\x07\x3d\x3c\xf9\xfa\x2a\x37\xb3\xc0\xb6\xae\xf3\x67\x85\x91\x3b\x22\x77\x44\xee\xb8\x85\xb8\xe3\xbb\x87\xe1\x94\x7b\xd8\x5c\xb7\x68\x51\xe7\x3e\xfb\x21\x01\x5b\x05\x82\x94\xc0\x40\x2f\xf8\xfc\xf1\x73\x43\xe4\xdf\xee\x80\x7b\x6a\x1e\xf7\x50\xe4\x47\x9b\x41\x91\x93\x73\x33\xf3\xa2\xd8\x9e\x00\x91\x8f\x0a\xa7\xcc\xea\x2f\xf2\xa3\x16\xc8\xaa\x22\x91\xac\x27\x92\x34\x1e\x38\xe6\xc8\x59\xcf\xc5\x3a\x6c\xc4\xd5\xb3\x47\xc1\x27\xfd\x86\x6f\xe4\x79\x2d\xc0\x27\xa2\x49\x44\x93\x88\x26\x11\x4d\x22\x9a\x44\x34\x89\x68\x12\xd1\x24\xa2\x49\x44\x93\x88\x26\x11\x4d\x22\x9a\xec\x32\x9a\x6c\x78\x02\xd6\xdf\xd1\x21\x98\x44\x30\x89\x60\x12\xc1\x64\x10\x4c\x5e\x83\xb3\x02\x4c\x8a\xc8\x35\x1e\x88\x8c\x38\x44\x5e\x36\x35\x4d\xb7\xad\x0a\xdf\xd1\x2f\x56\xb4\x22\x75\x6c\xf2\x99\xe7\xc9\x27\xf7\xc1\x6b\x44\x09\x7e\x14\xee\xc6\x19\xe1\xe7\x4c\x6d\xda\x2b\x29\xc7\x4b\x4a\x67\xd8\x23\x22\x00\x8e\xe4\x80\xfe\x01\xed\x90\xfb\x7b\x1c\x0d\xe6\x10\x8f\x21\x1e\xcb\x21\x1e\x43\x3c\x86\x78\x0c\xf1\xd8\xb6\xc1\x63\xb9\x9e\xc1\x63\x6d\xaf\x49\xcb\x78\x2c\x87\x78\x0c\xf1\x18\xe2\x31\xc4\x63\x88\xc7\xba\x8f\xc7\x72\xdb\x1a\x4a\xe5\x10\x4a\x75\x0e\x4a\xe5\x7a\x1d\x4a\xe5\xb6\x20\x94\xca\xaf\x44\xc7\x3b\xbe\x22\x7c\xad\x2e\xc2\x0c\xf7\xb5\x9a\x82\x49\x38\x13\x19\xbc\x52\xc6\x8f\x76\x13\x17\x85\x10\xa4\x59\xdd\xf6\x72\x14\x3c\x1f\xef\x54\x75\x82\x1c\x97\x4e\x55\xa2\x68\xe9\x33\x15\x52\x70\xbd\x7b\xd5\xf5\x07\xc3\xbd\xa9\x76\x92\x1d\x6c\x80\x40\xea\x7f\xee\xae\x43\x68\xf7\x8b\xa5\x49\x51\x43\x69\xd9\x01\x71\x75\x13\x78\x99\xc0\x5b\x4f\xc0\x65\x78\xbc\x26\x7c\xf0\x29\x38\xb1\x81\xee\xc0\x08\xc2\x98\x09\xa5\xc5\x4c\x28\xdf\xdd\x17\x2d\x34\x2e\x09\xa1\x71\x01\xce\x71\xa1\x71\x06\x36\x36\x4a\x61\x5e\x64\x46\x99\x85\xbc\x9f\x19\x65\xc3\x85\x3e\x29\x62\xaf\x3f\x0e\x17\x03\xb1\xd7\x37\x5a\xaa\x2b\xdb\xa2\x65\x4f\xd9\xb4\x1d\xe8\xb4\xec\x4b\xff\xd3\x48\x9d\x6c\x1b\x92\xe1\xf6\x03\xb9\x0b\xcc\xa5\x50\x39\x37\x21\xee\xac\x92\x73\x53\xde\x53\xdd\x94\x78\x9d\x49\xe3\x82\x84\x18\x09\x31\x12\x62\x24\xc4\xdb\x87\x10\xa3\x9a\x17\xa3\xe6\xf5\x0e\x42\xc7\x14\x5a\x5d\x49\xa1\x85\x96\x0a\xb4\x54\xa0\xa5\x02\x2d\x15\x68\xa9\xd8\xd6\x96\x0a\xcc\xa4\x88\x99\x14\x31\x93\x62\xa7\x32\x29\xa2\x21\x10\x0d\x81\xdb\xd5\x10\x98\x2f\xb6\x39\x57\x68\x5c\x8c\x82\x74\x38\x72\xbe\x93\xdc\x21\x66\xb3\x2f\x69\x9b\xc5\xcf\xeb\x00\xce\xd5\xa4\x7a\x0b\x46\x16\x27\x5f\x18\x82\xa3\x6e\xc4\x90\xb2\x3d\xb6\x9a\x6d\x9c\xdc\xcd\x76\x54\x87\x2e\x55\x4a\x4c\x78\x91\xef\x1f\x22\x1f\x1d\xf0\x93\x9f\xde\x94\x0a\xae\xa1\xe9\xab\xba\x56\x51\x4b\x55\x51\x42\xbc\xcd\xe4\xbc\x2c\x62\x9e\x3a\x6d\x09\x0f\x92\x49\x3f\x72\x33\x2c\x95\x6a\xe0\x3d\xb3\xba\xdd\xeb\x4e\xfc\x9b\x12\xdf\xe3\x72\xfc\x7c\x18\x21\xe9\xfa\x14\x8a\x81\xa6\xad\xce\xa1\xb8\x37\x7c\x32\xee\x21\xc0\xbf\x94\x1b\xa0\x31\x9a\x07\x1e\x57\xc0\x68\x1e\x68\x8c\x42\x63\x14\x1a\xa3\xb6\x91\x31\x0a\xa3\x79\x60\x34\x0f\x34\x02\xa0\x11\x00\x8d\x00\x68\x04\xe8\x09\x23\xc0\x16\xa4\x15\x18\xf6\x03\xc1\x2a
\x86\xfd\xd8\x3a\x27\x2c\xc8\x4f\x3e\x07\xd9\x6a\xba\xd8\x0c\x57\xfc\xe6\x55\xf2\xed\xfb\x7c\xae\x38\xdc\x38\xc0\x47\x00\x46\xa5\x1f\x62\xb7\x36\x00\x81\x3d\x0e\x01\x31\x92\x07\xa2\x31\x8c\xe4\x81\x68\x0c\xd1\x18\xa2\xb1\x6d\x84\xc6\x7a\xc8\x0d\xb9\x67\xd0\x18\xfa\xc7\x22\x1a\x43\x34\x86\x68\x0c\xd1\x18\x46\xf2\x40\x07\xbe\x5b\x87\x33\xf5\xbc\x03\xdf\x96\x8c\xe4\xf1\x3c\x4c\x09\xe7\xa9\x93\x30\xc1\x9d\xa7\x0e\xc1\x38\x1c\x88\x3c\xd0\xae\x96\xcb\xb6\xeb\x62\x18\x70\x11\x8b\xf5\x97\x8a\x09\xad\xd1\x76\x7f\xaa\xd4\xd7\x76\xf9\x18\xec\x2e\x2f\x48\x47\x90\x78\x3d\x2c\x7e\xed\x06\xf3\x12\x88\x2a\x07\x67\xe1\x74\x4d\x34\x8e\x0c\x8c\xac\xa7\xad\xf1\x5c\x26\x86\xdf\x68\x31\xfc\xc6\x9f\x24\xe0\x8c\x98\xe9\xc7\xe0\x08\x9f\xe9\x07\x60\x9d\xa3\x0f\xa6\x45\x58\x8d\x53\x70\xc2\x0f\xab\xb1\xfe\x52\xce\x89\x38\x1a\xa7\xe1\x64\x20\x8e\xc6\xfa\x8b\x69\x5d\xe4\x88\x88\x1a\xcd\x89\x9c\xa0\x50\x69\x2c\x7e\xd2\xbf\x3e\xe2\x8b\x1c\x25\x34\x76\x46\x50\xfc\xec\x17\x77\x08\xf1\x13\x16\x2c\xa3\x73\x82\x08\x83\x64\x20\x7c\x45\xf8\x8a\xf0\x15\xe1\x2b\x06\xc9\xc0\x20\x19\x18\x24\x03\x83\x64\xa0\x11\x00\x8d\x00\x68\x04\x40\x23\x00\x1a\x01\xda\x62\x04\xc0\x20\x19\x18\x24\x03\x83\x64\x60\x90\x0c\xb4\xb1\xa1\x8d\x6d\x1b\x05\xc9\x68\xb7\x11\x6b\x0b\x9e\x33\x21\x5f\xed\x83\xfb\xa5\xdf\x7a\x85\x95\xe0\x48\xda\xc1\x7b\x44\x37\xc7\x08\x57\x0c\xe0\x35\xd5\x57\x93\xc9\x22\x5f\x1b\xd8\x0a\xe5\x0d\x1d\x55\xe1\x4d\x95\xbe\xb7\x48\x9d\xc9\xaa\xdb\x27\xe7\x66\x2e\xb0\x4b\xed\xe3\xe2\x75\x05\x4d\xd4\x15\x34\xd1\x4c\x41\x79\x7d\x5d\xb1\xcc\x1b\x8c\x4f\xf7\x1b\xe3\x46\x28\x79\xf7\x20\x3c\x22\x5a\xdc\xa0\xce\x4d\xd3\xba\xc1\x76\xc6\xb2\xb5\x57\xb3\x63\xba\x51\xb4\xa8\x6d\x53\x9b\xfc\xe5\x3e\xf2\xa5\x7e\x78\xb5\x7f\xd7\xb5\xd5\x6c\xf2\xd1\xc6\x47\x04\x66\xc4\xd3\xe9\x41\x76\xdb\x25\xef\xc9\x85\xac\xbc\x70\xde\xb4\x26\x4b\x25\xcf\x68\x61\xf7\xf8\x31\x81\xfc\x0b\xc2\xfa\x74\x80\x5b\x9f\x58\xf7\x1c\x81\x43\x30\x1e\x69\x77\x0a\xb4\xe8\x6a\x36\x23\xbf\xb9\x29\x83\xf7\x33\xf1\xb2\xe0\x30\x39\x28\x65\x41\x5d\xc7\x49\xc1\x20\x5f\x58\x1d\x29\x24\xc6\x94\x8e\x41\x42\xf0\x24\x04\x06\x09\x41\x63\x1c\x1a\xe3\xd0\x18\xb7\x8d\x8c\x71\x18\x24\x04\x83\x84\xa0\x11\x04\x8d\x20\x68\x04\x41\x23\x48\x4f\x18\x41\x30\x84\x07\x86\xf0\xd8\x2a\xd8\x17\x43\x78\x74\x22\x84\xc7\xd7\x9e\x83\x4c\x34\x98\xe3\x61\x90\x5d\x3a\x57\x28\xa9\x1c\xd1\xfd\xdc\x73\xe4\xa3\xfb\x80\x54\x21\x3a\x91\x8c\x2f\xdd\x14\xa7\x9b\x62\xe5\xa4\x1f\xae\x85\x75\xbc\x88\xe0\x2d\x3d\x8e\xe9\x30\x9a\x07\x32\x2c\x8c\xe6\x81\x0c\x0b\x19\x16\x32\xac\x6d\xc4\xb0\x7a\xc8\x5f\xba\x67\x18\x16\x3a\xf2\x22\xc3\x42\x86\x85\x0c\x0b\x19\x16\x46\xf3\x40\x4f\xc3\x5b\x07\x39\xf5\xbc\xa7\xe1\x96\x8c\xe6\x71\x1d\x2e\x0b\xf7\xa6\xc7\xe0\x3c\x77\x6f\x3a\x0b\xa7\xe1\x64\x73\xee\x4d\x1c\x12\x65\x82\x94\xa8\x2d\x91\x3d\x16\xe3\x1d\xa1\xce\x90\x53\xcd\x39\x42\xf1\x5a\xd5\xe7\x12\x4b\xfd\xfc\xee\x50\x66\xf6\x3a\x37\xee\x87\x51\xcd\xc7\xf6\x8a\xdf\xbb\x43\xc8\x04\xd0\xba\x04\xb3\x90\xaf\x89\xfd\x31\x01\xc7\x5a\xed\x19\x3c\x7a\x8a\x71\x40\x5a\x8c\x03\xf2\xce\x3e\x78\x42\x04\xe0\xc8\xc3\x63\x81\x00\x1c\x27\x61\x03\xe3\x31\x52\x4a\xc0\x45\x21\x8f\xa6\x21\xc7\xe5\xd1\xc6\xde\x32\x27\xe2\x8f\xcc\xc0\x05\x3f\xfe\xc8\x86\x4a\x6c\x36\x56\x48\x17\x84\x58\xfa\x6b\x23\xa1\x42\xec\xe1\xd0\x48\x22\x55\xf2\x6c\x54\xdc\x52\x27\xcf\xfc\xa8\x22\x1d\x94\x6c\x18\x4c\x04\xd9\x2f\xb2\x5f\x64\xbf\xc8\x7e\x31\x98\x08\x06\x13\xc1\x60\x22\x18\x4c\x04\x6d\x10\x68\x83\x40\x1b\x04\xda\x20\xd0\x06\xd1\x16\x1b\x04\x06\x13\xc1\x60\x22\x18\x4c\x04\x83\x89\xa0\x89\x0f\x4d\x7c\x5b\x3c\x98\x48\x17\x30\xf2\x7a\x02\x96\xb4\x3f\xfa\xc7\x3b\x92\x70\x82\x8d\xab\x0
6\xf9\x2a\xdd\x41\xf3\xc6\x8a\xe9\xa8\xf2\x92\xc8\x62\x59\xb1\xc9\x2f\xdf\x4b\xbe\xad\x1f\x76\x16\x4c\x8b\x5e\x5b\xcd\x26\xd3\x16\x55\x35\x45\x5c\x73\xfb\xdf\x57\x8d\x5d\x26\xf8\x04\x2b\x29\x3d\xc8\xee\x9d\x32\x2d\x1a\x0c\xe6\x5f\x75\x8b\xe8\xa4\xf6\xe2\xef\xfc\x55\x98\x14\x43\x68\x02\x8e\xf1\x21\x34\x0e\x07\x20\x13\x69\x8f\x60\x5f\xc6\x06\x4c\x55\xc5\x62\xc7\xcd\x03\xe1\x7d\x7a\x1b\x19\x28\xd2\xf5\x26\x4f\x48\xa5\xe4\x38\xaa\xae\x42\x30\xc4\x44\xf2\x37\x76\xf9\x7d\x30\x6a\xd1\x72\x49\x2d\xd0\x26\xbb\x21\x2d\x6f\xef\x7a\x4f\xe4\xa6\x21\x07\x67\x6b\x4c\xac\xeb\xee\x0a\xc4\x70\x68\x58\x6d\xd1\xb0\xfa\x9f\x12\xed\x10\x05\xe7\x85\x89\xf3\x0c\x9c\xf2\x4d\x9c\x9d\x10\x29\xed\x96\x19\x0d\x64\x54\xb9\xe2\x40\xee\xff\xbf\xc7\x17\x29\x47\xca\xaa\xe5\xe8\xdc\x78\x24\xb6\x9d\x4d\xca\x96\xa1\x32\xd3\x13\xba\x2a\x59\xee\xab\x2d\x68\x94\xd7\x61\x3f\x0f\x11\x75\x7f\xf0\xe2\x0a\xb5\x8a\x34\x78\x75\x5f\xf0\x2a\x53\x9b\x1d\x5a\xd4\x0b\xa3\x75\xf7\x55\x95\xc2\xfe\x5e\x93\x57\x59\x2d\xd7\xeb\x38\xd2\x40\x47\x99\xe3\x27\x6d\x50\xbe\xc5\xc8\xb7\x8f\xf4\xc3\x87\xfa\xc9\x07\xfb\x93\xef\xf7\xb6\xc6\x6f\xe9\xdf\x3e\xf2\xad\x06\x0c\xb2\x76\xe6\x06\x57\x0e\xaf\xd9\xe0\xf4\x8f\xdc\x0d\x45\x0c\xdc\x61\xbe\xc3\x17\xf6\x79\xb5\xc4\x9f\x34\x4c\x63\x54\x3c\xcd\xef\xe0\xd6\x05\x5b\x19\xca\xdb\xa6\x31\x27\xce\x9e\x3d\xce\xa6\x85\xfc\x7b\xde\x9d\x2c\xfe\x8f\xc3\xd1\x82\x37\xf7\x4a\x02\x3e\x9d\x20\x2f\x27\x92\x9f\xf2\xb6\x19\x1f\x48\x9c\x37\xd9\xa6\x54\xb7\x95\xa2\xc9\x9b\xdd\x54\x52\x4b\xec\xa7\x94\x32\x59\xf5\x15\x7c\x7f\x26\x6c\x14\x15\xdb\xb7\xb4\x8e\xaa\x05\xfe\xe1\xdc\x7e\x5d\xd2\x0b\x12\x3e\xd0\x92\x66\x2b\xe6\x4d\xd9\xa6\xc2\xbe\x5b\xa6\x66\xb9\x44\x33\x8a\x78\x23\x3f\x2c\xe8\xf6\x25\xdf\x27\x85\x36\x80\xf7\xfa\xd4\x0e\x5e\xad\x2a\x2f\xbe\xae\xe8\x94\x0f\x85\xcb\xeb\x5d\xe4\x36\x5e\xc3\x0d\xe4\xc7\x89\x58\x2d\xf2\x13\x70\x8c\x1c\x49\x1d\xf2\xa2\x19\xde\xcb\xe7\x90\x9c\xd0\x55\xf7\xa6\x06\xd8\xa5\x60\x9c\xc2\x2d\x18\x09\xf1\x9d\x43\x6e\x06\x7f\xdb\x31\x2d\xb5\x48\x6b\xcf\xfe\xf2\x5d\xaf\x7b\xd1\x3d\x01\xfc\x5b\x83\xe4\x97\x07\xe0\xb5\xf2\x57\xcf\x0b\xe8\x45\x09\xe4\x0d\x4d\x5f\xd5\xb5\x8a\x5a\x0a\xfa\x9f\xaa\x9e\xf1\x6b\x5e\x3c\xc6\xf7\x86\x99\x00\xe3\x9c\xf0\xe8\xe8\x20\x2f\x68\xd0\xf7\x3e\xa9\x72\x27\x30\xcb\xd4\xb3\x60\xda\x0e\x55\xb5\x4c\x7a\x2f\x7f\x40\x16\x2c\x1d\x8b\x82\xaf\x99\xd5\xed\xf6\xe5\x27\xea\x50\xd4\xbf\x1b\xc2\x73\xec\x00\xf7\x1c\x63\x13\x6a\x12\xce\xc0\xa9\x16\xd6\x50\xee\xfe\x7b\x6e\x95\x1a\xf1\x7e\xb1\xd7\xe2\xa7\xcf\x49\x32\x21\xa7\x4c\xf5\x08\xf1\xc2\x82\xfa\xad\x1c\xb2\xd1\xdf\x1b\x3e\x81\xf7\x10\xe0\x5f\x8e\xc1\x00\xf1\x20\x35\x06\x03\x44\x67\x3a\x74\xa6\x43\x67\xba\xed\xe6\x4c\x87\xc1\x00\x31\x18\x20\x3a\x31\xa1\x13\x13\x3a\x31\xa1\x13\x53\x4f\x38\x31\x61\x30\x40\x0c\x06\xb8\x55\xdc\x36\x30\x18\x60\x27\x82\x01\xfe\xab\x21\x18\x8b\xa0\x81\x12\x04\xae\x9a\xa5\xca\x0a\x55\x1d\x47\x2d\x2c\xb3\x95\xd7\x26\xbf\x39\x48\x7e\x71\x00\xc0\x67\x81\xc9\x6f\x6d\x0e\x03\x2e\xf0\xa2\x26\xbd\xa2\xda\x84\x02\x53\xd5\x28\xb0\xf6\x2d\x5b\x83\x04\x46\x9e\x54\x6d\x3b\x22\x74\x49\xe0\xf3\xf1\x24\xf0\x04\x39\xde\x88\x04\xd6\xb6\x74\xb5\xe5\x15\x41\x20\x82\x40\x04\x81\x08\x02\x11\x04\x22\x08\x44\x10\x88\x20\x10\x41\x20\x82\x40\x04\x81\x08\x02\x11\x04\x22\x08\x44\x10\x88\x20\x10\x41\x60\x2f\x81\xc0\x1f\x1c\x84\x94\x00\x81\xf4\x45\x87\x1a\xbc\xab\x6a\xd3\x81\x50\x9b\xfc\xe3\x3e\xf2\x57\xfd\x40\xfc\x7b\x3c\x57\xc0\x26\x33\xf6\x8e\xb0\xdb\xce\x79\x8f\x57\x87\x38\xdc\x6a\x69\x7b\x29\xe4\x05\x9d\x9b\x82\x49\x4e\xe7\x4e\xc0\x71\x38\x1a\xe9\x11\xeb\xb7\x5a\x6d\xd4\xb7\xb6\x84\xb4\x5c\x88\x07\x7a\x07\x49\x56\x02\x3d\xbf\x2e\xd1\x
49\x7d\x85\x47\x1f\x32\x3a\x64\x74\xc8\xe8\x90\xd1\x21\xa3\x43\x46\x87\x8c\x0e\x19\x1d\x32\x3a\x64\x74\xc8\xe8\x90\xd1\x21\xa3\x43\x46\x87\x8c\x0e\x19\x1d\x32\xba\x6e\x31\xba\x8f\x8d\xc2\xe3\x82\xd1\x2d\x95\xcc\x9b\x4c\x18\x5b\x66\x29\xe3\x6d\xee\x6b\x0f\xf2\x96\x2d\xdd\xb4\x74\x67\xad\x44\x57\x69\xa9\x6a\xdb\xe7\xc6\x37\x22\x7f\x34\x42\xbe\xd6\x0f\xf7\x07\x8a\x9b\x74\x4b\xf3\xc0\xde\x28\x8f\x76\x54\xbd\x53\x9d\x93\x45\xcf\xb2\xa2\xa7\x82\x45\xa7\x8f\xb3\xdb\xcf\x87\x14\x28\x51\x5f\xf4\xa3\x6d\x0e\x81\x54\x81\x67\x05\x9c\x9b\x87\x27\x38\x9c\xbb\x08\x33\x70\x21\x12\xce\x05\x9b\xd4\xa5\x73\xd1\x75\x8d\x85\x75\xff\x22\x1e\xc6\x3d\x43\x16\x24\x8c\x6b\xd4\x9d\x12\xcf\x35\xa8\x4a\xfd\x19\xdc\xc6\x81\x99\x92\x7f\xbd\x3b\xa6\xcb\x0f\xb8\xc1\x95\x9a\xee\xf5\x93\xf2\x89\x1e\xe8\xf8\xdc\x73\xf0\x2d\xf0\x86\x9a\xd8\x24\xed\xeb\x79\x0c\x55\x82\xa1\x98\x5a\x0c\xc5\xf4\xff\x24\xe0\x9a\x88\xa3\xf4\x0c\x2c\xf8\x71\x94\xba\x22\x97\x3a\x29\x0c\x7b\x5c\xe6\x95\x2b\x0e\xa4\xbf\x7a\x7b\x8c\xcc\xdb\x2b\x23\xa1\xaa\x8d\xc4\xdc\x09\x71\x53\x2f\x48\xb9\xce\x24\xb8\x41\xe1\x16\x23\xdc\x30\xc9\x42\x57\x92\x2c\x60\x74\x6d\x8c\xae\x8d\xd1\xb5\x3b\x15\x5d\x3b\xff\xf1\x44\x9b\x43\x0c\x5f\x11\xb9\xfb\x98\x56\xe1\xe7\xee\xeb\x70\xd8\x62\x25\x7c\xc5\xdf\x4d\x76\x8a\x66\x82\x4d\x55\x48\x72\x5f\xdd\x13\xa3\x71\x1c\xaf\x8b\x37\xd9\xf4\x76\x6b\x82\x07\x43\xdb\x64\x35\x04\x83\x50\x6e\x37\xe5\x07\x83\x50\x62\x10\xca\x4d\x0e\x42\xb9\xb9\x54\x2f\x36\x3a\xe5\x66\xae\x38\xf9\x0b\x70\x8e\x4c\xa5\x26\xbd\x68\x94\xfb\x82\x91\x2c\xa3\x4b\xaa\x0f\x6b\xd9\xf6\xa0\x93\xdf\xb8\x0a\xfb\xa2\x8e\x99\xd7\x44\x9a\xfc\xd5\xab\xe4\xdf\xed\xab\x3a\x5d\x9e\x6e\xec\x59\x1a\x8c\x47\x98\xbe\x97\xdd\xeb\x1d\x05\x0f\x5e\xea\x71\x5f\xd2\x1c\x7a\x59\xa2\x97\x25\xe6\x17\x46\x2f\x4b\xf4\xb2\x44\x2f\xcb\xed\xe3\x65\xd9\x43\xe9\x73\x7b\xc6\xcb\x12\xf3\xba\xa2\x97\x25\x7a\x59\xa2\x97\x25\x7a\x59\x6e\x46\x5e\xd7\x6d\xed\x14\x89\x89\x27\xb7\x73\xe2\xc9\xdc\x16\x74\x8a\xcc\x2f\xc2\x05\x01\x04\xcf\xc2\x69\x0e\x04\x8f\xc1\x11\x38\x14\x09\x04\x5d\xfe\xc4\xcd\x4c\xd5\xe9\x42\x36\x7c\x00\xf7\xd9\x78\xf8\x77\x94\x1c\x6e\x25\xb7\x06\xa4\xde\xb1\xbb\x8a\x89\xdd\x2d\xd6\x21\x45\xad\xc6\x5f\xf7\x89\x9f\x3b\x0b\xc0\x04\xaf\x3a\x0f\xd3\x90\xab\x31\xbe\x8c\xc3\x81\xf5\x36\x3c\x1a\x5d\xd0\x9d\xae\x45\x77\xba\xbf\x4d\xc0\x94\x98\xfa\x27\x61\x82\x4f\xfd\x43\xd0\xc2\x08\x84\xc7\x84\x4f\xde\x24\x9c\xf1\x7d\xf2\x5a\x2b\x69\x46\x58\xc1\x73\x70\x36\x60\x05\x6f\xad\xa8\x38\x59\xd4\x41\x59\xd3\x40\xce\x95\x4d\xdb\x81\xf4\x6f\x8f\x54\xc9\xa2\x87\xa5\x97\x43\xc0\x65\xa4\x26\xe7\x53\xfa\x51\x71\x8b\x27\x97\xa6\xbc\x5b\x3b\x28\xa1\x3a\xe3\x21\x87\x88\x16\x11\x2d\x22\x5a\x44\xb4\xdb\x07\xd1\xa2\x86\x16\xa3\xa1\xf5\x0e\xc3\x46\xef\xe4\xae\x78\x27\xa3\xa9\x00\x4d\x05\x68\x2a\x40\x53\x01\x9a\x0a\xb6\xb5\xa9\x00\x0f\xa9\xe0\x21\x15\x3c\xa4\xd2\xa9\x43\x2a\x68\x89\x43\x4b\xdc\x76\xb5\xc4\xe5\x8b\x6d\x3e\x9f\x15\x87\x93\xd3\xe1\xc8\xf7\x4e\x72\x87\x98\xcd\xbe\xa4\xed\xa8\x99\xab\xfd\x9e\xe7\xdf\x39\x04\x19\xe1\x79\xae\xe9\x76\xc1\x64\x7a\x49\x5d\x8a\x33\x6a\x68\x65\x53\x37\x1c\xbb\xa4\x17\xa8\x4d\x7e\x6f\x90\x7c\x76\x00\xf6\x78\xf7\x5f\x5b\xcd\x26\xd7\x9a\xcb\x70\x76\x4e\x96\x34\xcf\x4a\x6a\x53\x7a\xb3\x2c\x7f\x60\xda\xad\xcd\x42\xb6\xea\x25\xb3\xba\xed\x6c\xb5\xb0\xc9\x37\xda\x9f\xd4\x2c\x76\x80\x3f\x17\x3f\x68\x27\xc8\x31\x39\x68\x6b\x87\x8a\x1c\xb6\x55\x0d\x8f\xc9\xce\xd0\xc5\x1f\x03\x29\xa3\xfd\x08\xed\x47\x68\x3f\x42\xfb\x11\x06\x52\xc6\x40\xca\xc8\xed\x91\xdb\x23\xb7\x47\x6e\xdf\xeb\xdc\x1e\x03\x29\x63\x20\xe5\xad\x42\x2a\x31\x90\x72\x27\x02\x29\xff\xfa\x10\x1c\x60\x5d\xe7\x23\x40\xc3\x83\x67\
x22\x34\x32\xff\xfb\xa5\x31\xde\x86\x16\xff\x02\xf2\xbd\x43\xe4\x43\x03\xb0\xb3\x60\x5a\xdc\xeb\x75\xb5\x39\x22\x38\xcb\x4a\xb8\xc2\x2e\xb4\x09\x07\xee\xe5\x0f\x4c\x99\x16\x5d\xc8\x7a\xc8\x4f\xf3\x5f\x33\xab\xdb\x0e\x02\xc0\x7a\x00\xd8\x0c\xa1\xbb\x3e\x1b\x4f\x09\x87\xc9\xa0\xa4\x84\xa9\x94\xe4\x82\x7e\xe3\x57\xc3\x6c\xe4\x7d\xc8\xfb\x90\xf7\x21\xef\x43\xde\x87\xbc\x0f\x79\x1f\xf2\x3e\xe4\x7d\xc8\xfb\x90\xf7\x21\xef\x43\xde\xd7\x5d\xde\xf7\x02\x3c\x4f\xae\xa6\x9e\xf5\xe2\x62\x4e\xc8\x37\x8b\x23\xd3\x4c\xc1\xa9\x38\xcb\x8a\x5d\x30\xcb\x74\x44\xb1\x2b\x85\x65\xd6\xfa\x5c\x83\xa5\xea\x8a\xe0\x13\x65\xcb\xe4\x5f\x9a\xda\xed\xd1\x81\x4e\x06\xcc\x44\xa2\x88\x44\x11\x89\xe2\x16\x22\x8a\xff\x66\x08\xb2\xc2\xcd\x50\x65\xdb\xba\xa2\xce\x83\x9a\xb3\xbb\x6b\x9d\x0d\x25\x52\xe0\x9e\x86\xbf\x3d\x48\x7e\x75\x00\x48\xcd\x23\xeb\xa0\x8b\x93\x73\x33\xf3\xa2\xb0\x36\xd1\xc5\x47\xf8\x03\x93\xd5\x15\x5a\xc8\xfa\xef\x41\xbc\x18\x8e\x17\x5f\x88\x27\x87\xa7\xc8\x09\x49\x0e\xc3\xc7\x48\x15\x3c\x14\x64\xd1\x6f\x77\x74\x31\x44\xe4\x88\xc8\x11\x91\x23\x22\x47\x44\x8e\x88\x1c\x11\x39\x22\x72\x44\xe4\x88\xc8\x11\x91\x23\x22\xc7\x9e\x44\x8e\x08\x04\x11\x08\x22\x10\xdc\xc6\x40\xf0\xe7\xf7\xc0\xbc\x04\x82\x92\x15\xd5\xe0\x40\x9e\xb3\x2b\xd2\xef\xd0\x2c\xa8\x25\xbb\xc2\xa5\x95\x5a\x28\x50\xdb\x66\x3b\x0a\x7a\xd3\x26\x7f\xf6\xaa\xd4\x3f\xee\x80\xbb\xab\x0a\xf5\x32\x4a\x3e\xec\x45\x05\x9e\x65\x05\xcc\x8b\x02\x26\x79\x01\x57\x78\x01\xe9\x93\xe2\x96\xc9\xe0\xf3\x32\x67\x64\xc0\x9b\x30\xe2\xe9\x4d\x4e\xd4\x5f\xdd\x92\x6e\x3a\xb7\xa8\xca\xe6\x7f\xba\x6f\x7d\xb9\xe2\xd6\x57\x7c\x0b\xa9\xdc\xd7\xf9\x82\x17\x44\x34\xd9\x37\xc0\xd3\x81\x68\xb2\x6d\x7d\x43\x1c\xd4\x7c\x63\x3c\xd4\xbc\x44\x66\xeb\x12\xd1\xb9\x94\x33\x64\xe0\xbb\x1e\x93\x51\x35\x8a\x8b\x3d\x9b\xc7\x60\x84\x8d\x83\x11\xe6\x31\x5c\x74\x44\xb8\xe8\xad\x67\x05\x26\x3f\x9f\x86\xb3\x62\x11\xa1\x2f\x3a\xd4\xe0\xeb\x7d\xdc\xd2\xa1\x1b\x45\x8b\x0d\x0e\xf9\xeb\x4b\xe4\x9f\x87\xc9\xdb\xfa\x81\xf8\x25\x78\xcb\x45\xd2\xa2\xaa\x56\xc3\x35\x67\xc4\xe3\xe9\x7d\xec\xda\x39\xef\x99\xba\x25\x42\xde\xd7\xde\x15\x21\xaf\xc2\x79\x21\xb1\xcf\xc0\x29\x2e\xb1\x8f\xc2\x61\x38\x18\x29\xee\xfc\x6f\xf2\x64\x9d\xac\x57\xac\x68\x7b\x20\x5c\xce\xdc\x46\x06\x8a\xd4\x81\xeb\x0b\xf1\x92\xef\x20\xc9\x4a\x41\xe7\x57\x43\x8a\x37\xb7\x12\x75\x92\x31\xf9\xfb\xbb\x42\x7b\xe2\x01\x8b\x96\x4b\x6a\xa1\x36\x03\xb4\xdb\x19\xc3\xf2\x72\xb7\xfb\x23\x37\x03\x17\xe0\x5c\xcd\x0a\xdd\x5a\x87\x60\x14\x59\x8c\xf3\xdf\xa2\xe0\xfe\x70\xa2\x6d\x52\xe1\xa2\xd0\xd9\xa6\x21\xe7\xeb\x6c\x9b\x20\x62\xca\x95\xce\x89\x98\xf4\xf7\xdd\x1e\x2a\x62\x64\x20\x27\x36\xe6\x5d\xb1\x32\x24\x7e\xea\xba\x54\xe9\x4c\x60\x7e\x14\x30\x31\x02\x06\x83\x43\x77\x25\x38\x34\x46\x05\xc5\xa8\xa0\x18\x15\xb4\x53\x51\x41\xf3\x1f\x4f\xc0\x15\x01\x46\x2e\xc2\x4c\x00\x8c\x74\x28\x40\x62\x7b\xe3\x30\xba\xaa\x81\x12\xae\x1a\xec\x26\x3b\x45\x33\x35\xab\x1d\xc4\xe8\x02\xf5\xca\x43\xee\x5f\xed\x09\xd5\x0e\xf6\x95\x55\xcb\xd1\xb9\x4b\x8a\xe0\xa0\x11\x3b\x91\xc1\x32\x77\x89\xeb\xa2\xc6\x70\x5f\x6d\x41\xa3\xbc\x0a\xfb\xd9\x9f\x13\xf7\x07\x2f\xae\x50\xab\x48\x83\x57\xf7\x05\xaf\x72\x67\x3b\x5a\xd4\x0b\xa3\x75\xf7\x55\x95\xc2\xfe\x5e\x93\x57\x59\x2d\x73\x97\x60\x16\xf2\x35\x7b\xa0\x09\x38\xd6\xc2\x18\x98\xe3\x2e\x5b\xa8\xa7\xc4\xe8\x29\x1f\xe9\x87\x0f\xf5\x93\x0f\xf6\x27\xdf\xef\x09\xfb\xb7\xf4\x6f\x9f\x8d\x50\x8d\x95\x9b\xb5\x33\xf7\xda\xe2\x9e\x18\x6c\x70\xfa\xbe\x9b\x43\x11\x03\x77\x98\xaf\x59\xc2\xc9\x4f\x2d\xf1\x27\x99\x72\x26\x9e\xe6\x77\x70\x45\xc9\x56\x86\xf2\xb6\x69\xcc\x09\x27\xc6\xc7\xd9\xb4\x90\x7f\xcf\xbb\x93\xc5\xff\x71\x38\x7a\x87\x96\x7b\x25\x01\x9f\x4e
\x90\x97\x13\xc9\x4f\x79\x06\x9a\x0f\x24\xce\x9b\x56\x81\xeb\x70\x45\x93\x37\xbb\xa9\xa4\x96\xd8\x4f\x29\x65\xb2\xea\x2b\xb8\xe9\x4d\xa8\x5b\x15\xdb\x77\xd7\x1a\x55\x0b\xfc\xc3\xb9\x13\x5c\x49\x2f\xc8\xe5\x94\x96\x34\x5b\x31\x6f\xca\x36\x15\x4e\x62\x65\x6a\x96\x4b\x34\xa3\x88\x37\x72\xaf\x53\xb7\x2f\xb9\x09\x2c\xb4\x01\xbc\xd7\xa7\x76\xf0\x6a\x55\x79\x50\x76\x8f\x43\x3d\x14\xbe\x12\xec\x22\xb7\xf1\x6a\x76\x6c\x9b\x98\x3f\x04\xe3\xe4\x40\x2a\xe3\xe1\xd0\x3b\xf9\x7c\x92\x93\x5b\x3e\x96\x1a\x60\x3f\x06\x11\xe7\x16\x84\xa8\x5f\x24\x70\xd4\x0d\xf6\x11\xc1\x4c\xcb\xa6\xe6\xe2\xd2\xb1\xb2\x65\xbe\xb8\x36\xf6\x2f\xd8\xd7\xbd\x44\xde\x47\xc8\xf7\xf5\xf9\x31\x3f\x1e\x29\x98\x86\xc1\x3e\xf7\xc2\xb9\x27\xfd\x59\xe8\x98\x0a\x7f\x88\x35\xde\x9c\xa9\xa5\xd3\xf2\x2e\x11\xa3\xe3\x02\x75\xfc\xd5\x71\xce\xd4\xe6\xd8\xad\x4f\xeb\xce\xf2\x9c\xea\x2c\x8f\xb3\xfd\xb0\x40\x9f\x47\xe1\x7e\x31\xe4\xee\x86\x3b\xf9\x90\x7b\x35\xbc\xea\xc7\x12\xbb\x40\x7e\x49\xec\x40\x7a\x38\x7c\x20\x01\xd9\x25\xab\x03\xd7\x9f\x88\x1f\x4a\x19\x32\x52\x1b\xdd\xc2\xad\xb3\xdc\x67\x57\x79\xa9\x27\x43\x5b\x67\xee\xa9\xa6\x5b\x67\xae\x72\x0b\xb7\x4e\xea\xff\x0a\xb4\xce\xa3\x5e\xeb\x5c\x9e\x6f\xd0\x3c\xfb\xab\x9b\xc7\xb4\x6f\xe1\xf6\x49\xff\x60\xa0\x7d\x06\xdd\xf6\x99\x3e\x37\x7b\xee\xc9\x73\xd1\x2d\x34\x5a\xd5\x42\x82\x01\x6d\xad\x36\x0a\xb6\x41\x5c\x7b\x8d\xff\x50\xa0\x8d\x86\xdc\x36\xba\x3c\xf7\xe4\xcc\xe5\x4b\xf3\xd1\x8d\x94\xa9\x6a\x24\x59\x58\x57\x5a\xa9\xfd\x4d\xd0\x44\xc3\x4f\x84\xce\xb4\xc7\xce\x4d\x4e\x37\x3b\xd3\x1e\xa3\xaa\xb6\xb5\x46\xd1\x7a\x66\x5a\xee\xfb\x03\xed\xb3\xcf\x93\x44\x93\x4f\x4e\x3d\x16\xdd\x40\x23\xd5\xa2\x88\xa9\x23\x3d\x34\x82\xd6\xf3\xf9\x4d\x34\x7a\xfe\x24\x4c\x90\x63\xa9\x23\x9e\x66\x73\x5f\x50\x1f\xaa\x29\x7c\x33\xf4\xa2\xc3\x70\x90\x64\x53\x63\xde\x1b\xee\x62\xff\xba\x3e\x4f\x2e\x35\x4a\xf1\x6b\xc1\xc7\x54\xb8\x46\x9e\x4b\x7e\x8b\xab\x4e\x4d\xb2\xbe\xe2\x2e\xb2\xcb\x54\x79\xea\xca\xac\xe2\x96\x52\xb1\xa9\x87\x53\x0b\x15\xcb\xa2\x86\x23\x47\x83\xeb\x26\xc8\x86\x87\xa9\x65\xc4\x2b\x82\x0a\xd6\x6f\xdd\x0b\xa7\xdc\xb3\x8f\x65\xbb\x81\x9a\xa5\xa9\x74\x85\x6d\x2e\x1d\x4f\xd9\xb2\x39\x15\x21\xdf\x75\x2f\xf9\xdb\x3e\xd8\xc9\x1e\xe7\x03\x94\xdb\xa5\xc5\x35\xb7\x07\x7c\x14\x31\xcd\x4b\x99\xa7\x4e\x7a\x2f\xbb\x6f\xb2\x5c\xb6\x83\x91\xd0\xbc\xcb\x82\xb8\xb4\xd9\x40\xfd\x0c\x9c\x12\x63\xfb\x08\x1c\xe2\x63\x3b\x03\x23\x90\x8e\xf6\xc7\x29\x97\x6d\x6e\xa9\x70\x2b\xb5\x51\xbb\xf4\xe3\xf1\x73\x23\x4d\x86\x42\xa4\x2b\xab\x89\x9c\x29\x7e\x65\x92\x3f\xb3\xcb\x6f\x75\xd7\xc8\xdc\x44\xc3\xef\x93\xb7\x76\xb5\xed\x73\x67\xe1\x34\x9c\xac\x01\x31\xeb\x6a\x7c\x44\x2f\x68\x83\x6e\xd1\x06\xfd\x1f\x13\x1b\x9d\xf8\x39\x61\x7a\x3e\x01\xc7\x7d\xd3\x73\xf7\x84\x07\xb7\x38\x37\x27\x3c\x1a\x09\x8c\x2a\x9d\xe2\x6f\x5e\xe5\x0b\x8f\xf1\x3a\x7e\x1c\x2f\x45\x1e\x29\x8b\xb3\xe6\x5d\x92\x21\x08\x92\xb7\x9b\x34\x43\x90\x8c\x20\x79\x93\x41\x72\xc7\xf5\xc5\x58\x7e\xdc\x5e\xa1\x9f\x3f\x02\x87\xc8\x78\xea\x80\xb7\x0d\xb9\x3b\xb8\x4d\xf2\x9e\xb9\x25\xc0\xf1\xcf\x0e\xc3\x65\xef\x08\x87\x69\x17\xd4\x92\x6e\x14\xc7\x56\xc7\x85\xfb\x6d\xc3\xb8\xd1\xbc\x93\xd8\x94\x2e\x95\x4d\xcd\x7d\x9a\x5a\x36\xf9\xca\x10\xf9\xc7\x01\xb8\x33\x50\xe0\x35\x59\x60\xf2\x2d\x89\xe6\x62\xbe\x3c\xe6\x95\x3d\x67\x6a\x93\x5e\xd9\x6d\x0a\x00\x73\x52\x04\x80\xf1\xeb\xb7\x30\x5e\x63\xe1\x8d\x78\xff\xd6\x08\x0c\x13\x69\xe6\x6f\x7b\xc4\x98\x75\xc5\x9d\x2e\xc6\x4f\xd2\x69\x92\xf3\xcf\x55\xb8\x9d\x23\xe7\x6a\x44\x97\x04\x66\xee\xb8\xb4\xfa\x60\x7c\x18\x8c\x0f\x83\xf1\x61\x30\x3e\x0c\xc6\x87\xc1\xf8\x30\x18\x1f\x06\xe3\xc3\x60\x7c\x18\x8c\x0f\x83\xf1\x61\x30\x3e\x0c\xc6\x87\xe9\x6e\x7c\x98\xa
d\x87\x43\x30\x02\x0d\x46\xa0\xc1\x08\x34\x5b\x28\x02\xcd\x5b\x87\xe1\x9c\xc0\x97\x9a\x6e\x17\x4c\x26\x56\xeb\x82\x51\x47\xe0\x4b\x6a\x68\x65\x53\x37\x1c\xbb\xc4\xc3\x54\x7f\x7c\x88\xfc\xda\x00\xec\xf1\x8a\xb9\xb6\x9a\x4d\xae\x35\x07\x2b\xcf\xc9\x92\xe6\x4b\xed\x8b\x51\xbd\x9f\x3f\x30\xed\xd6\x26\x68\x3c\xac\x7a\xdd\xd6\x20\x92\x3d\x9a\x09\xef\xb9\x78\x22\x39\x41\x8e\x49\x22\x59\x3b\xc0\x24\x96\xac\xea\x0d\x4c\x8d\x87\x1c\x12\x39\x24\x72\x48\xe4\x90\xc8\x21\x91\x43\x22\x87\x44\x0e\x89\x1c\x12\x39\x24\x72\x48\xe4\x90\xc8\x21\x91\x43\x22\x87\x44\x0e\x79\x8b\x72\xc8\x97\x87\x61\x52\x06\x31\xe5\xf0\xa6\x69\x08\xc9\xef\x76\xa3\x98\xbe\x79\x98\xfc\xfe\x00\xec\x16\x3f\x5e\x5b\xcd\x26\x3f\x24\xdd\x25\x83\xd8\xd1\x70\x05\xa7\xb9\xa4\xdc\xd0\x0d\x4d\xe1\x40\xac\x2d\xd8\x71\xc4\x57\x91\xb8\x1a\x28\x17\x54\xa6\x65\xf9\xba\xdf\x60\x95\xba\x1d\x28\x3b\x93\xbe\xff\xa6\x07\xe8\xaa\x8e\x3b\xf0\x5f\x90\x52\xae\xc7\xb1\xf9\xa6\x70\x6c\xbe\x12\x4f\x28\xc7\xc8\xa8\x1b\x18\x23\x38\xf4\x5c\x3c\xc9\xeb\x81\x58\x12\xb1\x24\x62\x49\xc4\x92\x88\x25\x11\x4b\x22\x96\x44\x2c\x89\x58\x12\xb1\x24\x62\x49\xc4\x92\x88\x25\x37\x11\x4b\x8e\xc3\x01\x92\x49\x8d\x78\x58\xf2\x8e\xe0\x29\x53\xbe\x6f\xbb\x15\x4e\x98\x22\xca\x44\x94\x89\x28\x73\x0b\xa1\xcc\x7f\x1c\x84\xa1\x70\x94\x19\x3c\x13\x2e\xae\x91\x5f\x1b\x24\xff\x63\x00\x5e\xe3\x11\x4b\x71\xca\xfb\x8d\x4d\xfa\x4d\xb6\x0d\x5c\xba\xfe\x92\x2e\x7a\xe4\xd5\xe0\xff\x33\xab\xdb\xce\x79\xd3\x9a\x2c\x95\x3c\x1c\xd9\xbe\x70\x2b\xb7\x10\x89\x6c\xca\x5f\xf2\xe9\x78\x1a\x79\x88\x8c\xaf\x87\x46\xe2\x89\x6d\x44\x92\x88\x24\x11\x49\x22\x92\x44\x24\x89\x48\x12\x91\x24\x22\x49\x44\x92\x88\x24\x11\x49\x22\x92\xdc\x14\x24\x89\xf0\x0f\xe1\x1f\xc2\xbf\x6d\x0c\xff\x7e\xa8\x1f\xf6\xcb\x70\x90\x1a\x53\x65\x75\xd3\xb0\x68\x51\xe7\xe1\x80\xd9\x33\x35\x2c\x90\xfc\x6d\x1f\xf9\x5a\x1f\x3c\x10\x7a\xb3\x87\x03\xef\x29\x52\xa7\x7a\x15\x16\xdf\x97\xde\x5f\xa4\xce\x64\xd8\xa3\x92\xe0\x4d\xce\xcd\xb8\x9b\xc0\x36\xc6\x48\xae\x2b\x68\xa2\xae\xa0\x89\x66\x0a\xca\x97\xe1\x29\xc1\xe9\x2e\xc1\x2c\xe7\x74\xe7\x61\x1a\x72\x2d\x70\xba\xc0\x77\xce\xea\x76\x2c\xac\x23\x9f\x1b\x76\x53\xe6\x2f\x72\x18\x5b\x8d\x66\x23\x5c\x4d\x0b\x96\x69\x5c\x37\x17\x3d\x67\xd3\x7f\x3b\x4c\xfe\x7c\x00\x5e\xcd\x4b\xf0\xba\xea\xc3\xcd\x38\x9c\x4e\x59\xa6\x91\x37\x17\x7b\xc2\xe5\x34\xc5\x5f\x97\x63\xff\xd4\xa5\x69\x94\xf5\xec\x7d\xdc\xdb\xfd\x80\x9d\xeb\x4a\x3f\xc2\x87\x88\x24\xb8\xb2\x4d\xeb\x19\x6e\xac\x2f\x2b\x42\x5e\x84\xbc\x08\x79\x11\xf2\x22\xe4\x45\xc8\x8b\x90\x17\x21\x2f\x42\x5e\x84\xbc\x08\x79\x11\xf2\x22\xe4\xed\x32\xe4\x6d\x98\x14\x5b\xee\xef\xd0\xf3\x14\xe1\x33\xc2\x67\x84\xcf\x5d\x85\xcf\x1f\xbb\x03\x1e\xf6\x92\xd8\x9b\x1a\x0d\x4f\x57\xff\xef\xee\x20\xdf\x1d\x96\x08\x39\x32\x5d\xfd\x25\x53\xa3\xe9\x87\xeb\xf2\xd5\x9b\x1a\xed\x54\x72\xdf\x75\x9d\xda\xf6\x92\xfb\x7a\x35\x6a\x31\xbb\x6f\x32\xb4\x59\x22\xf3\xd4\x87\x34\xcb\x5c\xa5\xa3\xcd\xd2\x44\x56\xe8\xe6\x5a\xae\x51\x62\xed\xba\x56\x4c\xbd\x3b\x34\x2d\x74\x64\x82\x7a\xde\x2e\xa9\xfa\x0c\xf5\x5b\xa1\x61\xd6\x35\xa4\xd2\xdf\x1b\x96\x75\xbd\x51\x66\x7a\xde\x34\x8f\x84\xa5\xa6\xbf\xe5\x26\xd3\xf8\x7f\x0c\x34\xce\x70\x53\x29\xe9\x79\xeb\x3c\x1a\x9a\x93\xfe\x96\x6b\x9e\x89\xd0\x49\x15\x9d\x8b\x3e\x64\x52\xf1\x64\xf4\xb7\xdc\xa4\xca\xbd\x27\xd0\x30\x83\x4d\x24\xa1\xe7\x2d\xb3\x37\x24\x0b\xfd\x2d\xd7\x34\xf9\x53\x70\x82\x1c\x4f\x1d\xf5\xb6\x10\xf7\x07\x37\x1e\xb5\x8f\xd6\xef\x40\x5a\x4c\x0e\xbf\x08\x2f\x90\xe7\x93\x57\x5d\x2d\x28\xb7\xa1\xe4\xf0\x4c\x2d\x09\xc9\x0e\xff\xb6\x41\xb8\x3f\xcc\x20\xeb\xda\x5c\xc9\x1f\xef\x23\xbf\xd3\x5f\x6b\x6c\x7d\x54\x20\x36\x4b\xc2\x01\x77\xf7\x58\x63\x66\x
4d\x0f\xb2\xdb\x82\x16\x4e\x79\x61\xab\x1d\x6b\x79\x01\xce\x89\x51\x7b\x1a\x4e\xf2\x51\x7b\x04\x0e\xc1\x78\x64\x9a\x50\xde\x56\x19\xd9\x56\x19\xf9\xcd\xcd\x98\xc7\xaf\x3f\x18\x3e\xbc\x77\x92\x1d\xe2\x18\x4b\x73\xc6\xcf\x3a\x0b\x67\x03\x6b\x28\xda\x36\xd1\xb6\x89\xb6\x4d\xb4\x6d\xa2\x6d\x13\x6d\x9b\x68\xdb\x44\xdb\x26\xda\x36\xd1\xb6\x89\xb6\x4d\xb4\x6d\xa2\x6d\x13\x0f\xb0\xa0\x0d\x11\x6d\x88\x68\x43\xec\x96\x0d\xf1\xc7\x87\xe1\x09\x41\xe2\xac\x45\xb5\x90\x71\xe1\x50\xe8\xe9\x95\x86\xc7\x25\x2c\xb3\x44\x17\x75\x43\xd3\x8d\xa2\x4d\x7e\x77\x88\x7c\x75\x00\xee\x65\x45\x4e\x06\x4b\xf4\x50\xde\xcd\xe6\x22\xde\x5c\x31\x4b\x34\x27\x0a\x6d\x53\xdc\x9b\x23\xfc\x81\x2b\xb5\x15\xab\x3b\x0b\x11\x78\x33\xa6\x0c\xdc\x40\x08\x1c\x3d\x9e\x1d\x9e\x27\xd3\x82\x0e\x06\xda\xbc\xfe\xbc\x84\xa4\x89\x91\xa3\x14\x99\x22\x32\x45\x64\x8a\xc8\x14\x91\x29\x22\x53\x44\xa6\x88\x4c\x11\x99\x22\x32\x45\x64\x8a\xc8\x14\x91\x29\x62\xfa\x40\xa4\x96\x48\x2d\x91\x5a\xde\x42\xd4\xf2\xdf\x0f\x43\xbe\x6d\xd4\xd2\x26\x2f\x0f\x91\xdf\x69\x88\x2b\xcb\xcd\xe3\xca\x36\x71\xca\xb1\x75\x70\x4a\x04\x94\xe1\x80\x72\x31\x9e\x3d\x9e\x21\xa7\x62\xc1\xa2\x0f\x27\x43\xa2\xb8\x34\x03\x41\x91\x4c\x22\x99\x44\x32\x89\x64\x12\xc9\x24\x92\x49\x24\x93\x48\x26\x91\x4c\x22\x99\x44\x32\x89\x64\x12\xc9\x24\x92\x49\x24\x93\x48\x26\x91\x4c\xde\x3a\x64\xf2\xcb\x83\xf0\x90\x1b\x93\x45\x90\xc7\x32\xbb\x9b\xcb\xee\x55\xb3\x54\x59\xa1\x36\xf9\xe4\x20\xf9\x89\x01\xff\xd4\xfb\xb7\x36\x07\x17\xe7\xbc\x72\x16\x78\x39\x6d\x02\x8d\x0f\xf1\x07\xe4\x41\xfa\x9a\x57\x6c\x0d\xb0\xd8\xfd\x68\xd0\x1b\x8f\xba\x52\xdb\xd2\x08\x12\x11\x24\x22\x48\x44\x90\x88\x20\x11\x41\x22\x82\x44\x04\x89\x08\x12\x11\x24\x22\x48\x44\x90\x88\x20\xb1\x27\x41\x22\x62\x3e\xc4\x7c\x88\xf9\xb6\x31\xe6\xfb\x52\x1a\xce\x0b\x07\xc4\x82\x69\x5a\x9a\x6e\x84\xba\x1e\x46\x38\x1d\x96\xa8\x6a\x7b\xc1\x9a\xc9\xbb\xd3\xe4\xdf\xf4\xc3\x5d\xc1\x72\x3c\xbf\xc3\xd7\x5b\x54\xd5\x6a\xb6\xa6\xb3\xec\xe9\xf4\x3e\x76\x65\x2a\xf0\x4c\x9d\x67\x20\xbf\xaf\x7d\x59\x00\x39\x6c\x53\xe1\xbc\x60\x6a\x67\xe0\x14\x67\x6a\x47\xe1\x30\x1c\x8c\x0c\x49\x58\xd5\x36\x6e\x64\x42\x5e\xaf\x58\xdf\xbd\x07\xc2\x71\xd8\x6d\x64\xa0\x48\x1d\xb8\xfe\x5c\x3c\x81\x9b\x20\xc7\x24\x75\x0b\xe9\x22\x89\xe1\x44\x5d\xea\xbc\xfa\x92\x7f\xb2\x2b\xa2\x43\xee\xb3\x68\xb9\xa4\x16\x68\x68\x9f\x0c\xcb\x8b\xdd\xee\x96\xdc\x0c\x5c\x80\x73\x64\x60\xd1\xd4\xd6\x92\xfc\x5f\x25\x91\x6e\xad\x5f\x72\xef\xed\x83\xef\xeb\x23\xef\xe9\x4b\x7e\x8f\x27\x8a\xff\x3e\xf1\x34\x5b\xd3\xe4\x62\x3a\xc2\x41\x75\x81\x6d\x99\x85\xec\xac\xda\xc0\x05\xa1\xde\x22\x55\x5c\xf6\xad\x65\x94\x49\x43\xd1\x0d\x81\x4e\x4c\x4b\xa9\x18\x1e\x7d\xd1\x14\xcd\x5a\xbb\x52\x31\x14\x4d\xb7\x28\x13\x18\xd4\xdb\xd2\xb3\x95\x99\xab\x3e\x92\x69\xb8\x3b\x18\xb9\x8b\x54\x96\x2a\x16\xd7\x92\xcb\x96\x59\xa0\x36\xd7\x51\xa4\x7c\x92\x8b\x5f\x46\x59\xe0\x6f\xe4\xbb\x0f\xae\x54\x4c\x28\xa3\xca\x64\xa9\x34\xc1\x35\x10\xcd\x5a\x53\xac\x8a\xc1\x76\xb7\x4c\xc2\xb8\x6a\x97\x2c\x8e\x6a\xa9\xdb\x44\xd5\x02\xe2\x34\xf7\xee\x3e\xf8\xae\x3e\xf2\xae\xbe\xe4\x77\x78\x0d\xf4\x67\x09\xae\xb4\x3e\xae\x1a\x6a\x91\x5a\x62\xfb\x25\x4c\x8a\xb6\x6d\x16\x74\xae\x51\x78\x1b\x1d\x95\xef\x14\x4d\x4b\x61\x0a\xa7\xb3\xe6\xa9\x58\x2b\xea\x0d\x56\x7f\x67\x99\xda\xd4\x95\x79\x4c\x0e\xbb\x08\x8a\x53\x98\x45\xaa\x70\x69\xcb\x37\x35\xa6\xa5\x64\xc7\x8f\xb1\x7b\x2d\xb5\xc0\x59\x59\xc9\x34\x8a\x42\xc2\xf1\x8d\x07\xd3\x11\x55\xdd\x10\x8a\x07\x57\xec\xfd\x7b\x39\x3f\x90\x24\x8f\x6d\x47\xdd\x55\xab\x68\x96\x54\xa3\x98\x31\xad\xe2\x58\xf9\x46\x71\xac\x62\xe8\x05\x53\xa3\x63\x7b\x67\xec\x39\x56\x4a\x26\xb5\x27\xf8\xad\xc1\x75\xe6\xc3\x89\xb6\x09\x87\
x8b\xac\x9c\x2c\x99\x86\x1c\xec\x9c\x12\xba\xf4\x66\x48\x9a\x72\xa5\xe3\x92\x26\xfd\x83\xb7\x47\x48\x9a\xd7\x88\xed\x82\xa2\x4a\xe1\x32\x24\xfe\xbf\xeb\xb2\xe5\x49\xb8\x02\x73\x41\xd9\x92\xce\xc1\xd9\x16\xac\x28\x22\xf0\xb9\x0c\x56\x8c\x62\x26\x4e\xcc\x7c\xb9\x0f\xbe\xd4\x47\xbe\xd8\x97\xfc\x82\xd7\x40\x1f\xed\x7b\x32\xa8\x89\xe9\x6c\x43\xc0\xb5\x29\x65\x91\x2e\x09\xab\x8c\xb7\x05\xf2\xd1\xbf\xdc\x75\xf2\x2a\x06\xc4\x88\x61\x1a\xa3\x06\x2d\xaa\xbc\x21\xa4\x32\x16\x14\x37\x02\x86\x78\x5d\x20\x07\xa3\xbe\xb2\x42\x35\x26\xd0\x4a\x6b\xbe\xb9\xc4\x47\xe4\x7a\x69\x44\xea\x68\x5c\x4b\x55\x8a\x16\x5b\x2a\xcb\xd4\xd2\x4d\xcd\xd3\x35\xfd\x55\x93\x5b\xa2\xdc\xd6\xa8\xd8\xac\x92\x41\xfd\x56\x65\x4f\xba\x1f\x24\x5f\xb2\x24\xb0\x80\x5b\x44\x46\xd4\x73\x85\xaa\x46\x68\x1d\x53\x84\x57\x61\x8e\xd7\x20\x44\xf7\xcc\x7d\xa6\x0f\x5e\xe9\x23\x9f\xee\x4b\xbe\xec\x35\xf3\x0f\xf4\x4d\x07\xec\xaa\x65\xae\xac\x79\x0c\x67\xce\x32\xcb\x6a\x91\x77\xc0\x9c\x59\xd2\x0b\x6b\x55\x66\x1c\xf7\x5b\x7c\xc3\x2c\xeb\xa5\x6c\xe6\x68\x46\x99\x17\x1d\x22\x5a\xa7\xcc\xb6\x1e\x6c\xfb\xe6\xe2\x40\xaa\x98\x56\x79\x59\x35\x5c\xb3\x92\x55\xa1\x63\x4b\x6a\xc9\xc5\xe7\x29\x71\x35\xa5\x2c\xe9\x86\x5a\xd2\xdf\xe4\x72\xb8\x45\xca\x74\x5b\xbe\xd9\x19\x13\x3a\xad\xe6\xb3\x59\x51\xf8\xa0\xed\x3f\x24\x80\x75\x46\x39\xa7\xf3\xe1\x1c\xa8\xb8\x69\xd5\x7f\x99\xbf\x71\x74\x04\x2f\xe7\x93\xcd\x74\x96\x33\xa9\xdb\x45\x7d\xa6\xdd\x0f\xa9\xb2\x4f\xe5\xde\x3e\x00\x6f\x1d\x20\x6f\x1e\x48\x7e\xd3\x33\x52\x7e\xbe\xff\x69\x09\xb4\xd8\x94\x62\x3b\xb7\xa2\x6a\x2d\xaa\x45\xaa\x14\xcc\x52\x89\x72\x89\xeb\xcf\x0b\x6a\x2d\x99\xd6\x0a\x6b\x8b\xd0\x9a\x5e\xae\x79\x79\x74\x45\xf9\x78\x76\x07\x63\x59\x7c\x95\xce\x06\x4a\x41\xd7\x7c\x32\xcd\x21\x27\xb7\x13\x78\xad\xcb\xf6\x72\x12\x01\xba\x18\x33\x13\x68\x46\x17\x80\x79\xbb\x35\xcf\x34\x59\xfd\xb2\x8c\x32\x59\x60\x1b\x3b\xbe\xf6\x06\x85\xc3\xa0\xf8\x86\x41\x65\x54\x76\x7c\xf5\xc0\xb0\x4f\x28\x83\x39\xb5\x70\x83\xad\x2a\x86\xc6\xee\xe2\x36\x41\x7e\x53\x4d\xc3\x09\xea\x2c\x87\x7e\x75\x21\xee\x17\x2c\x7a\x25\x9d\x50\x06\xcf\x9b\x16\x0d\x14\xab\x14\x54\xbb\xa0\x6a\xec\xeb\x65\xfb\x08\x2b\x30\x2f\xcf\x16\xd2\xab\xae\xc0\x25\xaf\x8c\x4c\xea\x8e\x72\xed\xb8\x09\xea\x04\x1f\x4f\xc0\x15\xb6\x96\x8f\x93\x8b\x30\x03\xbb\x44\x6b\x50\x8d\x9c\x82\x13\x70\xbc\x85\x45\x64\xde\x51\x9d\x8a\x1d\x6d\xf1\x9f\x15\x0a\xc8\x39\x98\xe2\x0a\xc8\xc6\x5e\xe3\x2a\x08\x4a\xb8\x82\xb0\x9b\xec\x14\xcd\xd4\x69\x1d\x21\xf7\x1d\x7b\x22\x74\x84\x47\xca\xaa\xe5\xe8\xdc\xd2\x28\xb6\xb7\xa1\xdb\x92\xc1\xb2\xf0\xf0\xe8\x9e\xe2\x70\x5f\x6d\x41\xa3\xbc\x0a\xfb\x79\xf2\xf8\xfb\x83\x17\x57\xa8\x55\xa4\xc1\xab\xfb\x82\x57\x79\x7a\x7b\x5a\xd4\x0b\xa3\x75\xf7\x55\x95\xc2\xfe\x5e\x93\x57\x59\x2d\x73\x97\x60\x16\xf2\x35\x1b\xa2\x09\x38\xd6\xc2\x50\xe0\xb9\x25\x50\x5d\x89\x53\x57\x3e\xd2\x0f\x1f\xea\x27\x1f\xec\x4f\xbe\xdf\x93\xf9\x6f\xe9\xdf\x3e\xbb\xa2\x1a\xd3\x05\x6b\x67\x6e\x8a\xe7\xe6\x35\x36\x38\x7d\x87\x9c\xa1\x88\x81\x3b\xcc\x97\x2e\xe1\xb9\xa1\x96\xf8\x93\x4c\x47\x13\x4f\xf3\x3b\xb8\xbe\x64\x2b\x43\x79\xdb\x34\xe6\x84\x67\xca\xe3\x6c\x5a\xc8\xbf\xe7\xdd\xc9\xe2\xff\x38\x1c\xbd\x5d\xcb\xbd\x92\x80\x4f\x27\xc8\xcb\x89\xe4\xa7\x3c\xea\xf6\x81\xc4\x79\xd3\x2a\x70\x55\xae\x68\xf2\x66\x37\x95\xd4\x12\xfb\x29\xa5\x4c\x56\x7d\x05\xe7\xa9\x42\xeb\xaa\xd8\xbe\x0d\x7e\x54\x2d\xf0\x0f\xe7\x9e\x0d\x25\xbd\x20\x57\x55\x5a\xd2\x6c\xc5\xbc\x29\xdb\x54\x58\xfe\xcb\xd4\x2c\x97\x68\x46\x11\x6f\xe4\xae\x44\x6e\x5f\x72\xae\x19\xda\x00\xde\xeb\x53\x3b\x78\xb5\xaa\xdc\x62\xba\xc7\xa6\xa2\x33\xf7\xf3\x6a\x76\x7a\x3d\xc8\x8f\xc3\x01\x92\x49\x8d\x78\x6e\xcc\x77\x04\xf3\xc2\xf0\x87\x6e\x85
d7\xae\xe5\x5f\x74\x85\x43\x0e\x7e\x48\x29\xd5\x34\xcd\x95\x57\x25\x97\x8c\xed\x30\x7f\xb6\x0b\x98\x13\x74\x8c\x1d\x8d\x8d\x9b\xa6\xb0\x3d\x76\x53\x58\xed\x93\xd5\x9b\xc3\xda\x9f\xa2\xed\xf9\x10\xbd\x3b\xc4\xfe\x5a\xe4\xff\x67\x8c\xde\x95\xc5\x6a\x4e\xae\x88\x47\xaf\xac\xca\xb5\xb1\x11\xaa\x7e\x94\x8f\xd6\x92\x26\xec\x45\x90\x83\x6c\xa5\x0e\xd7\xe7\xa1\x10\x96\xf6\xc1\xe5\xba\x69\xec\x16\xd1\x84\x03\xdb\xbd\x27\x44\x7f\x9d\x3d\x17\xf9\x71\x7b\x27\x0a\x25\xa3\x0f\xc3\xe6\x5d\x84\xc0\x55\xab\x52\x29\x27\x55\x34\xd5\x4f\xd3\x22\x2c\x61\xa6\xbf\x0c\xbd\x27\x05\xd5\xa5\x0f\x3c\x51\x6b\x32\xb6\x4d\xd5\x6e\xe0\xe8\x83\xeb\x48\x68\x0b\x81\xf7\x48\x28\x42\x87\xd9\xc8\x48\x28\xab\x55\x47\x2f\xbe\x17\xa2\x3f\x0e\xb1\x3f\x0a\x45\xfe\xc0\xa6\xec\x3c\xfa\xe8\x93\x5e\xbd\x90\xa2\x9a\xfa\x64\x26\x98\x2d\x16\x15\x91\xca\xd6\xb5\x0b\x86\x12\x5c\x96\x54\x95\x2f\xef\x15\x65\x35\x7f\xd5\xdc\xaf\x88\x49\x5d\x59\x2d\x71\x65\x50\x15\x6b\x8a\xd6\x7c\x41\xb5\xb5\xac\xef\x63\xaf\x2b\x95\x67\xe5\x4a\xb4\xa4\xe4\x64\x03\x63\xd4\xb5\xe1\x3a\xf4\x7d\x9a\x7a\x67\x5f\x2f\xde\xd7\x47\x7b\xf5\xaa\xf7\xb9\x95\x02\xe7\xcd\x15\x39\x5f\xe0\x01\x44\xda\x1a\xa3\x3b\xa1\xb3\xdf\x0a\xb3\xdf\x08\xd3\x5d\xae\x27\x45\x22\x79\x9e\x05\x76\x59\xa9\xac\x98\x8b\xb1\x24\x2c\x0b\x89\x68\x5e\xae\x4e\xba\x5d\x35\xb9\x30\x77\x46\x3b\x23\xb8\xe8\xa9\xba\x86\x26\xea\x1a\x9a\x68\xa6\xa1\x4c\x81\xce\x0b\xa9\x7b\x86\x66\xb8\xd4\x3d\x45\x27\xe8\x58\x0b\xd6\x1a\xe3\x19\xfd\xd6\x36\xf6\x85\x21\x1a\xd7\x5f\x43\xb9\xac\x36\x58\xbc\x73\x72\xb9\xa8\xdc\xd0\x76\xcc\xa6\xf3\xff\x0f\x06\xd9\x1f\x84\xad\xa2\x45\xf7\xf2\xd0\x6a\x27\x35\x9e\x36\xaf\x4a\xdc\xa7\x1d\xae\x2d\xac\x66\x1d\x0f\x38\x7e\xfa\x49\xef\x85\xed\xa4\x18\xe2\x71\x3a\xc4\x87\x78\x94\x46\x68\x9f\x6f\xbd\x28\xab\xa3\xfe\xf1\x88\x79\xb9\x4a\xd7\xce\xfb\x2f\x7b\xfb\xd8\x50\x7d\xae\x4f\xeb\x46\x8e\x25\x2f\xf2\x8b\xdb\xad\x91\x8e\x1a\x11\xd0\x9e\x83\xfd\x80\x7e\x46\xa7\xc6\x3b\xcd\xcd\x8a\x35\xf1\x7b\xeb\x1b\x57\x84\xec\x21\xb4\xb9\xc5\xd0\xe6\x8f\x86\x36\xfa\x51\xd3\x94\x08\x67\x3e\x4e\x13\x56\x38\xf3\xba\x1b\xf1\xdb\x48\x04\x2c\x13\x7c\x42\xa2\x13\xef\xdd\x65\x89\x8c\x3b\xf5\xfc\xe3\x92\x5d\x4a\x44\xed\xd5\xe3\x3a\x20\x24\xda\x53\x2b\x0e\x72\xc3\x47\x6e\xa0\x5e\x51\x47\xea\x15\xa1\x50\x05\x0a\x55\xa0\x50\x45\xbb\x0a\x55\x64\x5e\x0a\x05\x9c\xad\xff\xa2\xa8\x1e\x7b\x8e\xe6\x6c\xd5\x63\xdb\x5c\x01\x20\xea\xbe\x62\xef\x60\xb7\x8a\x61\x6a\x76\xdf\x60\x57\x03\x1a\xea\x0b\xe9\xdf\xbf\xcd\x52\x02\x86\xea\xb2\x92\x78\x6e\x20\xee\xe7\x41\xf1\x9d\xd0\x0c\x90\x7a\xa4\xd7\xf4\x11\xa4\x1e\x41\xea\x91\x4d\x4e\x3d\xf2\xe4\x86\x37\x8c\x1b\x4e\x37\x12\xf0\x66\xb0\x71\x46\x11\xeb\xa2\xcd\x30\x37\x04\x9e\xb3\xe4\x8d\x30\xdd\xaf\x53\xcb\x55\xad\x85\xaa\xfe\x8d\x58\xa9\x4b\x46\xd9\xb7\xc2\xec\xbf\x85\xe9\x0e\xe7\x09\xda\x2a\x78\x77\x5e\xae\x3a\x5d\x7b\x84\xd1\x34\x71\x7f\x5e\xae\x4e\x3a\x4e\xbf\x94\x9a\x5c\x98\x33\xbc\x49\x03\x4c\xb7\x15\x18\x30\x2e\xd3\x63\x62\x1e\x6b\x8b\x98\x36\x8f\x67\x69\x9a\xd2\xad\x01\x63\xe3\x39\xe7\x0b\xaa\xef\xf4\x66\xff\x62\x88\x46\xc5\x1b\x28\x29\xb9\xba\x94\x31\xba\x25\xa0\x26\x59\xcc\x7b\x87\xd8\xbb\xfb\x68\xa7\x76\x81\x99\x29\xe6\x7e\x17\x68\x7c\x51\x5c\xcd\x83\xc5\x12\xf7\x68\x27\x9c\x57\x72\x46\x42\x16\xfb\xc1\x80\x99\xf1\x3b\x68\x46\x0c\xe6\x49\x3a\xce\x07\xf3\x30\x1d\xa4\x31\x4f\xa1\xc0\x1f\xdc\x48\x44\x64\xef\xd6\x06\x52\xd9\x71\x74\xfc\xb4\xbf\x64\x38\xca\xc6\x75\xc9\x60\x1b\x7e\x5d\x40\x38\xba\x52\x97\x50\x25\xf2\xcd\xed\x35\xaf\x20\xe6\x4e\x93\x1d\x6f\xe1\x3e\xfd\x9c\x8e\xbc\x88\xf4\x59\x9a\xa5\xe9\x1a\x8d\xac\xa5\x37\x01\x5d\x0c\x4c\xb9\x45\xa6\xfc\x62\x28\x28\x69\x90\x11\x68\x79\
x8a\x26\x2d\xb4\xdc\x79\xc9\xc2\x93\x64\x36\x27\x59\xea\x64\x46\x73\xa2\x26\xf1\xd3\xbb\x6a\x24\xcb\x5d\x26\x74\x76\x08\x93\x7b\xc5\xcf\x9d\x91\x25\x60\xce\x60\xce\x60\xce\x60\xce\x60\xce\x60\xce\x60\xce\xee\xcc\xb9\x39\xb5\xa0\x99\xdd\x85\x8b\xa6\x90\x7e\xe3\xb6\x1a\xb5\x60\x9f\x0f\x86\x76\x28\x0b\x7b\x38\x2e\x69\xbb\xae\x00\x0a\xdd\x6b\x1a\x0a\x28\x34\x28\xf4\x26\x53\xe8\x8e\x01\xa7\xb6\xee\xfb\x7c\x51\x77\xe3\xd4\x50\xf6\xb6\x3a\x90\xee\xfa\x83\x83\x94\x14\xe0\x52\x28\x0c\x35\x69\xae\xcb\x4a\x4e\x95\xb3\xab\x95\x42\xf5\x06\x3f\x5e\x90\x55\xf6\x9b\x71\xf6\x5a\x3f\xbd\x59\x9c\x6f\xae\x62\xcf\x35\x9d\x8d\x66\x51\x6f\x70\x41\x57\xaa\x02\xac\x36\x2e\x9a\xd4\x57\xc6\xba\x5b\xcd\x17\xd4\xe0\x0c\xb5\x6d\x4b\x50\xe3\xe9\xa8\x1b\x78\xe6\x1a\xe3\x73\x68\x26\xb3\xcc\x06\xbf\x19\x31\x59\xac\x60\x16\xe7\x8b\x41\xde\x1a\xe4\xad\x41\xde\x1a\xe4\xad\x41\xde\x1a\xe4\xad\x41\xde\x1a\xe4\xad\x41\xde\x1a\xe4\xad\x41\xde\x1a\xe4\xad\x41\xde\x1a\xe4\xad\x41\xde\x1a\xe4\xad\x41\xde\x9a\x8e\xe5\xad\x79\x6f\x88\x6e\x1f\x2d\x2a\x79\x75\xf4\xc7\x8b\x4a\xbe\x2c\x55\xaf\x3e\xc7\x14\x76\x84\xfa\xb5\xdf\x12\x6f\x2e\x2a\xf9\xd9\x42\x51\x3e\x2b\x95\x72\x45\xb9\x92\xd9\xed\x87\x5a\x33\x07\x28\xc5\x46\x63\x23\x26\xeb\xbc\x5d\xfb\xd7\x78\xe4\xa2\x92\x8f\xdd\xaa\xdf\xc6\x46\x39\xd9\xf7\x06\xe8\x4e\x23\x79\x0e\x7f\x57\x15\x3e\x52\xec\xbf\x0d\xb0\xff\xd8\x67\x65\x81\x19\x14\x6a\x68\x45\x5f\x40\x0d\x09\x6b\x14\xd4\x9b\xd7\xae\xbc\xa8\x5d\x99\x78\x50\x3b\x53\x24\x81\xb1\x7e\xdd\x6a\xb9\xab\x2f\x53\x5a\x10\xc0\x63\x74\x94\x13\xc0\x03\x94\xa2\x51\x4f\x3c\xae\x8d\x52\x72\x2d\x95\xb4\x1e\xb8\x19\x4f\xd6\x6b\xf7\xb9\xc3\xc0\x5b\xd9\x36\xc1\x01\xe7\xfd\x39\xe0\x10\x8b\xd7\xe6\xaf\xb1\x3a\xe1\xf4\xd2\x06\xf7\x03\xf7\x03\xf7\x03\xf7\x03\xf7\x03\xf7\x03\xf7\x03\xf7\x03\xf7\x03\xf7\x03\xf7\x03\xf7\x03\xf7\x03\xf7\x03\xf7\x03\xf7\x03\xf7\xeb\x10\xf7\xfb\xc3\x3d\xf4\xb8\x19\x42\xae\xa8\x59\xa9\x58\x28\xe5\x47\xd7\xc6\x96\xe4\xaa\x34\xe6\x95\x04\x93\xa3\x23\xa5\x54\x95\x8a\x65\x25\x67\x5c\x27\x57\xcc\xac\xd6\x2a\xf7\xc8\x67\x1f\xdc\xc3\xbe\xd5\x47\x77\xda\x1a\xbe\xa2\x37\x1c\x39\xc8\xe3\x9d\xc5\x79\xc6\xab\xb0\xd6\xe5\xb3\x66\xfb\x0b\x4a\x6e\xd2\x6c\x3f\x71\x82\xe7\xce\xb4\x9a\xbb\x24\x5a\xb3\x12\xb3\x78\x5c\x28\x22\x04\x02\x0e\x93\xae\x78\x7b\xec\x3d\x2e\x78\xdd\x02\x9d\xe7\xbc\x8e\xc7\xf1\x7a\x27\x55\xb0\x9e\x27\xa9\x0f\x4f\xd2\xe3\x39\x9a\x0c\x9c\xce\xfb\xa3\xba\x69\x96\x36\x52\x2a\x58\xb7\xd7\x99\x9d\xc7\xcd\x6d\x00\x4f\xf4\x92\x22\xdf\xd8\xe1\xfe\x76\xc7\x8d\x50\xea\xf5\xbe\xe0\xd3\x46\xbe\xce\xee\x78\xc7\xe9\xa7\xe8\x09\xba\x54\x13\x8d\x10\xd0\xbb\x44\x64\x02\x62\xb2\x5b\x8c\xc9\xfe\x6a\xa8\x6d\x12\x86\x9e\x16\x51\xda\x8f\xd1\xa2\x15\xa5\x1d\x5c\xeb\x1b\x8c\xdb\x6e\x4e\xb0\xad\x53\x8a\xb9\xc8\xc1\xf4\x6f\xee\x74\x17\x6c\xa7\xeb\x42\xb6\xd6\x2b\xe1\x4e\x8a\x84\x62\x9b\x2f\xdf\x10\xe9\xd5\x6b\xf2\x14\x91\x5e\x88\xf4\xda\xe4\x48\xaf\x4a\xfb\x16\xae\x0d\x27\x22\xeb\x94\xd6\x9c\x99\xa6\x34\x3b\x1d\x3b\x69\xba\x46\xec\xb5\x87\x81\x79\x34\x73\x53\xa4\x2b\xfb\xf0\x20\xed\xd7\x93\x65\xc9\xd5\xeb\x4a\x45\x13\x1b\xb6\x54\x65\x22\xf2\x4c\x3f\x64\x46\x9d\xfd\x8f\x38\xfb\x46\x3f\xbd\xc9\xba\xe2\xca\x5a\x2a\x72\xa3\xb9\xa0\xb3\xf3\xe2\xa2\x40\x03\xce\xc6\xf8\x05\xe7\xcd\xee\x5c\x4a\x39\xee\x82\xe2\xe8\xcd\x15\x47\x6f\x2a\xf6\xec\x19\xff\x4f\xf2\x18\x3b\x6a\x84\x67\xd6\xce\x29\xfd\xc3\x74\xbc\x1e\x78\xa1\xc0\x0b\x05\x5e\x28\xf0\x42\x81\x17\x0a\xbc\x50\xe0\x85\x02\x2f\x14\x78\xa1\xc0\x0b\x05\x5e\x28\xf0\x42\x81\x17\x0a\xbc\x50\xe0\x85\x02\x2f\x14\x78\xa1\x6c\x8a\x17\xca\x67\xe3\xf4\x60\x4d\x36\xaa\xd1\xb2\x92\xcb\x15\xd4\xca\x2a\xdf\x7c\x2f\xad\xe6\xf2
\x72\x55\x65\x3f\x11\x67\x7f\xd5\x47\x3b\xcc\x1c\x54\x91\x54\xe3\x58\xb0\x05\x25\x37\x6d\x36\x92\xe6\x8d\x24\x46\xb5\x4b\x8c\x8c\x51\x2e\x27\x6c\x35\x7c\x77\x95\x1e\x16\xf8\x6e\x96\xa6\x39\xbe\x3b\x49\xc7\x69\xc2\x13\xaa\xeb\x39\x4d\xd7\x52\x49\x97\x67\x0f\x24\x54\xec\x49\x7f\x6c\x77\x98\x1d\x74\xa9\xdd\x54\x9b\x2d\xaa\xb6\x77\x20\x76\x20\x76\x20\x76\x20\x76\x20\x76\x20\x76\x20\x76\x20\x76\x20\x76\x20\x76\x20\x76\x20\x76\x20\x76\x20\x76\x20\x76\x20\x76\x20\x76\x1d\x23\x76\xbf\x19\xa7\x83\x82\xd8\x55\x96\xa4\x6c\xd2\x80\x44\x35\xe5\x47\xf5\x32\x98\x4a\x51\x5e\x2a\x94\x72\x85\x52\x5e\x65\x1f\x8f\xb3\x9f\xe9\xa7\xdd\xda\x55\x93\xf6\x8b\xcc\x84\xf2\x43\x8d\x89\xde\x45\xa5\x28\xa7\x45\x63\x89\x71\xed\xd4\x8b\xb5\x2d\x19\x05\x52\xac\x13\xb7\x1a\xd1\xcb\xd2\x59\x41\xf4\x26\xe9\x14\x27\x7a\x47\x69\x9c\x0e\x79\x12\x3d\xfe\x06\xcc\x82\x08\xd6\x63\x07\x02\xf3\x0a\xfe\x30\x6f\x96\x4d\xeb\x00\xcf\x73\x2e\x18\x05\x13\xac\xce\xd5\x17\xcd\x01\xdc\x03\xdc\x03\xdc\x03\xdc\x03\xdc\x03\xdc\x03\xdc\x03\xdc\x03\xdc\x03\xdc\x03\xdc\x03\xdc\x03\xdc\x03\xdc\x03\xdc\x03\xdc\x03\xdc\xeb\x14\xdc\xfb\x44\x82\x1e\x13\x70\x4f\xad\x2a\x15\x29\x2f\xd7\x12\x3d\x3d\x54\xd7\x3d\x3b\x54\x56\x93\xc1\xfc\xb2\xac\x54\x96\xb2\x85\x6a\xc1\x38\xe1\x39\xf6\xca\x10\xfb\xb9\x6d\x74\xbb\x7e\xdc\x64\x7e\xbf\x12\xd2\x5f\x8f\x2d\x8a\xb7\x64\x08\x3c\x83\xfc\x4d\x2d\xce\x2d\x8a\xeb\xa6\x44\xbb\xc1\x84\xf4\x0e\x5b\x7a\x0e\xd7\xe5\xf4\x55\x51\x53\x95\x2c\x05\x2e\xee\xd0\x99\x6d\x6d\x27\x13\x62\x30\xf4\x9e\xe9\xec\xd1\xca\xe6\x51\xdf\xe9\x6e\x87\x8f\x9b\x11\x0d\xec\x1d\x9c\x7f\x5d\x04\xe7\xcb\xfe\x14\x32\xcd\x4e\x7b\x55\xa1\x74\xce\x62\x9d\x45\xd6\xbf\x19\x10\x48\x10\x48\x10\x48\x10\x48\x10\x48\x10\x48\x10\x48\x10\x48\x10\x48\x10\x48\x10\x48\x10\x48\x10\xc8\x4e\x13\xc8\x53\x74\x82\x1d\x8b\x1d\x35\x93\x9b\xdd\x67\x4f\x91\x56\xbf\x71\xbb\x19\xb2\xa3\x01\x92\x02\x92\x02\x92\x6e\x21\x48\xfa\xe1\xdb\x68\xdc\xcc\x9c\xef\xed\xfc\xa8\xae\x72\xc9\x23\x65\xb3\xb2\xaa\x6a\x1b\x1b\xf9\xba\xca\xbe\x45\xb1\xff\xb4\x8d\xee\x92\x5c\x1d\x20\xf7\x88\xe5\x2d\x2a\x45\x17\xc5\xb5\x93\xfc\xda\x8b\xfc\xda\xc4\x88\x38\xea\xe6\xf1\xe8\x72\x7a\xc0\xf9\xd0\x9f\xa0\x4b\xf4\x68\x4d\xce\xde\x69\x4a\x37\x4a\xe0\x69\x1b\x19\xc3\x45\xd1\xa5\x9f\x99\x17\xc2\xf4\x98\xc0\x8e\xe7\x69\x9e\x63\xc7\x59\x0a\xa4\x65\x7a\x4a\x64\xb4\x5e\xa4\x47\xac\x8c\xd6\x41\xb5\xfd\x76\xad\xed\x31\xf6\x28\x5d\xa4\xed\xda\x81\x72\x90\x8d\xb7\xee\xb9\x59\x56\xd4\x2a\x5d\x2b\xfa\x33\xd3\x39\x76\xc6\x4a\x68\xea\xe5\xb4\xe9\xd6\xb7\x7a\xe7\x4d\xa4\x5f\x6e\x9c\x7e\x39\x83\x74\xf6\x1e\xe9\xec\x03\xcf\x33\xfb\x77\x6e\xa3\xc3\x0d\x25\xf3\xa8\x2a\x17\x97\x5d\x05\xf3\x7f\xa1\xd8\x97\xb7\xd1\xae\x5a\xc1\x1c\x89\x5a\x32\x59\x2e\x2e\xbb\xc9\xe5\x61\x57\xb9\xec\x71\x76\xc0\x62\xf9\x71\x7a\x8c\x16\x6b\xc4\xf2\x14\x4d\x36\x2d\x83\x92\x1e\xdd\xcc\x7c\x2a\xec\x5d\xe4\xe4\x51\x21\xae\x1f\xa6\x73\x5c\x5c\xcf\x50\x10\x77\xa4\x27\x85\xb4\xbe\x48\x0b\x96\xb4\x0e\xa8\xe9\xa7\x84\xb0\xd6\x16\x02\x4b\x58\x07\xd3\xb6\x21\x92\x9f\xf5\x17\xb9\x67\xd9\xac\x2e\x56\x3d\xba\xe9\x92\x17\xc3\x55\x38\xfb\xc9\x7f\x48\x64\x48\xe4\x6e\x91\xc8\xff\x3d\x4c\xac\xbe\xca\x14\x7b\x25\xcc\x5e\x0e\xd3\x6d\xb6\xdf\x22\x91\xbc\xac\x4d\x94\x65\xa5\xb2\x62\xee\x36\xa4\x28\xff\x0e\x12\x6f\xcd\xcb\x55\x5b\xe5\x8c\xc9\x85\xb9\x33\xda\xef\xc1\x09\xd3\xba\x86\x26\xea\x1a\x9a\x68\xa6\xa1\x4c\x81\xce\x0b\xe9\x78\x86\x66\xb8\x74\x3c\x45\x27\xe8\x58\x0b\x36\x74\xe3\x19\xfd\x94\x41\xf6\xff\x86\x29\xe2\x55\xc7\x2b\x35\xca\xfe\x7b\x98\xfd\x56\xd8\xad\xa6\x49\x2a\x72\xb7\x36\xe2\x0e\x36\x29\xb6\x54\x89\x07\x9c\xc3\x2d\x0a\x95\xa4\x26\x17\xe6\x0c\x83\x58\x80\xd5\x4
8\x02\x1b\xf9\xf2\xba\xb6\x11\x8d\x47\xde\x78\xce\x66\xe2\xa8\xd8\x07\x2e\x53\x52\xbc\x80\x25\xee\x1c\xb3\x96\xf2\xf4\x8f\xa9\x28\xa5\x6b\xca\x92\xca\xfe\xeb\xd3\xec\x2f\x1e\xa2\xed\xfc\x7c\x4d\xc9\x78\xa8\x71\xe4\xdb\x54\x45\x29\x65\x94\xa5\xc4\x1e\xed\xb4\xb4\x76\xca\x25\xbb\xa3\x89\x38\xda\xe5\xde\x25\x69\x78\x58\xc0\xc3\x22\x0d\x0f\x0b\x78\x58\xc0\xc3\x02\x1e\x16\x3d\xe3\x61\x91\xee\x1a\x0f\x8b\xc0\x7b\xd2\xb2\x87\x45\x1a\x1e\x16\xf0\xb0\x80\x87\x05\x3c\x2c\xe0\x61\xd1\x79\x0f\x8b\x74\x4f\x7b\x1b\xa4\xe1\x6d\xd0\x3e\x6f\x83\x74\xb7\x7b\x1b\xa4\xb7\xa0\xb7\x41\xe6\xed\x74\x5a\x70\xad\xa3\x34\xce\xb9\x56\x8a\x46\x69\xc4\xd3\x70\xc1\x99\x52\x72\x2d\x95\xd4\xb1\x50\x53\xa9\x80\xe6\xfd\xad\x17\x43\x2c\xae\xdb\x24\xf8\x1d\x8c\x50\x1a\x71\x13\x87\xe9\xc2\x2f\xb1\x50\xec\x5b\xdb\x6d\xe8\x6b\x97\x69\x5f\x33\x28\xd7\x7d\xe2\x97\xb6\x73\x2e\x81\xa5\x38\x9f\xad\xb1\xa0\xed\xa3\xa1\xa6\xc7\x17\xd5\x87\x51\xcd\xbd\x45\x63\xcb\xb7\x43\x74\x5c\x7c\xda\x87\xe8\x00\xff\xb4\x47\x68\x3d\x53\x8f\x26\x85\xc9\x74\x82\x8e\x58\x26\xd3\x75\x36\x91\x16\xa6\xd1\x63\x74\xd4\x66\x1a\x5d\x67\x1b\xdd\x22\x5d\xb8\xf1\x33\xf1\xe5\x61\x9b\x74\xd9\x23\x14\xd7\x68\x56\x29\x6a\xdb\x50\x7d\x59\x31\x24\xcd\xa0\x38\xaa\x4b\x9a\x29\xf3\x9c\x76\xcb\x9c\x47\xe9\x22\x2d\xd8\x65\x4e\x22\x4d\xa7\x5b\xb0\x54\x4c\xf3\xee\x5f\xe0\xc4\x54\x05\x5b\x05\x5b\x05\x5b\x05\x5b\xed\x21\xb6\x0a\xd5\xcb\x47\xf5\xea\x1e\xf8\xfc\x7a\x98\xbe\x13\x66\xdf\x0e\x47\xbe\x65\xbe\xaa\xcf\x85\x1f\xb5\x6f\xf7\x0a\xa5\xa8\x2a\xb6\x6c\xd1\x25\x79\x59\x58\x6a\x4d\x2c\x62\x2d\x59\xfa\x57\xc2\x07\xcb\xa6\xe4\x95\x94\xd2\x48\x49\xce\x4b\xfc\x95\xe8\x3b\x3e\xbb\x32\x28\x00\xa9\x39\x19\x74\xb5\xa0\xb0\xb2\x22\xe7\x34\x75\xb3\x78\xc3\x32\xa1\x5a\xa2\xbd\x50\x1c\xd6\x37\x82\xfc\x71\xa3\xf9\x8a\x94\xe5\xf3\xa4\xa0\xe4\xcc\x85\xc7\x5a\x1c\xb8\x75\xda\x78\x2f\xab\xaa\xd6\x49\xfb\x40\x49\xda\x95\xc6\x03\xe9\x37\x59\x16\xdf\x9b\xd1\x44\x52\xf4\x73\x45\x96\x4a\xae\x7d\x8c\x31\xde\x85\x05\xde\x03\xb7\x0d\x2e\x18\x3f\x18\x3f\x18\x3f\x18\x3f\x18\x7f\x4f\x33\xfe\xaf\x85\xe9\xb5\x30\x7b\x35\x1c\x79\xc5\x5c\x6d\x3f\x16\x9e\xb6\x65\x4c\x2a\x17\x65\x49\x95\xcd\x4f\x7f\xa1\xa2\x94\xa5\x3c\x5f\x87\x45\x79\x44\x87\x87\x8f\xf1\xba\xad\x94\x4b\xda\x0b\x4f\x25\xc7\x93\xd1\x45\x21\x47\xc4\x22\x59\x96\x4b\xda\x34\xb5\x56\x11\x39\xaa\x54\xca\x57\xa5\x92\xe1\x71\x54\x59\x95\x47\x97\xa5\xa2\xa1\xfd\xc7\xc4\xd1\x58\x74\xb9\x50\x92\x8a\x85\x77\x19\xe2\x7b\x49\x8e\x4a\x39\x0e\xcc\x95\x51\xc1\x4f\x73\x96\x6a\x29\x1a\x8f\xab\xd6\x45\x42\xdf\x4e\x46\x67\x0a\x5c\x24\xd9\x3a\xae\x54\xea\x9f\xcc\x32\x52\x54\x85\xba\xcf\xb5\x3f\xa5\x7a\x35\x19\xdb\x25\xfa\x33\x6d\x3c\x88\xd3\x75\xe9\x03\xfd\xf4\xfe\x7e\xf6\xde\xfe\xc8\x1b\xa6\xff\xda\x37\xfb\x1e\xd7\xe5\xa0\x36\x45\xaf\x2a\xd7\xa3\x79\xa9\xb2\x24\xe5\x1d\xbc\xc1\x54\xd4\xe4\xca\xb2\x52\x59\xd1\xc6\xc2\xb5\xa7\x17\x6a\x6e\xee\xdd\x51\xae\xd6\x18\x3a\x89\xa8\x9c\xa8\xcd\x88\x9c\x9c\x2d\xe4\x2c\xc5\x9a\xaf\x8d\x7c\x9b\x63\x8e\xae\xca\x3d\x6c\xf9\x51\x63\xf5\x4b\xda\x86\xd1\x90\x9b\xa6\x65\xc0\xf4\x5a\x73\xde\x2c\x19\x15\xa4\x88\xcb\x62\xbb\xb6\x1a\x17\xcf\x10\xd7\xf6\x3c\xfc\x2f\xe7\xc4\x50\x8f\x45\xe3\x69\x29\xfb\x6c\xbe\xa2\xac\x96\x72\xda\x59\xdc\x5d\x8c\x9f\x54\x33\x70\x42\x59\xd1\x35\x20\x67\x23\xc6\x13\x2c\x99\x2d\x1d\x8b\xc6\x67\x95\x8a\x6c\x6b\x36\x9a\x95\xd4\xac\x94\xd3\x9e\x5e\x1f\x1f\xe1\x20\xc8\xdb\x53\x85\x3a\x5d\xd7\xe0\xb2\xd9\x46\x32\x76\x47\xb9\x76\xde\xd8\x75\x1b\x98\xd0\x60\x42\xeb\x51\x13\x5a\x26\x4f\xf3\x82\x59\xcf\xd0\x14\x67\xd6\x27\xe8\x18\x1d\x6d\x01\x5e\x2e\x56\xa5\xea\xaa\xea\x0b\x8f\x13\xee\xb8\xf7\x4e\x76\x87\xf8\x9a\x2d\x49\x
4b\x4d\x82\x66\x6f\xb6\xec\x40\xd0\x5b\x30\x93\x00\xfb\x9e\x19\x0b\x90\x2b\xa8\x59\x45\xd3\x75\x6c\xb1\x6f\xec\x6b\x61\xf6\x5a\x98\x76\x9a\xc7\xae\xac\x35\x08\x02\x88\xe4\xe5\xea\xb4\x71\xe6\x25\x78\xff\x7b\x78\xff\xbf\xf0\x8c\x11\x72\xd8\xa8\x1c\xce\x68\xb6\xb8\xaa\x56\xe5\x8a\xa3\x20\xce\x77\x2f\xb3\x97\x07\xe8\x2d\x6e\x05\x71\x22\xfb\x7d\x22\x02\x44\x73\xf6\x92\x38\x43\x1e\x25\x71\xea\x4f\x45\xa4\x00\x22\x05\x10\x29\x00\x6b\x16\xac\x59\xb0\x66\xc1\x9a\xd5\x35\xd6\xac\xee\x31\xd6\xc0\x8a\x00\x2b\x02\xac\x08\xb0\x22\xc0\x8a\xd0\xd3\x56\x04\x60\x4e\x60\xce\x1e\xc5\x9c\x5b\x32\x52\x40\xa6\x8c\x60\x60\x53\x34\xc9\x19\xd8\x31\x3a\x4a\xe3\x7e\xc5\x83\x93\xf5\x80\x28\x90\xf2\xc1\xcd\x65\x44\x6a\xb2\x7c\x70\x7d\x1f\x1d\xe0\x36\xf6\xe5\x1d\x1e\x1c\xed\x1e\x2b\xbc\xa0\x1e\x99\xed\x13\x07\x3b\x0b\xcd\x04\xe3\x9a\xa3\x33\x34\x53\x13\x76\x70\x88\x0e\xb4\xf0\xb2\xe0\x05\x87\x00\x84\x16\x03\x10\x7e\x18\xa2\x59\x21\x31\x4e\xd1\x09\x2e\x31\xc6\xa9\xb5\x49\x48\xe7\x44\x28\xc2\x34\xa5\xad\x50\x84\x96\x1b\x9b\x17\x41\x09\x33\x34\x65\x0b\x4a\x68\xb9\xb5\x8d\x66\xd3\xec\xa4\x20\x4b\xfc\xcd\x11\x0f\x41\x16\x77\x8f\x64\xa8\x17\x6a\x87\xc4\x89\x6e\x42\xcd\xbc\xb4\xed\xe2\x0d\x11\x0e\x60\xc2\x60\xc2\x60\xc2\x60\xc2\x88\x70\x40\x84\x03\x22\x1c\x10\xe1\x00\xdb\x04\x6c\x13\xb0\x4d\xc0\x36\x01\xdb\x04\x22\x1c\x10\xe1\x80\x08\x07\x44\x38\xc0\xf4\x07\xd3\x5f\x17\x9a\xfe\xba\x3a\xc2\xa1\x93\x2c\x3a\xf8\xf0\x84\x6f\xc6\x29\x25\x7c\xe5\x9d\x75\xf9\xcd\x92\x69\x59\x4d\x35\xe2\x47\xb2\xa2\xc0\x63\x41\x56\xd9\xc7\xe3\xec\x67\xfa\xe9\x76\xfd\x80\x59\x2c\xcd\xcf\x43\xbe\xae\x56\x64\xe2\xa0\x76\x85\xfe\xa3\x5e\x38\xad\xfe\xac\x59\xa5\x32\x59\x2c\x9a\x19\x80\x82\x8b\x75\x68\x8f\xb3\x7c\x66\xc5\xbb\x4e\xcf\x45\x31\x8d\xcf\xd1\x1c\x9f\xc6\x53\x34\x49\xa7\x3c\x0d\x28\xc6\x1b\x31\xea\x92\xd5\x8f\xcc\x7c\x41\xad\x36\x6b\xfc\x95\xfd\xe7\x69\x9a\x9d\xd6\xe7\xa9\x73\x32\x18\x93\xb3\xee\xfe\x2e\xa5\xc7\x10\x29\x80\x48\x81\x0c\xac\x42\xb0\x0a\xc1\x2a\x04\xab\x50\xcf\x58\x85\x32\x5d\x63\xf4\x08\xbc\x27\x2d\xd3\xf8\x0c\x68\x3c\x68\x3c\x68\x3c\x68\x3c\x68\x7c\xe7\x69\x7c\xe0\xa4\x20\xd3\xd3\xfc\x31\x03\xfe\xd8\x3e\xfe\x98\xe9\x7a\xfe\xb8\x05\x43\x0f\xd8\xa7\xfa\x29\xaa\xbd\x3a\x9e\xea\x42\x59\x29\x2b\x25\x6d\xc1\xe3\xfc\xd3\xa8\x81\xf9\x1c\x7b\x4f\x3f\xfb\x93\x30\xdd\x9a\x55\x2a\xf2\x95\xb5\x54\x24\x56\x91\xa5\x5c\xcd\x3e\x73\xca\xb8\x56\xb0\xd3\xc4\x6e\xed\x9c\x29\xa5\x22\x5f\x4a\xd5\x1c\x0a\xd6\x61\x35\xf3\x0c\x4d\x09\x5a\x76\x9c\x26\x38\x2d\x3b\x48\x63\xb4\xdf\x93\x96\x69\xcf\xc0\xdd\x8d\x9d\x9d\xf2\x65\xbd\x8f\xf8\x73\xb1\x24\x1b\xd6\x09\x58\x4d\xdb\x2e\x99\x6a\x62\x31\xba\x76\xaf\x3b\x8a\xbb\x85\xf5\xe7\xe5\x2a\x65\x8e\xd3\x04\x3b\x12\x3b\x6c\x66\xb1\xb9\x87\xbb\xb9\xeb\xd3\xb6\xe6\x16\xb1\x7e\xed\x60\x5b\x33\xd4\xfc\x70\x0f\x3d\x2a\x10\xf0\x72\x51\xb9\xae\xad\xda\x15\xa5\x98\x34\x29\x50\x2d\x10\x2e\x57\x0a\x4a\xa5\x50\xbd\x51\x94\xd7\xe4\xa2\x83\x0f\x18\xb3\x6a\x54\xcc\x32\xf6\x8f\xf6\xb0\x77\xf7\xd3\x1e\x5b\xab\x93\x46\xa3\x26\x32\x1e\xe7\x33\x4e\x5c\x60\x0c\x81\x35\xf7\x16\xf4\x7b\xcd\x6b\xf7\x9a\xb2\xdf\x2b\x71\x4a\xbb\x70\xd6\xa5\x69\x1d\x23\x7b\x5f\xda\x96\xc9\xba\x2a\x2a\x9a\xef\xe7\x15\xcd\xb5\xc9\x7a\x8e\xe6\xe8\x8c\xe7\x64\xb5\x8f\xb4\x81\x77\xbd\x7b\xec\x3b\x87\x7f\xdc\x7f\x0e\x3f\xc1\x2e\xe9\x33\xb4\xd1\x5b\xd6\x49\x6f\x83\xae\xd4\x11\x5f\x9f\xd9\x1e\x79\x0f\xf9\x4c\x81\x89\x8a\x5c\x2e\x4a\x59\xb9\x95\x59\x90\xd6\xaf\xed\x9a\x89\x90\xbe\x4c\x6f\xa7\x27\x6b\xa2\x88\x82\x9b\x09\xf0\x3e\x45\x64\x51\x8b\x91\x45\xdf\x08\xb5\x55\x44\x5d\x11\xd1\x46\x4f\xd0\x25\x2b\xda\xa8\xa3\x32\xd0\x5b\x0c\x95\x57\xab\xb4\xa9\x22\x32\xfd\xc6\x4e\
x1f\x19\x38\x55\x96\x2a\xd5\x02\x07\xf2\x42\x0b\x6c\x45\x18\x9e\x2e\x6b\x4a\x60\x57\x88\xc2\x7b\x6a\x1b\x1a\xe1\x7d\xdb\xc7\xf3\xc9\xed\xb1\x1f\x5c\x91\x2b\x79\xd9\x7e\x74\xc0\x7e\x54\xdb\x45\x56\xe5\x7c\x21\x3b\x52\x77\x9e\xa3\x15\xed\xef\x1b\xfa\x51\xad\x97\xe9\xf3\x34\x4f\x99\x1a\x31\x3c\x41\x47\x5a\x70\x19\x58\xe0\x66\x2e\xc8\x5d\x1f\xb9\xfb\xd9\x3e\x7a\xb1\x8f\xbd\xd0\x17\xf9\x94\x69\xf7\x7d\xbe\xaf\x77\xe4\x6e\x0d\x19\xd4\xc6\x99\x5b\xba\x38\xbd\xd6\x26\xa7\x65\xef\x1e\xf4\x98\xb8\x43\xdc\xe4\x23\x0c\xa3\x52\x91\x5f\x59\x52\x4a\x23\xe2\x6a\x7e\x06\x37\x8c\xab\xd1\xc1\x8c\xaa\x94\x16\x84\xe1\xf7\x61\xed\xb3\xd0\xff\x5e\x34\x3e\x16\xeb\xc7\x21\xef\x05\x21\xfd\x5a\x88\x5e\x0d\xb1\x57\x42\x91\x97\xcd\x4d\xed\xa7\x43\xb3\x4a\x25\xcb\x8d\x89\x79\x85\x0f\xbb\x12\x8d\x2d\x6b\x3f\xc5\xa2\x93\x8e\xa7\xe0\xb8\x42\x84\x0c\xac\xaa\x96\x89\x6b\x44\xca\xf2\x07\xe7\x86\xc3\x62\x21\xab\xfb\x02\xca\xc5\x9c\x1a\x55\xae\xeb\x63\x2a\x0c\x6b\x65\x59\x29\x17\xe5\x64\x54\xdc\x91\x5b\xea\x8d\x77\xc9\xb1\x81\xeb\x00\x98\xb7\x8f\x6d\xe3\xdd\x72\x58\x9d\x37\x57\x07\xbf\xdf\x7d\xfd\xd9\xce\x6e\xe1\x5d\xdf\xdc\x15\x28\x73\x86\x66\xd8\x54\x6c\xd2\xdc\x73\x0e\xd8\xf7\x9c\xde\x2d\x75\x60\xfb\xf9\x13\x43\x34\x2b\xb6\x9f\x25\xb9\x7a\x5d\xa9\x68\x9f\x7b\xed\x9e\x93\xd3\x8d\x51\x33\x61\xab\xbe\xd1\xe4\x7f\x3f\x37\x5a\x28\xe5\x2b\x9a\x58\x52\xd9\x4b\x83\xec\x3f\xf5\x13\xb3\xda\x31\xd7\x57\x55\x27\xf3\xa5\x5c\x61\xad\x90\x5b\x95\x8a\x76\x66\x23\x99\x56\xb0\x39\xd1\x54\xd2\xe6\x75\x3c\x61\x1a\x47\xe2\xbc\x8d\xb8\x65\xf9\x77\x98\x72\x95\xb2\x6c\xc6\x14\xa9\x55\x59\xca\x25\x13\xfb\xf8\x05\xe7\xcd\xde\xe8\xab\xb0\x55\xca\x4c\xbf\xdd\x7c\x41\xad\x76\xbb\x4f\xd3\xb3\xb4\x20\xbe\xad\x39\x3a\xc3\xbf\xad\x49\x3a\x45\x27\x5a\x58\x4e\x39\x35\x9b\x59\x93\x4b\xfe\xe9\x2c\x2e\xfb\x7f\x30\x13\xec\x88\xfe\xc1\xd4\x4d\x1e\xfd\x2b\xd1\xc7\xd8\x65\xdf\xba\xd7\xfd\x83\xdd\xc9\x88\x3f\x37\xf7\x9a\x82\x3b\x13\xdc\x99\xe0\xce\x04\x77\x26\xb8\x33\xc1\x9d\x09\xee\x4c\x70\x67\x82\x3b\x13\xdc\x99\xe0\xce\x04\x77\x26\xb8\x33\x75\xd8\x9d\x69\xeb\x55\x7e\x81\xc3\x14\x1c\xa6\xe0\x30\xb5\x85\x1c\xa6\x7e\x69\x90\x0e\x08\x10\x29\xad\x56\x15\x35\x2b\x15\x0b\xa5\xfc\xe8\x9a\x41\x1f\x39\x9c\x52\x4a\x55\xa9\x58\x56\x72\xc6\x19\x72\x45\x65\x3f\x8a\xb3\x3f\xed\xa7\x37\xdb\x2e\xba\xb2\x96\x8a\x3c\x1f\x6a\x0e\x39\x9e\x35\x9b\x5d\x50\x72\x93\x66\xb3\x01\x21\x48\x81\xf0\x26\xad\xae\x5d\x4a\x79\xdc\x70\xbe\xa0\x56\xb7\x5a\xa0\xe5\x66\x40\xc9\x66\xa8\xe1\xb5\x25\x7f\x72\x79\x8a\x9d\xd0\xc9\xa5\x6d\xe2\xe8\xcc\xd2\xe3\x15\x39\x43\x81\x41\x26\x41\x26\x41\x26\x41\x26\x41\x26\x41\x26\x41\x26\x41\x26\x41\x26\x41\x26\x41\x26\x41\x26\x41\x26\x3b\x4b\x26\xc1\x0d\xc1\x0d\xc1\x0d\x7b\x98\x1b\xbe\x34\x48\xf7\x1b\x81\x96\xee\x8e\x8a\xcf\xb1\x0f\x0f\xb2\x5f\xef\xb7\xe2\x2c\x7f\x51\x87\x83\x76\x22\x58\x32\x44\x96\x91\x2e\xcd\xe4\x70\x81\xb0\xc0\x61\x4b\x41\xe1\x4a\x98\xbe\x9c\x69\x3a\x8e\xa5\x79\xc5\x1d\xca\xae\xad\xed\x64\xe2\x2d\xfc\x76\x22\xee\xd3\xec\x59\xf7\x03\x42\xcf\x4c\x6c\x81\x93\x43\x7f\x3f\xe0\xeb\xc2\x0f\xf8\x9c\x3f\x1c\x1c\x64\x03\x56\x38\xa9\x60\x82\xe6\x98\x83\x02\x82\x02\x82\x02\x82\x02\x82\x02\x82\x02\x82\x02\x82\x02\x82\x02\x82\x02\x82\x02\x82\x02\x82\x02\x6e\x26\x05\x3c\x4c\x07\xd9\x58\x6c\xbf\xe9\x9f\x78\x97\x3d\xbe\xd2\xdc\xbb\xb5\x3f\x9c\x12\xf4\x10\xf4\x10\xf4\x70\x0b\xd1\xc3\x3f\x0f\xd3\x3d\xde\xe1\xcf\xa3\xec\xd7\xc3\xec\xab\x61\x7a\x93\x23\xa6\x39\x72\x77\x5e\xae\x3a\x57\x67\xd1\xff\xc4\x3d\x79\xb9\x6a\x0f\x38\x9e\x5c\x98\x33\x36\x83\x01\x66\xf7\xa8\x6b\x68\xa2\xae\xa1\x89\x66\x1a\xca\x94\xe9\x31\x81\xe1\xce\xd3\x3c\xc7\x70\xb3\x34\x4d\xe9\x16\x30\x9c\xed
\x39\x9b\xa9\x94\xcf\xbe\x7b\x99\x4e\x89\x51\xf7\xac\xc8\xa1\x8d\xbe\x47\xc4\x79\x45\x29\xca\x2a\xfb\xa7\x97\xd9\x87\x07\x3c\x4a\x43\xc7\x1a\x57\xc2\xb8\xa8\x14\xe5\xc4\x43\xda\x39\x2e\x35\xa1\xad\xf8\x70\xed\xb4\x2e\x47\xac\x69\x00\x48\x00\x48\x54\x01\x07\x80\x04\x80\x04\x80\xec\x1d\x00\xd9\x45\x45\xae\xbb\x06\x40\xa2\xfa\x32\x00\x24\x00\x24\x00\x24\x00\xe4\x66\x54\x5f\xee\x69\xee\x87\xf2\xb0\xbd\x5c\x1e\x36\xbd\x05\xb9\x5f\xe6\x71\x3a\x2e\xd0\xd7\x21\x3a\xc0\xd1\xd7\x08\xed\xa3\x21\xcf\x64\x95\x1c\x53\xad\xa5\x92\x17\x95\x62\x53\x84\xcb\xb7\x1c\xe8\xba\x92\xeb\xf9\x95\xad\xd5\x7a\xe5\xf0\x4b\x8b\x7d\x77\xbb\x07\x19\x7b\x93\x58\x89\xa2\x92\x80\x60\x71\xf1\xdf\xce\x61\x30\x41\xad\x8e\xd1\x51\x1a\xaf\x49\x0f\x1c\xa7\x87\x9a\x1a\x7e\xe4\x02\x46\x0e\xf6\x16\x73\xb0\x7f\x3d\x44\x47\xc4\x57\x9f\xa2\x51\xfe\xd5\x0f\x51\xb3\xd3\x8e\x4e\x88\x0c\xeb\x87\xe9\xa0\x95\x61\x7d\x1d\x97\x9f\xd4\x2e\x1f\x63\xe3\x74\x88\xb6\x8b\x52\xfa\xeb\xbb\xbe\x75\x81\x53\x56\xda\x2f\x70\x12\x7f\x38\xec\x21\x70\x76\xeb\x05\xfd\xad\x9a\xdd\xda\x07\xc0\x85\x4f\x4a\x1c\x72\x11\x3e\x53\xe6\xc9\x6d\x15\x43\x8f\xd2\x45\x5a\xb0\x8b\xa1\x44\x9a\x4e\xb7\x60\xfe\x98\xe6\xcf\x71\x81\x33\x56\x15\x34\x16\x34\x16\x34\x16\x34\xb6\x87\x68\x2c\x34\x32\x1f\x8d\xac\x7b\x70\xf5\xeb\x61\xfa\x4e\x98\x7d\x3b\x1c\xf9\x96\xf9\xaa\x3e\x17\x7e\xd4\xbe\x41\x2c\x94\xa2\xaa\xd8\xe4\x45\x97\xe4\x65\x61\xdb\x35\x41\x8a\xb5\x64\xe9\x5f\x09\x1f\x2c\x9b\xee\x57\x52\x4a\x23\x25\x39\x2f\xf1\x57\xa2\xef\x11\xed\x3a\xa2\x40\xaa\xe6\x64\xd0\x95\x83\xc2\xca\x8a\x9c\xd3\xb4\xd0\xe2\x0d\xcb\xe8\x6a\x89\xf6\x42\x71\x58\xdf\x3a\xf2\xc7\x8d\xe6\x2b\x52\x96\xcf\x93\x82\x92\x33\x17\x1e\x6b\x71\xe0\xf6\x6c\xe3\xbd\xac\xaa\x5a\x27\xed\x03\x25\x69\x57\x1a\x0f\xa4\xdf\x64\x59\x7c\x6f\x46\x13\x49\xd1\x4f\x51\x60\xc1\xa5\x8f\x31\xc6\xbb\xb0\xc0\x7b\xe0\xb6\x25\x86\x55\x00\x56\x01\x58\x05\x60\x15\x80\x55\xa0\xa7\xad\x02\x5f\x0b\xd3\x6b\x61\xf6\x6a\x38\xf2\x8a\xb9\xda\x7e\x2c\x3c\x6d\x8b\x26\x2e\x17\x65\x49\x95\xcd\x4f\x7f\xa1\xa2\x94\xa5\x3c\x5f\x87\x17\x94\x62\x21\x7b\xc3\xe1\x13\x64\xbc\x6e\x2b\x1c\x59\x7b\xe1\xa9\xe4\x78\x32\xba\x28\xe4\x88\x58\x24\xcb\x72\x49\x9b\xa6\xd6\x2a\x22\x47\x95\x4a\xf9\xaa\x54\x32\x7c\x94\x2a\xab\xf2\xe8\xb2\x54\x34\xb4\xff\x98\x38\x1a\x8b\x2e\x17\x4a\x52\xb1\xf0\x2e\x43\x7c\x2f\xc9\x51\x29\xc7\x11\xbb\x32\x2a\x88\x6b\xce\x52\x2d\x45\xe3\x71\xd5\xba\x48\xe8\xdb\xc9\xe8\x4c\x81\x8b\x24\x5b\xc7\x95\x4a\xfd\x93\x59\x66\x8d\xaa\x50\xf7\xb9\xf6\xa7\x54\xaf\x26\x63\xbb\x44\x7f\xa6\x8d\x07\x71\x3a\x3b\x7d\xa0\x9f\xde\xdf\xcf\xde\xdb\x1f\x79\xc3\xf4\x78\xfb\x66\xdf\xe3\xba\x1c\xd4\xa6\xe8\x55\xe5\x7a\x34\x2f\x55\x96\xa4\xbc\x83\x3a\x98\x8a\x9a\x5c\x59\x56\x2a\x2b\xda\x58\xb8\xf6\xf4\x42\xcd\xcd\xbd\x3b\xca\xd5\x1a\x43\x27\x29\x8b\xa7\x2a\x68\xfa\x42\xb6\x90\xb3\x14\x6b\xbe\x36\x8a\x3a\x4c\xc6\xe8\x6a\x8b\x93\xbe\x72\x18\xab\x5f\xd2\x36\x8c\x86\xdc\x34\x6d\x09\xa6\x9f\x9b\xf3\x66\xc9\xa8\x00\x49\x5c\x16\xdb\xb5\xd5\xb8\x78\x86\xb8\xb6\xe7\xe1\x7f\x39\x27\x86\x7a\x2c\x1a\x4f\x4b\xd9\x67\xf3\x15\x65\xb5\x94\xd3\xce\xe2\x0e\x66\xfc\xa4\x9a\x81\x13\xca\x8a\xae\x01\x39\x1b\x31\x9e\x60\xc9\x6c\xe9\x58\x34\x3e\xab\x54\x64\x5b\xb3\xd1\xac\xa4\x66\xa5\x9c\xf6\xf4\xfa\xf8\x08\x97\x42\xde\x9e\x2a\xd4\xe9\xba\x06\x97\xcd\x36\x92\xb1\x3b\xca\xb5\xf3\xc6\xae\xdb\xc0\xe8\x06\xa3\x5b\x8f\x1a\xdd\x32\x79\x9a\x17\x28\x7b\x86\xa6\x38\xca\x3e\x41\xc7\xe8\x68\x0b\xf0\xb2\xc9\x3a\xdd\x09\x77\xbe\x7c\x27\xbb\x43\x7c\xcd\x96\xa4\x6d\x37\x6b\xde\x82\xc9\xc8\xd9\x4f\x27\x68\xb1\x09\x87\xf7\x86\x85\xd6\x2a\x4a\x51\x5e\xd2\xb6\xce\xa5\xbc\x99\xd8\xe4\x5f\x0e\xb1\x9f\xde\xe6\x41\xd
e\x3f\xdf\x4c\x96\x13\x6e\x51\x13\x8d\x76\x45\x9e\x93\x11\x7e\x3b\x5f\x7b\xa4\xde\xe5\x2e\xf7\xce\xdf\x94\x0c\xc9\xbe\x09\x50\x72\xfe\x5f\xe7\x24\x3b\xb5\x8e\xaf\x53\x7f\x19\xc8\x8c\x82\xc0\x04\x64\x46\x81\x29\x0c\xa6\x30\x98\xc2\x7a\xd5\x14\x86\xcc\x28\xc8\x8c\x02\x13\x04\x4c\x10\x30\x41\xc0\x04\xd1\x15\x26\x88\xcc\x11\x3a\xcc\x0e\xc6\xc6\x4c\x58\x72\xb7\x3d\x33\x8a\x6d\xf7\x56\x9f\x1b\x65\xeb\x61\x16\x64\x5f\x01\x10\x46\xf6\x95\xad\x13\x85\xc1\x7e\xfb\x69\xda\x6d\xe4\x6e\x36\xdc\xbe\x4a\xd5\x35\xa5\xb8\xba\x22\xab\xec\x8b\x4f\xb3\xbf\x3d\x60\x65\x6d\x4e\x36\x4e\xea\xb1\x60\x5e\x7f\x89\x5f\x9f\x88\x68\xe7\x8b\x8c\xc9\xb5\xc7\xba\x9c\x1b\x22\xab\x07\xe0\x19\xb2\x7a\x00\x9e\x01\x9e\x01\x9e\xf5\x10\x3c\xeb\x22\x37\xe9\xae\x81\x67\xf0\xdf\x05\x3c\x03\x3c\x03\x3c\x03\x3c\x43\x56\x0f\x38\x18\xde\x3c\x3c\xa9\xeb\x1d\x0c\xb7\x64\x56\x8f\x2c\x9d\x15\xfe\x56\x93\x74\x8a\xfb\x5b\x1d\xa5\x71\x3a\xe4\x19\x60\x9f\x55\x2a\x72\x72\x2d\x95\xac\xc5\x43\x4d\x65\xf8\xb8\xe8\xef\x46\x35\xca\x46\x6a\xeb\x48\xd5\xde\xca\xe1\x34\xe5\x97\x35\x24\xf6\x37\x76\x58\x38\x6c\xb7\x99\xc9\xa3\x8e\x7c\xed\x11\x87\xda\xcc\xbe\x04\xaa\x3a\x43\x33\x34\x55\x93\xca\xe3\x00\xa5\xd6\x3d\xe6\x08\x22\x45\x5a\x8f\x16\xd3\x7a\x7c\x3f\x44\xd3\xe2\xb3\x3f\x41\xc7\xf8\x67\x7f\x88\x5a\x99\x82\x34\x27\x52\x7c\xa4\xe9\xb4\x95\xe2\xa3\xc5\xa6\x32\x22\xdd\xc7\x14\x4d\xda\xd2\x7d\xb4\xd8\xd6\x46\x53\x7f\xb4\x41\x52\x25\xbe\x3e\x6c\x49\xa2\x87\x5c\x53\x7c\xd4\x49\xa5\x01\x71\x9a\x90\x4a\x56\x86\x8f\x36\xcb\x27\xe4\xf8\x00\x9b\x05\x9b\x05\x9b\x05\x9b\x45\x8e\x0f\xe4\xf8\x40\x8e\x0f\xe4\xf8\x80\x8d\x00\x36\x02\xd8\x08\x60\x23\x80\x8d\x20\x10\x1b\x01\x72\x7c\x20\xc7\x07\x72\x7c\x20\xc7\x07\x4c\x70\x30\xc1\x05\x9e\xe3\xc3\x0b\xfd\x06\x9b\xfc\xa3\x95\x1c\x1f\x6d\x80\xca\xc1\xa7\xdd\xf8\x3f\x13\x74\x48\xa4\xdd\x50\xab\x4a\x45\xca\xcb\x56\xae\x8d\x25\xb9\x2a\xa5\x46\x85\xab\xb9\x54\xad\x4a\xd9\xab\x9a\x4e\x6c\x26\xd6\xf8\xd1\x10\xfb\x48\x1f\xdd\xae\x5f\x76\x45\x3f\x3f\xb2\xb7\x22\x4b\xb9\x9a\xad\xa8\x78\x92\x49\xb3\x8d\x44\x4c\x3b\x69\x51\x5c\x79\x49\x5c\x58\x7b\x4e\xb0\x90\x7b\x9d\xc9\x60\x92\xc6\x60\xe8\x4f\x95\xac\xed\x9d\xaf\xc5\xe1\x5e\xf7\x89\x72\x0b\xeb\xcf\xcb\x55\xba\x96\xf5\x9f\x1b\xa7\xd9\x49\x7d\x6e\x38\xdf\x8c\x3e\x4f\xea\x7a\x64\x9b\x27\xbc\xcf\x14\x79\xcf\x8e\xfa\xd7\x33\x50\x91\xcb\x45\x29\x2b\xfb\xbd\xa1\x87\xf4\xf3\x3a\xf9\x92\xd2\x17\xe8\x61\x3a\x57\x63\x29\xdd\xc8\x5b\x02\x92\x83\xc5\xb4\x45\x8b\xe9\xaf\x86\x02\x16\x18\x8f\x08\xcb\x69\x86\xce\x5a\x96\xd3\x36\xcb\xa0\xe6\x84\x4c\xb3\xe2\xc4\x5d\x18\x35\x10\x74\xe5\xd5\x2a\x25\x5e\xd8\x55\x2f\x83\x8c\x82\x07\x52\xbd\xd8\x79\x50\x1c\xea\xa8\xd4\x69\x8f\xfd\x13\xa2\xc7\x47\xf4\x80\xc1\x77\x84\xc1\x03\xbe\x00\xbe\x00\xbe\xb4\x0b\xbe\x64\x5e\x0a\x5a\x4f\xb8\x28\xdc\xa2\xce\xd1\x9c\xcd\x2d\xaa\xcd\x8a\x42\xd4\x7d\x0d\xdf\xc1\x6e\x15\xc3\xd4\x99\xfd\x4a\xfa\x1f\xee\xac\xd7\x15\x46\xcb\x52\xa5\x5a\xe0\x6e\x2a\x02\x70\xfb\x6d\x5c\xf6\x96\xa5\x6a\xf6\x6a\xa7\x14\x88\x7b\x6a\x1b\x1a\xe1\xb7\xdf\xa7\xfd\x39\xb1\xc7\x7e\x70\x45\xae\xe4\x65\xfb\xd1\x01\xfb\x51\xb5\x5a\x91\xaa\x72\xbe\x90\x1d\xa9\x3b\xcf\xd1\x8a\xf6\xf7\x0d\xfd\xa8\xd6\xcb\xf4\x79\x9a\xa7\x4c\xcd\x96\x69\x82\x8e\xb4\xa0\xbe\x2c\xf0\x38\x5b\xa8\x2d\x3e\x6a\xcb\x67\xfb\xe8\xc5\x3e\xf6\x42\x5f\xe4\x53\xa6\xec\x7f\xbe\xaf\x77\x76\x4c\x35\x66\x47\x6d\x9c\xb9\x3b\x17\x37\x8d\x6b\x93\xd3\x0a\xb8\x1f\xf4\x98\xb8\x43\x7c\x09\x13\xde\x7f\x52\x91\x5f\xa9\xe9\x6a\xe2\x6a\x7e\x06\xd7\x9b\xd4\xe8\x60\x46\x55\x4a\x0b\x22\xf2\xfc\x61\xed\xb3\xd0\xff\x5e\x34\x3e\x16\xeb\xc7\x21\xef\xad\x5c\xfa\xb5\x10\xbd\x1a\x62\xaf\x84\x22\x2f\x9b\x10\xf3\xd3\x
a1\x59\xa5\x92\xe5\x2a\x5d\x5e\xe1\xc3\xae\x44\x63\xcb\xda\x4f\xb1\xe8\xa4\xe3\x29\x38\xfd\x15\xda\xd7\xaa\x6a\xf9\x71\x8d\x48\x59\xfe\xe0\xdc\x3b\xae\x58\xc8\xea\xab\xab\x5c\xcc\xa9\x51\xe5\xba\x3e\xa6\xc2\x7b\xac\x2c\x2b\xe5\xa2\x9c\x8c\x8a\x3b\xf2\x54\x01\xc6\xbb\xe4\x14\xd6\x75\x00\xcc\xdb\xc7\xb6\xf1\x6e\x39\x7c\xf8\x3b\xcd\xb2\xbc\xf3\xa5\x96\x45\xbe\xd4\x4e\xac\x0e\x99\x13\x74\x8c\x1d\x8d\x8d\x9b\xc9\x76\xf6\xd8\xd3\xf4\xd4\x5e\x5f\x9f\xab\x27\x70\x72\xfa\x7f\x87\xe9\x1e\x41\x4e\xa5\x72\x41\xfe\xb1\xaa\x5c\xe2\xd0\xdd\xe0\xa7\xec\x3f\x86\xd9\xbf\x0f\xd3\x9b\x1c\x07\x23\x91\x3c\xd7\xbb\x34\xed\xcf\x64\xe2\x52\x94\x8f\x4c\xe2\x6d\x79\xb9\x3a\x69\x3f\x7b\x72\x61\xee\x8c\x76\x24\xb8\xe5\xaa\xae\xa1\x89\xba\x86\x26\x9a\x69\x28\x53\xa0\xf3\x62\x0a\x9e\xa1\x19\x3e\x05\x4f\xd1\x09\x3a\xd6\xc2\xaa\x63\x3c\xa3\xdf\x24\x64\x5f\xb9\x4c\x23\x46\x22\x14\xaf\xf4\xcf\xba\xb2\xfa\xce\x55\xa5\x2a\xa9\xec\x67\x2e\xb3\xbf\x65\x4b\x8e\xb2\xaf\x71\x72\x14\xc3\x3f\xf5\x11\xed\xe2\xc4\x03\x56\x66\x14\x5b\x5a\x65\xfb\x29\x48\x90\x82\x04\x29\x48\x90\x02\x27\x7c\x38\xe1\xc3\x09\x1f\x4e\xf8\x5d\xe3\x84\xdf\x3d\x3e\xe6\x70\x7e\x86\xf3\x33\x9c\x9f\xe1\xfc\x0c\xe7\xe7\x9e\x76\x7e\x86\x77\x26\xbc\x33\x7b\xd4\x3b\x73\x4b\x26\x48\x79\x07\xcd\x08\xb2\x75\x92\x8e\x73\xb2\x75\x98\x0e\xd2\x98\x6f\x4a\x02\x07\x1b\x6a\x2a\x3b\x8a\x4f\x26\x93\x6b\x17\xfc\xa1\xea\x30\x4b\xd8\xbd\x43\x6b\x5d\x49\x1d\x7d\xa2\xd8\x0f\xb7\x5b\x30\xec\xad\x66\x6a\x14\x27\xf7\xda\x6b\xcf\x8b\xd2\x6e\xf2\x25\x40\xd5\x34\xa5\xe9\x74\x8d\x05\x6b\x3f\x25\xd7\x37\xe2\xb0\x5b\xc1\xd3\xaf\x45\x4f\xbf\x3f\x0e\xd1\xa4\xf8\xe2\x27\xe8\x08\xff\xe2\xc7\x68\xdd\xf3\x8f\x66\x85\x7b\xdf\x29\x3a\x61\xb9\xf7\xb5\xd2\xce\x19\x61\xfe\x3f\x4d\x27\x6d\xe6\xff\x56\x1a\xda\x68\x4a\x94\xa0\xc5\x4f\xe2\xd7\x6d\xf9\x50\x62\xae\xf9\x50\x9c\xa2\x68\xc4\x3d\x19\x4a\x67\x84\x12\x72\xa2\x00\xc7\x02\xc7\x02\xc7\x02\xc7\x22\x27\x0a\x72\xa2\xc0\x1f\x1b\x39\x51\x60\x16\x80\x59\x00\x66\x01\x98\x05\x60\x16\x08\xc4\x2c\x80\xb0\x1c\x84\xe5\x20\x2c\x07\x39\x51\x60\x75\x83\xd5\xad\x87\x72\xa2\x34\x47\x95\x1b\x93\x64\x47\x42\x94\xad\x57\x82\x97\x7d\x69\x88\xa6\x1b\x04\x0e\xac\xa5\xc4\xcc\x19\xcd\xae\xaa\x55\x65\xc5\x98\x8e\xf6\x97\xa4\x67\x60\xf9\xc1\x20\xfb\xe4\x36\xda\xe5\x68\xe5\xca\x5a\x2a\xf2\xc5\x90\x3e\xf5\x2c\xfb\xaa\x54\x32\x54\x1b\xc3\xdf\x7d\x8a\xb7\x6e\x8c\xec\xb4\xd9\x7a\xd2\xa6\x44\x4c\x98\xea\x47\x9c\xb7\x18\xb7\xf0\xae\x83\xd7\x29\x65\xd9\x44\x04\x6a\x55\x96\x72\xc3\xd6\xbe\x86\xef\xdd\x74\x2d\x58\xdb\x1a\x59\x1b\xb6\xb8\x03\x45\xd8\xda\x4e\x26\x86\xf9\xed\x1c\xc1\x10\x97\x52\x5e\x3d\xee\x72\x4f\xfc\xcc\xb3\xb4\x20\x3e\xbb\x39\x3a\xc3\x3f\xbb\x49\x3a\x45\x27\x5a\xf8\xec\xb8\xe5\x7c\x66\x6d\x63\x21\x3b\xd7\x45\xc8\x4e\xd1\xff\x43\x9c\x63\x67\xc4\xe7\xe7\x35\xf0\xe4\x62\xfd\x71\x9b\xd2\x94\x41\x30\x02\x82\x11\x32\xb0\x7e\xc1\xfa\x05\xeb\x17\xac\x5f\x3d\x63\xfd\xca\x74\x8d\x71\x27\xf0\x9e\xb4\x6c\x75\xc8\xc0\xea\x00\xab\x03\xac\x0e\xb0\x3a\xc0\xea\xd0\x79\xab\x43\x66\x86\xa6\xd8\x64\xec\x94\x49\x4b\x1e\xb4\xe7\x50\xf0\xda\xe6\xb5\x3f\x97\x42\xa6\xa7\x71\x6d\x06\xb8\xb6\x7d\xb8\x36\xd3\xf5\xb8\x76\x0b\x06\x49\xb0\xaf\x0c\xd2\xb0\x4f\x32\x0e\x81\x6e\x0c\x54\xf9\xb3\x83\xec\x6b\x61\xcb\xff\xf7\x6d\x2e\x49\xa2\x39\xd8\x4a\xec\xd6\x8e\xd4\x86\x1f\xf0\x43\x01\x27\x84\x7e\x94\x8e\x0a\x2a\x37\x46\xfb\x39\x95\x4b\xd0\x20\x0d\xf8\x3a\x59\x37\x87\xdf\x7c\xb2\x3f\x9f\xf1\x87\x6f\x0f\xb2\x58\x2d\x05\x17\xb7\xb6\x33\xb7\xc8\xdf\xb5\x85\x74\xdc\xe3\x9e\xd9\x59\x0c\xeb\x1e\xfd\x60\x07\x46\x36\x7d\x9c\x26\xe8\x48\x4d\x40\x47\xd3\x43\x0b\x8f\x41\x04\x72\xb4\x18\xc8\xf1\xd3\x21\x6f\xc3\xd7\x06\
xbe\xf5\x93\x22\xb2\x63\x9c\x0e\x59\x91\x1d\xeb\xb8\xde\x5f\x24\x94\x57\x03\x14\x09\x89\x3f\xbb\xdd\x12\x09\xb7\x1b\x89\x96\x4b\xba\x18\xb8\xc7\x1e\x53\xd1\x5e\x29\x80\xac\xca\xc8\xaa\x7c\xf3\x7a\xf1\xc2\x7d\x0b\xee\x5b\x70\xdf\xea\x96\xac\xca\xbe\xae\x2a\x2d\x64\x55\xf6\x6f\x73\xe3\x59\x95\x83\x5a\xf4\xd3\x5f\xbd\xcd\x5a\xf4\x1f\xf4\xc9\x98\x2c\x34\x81\x08\x4f\xdd\xd9\x66\x45\x00\xd9\x91\x7b\x4d\xfd\x40\x76\x64\x64\x47\xde\xe4\xec\xc8\x6d\x05\x3b\xcd\x49\x6c\x2f\x39\x6d\x49\x72\xdf\x9c\xca\x99\x31\xda\xcf\x92\xb1\x61\x13\xd5\xdf\x61\x47\xf5\xbc\xe9\x7a\x2e\xbf\x05\x9d\x21\x3f\x34\x44\xfb\x7d\x48\xa2\x70\x37\x59\x91\xca\x26\x4d\xfc\xed\x41\xf6\x1d\x1b\x4d\xdc\xe3\x42\x13\xa7\xf8\x45\x0f\x4b\xe5\xc4\xbd\x6e\x44\xd1\x3c\x1c\x30\x55\x7c\x82\x4e\x88\xc9\x77\x98\x0e\xf2\xc9\x97\xa4\x61\x4a\xf8\x4e\x3e\xb3\x3b\x1b\x25\x8b\xe7\xfc\xe7\xe7\x20\x1b\xa8\xd5\x28\xac\xdb\x3b\xe8\xe2\xa7\x6d\x74\xf1\x7e\x77\xba\x68\x0d\x73\xd4\x83\x30\xb6\x69\xa4\xd3\xa7\xe9\x24\x1d\xaf\x59\xda\xd7\x35\xd4\x58\xcc\x41\x1a\x5b\x24\x8d\x1f\x09\x6d\xf4\x33\x4f\x0b\xa8\x78\x8c\x8e\x5a\x50\xb1\x73\xa2\x82\x13\xc7\x20\x45\x45\xe2\x47\x36\xea\xc8\xcc\xf2\x6e\x96\x74\xb8\xdf\x1d\x3c\xb6\x4b\x38\x00\x3e\x02\x3e\x02\x3e\x02\x3e\x02\x3e\x02\x3e\x02\x3e\xba\xc3\xc7\x20\xd7\xff\xf4\xef\xd8\x00\xe4\xa0\x0f\x80\xb4\xb4\x82\xfb\x5c\x21\x64\x1b\x94\x02\x80\xc8\x5e\x53\x45\x00\x22\x01\x22\x37\x19\x44\xb6\x9d\x05\xf9\xd6\x65\x0b\x52\xc4\x67\x0e\xd3\x41\x36\x16\xdb\x6f\xc2\xc5\xbb\x1c\xbe\xc3\xc6\x35\x37\x05\x94\xfc\x8f\x71\xba\xcb\x80\x92\x22\x16\x5b\x95\xb3\x15\xb9\xaa\xb2\xcf\xc7\xd9\xa7\xfa\xad\xa5\xae\xa2\xfb\xf6\x97\x72\x85\xb5\x42\x6e\x55\x2a\x3a\x62\xae\xcd\x38\x9a\x45\x7e\x75\x20\x91\xd5\xc9\xc4\x43\xd7\xad\x45\x53\x34\x3c\x5f\x50\xab\xb3\x4a\x65\xb2\x58\x34\x17\x51\x15\x41\xd1\xf5\xdf\xcb\x5e\xf7\xef\x65\x27\x23\xde\x31\x91\x76\xfb\xac\xff\x37\xf3\x10\xdb\x5b\xfb\xcd\x88\x17\xe1\xfc\x60\x10\xf4\x8c\xa0\x67\x04\x3d\x23\xe8\x19\x41\xcf\x08\x7a\x46\xd0\x33\x82\x9e\x11\xf4\x8c\xa0\x67\x04\x3d\x23\xe8\x19\x41\xcf\x9d\x0d\x7a\x46\xb4\x32\xa2\x95\x11\xad\xdc\xc3\xd1\xca\xef\xbb\x42\xe3\x22\xe1\x62\x56\xae\xe8\xad\xc8\xf6\x7c\x8b\xf6\x9f\x0b\xf9\x52\xa1\x94\x37\xe4\x34\x7b\xe5\x19\xf6\x1b\x03\x74\xbb\xfd\x8c\x2b\x6b\xa9\xc8\xb8\xd0\x16\x2b\xfa\x3a\x67\x08\x42\x33\xb9\xa2\x75\xfa\xa2\x68\xf0\xa2\x68\x30\xb1\x4f\xbb\xd0\x76\x58\xbd\x94\xf2\x3c\xb9\xcb\x19\x5e\x1a\x80\x0b\x80\x0b\x35\xad\x00\xb8\x00\xb8\x00\xb8\x7a\x07\x70\x75\x51\xc9\xa6\xae\x01\x5c\xa8\x25\x04\xc0\x05\xc0\x05\xc0\x05\xc0\xb5\x19\xb5\x84\x7a\x9a\x47\xa1\xd8\x49\x2f\x17\x3b\x49\x6f\x41\x1e\x95\xa9\xd2\x93\xc2\xf7\xe9\x22\x2d\x70\xdf\xa7\x0c\x9d\xa5\x59\x6f\xc7\x40\x3b\xb4\x5a\x4b\x25\x3d\x79\xd1\x7c\x41\xf5\x77\x82\x5a\x57\xe1\x07\xaf\x3b\xb9\x45\x35\xbb\xb0\x35\x6a\x50\x65\x9c\x7b\x5b\xc5\xfe\x06\xd5\xb3\xb5\x98\x58\xb0\xa2\x52\x03\x8c\x36\x22\xce\xe9\x34\x48\x13\xdc\xeb\x09\xba\x44\x8f\xd6\x38\x73\x4f\x53\x7a\xe3\x2f\x10\x6e\xdd\x08\x49\x6d\x31\x24\xf5\x85\x30\x3d\x26\x84\xca\x79\x9a\xe7\x42\x65\x96\x02\x99\x93\xf4\x94\x08\x55\x5d\xa4\x47\xac\x50\xd5\xa0\xda\x7e\xbb\x88\x8c\x79\x94\x2e\xda\x22\x63\x82\x6a\xdc\x4f\x12\x7a\xcb\xa6\xb2\xa2\x56\x9b\x2d\x91\xd3\x40\xfa\xad\x47\x88\x26\xfe\xff\x23\xf5\x92\x30\xa9\x87\x48\xd9\xe2\xcd\x94\xe5\x06\x52\x71\x5c\x8f\xb2\x75\x4a\x45\xf3\xe2\x4e\xc9\xc7\xf6\x44\xdf\x02\x34\x03\x34\x03\x34\x03\x34\xf7\x0e\x68\x86\x36\xe8\xa3\x0d\x76\x0f\x89\x47\xe6\x83\x8e\x64\x3e\x80\xc1\x03\x06\x0f\x18\x3c\x60\xf0\x80\xc1\xa3\xa7\x0d\x1e\x48\x80\x83\x04\x38\x48\x80\xd3\xae\x04\x38\xb0\x27\xc2\x9e\xd8\xab\xf6\xc4\x4c\x3e\xe0\xdc\x4f\x7e\x10\x3a\xe1\x0e
\xa1\xef\x64\x77\x88\xaf\xd9\x92\xb4\x1d\x06\xd2\xc1\x67\xd5\xf8\x5e\x98\x22\xc2\x0d\xbf\xa4\xe4\x64\xcb\xfd\x7e\x49\xae\x4a\xa9\x51\xf6\xb5\x30\x7b\x2d\x4c\x3b\xb5\x63\x57\xf4\x1f\x23\x77\xe7\xe5\xaa\x53\x31\x16\x46\xdb\x44\x24\x2f\x57\xcf\x2b\x39\xf9\x92\x38\x71\x72\x61\xce\xc0\x81\xc1\x65\xbe\x98\xa8\x6b\x68\xa2\xae\xa1\x89\x66\x1a\xca\x78\xa7\x72\x59\x97\xd1\xa6\xc1\x6c\xb3\x0d\xc0\x7c\x41\x35\x8a\x0e\xb1\x9f\xbc\x4c\x43\x4d\x67\x57\x66\xbf\xfd\x34\xfb\xab\x87\xac\xe4\x26\x71\x9f\x18\x07\x2b\xc3\x32\x8f\x69\x68\x7b\x16\x2f\x44\x31\x20\x8a\x01\x51\x0c\x30\x2e\xc1\xb8\x04\xe3\x12\x8c\x4b\x88\x62\x00\xd4\x07\xd4\x07\xd4\x07\xd4\x07\xd4\xbf\x29\xa0\x3e\xa8\x23\xa8\x63\x8f\x52\xc7\x2d\x19\xc5\xf0\x34\x4d\x0a\x76\x35\x41\x47\x38\xbb\x1a\xa3\xfd\x94\x6c\x3e\xbd\x71\x53\xd1\x0a\x3e\xf1\x03\x81\x66\x38\x8e\xfd\x8e\xad\xde\x15\xb3\x82\x10\xac\x22\x36\x7a\xd0\x01\x2a\x5c\xdd\x2c\x0e\x64\x08\x27\xf0\x08\x27\xf8\x4e\x57\x54\xb8\x9a\x12\xe1\x01\xc7\x69\xc2\x16\x1e\x10\x74\x16\x75\xbf\x30\x80\x40\xeb\x64\x7d\x65\xd8\x12\x31\xf7\xb9\x7b\xf7\x9b\xe2\x66\xc8\x5e\x33\xcb\xf2\xe2\xdf\xaa\xd5\xb3\x80\x58\x81\x58\x81\x58\x81\x58\x7b\x07\xb1\x42\xfd\x82\xff\x3e\xfc\xf7\xe1\xbf\x0f\xd4\x0f\xd4\x0f\xd4\x0f\xd4\x0f\xd4\x0f\xff\x7d\xf8\xef\xc3\x7f\x1f\xfe\xfb\xb0\xa4\xc1\x92\xd6\x2e\xff\x7d\x4f\x57\xeb\x40\x1d\xfb\x5b\xf1\xdf\x0f\xb4\x1c\xe7\x16\x2c\xab\xf9\xc5\x41\x3a\xd1\x30\x0f\xbf\xa8\xb5\xd9\x20\x1b\xff\x8f\xe2\xec\x4f\xfb\xeb\xf3\xe4\xbc\x3f\xd4\x5c\x1d\x4e\xcf\xc8\x87\x80\x4a\x73\xee\x17\xa5\x39\x9b\x4b\x4a\x36\x5f\xe8\xfa\x0c\xff\xdd\x5b\xa5\xb3\xb3\xa1\x30\x08\x11\x40\x88\x00\x2a\x79\xc2\x7e\x05\xfb\x15\xec\x57\xbd\x63\xbf\x42\x25\x4f\x54\xf2\x84\xdd\x00\x76\x03\xd8\x0d\x60\x37\xe8\x0a\xbb\x01\x2a\x79\xa2\x92\xe7\x56\x21\xa5\xa8\xe4\xd9\x8e\x4a\x9e\x3f\x35\x44\xc7\x05\x41\xac\x2c\x49\xd9\xa4\x01\x8f\x78\x53\xf5\x1c\xb1\xb8\xaa\x56\xb5\x8d\x4b\xd1\x48\x79\xf1\x1c\xfb\xea\x20\xfb\x5b\xdb\xe8\x2d\xda\xd5\x93\xf6\x8b\xaf\xac\xa5\x22\x9f\xd7\x29\xa2\x1d\x1d\x96\x0c\xe1\x66\x66\xbd\x10\x8d\x5e\x54\xb4\x95\x33\x00\x68\x38\x6c\x29\x33\x5c\x61\xd3\x97\x3e\x4d\x1f\xb2\xb4\xb4\xb8\x43\x31\xb6\xb5\x9d\x4c\xec\xe5\xb7\xbb\x58\xfb\x3c\x97\x52\xb6\x8e\x82\x32\xd6\x53\xc6\xfb\xdd\x29\xe3\x76\x76\x0b\xef\x18\x5d\xcb\xf9\x13\xc6\x49\x76\x4a\xc7\x88\xd6\x58\xbb\x55\xc6\xf0\x9c\xab\x20\x8b\x20\x8b\x20\x8b\x20\x8b\x20\x8b\x20\x8b\x20\x8b\x20\x8b\x20\x8b\x20\x8b\x20\x8b\x20\x8b\x20\x8b\x9d\x26\x8b\x47\xe8\x30\x3b\x18\x1b\x33\xbd\xa9\xee\xe6\x6e\x54\x7a\x2f\x6c\xdb\xbb\x58\xbf\x76\xa0\x9d\x5e\x52\x60\x92\x60\x92\x60\x92\x5b\x88\x49\x7e\x6b\x90\x0e\x18\x39\x76\x05\x78\xf4\xc8\xb4\x6b\x34\xf6\xce\x55\xa5\x2a\xa9\xec\x1f\x0d\xb2\x5f\xea\xb7\xb2\x01\xdc\x68\xce\x85\xd1\xd8\x1b\x3e\xa2\x35\x12\x90\xdb\xe2\x80\x70\x5b\xac\x49\x6b\xe2\xb8\xd3\xd6\x70\x56\xf4\x74\xfd\x0d\x9c\x2f\xae\xcb\x59\xf1\x82\x3f\x4a\x1c\x66\x09\x17\x70\x68\xfa\x00\x3b\xde\x05\xa8\x21\xa8\x21\xa8\x21\xa8\x21\xa8\x21\xa8\x21\xa8\x21\xa8\x21\xa8\x21\xa8\x21\xa8\x21\xa8\x21\xa8\x61\xa7\xa9\xe1\xd6\x8b\xc1\x04\x5d\x04\x5d\x04\x5d\xdc\x42\x74\xf1\xf7\x76\xd3\x49\xe1\xf1\x28\x95\xcb\x6a\x83\x3a\x5e\x15\x99\x33\x33\xed\xd3\xd5\x9d\x1d\x47\x55\x1e\xa9\xce\x3e\xb1\x9b\xfd\x20\x4c\xb7\x6a\xd7\xf3\xb2\x5e\x15\x59\xca\x45\xc5\x31\x63\x94\xad\x45\xfd\xa2\x68\x66\x51\xae\x26\x1e\xd4\x4e\x9c\x2c\x97\x55\x27\x15\x34\x8e\x8b\x38\xf8\x60\x53\x8f\x66\x9e\xa4\x93\x02\xd5\x8d\xd3\x21\x8e\xea\x46\x69\x84\xf6\x79\x66\x7b\xd5\x9e\x29\xb9\x96\x4a\x5a\xbd\xf2\x75\xfc\xbb\xd7\x9d\xd8\xdd\xc2\xfa\xf3\x72\x95\xae\x9d\xf7\x67\x75\xfb\xd8\x90\xce\xe7\xb4\xdb\x9b\x84\xce\xec\x81\x1d\xe4\x45\xb
e\xb4\xdd\x1a\xf9\x84\xf6\x8e\xa4\xac\xdc\xcc\xe0\xc7\xf5\x73\x3b\x3b\xfe\x69\x4e\x46\x6b\x72\x4e\xaf\xef\x05\x20\xeb\x21\x92\x4e\xb7\x98\x74\xfa\xa3\xa1\x0d\x7f\xfd\x53\x22\xeb\xf4\x71\x9a\xb0\xb2\x4e\x07\x2e\x42\x9a\x93\x11\x0d\xe5\x82\x5d\x80\x34\x10\x49\xe5\xd5\x2a\xa5\xbf\x7f\x9b\x25\x42\x0e\x94\xa5\x8a\xb6\x37\xd6\xf6\x93\x7c\x9d\x69\x46\x96\x3c\x54\xd6\x96\x95\xce\x49\x92\x7b\x6a\x1b\x1a\xe1\x1d\xd8\xc7\x4b\x68\xee\xb1\x1f\x5c\x91\x2b\x79\xd9\x7e\x74\xc0\x7e\x54\x53\x3e\xab\x72\xbe\x90\x1d\xa9\x3b\xcf\xd1\x8a\xf6\xf7\x0d\xfd\xa8\xd6\xcb\xf4\x79\x9a\xa7\x4c\x8d\x14\x9b\xa0\x23\x2d\x58\x7c\x16\x38\x7f\x87\x48\xf3\x11\x69\x9f\xed\xa3\x17\xfb\xd8\x0b\x7d\x91\x4f\x99\x06\xa9\xe7\xfb\x7a\x47\xa4\xd5\x20\x0b\x6d\x9c\x39\x82\xe7\x58\x4d\x9b\x9c\x96\x21\x6e\xd0\x63\xe2\x0e\x71\x16\x2d\x2c\x36\x52\x91\x5f\x59\x52\x4a\x23\xe2\x6a\x7e\x06\xb7\xd8\xa9\xd1\xc1\x8c\xaa\x94\x16\x84\x45\xea\x61\xed\xb3\xd0\xff\x5e\x34\x3e\x16\xeb\xc7\x21\x6f\x59\x9b\x7e\x2d\x44\xaf\x86\xd8\x2b\xa1\xc8\xcb\xa6\x2e\xfc\xe9\xd0\xac\xa2\x6d\xed\x0a\x6a\x34\xaf\xf0\x61\x57\xa2\xb1\x65\xed\xa7\x58\x74\xd2\xf1\x14\x7c\x97\x23\xb2\xc0\xae\xaa\x16\x7b\x1f\x91\xb2\xfc\xc1\xb9\x45\xa3\x58\xc8\xea\xe9\xdd\xe4\x62\x4e\x8d\x2a\xd7\xf5\x31\x15\xc4\xbf\x2c\x2b\xe5\xa2\x9c\x8c\x8a\x3b\x72\x13\xa2\xf1\x2e\xf9\x6e\xc3\x75\x00\xcc\xdb\xc7\xb6\xf1\x6e\x39\xcc\x61\xed\xd7\x1c\xbd\x43\x46\xca\x22\x64\x24\x60\xdd\x31\x33\x4e\x87\xd8\x81\x58\xca\xc4\x0b\x6f\xb5\x3b\x25\x59\x17\xd5\xfb\x24\x6d\x3d\x2e\xc1\x7e\x38\x44\xf3\x62\x9f\x93\x2b\xa8\x59\x65\x4d\x93\xc6\xb5\x01\x5d\x1e\x3b\x1f\xb9\x94\x2b\x2b\x85\x52\x55\x2d\x16\xb2\x56\xa4\xd7\x3f\x1b\x62\xef\xd9\x46\x3b\xcd\xd6\xb4\xd5\xf3\x9f\x37\x13\xe1\x35\xa3\x37\xb7\xa8\x35\xd7\x15\x31\x5e\xe2\x76\xd3\xc6\x93\xd8\x17\x71\x47\x67\xbb\xdf\x41\xa3\x0b\xe3\xbc\x2e\xfb\x7f\xb4\x13\xec\x88\xf8\x50\x1d\xa3\xed\xa6\xcf\xd5\xce\x5d\xb8\x6a\xc0\x55\x03\xae\x1a\x70\xd5\x80\xab\x06\x5c\x35\xe0\xaa\x01\x57\x0d\xb8\x6a\xc0\x55\x03\xae\x1a\x70\xd5\x80\xab\x46\xa7\x5d\x35\x26\xe8\x08\x3b\x1c\x3b\x68\x22\x91\xdd\x76\x96\xe2\xd8\xd7\xdd\x0c\x38\x05\x6e\x1e\x70\xf3\x80\x9b\xc7\x16\x72\xf3\xf8\xdf\x09\x23\x35\x7e\xc3\xc4\x56\xb6\x94\x56\x4b\x85\x52\xae\x50\xca\x9b\xbc\xf3\x57\x12\xec\xc5\x3e\x8f\xcc\x56\x0f\x71\x97\x0f\xe7\x7e\xd9\x16\xd4\x9a\x16\x4d\x25\x86\xb4\xd3\x1a\xa7\x92\xd2\x4f\x0d\xd8\xeb\x43\xa2\x59\x01\x06\x4f\xd1\x09\x0e\x06\xc7\xe9\x10\x1d\xf0\x64\xf7\x7c\x88\xd6\x52\xc9\xfa\x7e\xf9\xe2\xc0\x67\xfd\x69\xdf\x59\x36\xeb\x9b\xb7\xa9\x2e\xed\x93\x71\x7f\x3b\x13\xf4\x71\x35\x89\xbc\x7f\x87\xc7\xeb\x1a\x34\xfc\x44\x7c\xdf\xd8\xb0\x7e\xe6\x26\xbc\xb4\xf4\x1c\x9d\xa1\x99\x1a\x23\x6b\x6b\x6f\x0d\xf6\x55\xb8\x8c\xb4\xe8\x32\xf2\x99\x50\x60\xa2\xe3\x9c\x70\x1d\x99\xa6\xb4\xe5\x3a\xd2\x36\x39\xd4\xd8\xe5\xa3\xa3\x62\x2a\xf1\xc5\x5d\x1e\x72\xe8\x1e\xbd\xc0\x95\xe4\x26\x7a\xf6\x89\x83\x9b\x21\x79\xda\x53\x9f\x1c\x42\xc8\x47\x08\xa1\x46\x6e\x47\x6a\xe4\xa2\x38\x22\x8a\x23\xa2\x38\x62\xbb\x8a\x23\x66\x5e\x0a\x05\x5b\x08\x8e\x2e\x6a\xad\x8d\xb1\x73\x34\x47\xdb\xc5\xd8\xca\xb9\x8d\xb6\xe9\xa7\x3e\x44\xdd\xd5\x87\x1d\xec\x56\x31\x4c\x9d\xd5\x20\xd2\xff\x6c\xa7\x87\x06\x91\xaa\x73\x57\xf5\xdd\xd2\x24\xca\xfe\x09\x8d\x03\x57\x2b\xe0\xb1\xda\x6b\xca\x0c\x3c\x56\xe1\xb1\xba\xc9\x1e\xab\xdd\x46\xbd\xd6\x95\xb9\xdc\x73\x61\xf0\x75\x93\xcd\x9c\xa2\x13\xec\x58\xec\xa8\x69\x4e\xb9\xcf\x23\xd7\x9e\xde\x62\xfb\x53\xee\xb1\x8f\x0c\xd2\xa1\x96\xca\x0a\xb0\xff\x11\x67\xdf\xe8\xf7\x58\xfc\xae\x37\x59\x94\x34\xe0\x8a\x02\x86\xbf\x68\xe3\x25\x74\x6b\x24\xf4\xea\xd2\xea\xa3\xcd\xd5\x06\x58\xff\x37\x84\xaa\xa3\x70\x1d\x85\xeb\x28\x5c\x47\x
e1\x3a\x0a\xd7\x51\xb8\x8e\xc2\x75\x14\xae\xa3\x70\x1d\x85\xeb\x28\x5c\x47\xe1\x3a\xba\x99\xae\xa3\x70\xce\x84\x73\x26\x9c\x33\x7b\xd8\x39\xf3\xf9\x21\x9a\x69\x1a\x0f\x7a\xa5\xe7\xe2\xb8\xf0\x57\x06\xd9\xab\x5e\xb8\xb0\xdc\x64\x01\x80\xe0\x38\x61\xc2\x8b\x13\xda\x92\xc4\x00\x15\x6e\x0c\x15\x36\x17\x5e\xde\x02\x7a\xe7\xbc\x10\x8c\x10\x8c\x10\x8c\x10\x8c\x10\x8c\x10\x8c\x10\x8c\x10\x8c\x10\x8c\x10\x8c\x10\x8c\x10\x8c\x10\x8c\x10\x95\x00\x40\x21\x41\x21\x41\x21\x6f\x1e\x0a\xf9\xef\x86\x68\xbc\xa9\x3a\xa3\x7c\x0c\x2b\xfc\x09\x8c\xe0\xf0\xbf\x31\xc4\x7e\xc7\x56\x6b\xf4\x73\xcd\xe4\xc1\x9c\xd7\x9a\xb9\xa8\x9d\xd0\x15\x49\x30\xef\x77\x2d\x52\x6a\x75\x12\x84\xb2\x85\xe4\x97\xf3\xfe\x74\x72\x88\xc5\x6b\xab\x91\x5a\x83\x0e\x87\x45\xc0\x48\xc0\x48\xc0\x48\xc0\x48\xc0\x48\xc0\x48\xc0\x48\xc0\x48\xc0\x48\xc0\x48\xc0\x48\xc0\xc8\xcd\x84\x91\x0d\xeb\x86\x58\x9b\x37\x24\xba\x04\xc5\x04\xc5\x04\xc5\xec\x28\xc5\x7c\x83\x68\xcc\xa0\x98\x1e\xfc\xb2\xac\xe4\xcc\x12\xa6\x7a\x96\x4b\xf6\x0a\xc5\x3e\x61\x03\x98\x77\x8b\x35\x2c\xaa\x1f\x16\xb2\x7d\x41\xc9\x25\xf4\xc5\xad\x16\x13\x2e\x28\xb9\xf6\xa4\x22\x3b\x49\xc7\x69\xa2\x26\x6f\x47\x82\x06\x3d\x83\xf8\xb5\x07\x48\xae\xa5\x92\x7a\x6f\x32\xdf\x0a\xd1\x31\x81\x05\x0f\xd2\x18\xc7\x82\xc3\xb4\x8e\xeb\xe9\xb4\x48\x5a\x77\x94\xc6\xad\xa4\x75\xeb\x6b\x61\x52\xa4\xaf\x99\xa0\x23\xb6\xf4\x35\xeb\x6b\xc2\x0f\x43\xde\xe7\x8e\x21\x6f\x65\xdb\xca\x8a\x5a\xa5\x6b\x73\xfe\x14\x72\x80\x3d\x58\x4b\x21\xdd\x32\xd1\x64\x90\xf7\xa4\x71\xde\x93\x0c\x32\x49\x7a\x64\x92\x3c\x48\x63\x6c\x7f\x2c\x69\xea\x3e\x77\xda\x95\x26\xcf\x54\x14\x5b\x4f\x63\x62\xff\x65\x88\xe6\x8c\x8a\xd2\x05\xf9\xc7\xaa\x72\x89\xaf\x97\x96\x23\xfb\x92\x5c\x95\xcc\x64\x17\xab\x6a\x55\x59\x31\x04\xbb\x5d\x1e\xe8\x96\xa5\x9f\x18\x62\x5f\xd8\x46\x77\x39\x9a\xba\xa2\xb7\x11\xf9\x62\x33\x76\xa6\x29\x7e\x0b\x03\x13\x4e\x9b\xb7\xe8\x0a\xab\x53\xea\xba\x28\x9d\x6a\x7b\xba\x4b\xe2\xe1\xbc\xba\x0d\x3b\x54\x0b\x76\xa8\x77\xfa\xaf\x00\xe7\xd9\xbc\x59\x39\xb1\x7e\xda\x1a\xb9\x34\x3c\x5e\x8a\x7d\x91\xe0\x6f\x0f\xc6\x2a\x18\xab\x60\xac\x82\xb1\x0a\xc6\x2a\x18\xab\x60\xac\x82\xb1\x0a\xc6\x2a\x18\xab\x60\xac\x82\xb1\x0a\xc6\xaa\x4e\x1b\xab\x66\x68\x8a\x4d\xc6\x4e\x99\x04\xe5\x41\x47\x36\x50\x8f\xfd\x5c\xfb\x73\x82\xc2\xb0\x04\xc3\x12\x0c\x4b\x5b\xc8\xb0\xf4\xf5\x2b\x34\xad\x63\xcd\xd5\xaa\xa2\x66\xa5\x62\xa1\x94\x6f\x60\x65\xe2\x88\x4a\x29\x55\xa5\x62\x59\xc9\x19\x97\xc8\x15\x95\xfd\xcd\x2b\xec\xab\x03\xf4\x66\x5b\x2b\x57\xd6\x52\x91\x43\x42\x01\xad\xe8\x4b\xa7\x21\x5b\x0d\x88\x79\xd6\x6c\x6d\x41\xc9\x4d\x9a\xad\x25\xc6\xb4\xcb\x26\xad\xa6\xec\x46\x29\x8f\x6b\xba\x1c\x20\xa6\x01\xce\x00\xce\xd2\x00\x67\x00\x67\x00\x67\x00\x67\x3d\x03\xce\xd2\x5d\x03\xce\x02\xef\x49\xcb\xe0\x2c\x0d\x70\x06\x70\x06\x70\x06\x70\x06\x70\xd6\x79\x70\x96\xee\x69\x40\x95\x06\xa0\x6a\x1f\xa0\x4a\x77\x3b\xa0\x4a\x6f\x41\x40\x95\x79\x27\x5d\x12\x8e\x57\x17\xe8\x61\xee\x78\x75\x86\x66\x68\xca\xd3\xc9\xd6\xc6\x9f\x92\x6b\xa9\xa4\x07\x2c\x9a\x2f\xa8\xfe\xee\x57\xde\xfe\xb7\x22\x47\xe9\x92\xbf\xf7\xd5\x29\x76\xc2\xf0\xbe\xb2\xfa\xa5\x3b\x5d\x79\x74\xcd\xe1\x98\x1b\xfb\x5f\x3b\xea\x90\x9a\xbe\x44\x45\x25\x4f\x7a\x76\x50\x9c\xb1\x49\xfc\x4c\xe0\xae\xc7\x68\x91\x1e\xa9\xf1\xf0\x9e\xa4\x53\x1b\x7c\x6f\x28\xd0\x88\x92\xf7\x2d\x3a\x2a\x7f\x2c\x2c\x4a\xce\xee\xe7\x25\x67\x35\x41\x32\x45\x1b\x9f\x90\xf4\xb8\x88\x24\x58\xa0\xf3\x56\x24\x41\x20\x0d\x3f\x21\x02\x0c\x1e\xa1\x0b\xb6\x00\x83\x40\x5a\xf6\x93\x7b\xcd\xc9\x35\x97\xdc\xcb\xcd\xcb\x38\xbf\xd8\x86\xc4\x07\x46\xea\xe4\xde\xb0\x5e\x1a\xd9\x56\x67\x5a\x59\xf6\x94\x81\x27\xc5\xd9\x0e\x19\x38\x65\x5e\xd9\x69\x69\xd8\x9e\
xd2\xfb\xa0\xc9\xa0\xc9\xa0\xc9\xa0\xc9\xbd\x43\x93\xa1\xfe\xf9\xa8\x7f\xdd\x83\xdb\x5f\x0f\xd3\x77\xc2\xec\xdb\xe1\xc8\xb7\xcc\x57\xf5\xb9\xf0\xa3\xf6\x0d\x6e\xa1\x14\x55\xc5\x26\x35\xba\x24\x2f\x0b\xdb\xb4\x09\x82\xac\x25\x4b\xff\x4a\xf8\x60\xd9\x14\xcd\x92\x52\x1a\x29\xc9\x79\x89\xbf\x12\x7d\x8f\x6b\x57\x48\x05\x12\x36\x27\x83\xae\x3c\x14\x56\x56\xe4\x9c\xa6\xf2\x16\x6f\x58\x46\x63\x4b\xb4\x17\x8a\xc3\xfa\xd6\x97\x3f\x6e\x34\x5f\x91\xb2\x7c\x9e\x14\x94\x9c\xb9\xf0\x58\x8b\x03\xb7\xc7\x1b\xef\x65\x55\xd5\x3a\x69\x1f\x28\x49\xbb\xd2\x78\x20\xfd\x26\xcb\xe2\x7b\x33\x9a\x48\x8a\x7e\x8a\x12\xdb\x2e\x7d\x8c\x31\xde\x85\x05\xde\x03\xb7\x2d\x3d\xac\x1a\xb0\x6a\xc0\xaa\x01\xab\x06\xac\x1a\x3d\x6d\xd5\xf8\x5a\x98\x5e\x0b\xb3\x57\xc3\x91\x57\xcc\xd5\xf6\x63\xe1\x69\x5b\x20\x70\xb9\x28\x4b\xaa\x6c\x7e\xfa\x0b\x15\xa5\x2c\xe5\xf9\x3a\xbc\xa0\x14\x0b\xd9\x1b\x0e\x9f\x26\xe3\x75\x5b\x91\xc4\xda\x0b\x4f\x25\xc7\x93\xd1\x45\x21\x47\xc4\x22\x59\x96\x4b\xda\x34\xb5\x56\x11\x39\xaa\x54\xca\x57\xa5\x92\xe1\x63\x55\x59\x95\x47\x97\xa5\xa2\xa1\xfd\xc7\xc4\xd1\x58\x74\xb9\x50\x92\x8a\x85\x77\x19\xe2\x7b\x49\x8e\x4a\x39\x6e\x22\x50\x46\x05\x31\xce\x59\xaa\xa5\x68\x3c\xae\x5a\x17\x09\x7d\x3b\x19\x9d\x29\x70\x91\x64\xeb\xb8\x52\xa9\x7f\x32\xcb\x2c\x53\x15\xea\x3e\xd7\xfe\x94\xea\xd5\x64\x6c\x97\xe8\xcf\xb4\xf1\x20\x4e\x67\xad\x0f\xf4\xd3\xfb\xfb\xd9\x7b\xfb\x23\x6f\x98\x1e\x7b\xdf\xec\x7b\x5c\x97\x83\xda\x14\xbd\xaa\x5c\x8f\xe6\xa5\xca\x92\x94\x77\x50\x09\x53\x51\x93\x2b\xcb\x4a\x65\x45\x1b\x0b\xd7\x9e\x5e\xa8\xb9\xb9\x77\x47\xb9\x5a\x63\xe8\x24\x65\xf1\x54\x05\x4d\x5f\xc8\x16\x72\x96\x62\xcd\xd7\x46\xbe\xcd\x31\x47\x57\x5b\x9c\xf4\x95\xc3\x58\xfd\x92\xb6\x61\x34\xe4\xa6\x69\x0b\x31\xfd\xf4\x9c\x37\x4b\x46\x05\x81\xe2\xb2\xd8\xae\xad\xc6\xc5\x33\xc4\xb5\x3d\x0f\xff\xcb\x39\x31\xd4\x63\xd1\x78\x5a\xca\x3e\x9b\xaf\x28\xab\xa5\x9c\x76\x16\x77\x90\xe3\x27\xd5\x0c\x9c\x50\x56\x74\x0d\xc8\xd9\x88\xf1\x04\x4b\x66\x4b\xc7\xa2\xf1\x59\xa5\x22\xdb\x9a\x8d\x66\x25\x35\x2b\xf1\xa4\x26\xfa\xf8\x08\x97\x48\xde\x9e\x2a\xd4\xe9\xba\x06\x97\xcd\x36\x92\xb1\x3b\xca\xb5\xf3\xc6\xae\xdb\xc0\x68\x08\xa3\x61\x8f\x1a\x0d\x33\x79\x9a\x17\xdc\x7c\x86\xa6\x38\x37\x3f\x41\xc7\xe8\x68\x0b\xf0\x72\xb1\x2a\x55\x57\xd5\x80\xf0\x73\xf3\x36\x34\x17\x38\x7d\x2d\xe1\x8e\x9f\xef\x64\x77\x08\x89\x61\x49\x73\xda\x8a\xb9\x4a\xfe\x24\x4c\x77\x09\xa7\x7e\x21\x0c\xcd\xec\x24\xec\xbf\x86\xd9\xd7\xc3\xf4\x66\xf1\xb3\x99\x70\xe4\xee\xbc\x5c\x75\x6a\xdb\xc2\xdc\x9b\xd8\x93\x97\xab\x42\x1a\xea\xe9\x3b\x26\x17\xe6\x0c\xca\xa8\x06\xc7\xc8\xeb\x1a\x9a\xa8\x6b\x68\xa2\x99\x86\x32\x65\xcf\xd9\x45\x8f\x89\x49\x7c\x9e\xe6\xf9\x24\x9e\xa5\x69\x4a\xb7\x30\x89\x6d\x03\x30\x5f\x50\xab\xfa\xa4\x65\xff\x78\x88\x52\x3e\xf9\xb9\x5c\xea\x0b\xfc\xc9\x20\xfb\xbd\xb0\x95\x9e\xeb\xde\x8a\x2c\xe5\x6a\x10\x83\x95\x77\x30\x71\x9f\x76\xb8\x03\x99\xfc\x45\xc2\x93\x27\xe9\xa4\x18\xb1\x71\x3a\xc4\x47\x6c\x94\x46\x68\x9f\x6f\x72\x2b\x5b\x92\x7b\xbf\x0f\xfd\x5e\xf7\x8f\xf0\x16\xd6\x9f\x97\xab\x01\x27\xd9\x8f\x7c\x66\xbb\x35\xce\xd1\x8a\x5c\x2e\x4a\x59\xd9\x7b\xa8\x1f\xd0\xcf\xe8\xd4\x68\xa7\x79\x36\x99\x1a\x73\xf9\xfa\x86\x1b\x6c\x14\xa6\xf1\x16\x4d\xe3\x1f\x0d\x6d\xf8\x5b\x9f\x12\x66\xf0\xe3\x34\x61\x99\xc1\x03\x17\x18\x81\x4a\x84\x06\xe2\xa7\xbc\x5a\xa5\xc4\xbb\x77\x59\x02\xe3\x4e\x7d\x5f\x22\xd9\x65\x84\xbe\xb9\xe8\x98\x88\x68\x8f\x0d\x19\x52\xc3\x47\x6a\xc0\x8e\xd1\x11\x3b\x06\x00\x16\x00\x16\x00\x56\xbb\x00\x56\xe6\xa5\x50\xc0\xbb\xf8\x8b\xc2\x3d\xed\x1c\xcd\xd9\xdc\xd3\xda\x4c\x06\xa2\xee\x2b\xf6\x0e\x76\xab\x18\xa6\x66\xf7\x0c\x0d\xf5\x02\x4b\x7d\x48\xbf\x7e\x9b\xa5\x02\x0c\x95\xa5
0d\xc6\xb9\x37\x66\xd8\xea\x18\xab\x8d\xd8\xbc\xd8\x7f\xd9\x53\x6b\xb9\xa2\x76\xa0\xbb\xbf\xb1\x1a\x93\xc7\x6c\x8d\xad\x42\x78\x3b\xc2\xdb\x11\xde\x8e\xf0\x76\x84\xb7\x23\xbc\xbd\x99\xf0\xf6\xce\x84\xa2\xb7\x23\x8a\xbe\xf1\xf0\xf6\x8e\x2e\x23\x52\x9f\xde\x5d\xbb\x8c\x38\x1a\x10\xdf\xee\xbf\xb8\x18\x15\x51\x95\x1d\x5f\x5b\x20\xaa\xbd\xd7\x56\x34\x88\x6a\x47\x54\xfb\x16\x47\xb5\x97\xb6\x6a\x1b\xda\xd1\x0f\x44\x60\x04\x7d\x7a\x96\xa6\x59\x2a\x7a\xda\x76\xd7\x3c\xea\x0e\x6c\xf7\xbd\x48\xfb\x8b\x92\xb1\xef\x85\xe8\x5e\x09\x79\xd5\x37\x94\xd5\xa2\xe0\xf1\xe3\xeb\x09\x99\xfc\x94\xfd\x71\x88\xfd\x51\x88\x98\xf3\xd3\x65\xf3\xa7\xf0\x3d\x39\xb5\x5c\xe9\xaf\x95\x9c\x3a\x16\xc9\xa9\xe5\x19\xfb\xf8\x65\x79\x78\x72\x61\xce\xd2\x08\xb5\xae\x02\xd1\x64\x4d\x43\x93\x35\x0d\x4d\x36\xd2\xd0\x06\x31\x6f\x9d\x0f\x93\xeb\x39\xe7\xf3\xc1\x98\x97\xfd\xf2\x10\x8d\xc9\xde\xd7\x57\x94\x4c\xdc\x3a\xa0\xaa\xd4\xbd\x1c\x0c\x5d\x2b\xa8\x06\x7b\x76\x88\xfd\x43\x3f\xdd\xcb\x0f\x4f\xba\x8f\xb6\x07\x26\x2a\x25\x1f\xba\xe9\xac\xb6\x36\x3c\x56\x75\xfb\x45\xad\xa0\xc6\xf6\xf3\x63\x16\xab\x9b\x30\xc7\x8a\x1f\xb1\xdd\xaa\x46\x3d\x43\x49\x39\x80\x93\x74\x54\x0c\xe0\x04\xed\xa7\xb8\xaf\x35\x11\x9d\x6d\x76\x58\x9c\x3f\x6f\x23\x83\x75\x75\x25\xd8\xa4\x9c\x62\x27\x4c\x93\xe2\x3b\x9e\x56\xed\x28\xad\xa0\xba\x6d\x88\xcc\x98\x71\xf5\x41\x6f\x43\xb2\x83\xdd\x86\xaa\x51\x90\xdf\x42\x7e\x0b\xf9\x2d\xe4\xb7\x90\xdf\x42\x7e\x0b\xf9\x2d\xe4\xb7\x90\xdf\x42\x7e\x0b\xf9\x2d\xe4\xb7\xa8\x1a\x05\x71\x2c\xc4\xb1\x10\xc7\x76\x54\x1c\xfb\xaf\xfb\x69\xd0\xac\x1a\x95\xe5\x4b\xd9\xbc\x56\xd4\xd5\x5c\x5e\x38\xc7\x2a\xe0\xdd\x38\xfb\x46\x88\xfd\xf7\x10\xbd\xd2\xf3\xb8\xcb\xeb\x75\x50\xea\x60\x4e\x2d\x27\xbd\xce\x5a\x06\x50\xf5\x01\xaa\x3f\xf7\x3a\x9a\x95\xc3\xb2\x5a\xd0\x6e\xf0\x6f\xa4\xae\x15\xe2\x36\x73\xa9\x66\xaa\x25\x3d\xaf\xe9\xf9\xf2\xcd\x82\xba\xae\x16\x2a\x76\xe3\x06\xfb\xd3\xcb\xec\x87\x86\xe8\x7e\x57\x3b\x49\xab\x19\x1b\xb6\x1e\xad\x0f\x5b\x17\xcc\xe6\xe7\x79\xf3\x53\xee\xe6\x63\xc7\xf8\x99\xb3\x1e\x6d\x9b\x14\xd6\xff\xd4\x2e\x67\xb1\x29\x20\x4a\x20\xca\x14\x10\x25\x10\x25\x10\x25\x10\x65\xcf\x20\xca\x54\xd7\x20\xca\x96\xdf\x49\xd3\x88\x32\x05\x44\x09\x44\x09\x44\x09\x44\x09\x44\xd9\x79\x44\x99\xea\x69\xa2\x98\x02\x51\x6c\x1f\x51\x4c\x75\x3b\x51\x4c\x6d\x43\xa2\x98\xbe\x49\xaf\x95\xcc\xec\x29\x7a\x52\x30\xb3\x0b\xf4\x38\x9d\xf3\xd5\xb0\xb9\xf9\x96\x25\x65\xf3\x87\x46\x0d\x09\xdc\x02\xc4\x67\x57\xff\x8f\x60\x01\xdc\xab\xd8\x72\x8d\xa8\xcd\x54\xc4\xd5\x03\x72\xa6\x28\xce\xff\xfe\x29\xfa\x59\x0a\x60\x71\x7b\xe5\x17\x2d\xa2\xd4\xc3\x6e\xc7\xe5\x41\x5b\x0b\xde\x24\x27\xbb\x44\xaf\xa1\x57\x57\x85\x43\xcc\xd1\x99\x16\x0d\x38\xa2\x23\x10\x97\xde\x64\x5c\xfa\x7f\x08\xc9\xb8\xf4\xfd\x22\x2e\x9d\x5b\xa2\x73\xd4\xba\x89\x49\x97\x65\xcc\xfb\xab\x68\xd9\x89\x79\x6f\xe9\x05\x5e\x27\x03\xd3\x5e\x4d\x4f\xb9\x02\xd3\x5a\x7a\x85\xe6\x0d\x69\x49\x6b\xdc\x90\xb6\xc4\x6e\xd6\x58\xe3\xd8\xaf\x8c\x05\x18\xd2\x71\x33\xb6\xd1\x15\x28\xaa\xad\xd6\x33\xaa\x33\xf2\x84\x3a\x46\x75\xca\x6e\xaa\x63\xe6\xb5\x3d\x51\xf4\xe0\xda\xe0\xda\xe0\xda\xe0\xda\xbd\xc3\xb5\xb1\x8e\x0c\x58\x47\x76\x0f\xf8\x47\x06\x93\x8e\x64\x30\x81\x7f\x05\xfe\x15\xf8\x57\xe0\x5f\x81\x7f\xa5\xa7\xfd\x2b\x48\x64\x85\x44\x56\x48\x64\xd5\xae\x44\x56\x70\x5f\xc2\x7d\xd9\xab\xee\xcb\x74\xae\x43\x39\xdc\x2c\x5e\x1d\xf3\xe6\xd5\x77\xb1\x3b\xe5\xdb\xec\x58\xda\xad\x65\xd7\xad\x4f\x7f\xf3\xe6\x7e\x8a\xc8\x78\x01\x23\x73\x45\xcd\xae\x15\xf8\x8e\xab\x2a\x48\xc0\xca\x82\xe3\x1c\xd1\x58\x16\x9c\x25\xfb\xf8\xed\x92\x05\xa7\x25\xb9\x9a\x1a\x8f\xe6\xb0\x82\x36\xbe\x64\x67\xc1\x51\xd6\xca\x9a\x91\x51\x78\x9f\x8d\xaf\x27\
xc6\xc5\xe5\xb5\x62\x59\x29\x94\xb4\xac\xf5\x9b\xaa\x1b\xec\xc3\x43\xec\x5d\x03\xf4\x32\xd7\xe1\x97\xd7\x13\xe1\x43\xf5\xa3\x31\xce\xda\xad\x2d\x68\xd9\xa4\xdd\x5a\x4c\x04\x71\x24\x9d\xa6\x96\x13\x3e\x47\x6e\xb7\xac\x38\xaf\xa7\x65\x39\x6e\x17\xe8\x71\x31\x6e\x67\x68\x86\xa6\x7c\x9d\x60\xae\xde\xe4\x83\xe5\xd3\x09\xad\x4f\x95\xe3\xba\xae\x69\x13\x7c\x2e\x5d\x99\x73\x0b\xa9\x72\x10\x87\x82\x54\x39\xf0\xd7\xc1\x5f\x07\x7f\x1d\xfc\x75\x48\x95\x83\x54\x39\xf0\x93\xc0\x4f\x02\x3f\x09\xfc\x24\x5d\xe5\x27\x41\xaa\x1c\xa4\xca\xd9\x2e\x64\x18\xa9\x72\xda\x91\x2a\xe7\x17\x86\x69\x84\x0f\xdd\xf8\x7a\x42\xf6\xdb\xb8\x5d\x6f\xd6\x2c\x14\x29\xfe\xfd\xa6\xf1\x92\x96\x35\xd8\xb3\xc3\xec\xc7\x06\x68\x47\x46\xd3\xd5\xcb\xeb\x89\xb0\x66\x7e\xc9\x8a\xd9\xfc\x7a\x3e\xbb\xa6\x14\xdc\xcf\xa8\xd8\xab\xc6\x05\x2d\x1b\x77\xf9\x4e\x27\xed\x85\xc4\x90\x38\x7f\xc8\xd9\x25\x57\x6c\x7b\xb4\x92\x6a\x2b\xa3\x8c\xb2\xaa\x64\xe3\xb1\xfb\xc5\x09\x53\x9a\xae\x2e\x27\x6c\xd4\x97\x5d\xd0\xb2\xf3\xf9\x16\x56\xe3\x68\x13\xf1\xbb\x46\x0b\x92\xf8\xcd\xd1\x19\x41\xfc\x92\x74\x8a\x4e\x34\x41\x6a\xc5\x94\x9a\xe1\x2b\xba\x40\xd6\xb7\xd7\x9b\xc3\xed\x66\x24\x6e\x4c\x86\x0e\xcd\x04\x03\xc1\x28\x8b\x98\x40\x30\x1a\xb5\x7c\x03\x5a\xb6\x82\xf9\x81\xe9\x81\xe9\x81\xe9\x81\xe9\x81\xe9\x81\xe9\x81\xe9\x81\xe9\x81\xe9\x81\xe9\x81\xe9\x81\xe9\x81\xe9\x75\x98\xe9\xbd\x8e\x5e\xcb\x9e\x89\x3e\x6d\x97\x33\x9b\x34\xaf\x2c\x83\xe5\xf9\x02\x67\xad\x7c\x25\x62\x64\xb4\x92\x3a\x1a\x31\xd6\x32\x57\x78\xef\x8b\x15\xac\xaa\x5c\x97\x0c\xa2\xa4\x6b\xe2\x49\xa3\xbb\x6c\x02\xd0\xce\x42\x67\xa0\x86\xa0\x86\xa0\x86\xdb\x88\x1a\xfe\xd0\x30\x3d\x52\x49\x0d\x75\xd5\x46\x62\xa6\xa6\x54\x68\x01\xbf\x32\xc4\x7e\xdf\x05\x0c\xff\x69\x5f\x63\xc4\x70\xd1\x69\x6d\xca\x6e\xad\x45\x0c\xf1\xa0\x8b\x21\x7a\x5e\x67\x3e\x6f\x94\xb7\x9b\x9a\xf0\x9a\xbf\x3c\xb4\xe5\xd0\xd1\x62\x8b\x4f\x05\x63\xc3\x83\x6c\xc2\xac\xa8\xe7\xd5\xd1\x15\xf8\xd0\xc1\x8b\x0d\x41\x4b\xd0\x46\xd0\x46\xd0\x46\xd0\x46\xd0\x46\xd0\x46\xd0\x46\xd0\x46\xd0\x46\xd0\x46\xd0\x46\xd0\x46\xd0\x46\x28\x08\xc1\x02\xc1\x02\xc1\x02\x3b\xc5\x02\xff\x7e\x88\x26\x3c\x02\x84\x27\x56\xd4\xb2\x32\xe1\x1f\x25\xfc\xa9\x21\xf6\x89\x01\xba\xab\x22\x4a\x58\x9e\xd3\x6c\xa8\xf0\x89\xea\x50\x61\xd9\xdc\x2d\x12\x2f\xbc\x4e\xaf\x91\x20\xef\x22\x2d\x0a\x90\x37\x4f\x69\x3a\xdb\x58\xbc\xb0\xec\x88\x4d\x05\x0d\x07\xa5\x1f\xcf\x05\xc3\xc0\x69\x96\x6a\x34\x82\x58\xde\x30\x79\xc4\x20\x83\xfb\x81\xfb\x81\xfb\x81\xfb\x81\xfb\x81\xfb\x81\xfb\x81\xfb\x81\xfb\x81\xfb\x81\xfb\x81\xfb\x81\xfb\x81\xfb\x81\xfb\x81\xfb\x81\xfb\x75\x8a\xfb\xbd\x7b\x84\xe6\x25\xf7\xd3\x57\x94\x4c\xdc\x42\x46\xa2\x29\x27\x4b\x63\xfd\xa0\x62\x5d\x2b\xa8\x2b\xf9\x62\x36\x5f\xcc\x19\xec\xc5\x61\xf6\x95\x01\xba\x9b\xb7\x96\x74\x37\x76\x79\x3d\x11\xbe\xd1\xa0\x6e\x50\x2b\xa8\x29\xd9\x5e\x8b\xd4\x82\x09\x71\xc2\x62\xf5\x3d\xb9\xc3\x8f\x5d\x17\x45\x18\xb2\x37\x3d\xcc\x06\xd3\xc1\x24\x3b\x65\xe2\x3e\xdf\xe9\x64\x06\x1e\xbb\xfa\xbb\x32\xe9\x20\x74\x83\xe0\x87\xe0\x87\xe0\x87\xe0\x87\xe0\x87\xe0\x87\xe0\x87\xe0\x87\xe0\x87\xe0\x87\xe0\x87\xe0\x87\x5d\xc8\x0f\x11\xa5\x0c\x42\x09\x42\x09\x42\xd9\x46\x42\xf9\x6c\x8c\x9e\x08\x26\x94\xb2\x8e\x4c\x20\xa6\x34\xff\xf2\x26\xf6\xb1\x11\xf6\xf6\xdb\xe8\x5e\x2f\x4c\x29\xeb\xce\xfc\xac\x19\xe4\xec\x26\x94\x45\xcb\xb0\x5a\x22\xc6\x45\x8d\x7f\xae\x5b\x80\x28\x47\x9d\x15\x94\x58\x25\x9a\xdf\x5b\xbe\x08\x73\x96\x86\x43\x15\xab\x71\x57\xdb\xf1\xd8\x3e\x3f\xc0\x29\x1e\xa6\x92\x72\x02\x6d\xd6\xa2\xcd\x87\xbc\xa1\xe3\x4e\x76\xbb\xb8\x31\xda\x58\xb9\x95\x46\xd8\xa7\x47\xe9\x25\x40\x4d\x40\x4d\x40\x4d\x40\x4d\x40\x4d\x40\x4d\x40\x4d\x40\x4d\x40\x4d\x40\x4d
\x40\x4d\x40\x4d\x40\xcd\xce\x42\xcd\x04\x8d\xb3\xb1\xe8\x3e\x1b\x6a\xee\x11\x34\xd3\xbc\x0b\xbe\x77\x8b\x0e\xf0\xbf\xb8\x29\x25\x38\x28\x38\x28\x38\x28\x38\x68\x1b\x39\xe8\x37\x87\x68\xd8\xac\xa3\x5d\xd6\x74\x25\xa7\xd6\xc8\x33\x33\x46\x3e\xab\xe7\xf9\x30\xb2\xff\x34\xc4\x3e\x3e\x40\x64\x1e\x79\x79\x3d\x11\x5e\x6b\x4c\x7b\x39\xb5\x34\x37\x2d\xda\x68\x91\xf2\xf2\x3e\x71\xc2\x92\xbc\x8f\xe5\x84\xdd\xfc\xf6\xd0\x58\x76\x3e\x1d\x63\x43\xa5\x5e\x1a\xcb\xd9\x68\xc2\xc8\xca\xd9\x62\x12\x48\x7b\x1c\x50\xfc\x05\x04\x12\x04\x12\x04\x12\x04\x12\x04\x12\x04\x12\x04\x12\x04\x12\x04\x12\x04\x12\x04\x12\x04\x72\x2b\x09\x24\x60\x1f\x60\x1f\x60\x5f\x0f\xc3\xbe\xff\x15\xa3\xc7\xac\xd2\x2c\x7e\x72\x46\xcf\x5a\x2d\x96\xbe\xf1\x17\x63\xec\x5f\xf5\x3b\x25\x5b\x86\x75\x55\xc9\x56\xed\x41\x3d\xab\x79\xc4\x46\xf8\x91\xd5\xb5\x99\x3d\x0f\x6d\x19\xbe\x93\xb4\x2d\xeb\x4f\xdb\xe6\x24\x6d\x4b\xd1\x69\x41\xdb\x26\xe9\x28\x1d\xf6\xcd\x99\xc8\x1f\x39\xbe\x9e\x88\x7b\xde\xb3\x85\xd9\x1e\xf0\xc6\x6c\xb7\xb3\x81\x9c\xba\x51\xc0\x66\xd7\x52\x0e\x2e\x8f\x12\xfe\x9b\x9d\xce\x98\xec\xe3\x23\xa8\x64\xd4\x86\x86\x65\xd4\x3c\x78\x0b\x46\x26\x25\x72\x54\xb2\x81\x15\x2d\x7b\x33\x2c\xfe\x37\xd2\x17\x6b\x7a\x04\x52\xef\x0a\xd1\x4f\x84\xd8\x8f\x87\xc2\xff\xc6\x36\xc6\xdf\xe9\x7b\x8a\x7f\xd5\xcc\xcf\xe9\xa8\x80\xd4\x19\xbe\x69\x96\xd6\xb3\x62\x0b\xe7\xc6\x7a\x2b\x6a\xa4\xc4\x7b\x97\xaf\x9b\xe3\x91\x64\x31\x92\x2f\x4a\x78\xa2\xe9\x91\xb5\xa2\xcd\x5f\xb2\x91\xac\x7e\x73\x71\xad\x18\xc9\xe6\x75\x95\x9b\x0c\xd5\xde\xd4\xf3\x6f\xb3\x58\xfc\x98\x54\xc3\xda\xc3\x98\xfb\xc8\xc8\xea\x9a\x2e\xd6\xc9\x25\x5d\xcb\xa8\x86\x58\xa5\x98\x16\xca\xfc\xfc\xc5\x23\xcb\xe2\x8a\x62\xff\x21\x96\x15\x93\x91\xb1\x48\xb2\x50\x98\x14\x6b\x90\xac\x7e\x33\xa2\xaf\x15\xf9\xfe\x96\xdb\x18\x6b\xe1\x65\x36\xa7\x66\xa3\xb7\xcb\x5b\x73\x19\xd4\xd4\x8f\x86\xe8\x47\x42\xec\x1d\xa1\xf0\xdb\xec\x0e\xfa\x66\x9f\x58\xb6\x3e\xae\x14\x95\x9c\xaa\xcb\x0d\x98\x74\x2d\x1a\x86\x96\xc9\x8b\x35\x85\xbd\xd5\x51\xc4\x5e\x51\xd3\x23\x7c\xc9\x59\xbe\x69\x2f\xb2\xae\x2b\xd7\xf8\xfd\x97\xaf\xa8\x86\x6a\x59\x3d\x6e\x89\x2d\x08\x25\x38\xcc\x8a\x1a\x11\xf6\x56\x6c\x6b\x34\x3d\x92\x98\x38\xca\x8f\xd5\x95\x8c\xa0\x65\x05\xad\x98\x93\x36\x4e\x6c\x3d\xb8\xe1\x51\xf2\x45\xb9\xf4\x10\x4b\x7b\xe7\x58\x41\x10\x4c\x96\xc7\x37\xa4\xd6\x77\x2b\xa7\x15\x94\x62\x2e\xae\xe9\xb9\xf1\xd2\xb5\xdc\xf8\x5a\x31\x9f\xd1\xb2\xea\xf8\xde\x39\x63\x81\xb7\x12\x8f\xee\x76\x3f\xab\xfb\x4b\xf3\xc9\xbe\x16\x5a\x02\x3a\xcf\x9b\x4a\xb0\x33\x34\x43\x3b\xa6\xe4\x82\x7a\x53\xed\x05\x49\x8c\xfd\x4d\x4e\x69\xad\x8d\x26\x27\xf6\x53\x7b\x1c\x93\xf3\xa0\xdc\x23\x44\x14\x1f\x2b\xb3\x4f\xfe\xbe\x15\x46\xe6\x22\x2d\xd2\x82\xdb\xc8\xc4\x52\x74\xba\x09\x97\xca\xb4\x78\x82\x0b\x82\x50\x1b\x30\x36\x41\xc6\xe6\x6b\x21\xfa\x6a\x88\x7d\x39\x14\xfe\xa2\xdd\x41\x1f\x0f\x5d\x74\xaf\xc8\xf2\x7c\x63\x20\x56\x55\x91\x15\x75\x55\x7a\x67\xec\xad\x90\xe3\x02\x30\x77\x9f\xe2\x16\x5d\xc6\xa4\xa8\x15\xc7\x8a\x6a\x4e\x11\x1d\x61\x2e\xca\xdc\x46\x47\x42\x11\x7b\x08\xcc\xf9\x99\xbf\x7e\x5d\xcd\x72\xb3\x56\xb8\xe9\xb8\x4d\x1c\x54\x9e\x2f\x8c\x9a\x6b\x35\xb1\x5a\x8d\xe4\x74\xfe\x19\x2d\xa9\x7a\x5e\xcb\xda\x6b\x4e\xe7\x8b\x2a\x3c\x52\x56\x6f\xac\x19\xfc\x26\xdd\xeb\x5c\x85\x9f\x69\x3d\x90\x79\x91\x55\x89\x07\xac\x26\xe2\xf2\x3e\xaf\xab\x4a\xd1\xf3\x1e\xa3\x4c\xdc\xc2\x82\xb8\x03\x8f\x35\x68\xea\x77\x42\xf4\x62\x88\xbd\x10\x0a\x3f\x6f\x77\xf3\x7b\x42\xd3\x2e\x07\x6b\xa9\xa0\x2a\x86\x6a\xb3\x9c\x05\x5d\x2b\x29\x39\x31\x00\x0b\x5a\x21\x9f\xb9\x59\xe1\xce\xb1\x9e\xc5\xf1\xd0\xf2\x51\x4a\xc4\x8f\xc4\x23\x4b\x72\x40\x64\xef\x94\xf8\x16\x84\x6f\xe3\x2c\x2c\xa8\x46\x3
4\xbd\x74\x45\x29\x5a\xee\x25\x7d\x4d\x1d\x5f\x55\x0a\x16\x46\x8f\xca\x5f\xa3\x91\xd5\x7c\x51\x29\xe4\xdf\x68\xf1\xb8\x15\x95\xaf\x71\xc5\xa6\x67\x5c\xae\x6d\xb3\x0e\xa3\x95\x8d\x0f\x19\xce\x49\x12\x5c\xc7\x23\x33\x79\x31\x9d\x5d\x37\xae\xe9\xb5\x4f\xe6\x6c\x20\xcb\x92\x9b\x8b\x97\x4d\x2b\x5f\x89\x47\xf7\xc8\xfb\x99\xb6\x1e\xa4\xc2\x4f\x95\x7a\xeb\x00\x3d\x3b\xc0\xde\x3c\x10\xfe\xbe\xed\xac\xfc\x42\xff\x53\x26\xd8\xe2\xaf\x14\xdf\xc1\xe5\x14\x7d\x45\xc9\xa9\x91\x0c\xb7\x59\xc2\xe2\x3a\xef\x85\xaa\xaf\x6a\xfa\x75\xde\x17\x9e\x77\x7a\xa1\xea\xe2\xfe\x37\x2a\xe6\xb3\x35\x19\x4b\xf2\xa9\xf2\x7c\xa2\x64\xf2\x59\x87\x50\x0b\xd8\x29\xfc\x05\x76\xef\xf2\x3d\x9d\x89\x02\x2d\x9c\x19\x77\x75\xa3\x05\xc2\xec\x5d\x9b\xed\xa2\xac\xbc\x58\x3c\x92\xcc\xf0\x0d\x9e\xf8\x02\xbb\x8d\xc3\x90\x7c\x86\xa1\xc8\x98\x39\xf0\x95\x13\xc3\x38\x1e\x19\x4a\x29\x99\x6b\xfc\xab\x52\xcc\xf2\xa3\x84\x6f\x50\x1c\x54\xd5\x71\x92\x3e\x9b\x53\xbf\xb2\x11\xeb\x09\x56\xec\x96\x8e\x47\x86\x66\x35\x5d\x75\x35\x1b\xc9\x28\x46\x46\xc9\xf2\xa7\x37\xfb\x47\x7a\x83\x45\x7b\x86\xb4\x5e\x35\x0d\xae\xda\x6d\xc4\xa3\x77\x96\xaa\xe7\x8d\x7b\x65\xf0\x5c\x1f\xcd\xcb\x95\xc1\x0c\x4d\x89\x95\xc1\x09\x3a\x4e\xc7\x9a\xf8\x7c\x2c\x95\x95\xf2\x9a\x41\x8b\xbc\xb5\x09\x76\x8e\xe6\x68\xa7\xec\x5b\x35\xbb\xd9\x36\x83\x16\x08\x6d\xab\xc4\x18\xf1\x5e\x79\xec\x62\x3b\x64\xff\x53\xea\x1d\xbb\x9d\x35\xc2\xc1\x92\xa2\x97\xf3\xc2\xbd\x28\xf7\xb4\x0d\xed\x4f\x62\x25\xa7\x1e\x67\xc7\x16\x0e\xf7\x55\x37\x34\x26\xee\x62\x1f\xff\xe7\xe4\xfd\xee\x1f\xaf\xab\x7a\x4e\x75\xff\x3a\xe8\xfe\xd5\x28\xeb\x4a\x59\xcd\xe5\x33\x63\x35\xc7\x55\xb4\xc2\xff\x7d\xd3\xfc\x95\xdf\x65\xea\x3c\xcd\x53\xba\x6a\x67\x34\x49\x47\x9b\x98\x23\x0b\xc2\x1f\x8f\xe5\x4a\xc0\x72\xe5\x63\xfd\xf4\x91\x7e\xf6\xa1\xfe\xf0\xfb\x6d\x9b\xff\x96\xfe\xde\xd9\x1b\x55\xb9\x30\x78\x3f\x0b\x97\xbc\x70\xb3\xf1\xc9\xe9\x08\x73\x86\x7d\x26\xee\x88\xf8\x74\x49\x05\x87\x52\x10\x67\xf2\x35\x9a\x3c\x5b\x1c\x21\xd6\x4b\x46\x64\x38\x6d\x68\xc5\x05\xa9\x50\x79\x9c\xbf\x16\xe6\xbf\x97\xac\x97\xc5\xf9\xe3\x88\xff\xa6\x2d\xf5\x62\x1f\xbd\xd0\xc7\x9e\xef\x0b\x7f\xc6\xa6\x6f\x1f\xec\x9b\xd5\xf4\x8c\x58\xca\xe5\x34\xd1\xed\x5a\x24\xba\xca\xff\x14\x8d\x24\x2b\x9e\x42\x70\x55\xb9\xea\x5a\x33\x1c\x5f\xfc\x98\x92\x11\x0f\x2e\x14\x0e\x85\x7c\xc6\xfc\xaa\xaa\x85\xac\x11\xd1\x6e\x98\x7d\x2a\x15\x00\x25\x55\x2b\x15\xd4\x78\x44\x5e\x51\x48\x8a\xac\xb1\x14\x7c\xd3\xb3\x03\xec\xcb\x47\x6f\x13\xb7\x55\x21\x8f\x69\x25\x8e\xda\x44\x5c\x6a\x49\xc6\xa5\xb6\x6b\xdb\x98\x4e\xd1\x69\x76\x32\xfa\x98\xad\x68\x7e\xb8\x42\x04\xed\x75\xfe\xad\xa0\x8a\x66\xbf\x77\x89\x0e\x48\x95\xe5\x8a\xd0\x54\x5a\x91\xe5\x3e\x10\x36\xa3\x6b\xc5\xab\xda\x8a\xc1\x7e\xe2\x12\xfb\xbf\x06\xe9\xa5\xe2\x24\x3b\x86\xfc\xd1\xfa\x25\x70\xa6\x74\xad\x98\xd6\x56\x62\x0f\xf3\xc3\x52\xfc\x90\x9a\x78\x6d\xf3\x90\x2e\x57\x4a\xa6\xa0\x1b\x84\x6e\x30\x05\xdd\x20\x74\x83\xd0\x0d\x42\x37\xd8\x33\xba\xc1\x54\xd7\xe8\x06\x5b\x7e\x27\x4d\xeb\x06\x53\xd0\x0d\x42\x37\x08\xdd\x20\x74\x83\xd0\x0d\x76\x5e\x37\x98\xea\x69\x99\x5f\x0a\x32\xbf\xf6\xc9\xfc\x52\xdd\x2e\xf3\x4b\x6d\x43\x99\x5f\xfa\x75\x34\x23\x59\xe2\x49\x7a\x4c\xb0\xc4\xc3\x74\x90\x26\x7c\x59\xa2\xa0\x4b\x71\x93\x2e\xc5\x4d\x36\xd4\x50\xe1\xdf\x27\x82\x31\x61\x9c\x8d\x9a\x98\x50\x5c\xc6\x0a\x14\x95\x17\xa9\xcd\x56\x17\x54\x4b\x38\xfa\x8f\x3b\xab\x71\xd8\x1e\xf9\xfd\x89\x28\x36\xf9\xda\x2b\xff\xd2\x19\xf6\x25\x51\xd5\x34\xa5\xe8\x74\x95\xcb\x66\x3f\xc5\x37\xd6\xe7\x70\xd4\x40\xc4\xd6\xa4\x88\xed\xaf\xfa\x28\x29\xdf\xf9\x49\x3a\x2a\xde\xf9\x09\xda\xf0\xfc\xa3\x59\x29\x5e\x3b\x45\x27\x1c\xf1\x5a\x33\xed\x9c\x91\x7e\xee\xd3\x74\xd2\x
e5\xe7\x6e\xa6\xa1\xae\x32\x40\x25\xcd\x28\x53\xec\x0f\x46\xab\x0d\xd0\xfd\xa6\x7e\xc1\x25\x06\xd1\x56\x6d\x63\x34\x26\x7f\x75\x1b\xa3\x29\xfb\xc0\x76\x9b\xa5\xf6\xc8\xdf\x80\x64\x81\x64\x81\x64\x81\x64\x7b\x07\xc9\x62\x61\x16\xb0\x30\xeb\x1e\x66\x0d\xe9\x71\x47\xa4\xc7\x70\x0d\xc0\x35\x00\xd7\x00\x5c\x03\x70\x0d\xf4\xb4\x6b\x00\x11\x28\x88\x40\x41\x04\x4a\xbb\x22\x50\xe0\x79\x83\xe7\xad\x57\x3d\x6f\xe9\x5c\x8b\x83\xaf\x82\x58\x72\xcc\x1b\xfc\xde\xc5\xee\x94\x6f\xb3\x63\x69\xa9\x0d\xdc\x79\x3b\xaa\xda\x9f\x7b\x2d\x9d\x92\xaa\xf6\x6c\xde\xc8\x68\x7c\xc1\xe3\xca\x1e\xec\xa3\x6d\x57\x8b\xd9\x92\x96\x2f\x96\x8d\x42\x3e\xa3\x1a\xec\xdb\x97\xd8\x07\x06\x69\xb7\xdd\x80\x48\x61\x51\x5f\xe0\x3e\x63\xb6\xb0\xc4\x5b\x88\x0d\xf2\x83\xa7\xad\xd3\xdd\x61\x62\x15\xc7\x41\xeb\x0e\xad\x3b\xb4\xee\x70\xac\xc0\xb1\x02\xc7\x0a\x1c\x2b\x5d\xe3\x58\xe9\x1e\xbf\x01\x80\x36\x80\x36\x80\x36\x80\x36\x80\x76\x4f\x03\x6d\x10\x37\x10\xb7\x1e\x25\x6e\xdb\x52\xeb\xae\xfa\xe7\x77\x4d\x4b\x7c\x38\x45\x49\x81\x0f\x8f\xd3\x31\x3a\xe2\xab\x3f\x75\x08\xd6\x7a\x22\x5e\x41\x8e\xe6\xf3\x86\x5d\x47\x29\x40\x9d\x7e\xf5\x52\x30\x17\x9c\x64\x47\x25\x0a\xac\xb8\x88\x57\x92\xa5\x6a\xa8\x46\xd1\x0f\xed\xaa\x22\x65\xf7\x58\xda\xf7\x62\x15\x15\x1b\x96\x3f\x74\x90\x8b\x49\x8c\x35\x47\x67\x68\xa6\x4a\x07\x7f\x88\x0e\x34\xd1\xed\xd0\x5c\x41\x0c\xdf\xa4\x18\xfe\x7f\x9b\x79\xdb\x26\x84\xeb\xc0\x51\xa0\x1f\xa1\xe6\xa6\xa2\xbf\x89\x99\x95\x26\xe6\x14\x9d\x10\x26\xa6\xe9\x0b\x9c\x93\xc2\xfb\x69\x4a\x39\xc2\xfb\x66\x1b\xb3\x6c\x55\x63\xb6\xc8\xcf\xd6\x04\xd9\xa8\x40\x9d\xfc\xb7\x46\xab\x6c\x55\xd4\x53\x26\x5f\x69\xb6\x26\xe4\x31\x2e\xb3\xe5\xa5\x95\x6f\xa7\x01\x83\x62\x1e\x60\x17\x60\x17\x60\x17\x60\x17\x8a\x79\x28\xe6\xa1\x98\x87\x62\x1e\x0e\x06\x38\x18\xe0\x60\x80\x83\x01\x0e\x06\x28\xe6\xa1\x98\x87\x62\x1e\x8a\x79\xf8\xef\xe0\xbf\xeb\x42\xff\x5d\x57\x2b\xe6\xdb\x4b\xa3\xb7\xa3\x7a\xfe\x17\x86\xe9\x88\x54\xcf\x1b\x65\x4d\x57\x72\xaa\xa3\x9d\x97\xc9\xe1\xc5\x34\x1a\xcf\xf0\x45\x97\xf8\x3d\xa3\x94\x94\x4c\xbe\x9c\x57\x0d\xf6\xed\x21\xf6\xad\x01\xba\xc3\xfc\xc1\xce\x44\xf3\x4f\xcc\x75\x6e\x31\x9b\x5f\xcf\x67\xd7\x94\x82\xdb\x67\xab\xd8\x7b\xca\xa9\xa5\xb9\x25\x79\xe6\x94\x6c\xf2\x66\xdc\xb5\x80\x98\xb4\x97\x1e\x43\xa2\xb9\x21\x07\xed\x56\xb0\x3a\xad\xa4\xda\x78\xc0\x28\xab\x4a\x36\x1e\x3b\x26\x4e\x30\xdb\xb6\x92\xdd\xd4\x5c\x6c\x3e\x6f\x94\x67\x35\x3d\x59\x28\xd8\x48\xdf\xe8\x72\x79\x7e\xfa\x1a\x2d\xc8\x57\x6b\x8e\xce\x88\x57\x2b\x49\xa7\xe8\x44\x13\xaf\x96\x70\xa7\xcf\xf0\xbd\x61\xe0\xeb\xb5\xd7\xfb\xf5\xda\xcd\x48\xdc\x98\xf4\x38\xab\xc1\xef\x55\x8a\x9d\xae\x09\x37\x31\x5f\xb4\xca\x99\x67\xc5\xa8\xd4\x8c\x18\xa5\x11\x9e\x80\xf0\x84\x34\xbc\x58\xf0\x62\xc1\x8b\x05\x2f\x56\xcf\x78\xb1\xd2\x5d\xe3\xa4\x69\xf9\x9d\x34\xed\x3d\x48\xc3\x7b\x00\xef\x01\xbc\x07\xf0\x1e\xc0\x7b\xd0\x79\xef\x41\xcb\x29\x44\xba\xa7\x79\x69\x1a\xbc\xb4\x7d\xbc\x34\xdd\xf5\xbc\x74\x1b\xc6\x3b\xb0\xbf\x1e\xa4\x7b\x2a\x2b\x49\x3a\xd5\x22\x7f\x77\x90\x7d\xb6\x9f\x76\x5a\xd9\xa9\x1b\x2d\x14\x19\x75\x15\x8a\x34\xff\xb6\xdd\xf0\xdc\x6b\xe8\xb4\xc4\x73\xc7\xe8\x88\xc0\x73\x09\x1a\xa7\xb1\xc0\x64\xe3\x1b\x2a\x76\x10\x14\xfb\x31\x1f\x4c\xe2\x46\xd8\x90\x47\x9c\x87\x57\x7e\x18\x00\x37\x00\x37\x00\x37\x00\x37\x00\x37\x00\x37\x00\x37\x00\x37\x00\x37\x00\x37\x00\x37\x00\x37\x00\x37\x00\x37\x00\x37\x00\x37\x00\xb7\x4e\x01\xb7\xe7\x87\xe8\x95\x7c\xe8\xc6\xd7\x2d\x45\x9e\x93\xd8\x96\xfd\xec\x10\x7b\xf7\x00\xed\xc8\x68\xba\x7a\x79\x3d\x11\x5e\x6b\x4c\x7f\x67\x73\xb5\x16\xc9\xee\x5e\x29\x4e\x98\xd2\x74\xd5\x95\xe6\x63\x3e\x6f\x94\xbb\x1d\xdb\x75\xad\xaa\xee\x5c\x30\xcb\x1b\x66\x83\x26\xbf\x8b\
x46\x4d\x78\x67\xf7\x7d\xa5\x36\x15\x28\x0f\x28\x0f\x28\x0f\x28\x0f\x28\x0f\x28\x0f\x28\x0f\x28\x0f\x28\x0f\x28\x0f\x28\x0f\x28\x0f\x28\x0f\x28\x0f\x28\x0f\x28\x0f\x28\xaf\x43\x28\xef\x83\x31\x7a\x95\xd4\xce\xe9\x2b\x4a\x26\x6e\xb1\x21\xd1\x94\x77\xf0\xad\x4f\x0d\x2b\x5d\x2b\xa8\x2b\xf9\x62\x36\x5f\xcc\x99\x3f\xbc\x89\xbd\x30\xc2\xde\x77\x1b\xdd\xcb\x5b\x4e\xba\x1b\xb6\x83\x73\x3f\xd1\x67\x8e\x95\x0b\x09\x16\x2d\xeb\x67\x69\xf2\x16\xb5\x82\x9a\x92\x2d\xb7\x04\x0e\x8e\x3a\xab\x1d\xb1\xa2\x33\xbf\x8d\x7c\xc1\xe4\x2c\xe3\x86\x2a\x56\xce\xae\xb6\xe3\xb1\x09\x71\xb9\xc5\xea\x87\x32\x63\x7b\x9d\x94\x9c\xae\xfb\x06\x74\xac\x85\x8e\xf9\x60\x9e\x38\xcb\xa6\x4d\x9e\xe8\x3b\x39\x4d\xcc\xe8\xea\xeb\xda\x3a\x72\x57\x1f\xf2\xe6\x9b\x3b\xd9\xed\xa2\x0f\x80\x23\x81\x23\x81\x23\x81\x23\x81\x23\x81\x23\x81\x23\x81\x23\x81\x23\x81\x23\x81\x23\x81\x23\x81\x23\x3b\x8d\x23\x8f\xd2\x61\x76\x30\x3a\x61\xa7\x2c\xbb\x47\xe4\x2a\x33\xef\xc2\xb5\xc7\x8b\x0e\xf0\x1f\xdc\xa9\xc8\xb6\x5f\xb2\x33\xa0\x52\xa0\x52\xa0\xd2\x6d\x84\x4a\xff\x78\x98\xc6\x7d\x92\x13\x3a\x79\x09\xb3\x7a\x9e\x0f\xa3\x45\x40\x3f\x34\xcc\xfe\x7c\x80\xc8\x49\x4a\x18\xfe\xf9\x46\x90\xe7\xd4\xd2\xdc\xb4\x68\xa8\x2b\x80\xe7\x3d\x95\x29\x0c\xed\x9b\x03\xd5\xac\xa5\x9a\x4f\x05\x53\xcd\x83\x6c\x22\x20\xd5\xa0\xec\xde\xca\xda\x52\x60\x98\x60\x98\x60\x98\x60\x98\x60\x98\x60\x98\x60\x98\x60\x98\x60\x98\x60\x98\x60\x98\x60\x98\x60\x98\x5d\xc6\x30\x0f\xd3\x41\x36\x11\xdd\x6f\x93\xc8\x97\xbb\x19\xa6\xbd\xb9\xab\x25\x98\xe0\x8b\xe0\x8b\xe0\x8b\x3d\xcc\x17\x7f\x3e\x4c\x29\x2b\xaa\xda\x4f\x64\xa9\xda\x1c\x8c\x1b\x79\x5d\xe3\x26\xd6\x82\x8d\xe3\x46\x46\x29\xa8\xec\x1b\xf7\xb2\x1f\xee\x77\xe2\xaf\x13\xba\xaa\x64\x23\xe2\x27\xab\xa3\x9d\xaf\xf5\xa2\xd3\xde\x94\xdd\x5e\x2c\xce\x4f\xa9\x0a\x9d\xce\x7a\x1e\xba\xc4\x9b\x6d\x6d\xb5\xf1\x8d\xe6\x2d\x54\xd6\xca\x1a\x7f\x3a\xbe\x24\x5e\x4f\xc4\xc5\x1d\x05\x52\xba\x85\x60\x4a\x37\xc6\xf6\x99\x94\xce\x75\x05\x13\xd1\xc9\x8b\x54\xe0\xb9\x07\xbc\xf1\xdc\xed\x6c\x20\xa7\x96\x29\xfc\xa5\x9d\xce\x88\x1c\xe4\xa3\xa8\x64\xd4\x8d\x0d\x4a\xc2\x3c\x6b\xab\xc6\x25\x35\x45\x49\x3a\xe5\xae\x02\x1f\xe9\x8b\x6d\x74\x60\x50\x02\x39\xa8\x04\xf2\x8f\x86\xe8\x47\x42\xec\x1d\xa1\xf0\xdb\xec\x0e\xfa\x66\x9f\x58\xf7\x3e\xae\x14\x95\x9c\xaa\xcb\x1d\x9c\x74\x6b\x1a\x86\x96\xc9\x8b\x45\x89\xbd\x57\x52\xc4\x66\x53\xd3\x23\x7c\xcd\x5a\xbe\x69\xaf\xd2\xae\x2b\xd7\xf8\xfd\x97\xaf\xa8\x86\x6a\x99\x3f\x77\xc1\x61\xab\x30\xb1\x30\xbc\x62\x5f\xa4\xe9\x91\xc4\xc4\x51\x7e\xac\xae\x64\x04\x6e\x2b\x68\xc5\x9c\x34\x76\x62\xef\xc2\x2d\x90\x92\x2f\xca\x35\x88\xd8\x1b\x38\xc7\x0a\x04\x61\xc2\x40\xbe\xa3\xb5\x3e\x60\x39\xad\xa0\x14\x73\x71\x4d\xcf\x8d\x97\xae\xe5\xc6\xd7\x8a\xf9\x8c\x96\x55\xc7\xf7\xce\x19\x0b\xbc\x95\x78\x74\xb7\xfb\x59\xdd\x9f\x9c\xf7\xf6\xf9\xbe\xd5\x2d\x30\x18\x33\xbc\x85\x04\x3b\x49\x8f\xd1\x8e\x29\xb9\x42\x6f\xa2\x19\xcb\xbc\xf8\x1b\x83\xd2\x5a\x99\x5a\x6f\x7d\x52\x6f\xdf\xed\x98\x97\x13\x25\x45\xe7\x3b\x65\xbe\xbb\x14\xdf\xa7\x8d\xd9\x99\xf1\x92\x47\xe2\x8c\x8e\x58\x99\xfb\xaa\x1b\x1a\x13\xb7\xb2\x8f\xff\x73\xf2\x7e\xf7\x8f\xd7\x55\x3d\xa7\xba\x7f\x1d\x74\xff\xca\x17\xb2\x65\x35\x97\xcf\x8c\xd5\x1c\x57\xd1\x0a\xff\xf7\x4d\xf3\x57\x7e\x97\xa9\xf3\x34\x4f\xe9\x2a\x0b\x37\x49\x47\x9b\x70\x18\x2d\x08\x42\x0f\x63\x17\x60\xec\x3e\xd6\x4f\x1f\xe9\x67\x1f\xea\x0f\xbf\xdf\x76\x59\xbd\xa5\xbf\x77\x8c\x5d\x15\xd4\xe0\xfd\x2c\x20\xbd\x00\x6f\x7c\x72\x3a\xae\xba\x61\x9f\x89\x3b\x22\x68\xb5\xf4\xe9\x28\x05\x71\x66\x51\x2b\x8e\xc9\xb3\xc5\x11\xc2\xa7\x67\x44\x86\xd3\x86\x56\x5c\x90\x3e\xab\xc7\xf9\x6b\x61\xfe\x7b\xc9\x7a\x59\x9c\x3f\x8e\xf8\x5b\xe1\xd4\x8b\x7d
\xf4\x42\x1f\x7b\xbe\x2f\xfc\x19\x7b\x5d\xfd\xc1\xbe\x59\x8d\x6f\x13\xf3\x46\x24\xa7\x89\x6e\xd7\x22\xd1\x55\xfe\xa7\x68\x24\x59\xf1\x14\x62\xc7\x24\x8b\xc5\xaf\x19\x0e\x9d\x1f\x53\x32\xe2\xc1\x85\xcf\xa3\x90\xcf\x98\x55\x60\xd5\x42\xd6\x88\x68\x37\xcc\x3e\x95\x3e\x81\x92\xaa\x95\x0a\x6a\x3c\x22\xaf\x28\x9c\x8c\xd6\x58\x8a\x9d\x8b\x67\x07\xd8\x97\x8f\xde\x26\x6e\xab\xc2\x61\xd6\x89\xd5\xa6\xbf\xeb\x56\xdc\x60\xa3\x1f\x04\x8f\x34\xd9\x7e\x1f\x87\xf4\x04\xed\x67\xf1\xe8\xa8\x0d\x21\xee\x74\x43\x08\x71\xcc\xad\x20\xa1\x62\xff\x73\x94\x4e\x4a\x49\x86\x52\xca\xab\x6f\x28\xab\x45\xb1\xc1\x75\x09\x33\x32\x6b\x46\x59\xbb\x6e\x6d\xbf\xdc\xa3\x6a\x2a\x34\x3e\x3e\xca\xbe\xdc\x4f\x7b\x2a\xce\xe7\x9f\xd1\x7d\x62\xdf\x54\xf9\xc5\x9c\x12\x8d\x59\x3e\xb5\x69\xbb\xb1\x98\x38\x38\xe9\x6e\x62\x39\xe1\x77\x70\x8b\xb7\x4b\x6f\xab\xb3\x2e\xd2\xe5\xd4\xbe\x46\x79\x31\xb5\x33\xa4\xd0\x65\xbf\xa9\xed\xdc\xfa\x98\xed\x87\x76\x3e\x6c\x95\xdd\xbb\x9e\x88\xfb\x3d\x9d\x35\xe7\x0b\xc1\x53\x7a\x8e\x9d\x31\x15\x0f\x3e\x6d\x91\xd7\x8c\xf7\x18\xe7\xc0\xdd\xd7\xdb\xc9\x63\x80\xe3\xd6\x36\xac\xc1\x31\xb6\x8e\xdf\x92\x61\x4e\xad\x91\x41\xaf\xaf\x5a\x9b\xb4\x7f\x34\xb1\x84\xc1\x7e\xad\xc9\xfd\xda\x3f\x0f\x6d\x85\xf9\xa1\x1b\x72\x23\x57\xa2\xa2\xb3\x91\xeb\xc8\x85\x83\x16\x01\x8d\x19\xc4\x7a\x56\x6e\x03\xc6\x32\x60\x07\x1a\xfb\x2f\x7b\x3c\x0c\xe2\xc3\x66\xfd\x7a\xc5\xdf\x06\x8e\xc9\x43\xb6\xc6\x04\x5e\xa4\x45\x5a\x70\x9b\xc0\x58\x8a\x4e\x37\xb1\x39\x9b\x16\xcf\x70\x41\xac\x99\x0d\x58\xb8\x20\x0b\xf7\xb5\x10\x7d\x35\xc4\xbe\x1c\x0a\x7f\xd1\xee\xa0\x8f\x87\x2e\xba\xf9\x7d\xbe\x18\x31\x24\x83\x8f\xac\xa8\xab\x52\x03\x68\x3b\xdc\x1c\xa1\x99\xe9\xe3\x14\xb7\xe8\xb2\x60\x7c\xb9\x5e\x54\x73\x8a\xe8\x08\x13\xe1\xbb\x2d\x9d\x74\xbd\xdb\x43\x60\x4e\xd2\xfc\xf5\xeb\x6a\x96\xdb\xd2\xc2\x4d\x47\x9c\xe7\x08\xb2\xf2\x85\x51\x93\xec\x0b\xdf\x46\x24\xa7\xf3\x2f\x7d\x49\xd5\xf3\x5a\xd6\xf6\x50\x38\x1f\x7d\xa1\x7b\xb4\x7a\x63\xcd\xe0\x37\xe9\xf6\x8a\x28\xfc\x4c\xeb\x81\xcc\x8b\xac\x4a\x27\xb4\xd5\x44\x5c\xde\xa7\xdc\xdd\x78\xdc\x63\x94\x89\x5b\x58\x10\x77\xe0\xe1\xb1\x48\xfd\x4e\x88\x5e\x0c\xb1\x17\x42\xe1\xe7\xed\x6e\x7e\x4f\x68\xda\x25\xfe\x2d\x15\x54\xc5\x50\x6d\xc5\xc0\x82\xae\x95\x94\x9c\x18\x80\x05\xad\x90\xcf\xdc\xac\x10\x0d\x5a\xcf\xe2\xa8\x87\xf9\x28\x25\xe2\x47\xe2\x91\x25\x39\x20\xb2\x77\x4a\x6a\x31\xab\x16\xcb\x8e\xf8\x44\x8d\x68\x7a\xe9\x8a\x52\xb4\x44\x8c\xfa\x9a\x3a\xbe\xaa\x14\x2c\xb1\x56\x54\xfe\x1a\x8d\xac\xe6\x8b\x4a\x21\xff\x46\x6b\x17\xb7\xa2\x46\x94\xac\x70\x7d\x69\xe3\xd2\x13\x92\x75\x94\x40\xb2\xf1\x21\xc3\x39\x49\xca\xa3\xe2\x91\x99\xbc\x98\xce\xae\x1b\xd7\xf4\xda\x27\x73\xdc\x8d\x65\xa9\xce\x12\x2f\x9b\x56\xbe\x12\x8f\xee\x91\xf7\x33\x6d\x3d\x48\x85\x1a\x32\xf5\xd6\x01\x7a\x76\x80\xbd\x79\x20\xfc\x7d\x9b\x2f\x7c\xa1\xff\x29\x53\x3e\xc1\x5f\xa9\x2b\xda\x8d\x48\x4e\xd1\x57\x94\x1c\xdf\x7d\x5a\x95\xf9\x9d\xf7\x42\xd5\x57\x35\xfd\x3a\xef\x0b\xcf\x3b\xbd\x50\x75\x71\xff\x1b\x15\xf3\xd9\x9a\x8c\x25\xf9\x54\x79\x3e\x51\x32\xf9\xac\xa3\x83\x12\x92\x1a\xb9\xfb\xb5\x7a\x97\x6f\x6a\x4d\xc1\x89\x25\x9a\x89\xbb\xba\xd1\x92\x5b\xd8\x3e\x3e\x5b\x08\x5b\x79\xb1\x78\x24\x99\xc9\xa8\x25\xf9\xd9\x77\x1b\x87\x21\xf9\x0c\x43\x91\x31\x73\xe0\x2b\x27\x86\x71\x3c\x32\x94\x52\x32\xd7\xf8\x77\xa9\x98\xe5\x47\x09\x05\xaa\x38\xa8\xaa\xe3\xa4\xc6\xc9\x9c\xfa\x95\x8d\x58\x4f\xb0\x62\xb7\x74\x3c\x32\x34\xab\xe9\xaa\xab\xd9\x48\x46\x31\x32\x4a\x96\x3f\xbd\xd9\x3f\x52\x73\x2c\xda\x33\xa4\xf5\xaa\x69\x70\xd5\x6e\x23\x1e\xbd\xb3\x54\x3d\x6f\xdc\xcb\x91\xe7\xfa\x68\x5e\x2e\x47\x66\x68\x4a\x2c\x47\x4e\xd0\x71\x3a\xd6\xc4\xe7\x63\xa9\xac\x94\xd7\x0c\x5a\xe4\xa
d\x4d\xb0\x73\x34\x47\x3b\x65\xdf\xaa\xd9\xcd\xb6\x19\xb4\x86\x88\x78\x7f\xd5\x77\xb1\x1d\xb2\x9b\xa8\xb3\xab\x8c\xd4\x1f\xef\xf6\x58\x46\x1c\xa9\xe1\xcf\x0d\x6e\xb0\x46\x05\x0d\xe9\xf8\xda\x02\xd8\xb9\xd7\x56\x34\xc0\xce\xc0\xce\x5b\x8c\x9d\xbb\x94\xda\x05\x92\xea\x8e\x62\xbd\xf4\x0c\x4d\xb1\x64\xf4\x94\x4d\xa4\x1f\xa9\x90\xd2\xf9\x34\xdf\x7e\x65\x1d\xfb\xdb\x41\xba\xd7\x52\xea\x64\xb4\xeb\x25\xad\xc8\x17\x25\xe2\x1b\xae\x1a\xec\xf7\x06\xd9\xe7\x5c\x02\x9c\x47\xa4\xd2\xbb\xba\xd0\xa8\x75\x9a\xfc\xf4\xc7\xee\xe5\x47\x49\xaf\x6b\xd5\x4f\xdd\x1e\x65\xb7\x42\x67\xe4\x84\x3d\x4d\x27\xc5\x84\x3d\x4a\x87\xe9\xa0\xaf\x07\x85\x77\x8b\x98\x8a\x95\x4f\xd9\x92\x72\xa3\x4f\x04\xcf\xce\x38\x1b\xad\x2e\x51\x50\x75\x27\x28\x54\x80\xa8\x3a\x44\xd5\x21\xaa\x0e\x51\x75\x88\xaa\x43\x54\x1d\xa2\xea\x10\x55\x87\xa8\x3a\x44\xd5\x21\xaa\x0e\x51\x75\x5b\x19\x55\x87\xe8\x38\x44\xc7\x21\x3a\xae\x87\xa3\xe3\xbe\x34\xe4\x30\x37\x99\x6d\x4b\x74\x96\x2e\x6e\x95\x7d\x6a\x88\x7d\xc2\x55\x74\x74\xbd\xb1\xa2\xa3\xf3\xbc\x85\x45\xfe\x43\x8b\xaa\x8e\x8e\xb8\xaa\x8e\x3a\x8d\xcf\xe7\x8d\xf2\xac\xa6\x27\x0b\x05\x3b\xa0\xa2\xeb\xa9\xde\x56\xe4\xce\x9a\x0f\xc6\x77\x23\x6c\xc8\x83\x1e\xdb\x28\xcf\xe9\x73\x6a\xa8\xa8\x29\xf0\x1e\xf0\x1e\xf0\x1e\xf0\x1e\xf0\x1e\xf0\x1e\xf0\x1e\xf0\x1e\xf0\x1e\xf0\x1e\xf0\x1e\xf0\x1e\xf0\x1e\xf0\x1e\xf0\x1e\xf0\x1e\xf0\x5e\xa7\xf0\xde\xff\x08\xd1\xcb\xcd\x48\x6e\x27\xa6\x7d\x7c\x3d\x31\xce\xfe\x20\xc4\x7e\x37\x44\x2f\x73\xfd\xf9\xf2\x7a\x22\x7c\x4f\x4e\x2d\x57\x7e\x60\xe5\xad\xc7\xee\xcf\xa9\xe5\xa4\x73\xec\x72\x22\xb9\x30\x67\x6d\xe8\x5a\x47\xdd\x26\x6b\x1a\x9a\xac\x69\x68\xb2\x91\x86\xd2\x25\x7f\x21\xe8\x93\x92\xc0\x9d\xa7\x79\x41\xe0\x66\x69\x9a\x52\x4d\x10\x38\x57\x07\xcc\xe7\x8d\xb2\x49\xdb\xd8\xbb\x46\x68\x22\x20\xdd\x58\x49\xcb\x96\xd5\xeb\x25\xb1\x9d\xb3\xe2\xe5\xbf\x3e\xcc\xbe\x11\x72\x48\xeb\x83\x1e\x61\xf2\x0b\x5a\xf6\xa2\x79\x5a\xec\x21\xaf\x5c\x62\xae\x03\x5a\x1c\x0d\xff\x34\x9d\x92\x9d\x76\x94\x0e\x8b\x4e\xdb\x4f\x71\x1a\x0d\x14\x23\xba\x6e\x28\x90\x52\xd6\x8f\x35\xbf\xfa\x78\x30\xc4\x8c\xb1\xe1\x6a\x70\xe9\xbe\x01\x37\xe0\x0c\x7f\xd2\x95\x38\xec\x61\xef\x88\x75\x77\x77\x47\x7d\xb2\x84\xb5\xad\xc7\x53\x29\x3a\x4d\x27\xab\xa2\x17\x36\xd8\xe5\x88\x58\x40\x94\x79\x93\x51\xe6\xef\xe9\xdb\xfc\x0b\x3f\x2d\x43\xc6\x4f\xd0\x71\x27\x64\xbc\xf5\x66\xa3\x31\xbb\x50\xcf\xb9\xe1\xbe\x5a\x50\x80\xf7\x37\xee\x70\xcc\xc6\xdd\x76\x5c\xb7\xdb\x52\x98\xd1\xde\x9d\x33\x14\x08\xdf\x46\xf8\x36\xc2\xb7\x11\xbe\x8d\xf0\x6d\x84\x6f\x6f\x38\x7c\xfb\xa7\xea\xc4\xcb\x6d\x7a\x01\x30\x23\x43\xb9\x4f\xd2\x63\xae\x50\xee\x0d\x37\xd3\x78\x90\x76\x6b\xb7\x08\xa9\xbf\x78\x89\xf3\xad\x8f\x05\x04\x5f\xbb\x57\x00\x11\xcf\x4c\x9f\x6d\x59\x00\x20\xc6\xba\xd7\x96\x1d\x88\xb1\x46\x8c\xf5\x16\xc7\x58\x77\x80\x05\xb5\x78\x53\x17\x14\x7e\x5d\xbf\x3e\xb2\xab\xa9\x5b\x22\xb9\xe7\xbf\x7f\x1d\xcd\x79\x20\xe1\x89\x15\xb5\xac\xf8\x22\x4b\x31\x40\xfc\x85\x2e\x94\xb4\xac\x75\x9e\xaa\x1b\xec\xb7\x2f\xb3\xaf\x0d\xd2\x5d\x15\x18\x59\x36\x15\x3e\x24\x65\x05\xba\xe9\x10\xad\x0e\xd5\x3e\x6b\x37\xb9\xa0\x65\x93\x76\x93\xb1\x23\xfc\x34\x37\x6a\x96\xcd\x39\x9f\x52\x9f\x13\xbb\x5c\xfa\x99\x82\x26\x12\x9a\xc8\x14\x34\x91\xd0\x44\x42\x13\x09\x4d\x64\xcf\x68\x22\x53\x5d\xa3\x89\x6c\xf9\x9d\x34\xad\x89\x4c\x41\x13\x09\x4d\x24\x34\x91\xd0\x44\x42\x13\xd9\x79\x4d\x64\xaa\xa7\x25\x8c\x29\x48\x18\xdb\x27\x61\x4c\x75\xbb\x84\x31\xb5\x0d\x25\x8c\xe9\x75\x7a\x8d\xe4\x8d\x17\x69\x51\xf0\xc6\x79\x4a\xd3\xd9\xc6\x4a\x09\x49\x74\x14\xf7\x21\x46\x2d\x49\x8e\x97\x0b\x66\x95\xd3\x2c\xe5\x5b\x58\xc8\xe7\xd6\x5c\xae\x28\xf9\x0c\x14\xfd\x71\xf2\xc6\x6c\xe6\x17\x2b\xa2\x
f8\x12\xb5\x63\xf2\x88\xad\x64\x6a\x12\x81\x3d\x4d\xaf\xa2\xe5\x2a\x77\xd1\x2c\x4d\xb7\x62\x2c\xe1\x3a\x82\xd8\xad\x49\xb1\xdb\xc7\x42\xf4\x94\xb4\x30\x0b\x74\x5e\x58\x98\xb3\xd4\xa2\x59\x49\xcf\x48\x11\xdc\x93\xb4\xe4\x88\xe0\x5a\xd7\xfa\x25\xe9\x61\x5f\xa6\x8b\x2e\x0f\x7b\xeb\x9a\x6f\xde\x34\x96\xb4\x4e\x9a\xc6\xd8\x8f\x8d\x79\x9b\xc6\x51\x53\x77\xe1\x12\xb1\x68\xab\xbe\x66\x72\xca\xac\xc2\x52\x63\x26\xa7\xec\xd3\x3b\x6d\x30\xdb\x23\xee\x03\x84\x06\x84\x06\x84\x06\x84\xee\x1d\x08\x8d\xc5\x61\xc0\xe2\xb0\x7b\x28\x3d\x84\xd5\x1d\x11\x56\xc3\x19\x02\x67\x08\x9c\x21\x70\x86\xc0\x19\xd2\xd3\xce\x10\xc4\xd7\x20\xbe\x06\xf1\x35\xed\x8a\xaf\x81\xaf\x11\xbe\xc6\x5e\xf5\x35\xa6\x73\x2d\xae\x0c\x1a\x44\xa3\x1b\xa3\xcd\x35\xf0\x78\xc3\xf8\xf9\x6a\xcc\x1b\x7b\xdf\xc5\xee\x94\x66\xc3\x31\xe9\xb4\x1d\xe3\x03\xbe\x11\xa2\xb0\x8c\x0f\xc8\xa8\xba\xe9\xb5\x55\xad\xe2\x71\xe3\xec\x37\x42\xec\x73\x21\xda\xed\xfe\x2d\x1c\xce\x89\xef\x05\xff\x6a\xd9\xd3\x49\x89\x88\x9e\x8d\xdd\x93\x53\xcb\x53\xae\x83\x93\x0b\x73\x67\xf8\x0f\x5d\x98\x33\x26\xef\x1f\x0c\x79\x5e\x4e\xe5\x33\x34\x23\xa6\xf2\x29\x3a\x41\xc7\x9b\xcb\x19\x23\x1e\xde\x4a\x16\xf3\xfc\x08\x9d\x95\x7d\xad\xaf\x28\x99\xb8\x75\x41\x71\x9b\x56\x8f\xaf\x27\x64\x64\x86\xcc\xce\x9d\x29\xac\x19\x65\x55\xd7\xb5\x82\xba\x92\x2f\xf2\x0f\x97\x9d\x42\xe6\x7b\xc3\xec\xa3\xb7\xd1\xbd\xbc\xa5\xa4\xbb\xa1\xcb\x66\x0b\xe1\x5f\xe9\x33\x6d\x81\x2b\x69\x77\xd1\x5a\x6b\xda\x15\xf4\xe4\x05\x16\xb5\x82\x9a\x92\x17\x68\x49\x12\xef\x51\x67\x8b\x29\xb6\xd1\xe6\x86\x84\xef\x52\x9d\xbd\xf3\x50\x05\x15\x72\xb5\x1d\x8f\xc9\xc7\x5f\xac\x7e\xb6\x65\xf9\x68\xb5\x37\xdd\xe5\xd1\x20\x5b\x92\x08\xdc\x3f\x0e\xea\x86\x2c\x43\x59\x0a\x36\xa1\x8f\xb3\x73\xa6\xc5\xf4\x9d\xb1\x56\xfd\xbf\x9a\x31\x71\xc7\xd7\x4a\xe3\x8b\x44\xe1\x08\x8a\x41\xa2\x70\xf8\x23\xe1\x8f\x84\x3f\xb2\x87\xfc\x91\x48\x14\x8e\x44\xe1\xf0\x03\xc1\x0f\x04\x3f\x10\xfc\x40\x5d\xe1\x07\x4a\x9f\xa2\x13\xec\x78\xf4\x98\xcd\xa9\x1e\x74\x67\xc0\xa8\xdd\xc9\xd5\x26\xc2\x40\xa6\x71\x64\x1a\xdf\x26\xe8\x1c\x99\xc6\xdb\x91\x69\xfc\xfb\x83\xf4\x0a\x2b\xef\xb5\x75\xfc\xeb\xd7\xb4\xb2\x62\xb0\xaf\x0d\xb2\x3f\xec\x77\x92\xa9\xed\xab\x9f\x17\xc6\xda\xfd\x3d\xc1\x4f\x8e\x0d\xf1\x83\x65\x32\xb5\x8a\x1f\xb6\x5b\xe1\xbf\xd7\xf9\xb3\xe5\x19\x09\x02\x4f\xd2\x63\x02\x04\x1e\xa6\x83\x34\x11\x98\x4e\xa9\xa2\x33\x5c\xf9\xc7\x03\xe3\x95\x2e\x04\x33\xbe\x51\x16\xab\x97\x5b\xa9\xe2\xd2\x40\x78\x40\x78\x40\x78\x40\x78\x40\x78\x40\x78\x40\x78\x40\x78\x40\x78\x40\x78\x40\x78\x40\x78\x40\x78\xa8\xf5\x07\x02\x07\x02\x07\x02\xd7\x31\x02\xf7\xc1\x51\x3a\xe7\x59\xeb\x6f\x83\x39\x9d\x2d\x3d\xe1\x1f\xee\x63\xbf\xdc\x5f\x53\x21\x30\xe6\x51\x99\xce\x2f\xa3\xc2\x04\x3f\xb6\xa2\x6a\x60\x87\x13\x28\xa4\xaf\xd3\xa2\xa4\x6b\xe7\x68\x4e\xd0\xb5\x29\x4a\xd2\xa9\xc6\xb2\x63\x34\x9f\x18\x23\xa0\x96\xdd\x4a\x30\x82\x3b\xc5\x4e\x6c\x22\x2f\x46\x82\xc2\x1f\xdc\x55\x33\x72\x63\xde\x75\xee\xfc\x06\xef\x90\x79\xf8\x56\x8e\x5f\xea\x49\x5a\xa2\x27\xaa\x32\x06\x6d\x7e\x00\x11\x0f\x8e\x64\x41\x4d\x26\x0b\xfa\x6c\x9f\x4c\x16\x94\x10\xc9\x82\xec\x9c\x3e\x6d\x35\x2b\xed\xb0\x61\x96\xa9\xea\x80\x2d\x0a\x2a\xaa\xf7\xa9\x3d\x35\xa6\x2a\x62\xd7\xd6\xf3\xb3\x4e\x07\x6b\x93\xf5\x74\xdc\x38\xa1\xf4\x1e\x4a\xef\xdd\xba\x19\x22\x10\x1a\x8c\xd0\x60\x84\x06\xb7\xad\xf4\xde\x73\x7d\x2d\x8e\x8f\x5c\x94\xc9\x00\xf9\x22\xc1\x49\x06\xd8\xe6\x98\xcb\xe0\xa2\x7c\x1d\x58\x5f\xa4\x7e\x69\x77\xcd\x02\xe2\x70\x40\xc1\x3e\xbf\x65\xc5\x01\x51\x11\x6a\xab\x56\x15\xa8\xe7\xd7\x6b\x6b\x19\xd4\xf3\x43\x3d\xbf\x2d\xae\xe7\xb7\x35\x88\xac\x13\xfb\xce\xc0\xba\x7f\xd3\x94\x62\xa7\xa3\x27\x6d\xd5\xf3\x5e\xb7\xea\xd9\xe7\
x12\xb7\x44\x0d\xc0\xbf\x0b\xd1\x7d\x12\x15\x1b\x99\x2b\x6a\x76\x4d\x8c\xa6\x1d\x6f\x3e\xce\x7e\x2f\xc4\x3e\x1f\xa2\x97\x3a\x3f\xf2\x6f\xea\x3d\x39\xb5\x5c\xe9\x90\x95\xa8\x3b\x76\x5f\x4e\x2d\x2f\xd9\x87\x2e\x27\x92\x0b\x73\x96\xfe\xa7\x75\xba\xcc\xd6\x85\xf9\x97\xe8\x49\x39\xe1\xf9\xd7\x8e\x4f\xf8\x59\x9a\xa6\x54\x73\xd1\xfc\xd6\x73\x36\x92\x4a\x9e\xfd\x56\x1f\xed\x1a\x37\x67\xe8\x38\xfb\x74\x1f\xfb\xd5\x3e\xda\x61\xfe\x77\xf8\xee\x9c\x6a\xc9\x96\xb2\xb6\xcf\x2c\xf6\xb2\x9c\x5a\x9e\xd2\xb2\xb6\x83\x2a\xa0\x13\xf8\x5f\xd2\xaf\xdb\x90\xa2\xb4\xf2\xf9\xcc\xeb\xc6\xe7\x8a\xab\x5a\xe0\xf3\x7c\xe8\x5e\x3a\x64\x49\x7e\x7d\x7c\x0c\x86\xaa\xaf\xe7\xed\x3f\xbe\x69\xdc\x10\xeb\x4a\xf6\x3f\x5e\xc9\xbe\x11\x72\x14\xc1\x8f\x08\x9f\x82\xfc\xcd\x7a\xfd\x9c\xb5\xda\x92\x6c\x23\xf6\x30\x3f\xaa\xba\xae\xb2\xf9\xa3\x5c\xaf\xb6\xd8\x79\xb0\x4c\xc7\x65\x47\x1e\xa4\x09\xd1\x91\xa3\x14\xa3\xe1\x40\x69\xae\x79\x4b\x81\x26\x70\x2e\xd8\x04\x0e\xb2\x47\xaa\xd5\xb7\x56\xeb\x0d\x12\xb6\x9c\x5a\xa6\xf0\x47\x76\x3a\x7d\x3d\x64\x79\x01\x82\xba\xfb\x11\xf3\xc0\x0e\xf6\x78\x8a\xcf\xd8\xc9\xaa\xf5\xe7\x06\xba\x1c\xeb\x4d\x70\xfd\x26\xb9\xfe\x8f\xf7\x6d\xee\x65\x3f\x2d\x9d\x02\xc7\xe8\x88\xe3\x14\x68\xad\xb9\xa8\x4f\xd1\x1b\xb4\x26\xf5\x74\xfd\xd6\x8d\x54\x54\xbe\xdf\x5f\xb3\x91\x0e\xb2\x1b\x51\xcf\xfa\xf7\x6d\xb2\x1a\xd8\x31\xf7\x9a\x05\xc3\x8e\x19\x3b\xe6\x2d\xde\x31\xb7\x79\x5d\x18\xb4\x6d\x6d\xe1\xc2\x31\x7d\x90\x26\xd8\xfe\x68\xdc\xde\xbb\xde\xe5\xde\x01\x9b\x67\xdc\x12\x3b\xde\xef\x84\xe9\x98\x29\x8e\x2a\xe5\x75\x35\x97\x17\x16\xbf\x22\xcd\xda\xb8\x19\x07\xe5\xb1\x5f\xf9\x8f\x61\xf6\x8b\xfd\xc4\xaa\x4e\x35\x97\xd3\x75\xb6\x2e\xc9\x85\x39\xeb\xb3\xf8\xa8\xd0\x42\x55\x36\x20\xb6\xca\xed\xdc\xc1\xdc\xa4\xd7\xca\x99\xfa\x14\x3d\x29\x66\xea\x05\x7a\x9c\xce\x79\xce\x54\xde\xc6\x98\x92\xcb\xe9\x6a\x4e\x29\x6b\xba\xf3\x7d\xaa\xee\x2f\xb9\xf1\xdd\xfc\xaa\x45\x48\xa1\x5e\x17\x3c\x95\x4f\xb0\xe3\x16\x06\xf2\x1c\xb9\x8a\x09\x2d\xa7\xba\xeb\xfe\xc2\x9f\xda\xe5\x39\x6e\xb1\xa0\x6d\x90\x6b\xe8\x86\x2d\x25\x54\x67\x47\x2f\xa5\xd0\x65\xba\x54\xb5\xb6\x68\xed\xf0\x61\xb9\x81\x0d\x53\x93\x1b\xa6\x2f\xf7\xb5\xdd\xb6\x64\xe4\x9e\xea\x19\x7a\xda\xd9\x53\x75\x91\x01\x13\xdb\xae\xb6\x1b\xb0\xd4\x27\x77\x7b\x1a\xb0\x03\x0d\x6f\xcc\x5c\x96\x6c\x50\xba\x37\x3b\x67\xc7\xb0\x3f\xeb\x35\x83\x89\xfd\x19\xf6\x67\x5b\xbc\x3f\xdb\xfa\x55\x6f\xdb\xbf\x0a\xc1\xbe\xcd\x23\x74\x88\x1d\x88\x26\xec\x3d\xda\x2b\xdc\x3b\x3b\xa7\xa5\xf6\x67\x72\x62\xff\xec\x5e\x3a\x2a\xb7\x5e\x2b\x22\x85\xb5\xbf\xb7\xe8\xaa\xb6\x52\xbd\xf3\xfa\xdc\x2b\xd9\x9f\x84\x68\xa7\x38\x53\x28\x83\xeb\xee\xb7\xd2\xda\x4a\xec\x41\x7e\x44\x8a\x1f\xef\xe6\x8f\x69\x6d\xa5\x2d\x3b\xac\x25\x7f\xd5\xf6\x51\x39\x09\x13\x34\x2e\x26\xe1\x08\x0d\xd1\xa3\xbe\x90\x40\x3c\x20\x9f\x65\x69\x6d\xa5\xc1\xbd\xd3\xd9\xe0\x49\xf6\x28\xdb\x6b\x4e\x32\xd1\xbe\x39\x8f\xd2\xda\x4a\x65\xb0\xc8\x4f\xee\x74\xf5\xf0\xde\xa0\x9d\x11\xef\xe4\x87\xcd\x83\x3a\xd5\xcf\xa9\xe3\x74\x8c\x8e\x54\x7d\xc7\x1b\xed\x4f\x7c\xb4\xb1\xcb\x69\x72\x97\xf3\xce\xbe\xe6\x5f\x63\x3a\x21\xf7\x2f\x87\xe9\xa0\xb3\x7f\xd9\xc0\xe9\x41\x1f\x99\x96\xbd\xff\x01\x7b\x9c\xd4\x97\x5f\xe2\x32\x0f\xa3\x0d\xef\x3b\xb8\x9d\x78\x48\x7c\x8d\x3a\x61\x25\xb0\xd3\xe8\x35\xa3\x85\x9d\x06\x76\x1a\x5b\xbc\xd3\x68\xe7\xea\x2f\xd0\x0d\xd4\x32\xfb\x9f\xde\x4f\x71\x36\x1a\x8d\xd9\x7b\x85\x3b\xdc\x7b\x85\xb4\xb6\x72\x4b\x78\x80\xbe\x1e\xa2\x7b\xe4\x36\x44\x66\x1e\x74\xeb\x1d\x5f\x08\xb1\xe7\x43\xb4\x4b\xfe\x50\x57\xeb\xc8\x7f\x10\xd5\x56\x0c\xe8\x1c\xfd\x74\x81\x3f\xb8\x8f\x4e\xc8\x9e\x5e\x2d\x68\x37\xb8\xed\xd3\xb5\x42\xdc\xce\x32\x58\x5d\xd8\x88\x1f\x64\x64\xae\xa8
\xd7\x15\x3b\xf5\xc0\x67\x62\xec\xe7\xfb\xe9\x7e\xd7\xe9\x49\xeb\x6c\xbb\x9a\xd1\x03\x1e\x89\x08\x66\x0b\xda\x8d\x25\xd1\x54\x6c\x84\xff\x3c\xeb\xd1\x80\x59\x32\xc8\x39\xb4\xc5\x3b\x42\x7f\x3a\x40\x69\x39\x1e\x53\x94\x14\xe3\x21\xf6\x34\xbe\x36\xc1\xdd\x77\xe6\x33\xc7\x9d\x9b\x6e\x70\x8f\x78\x2d\xd8\x46\x9c\x65\xb3\xa6\x8d\xa8\x37\x5a\xa6\xe9\x70\x6e\xa0\xb6\x98\x4f\xf8\x5f\xed\x0a\x18\xb2\x88\x77\x06\x02\xd7\xa8\x8d\x9a\x47\x6c\xc1\xc0\xa5\x1e\xa7\x73\x34\x57\xb5\x80\x6b\x7e\x80\xb0\x7e\xc3\xa6\xb3\xc9\x4d\xe7\x2f\xf5\xb5\xd2\x52\xd0\x05\xb9\x0d\x3d\x4b\xb3\xce\x36\x74\x53\x0d\x6e\xd2\x65\xd6\x49\x9b\x14\xfb\xf4\x9e\x00\x9b\x74\x97\x9d\x6a\xc0\x65\x86\xf6\xc9\x3f\x6e\x85\x15\x42\x52\x01\x24\x15\x40\x52\x01\x24\x15\x40\x52\x01\x24\x15\x40\x52\x01\xef\xa4\x02\x8d\xad\x20\x82\x57\x07\x8d\xac\x31\x52\x1f\xdb\x1d\xb0\x82\x18\x09\xc8\x35\xe0\x5a\x57\xc4\x04\xba\xe9\xf0\xb2\x02\x64\xbc\xd7\x16\x33\x20\xe3\x20\xe3\x5b\x4c\xc6\xd5\x96\x6e\x61\x83\x3e\x19\x9d\xdc\x52\x6e\x52\x7a\xe3\x34\xdd\x01\xe9\xcd\x37\x07\xe9\x4e\x2b\x42\x5b\x2d\x66\x4b\x5a\xbe\x58\x36\xd8\xef\x0f\xb2\xe7\x5d\xf5\x98\x86\xea\xd7\x63\x9a\xb1\xce\x8b\xed\x75\x6a\x31\xd9\x7f\xdc\x6e\x75\x98\x9e\xf1\xa7\xb3\x49\x39\x61\x27\xe9\xa8\x98\xb0\x13\xb4\x9f\xe2\x81\x41\x3d\x76\x47\xb8\x6b\x30\x9d\x0b\x9e\x8e\xc3\x6c\xb0\x3a\x40\xc7\x6e\xaa\xd2\x39\x1f\x50\xd0\x09\xf5\x97\x50\x7f\x09\xf5\x97\x50\x7f\x09\xf5\x97\x50\x7f\x09\xf5\x97\x50\x7f\x09\xf5\x97\x50\x7f\x09\xf5\x97\x50\x7f\x09\xf5\x97\x50\x7f\x09\xf5\x97\x50\x7f\x09\xf5\x97\x22\x1d\xaa\xbf\xf4\xfe\x11\x3a\x6f\x26\xd5\x2c\x6b\xba\x92\x53\xab\x85\x8e\xe2\x14\xbf\xd8\xb7\x0c\xb7\xc1\xe2\xb4\x8c\x52\x52\x32\xf9\x72\x5e\x35\xd8\xef\x0d\xb3\x3f\x1f\xa0\x3b\xcc\x1f\x6c\x8f\xd3\x3f\x31\x3f\x7b\xc5\x6c\x7e\x3d\x9f\x5d\x53\x0a\xee\x0e\x51\xec\x25\xe6\xd4\xd2\xdc\x92\x3c\x73\x4a\x36\x79\x33\xee\x72\x95\x4f\xda\x8b\x90\x21\xd1\xdc\x90\xb3\xc3\xae\xd8\x32\x69\x25\xd5\x56\x40\x18\x65\x55\xc9\xc6\x63\x07\xc4\x09\x66\xdb\xa6\xe7\xca\x09\xee\xa8\xbd\xec\x7c\xde\x28\x77\x3b\x18\xbc\x46\x0b\x92\xff\xcd\xd1\x19\xc1\xff\x92\x74\x8a\x4e\x34\xe1\xa3\x12\xb3\x54\x48\x84\x03\xb1\xf5\x5e\x6f\xb4\xb7\x9b\x91\xb8\x31\x59\xb0\x5d\x0d\x86\x89\x29\x76\x5a\x22\xc4\xda\xae\xf7\x75\x7a\x56\xce\x51\x60\x44\x60\x44\x60\x44\x60\x44\x60\x44\x60\x44\x60\x44\x60\x44\x60\x44\x60\x44\x60\x44\x60\x44\x60\xc4\x4e\x63\xc4\xed\x17\x59\x0b\x50\x09\x50\x09\x50\xb9\x8d\x40\xe5\x27\x47\xe8\xa0\xa5\x0a\xac\xcb\x24\xab\xb2\x21\xb3\xef\x0d\xb3\x2f\x0c\x38\xc2\xc1\x8f\xf6\x99\xbd\xee\x82\x8f\x45\xcb\x52\x5a\xf2\x41\x33\xcf\x58\x4b\xc8\xe3\xa8\xb3\x2a\x12\x2b\x3f\xf3\x1b\xca\x17\x56\xce\x72\x6f\xa8\x62\x85\xed\x6a\x3b\x1e\xbb\xff\x46\x9d\xd2\x04\x00\x94\x6d\x2d\x51\xe3\xaf\x9a\xbd\x21\x55\xb3\x00\x90\x00\x90\x00\x90\x00\x90\x00\x90\x00\x90\x00\x90\x00\x90\x00\x90\x00\x90\x00\x90\x00\x90\x00\x90\x9d\x05\x90\xbd\x52\x12\x0a\xd8\x12\xd8\x12\xd8\x72\x1b\x61\xcb\xb7\xf6\x53\x54\xea\x2b\x33\xaa\x6e\xb6\xa2\x1a\xd5\x22\x4b\x26\xd2\x38\xd1\xdd\xee\x63\x6c\xe1\xa4\x6f\x5a\xcf\x68\x4e\x2d\x4f\xb9\xce\x30\x45\x8d\xc8\xf0\xe9\x9d\xe1\xf3\x3d\x23\xb4\xbf\x21\x82\x5c\xd2\xb2\x36\x3d\xfe\xd6\x30\x7b\xc1\x45\x8f\x3f\xdc\x08\x3d\x5e\xd0\xb2\x5d\x41\x8e\x5f\xe9\x49\x8e\x17\xb4\x2c\xa8\xf1\x46\x0a\x58\x4a\xd4\x7b\x75\x26\x18\x2b\x47\x59\xa4\x1a\x2b\x2f\x68\xd9\xca\xb4\xc5\x20\xc6\x20\xc6\x20\xc6\x20\xc6\x20\xc6\x20\xc6\x20\xc6\x20\xc6\x20\xc6\x20\xc6\x20\xc6\x20\xc6\x20\xc6\x9d\x25\xc6\x75\xcb\xc7\x2c\x68\x59\xd0\x62\xd0\x62\xd0\x62\xd0\xe2\x8e\xd2\xe2\x8f\x3d\x40\x69\x49\x8b\x95\x52\x5e\x7d\x43\x59\x2d\x8a\xd1\xaa\xc6\xc5\x99\x35\xa3\xac\x5d\xb7\x1a\x74\x5
3\xb1\xca\x3a\xb4\x5f\xb9\x9f\xbd\x79\x80\x5e\x5e\xd1\x96\x8d\x95\x0f\xd5\x2d\x4a\x3b\x25\x2e\x61\xed\x4b\xa7\xed\x4b\xc4\x0e\xf3\xd3\x92\xee\x16\x4d\xec\xec\x77\x46\x5b\x2a\xd8\xbe\xb3\x8f\x6e\x48\x44\x58\xa2\xa2\x40\x84\x57\x68\x95\xb2\x7e\x88\xd0\xb9\xd9\x31\x27\x79\xaa\xbb\x6c\xb2\xab\xaf\xad\x4c\xae\x7e\xcf\xb3\x89\x4a\x22\xa2\xba\xd1\xeb\x83\x39\xe2\x79\x36\xef\x94\x59\xae\x99\x06\x26\x59\xf4\xbd\xbf\xda\x1a\x47\xbf\x4a\x7e\x93\xe0\x68\x50\xdd\x5c\xdf\x79\x70\xcc\x3c\x73\xab\xa7\x42\xea\x8d\xf4\x06\x5a\xaf\x4a\xd4\xdd\xa1\xa9\x80\xa4\xde\x28\x97\xd4\x64\xb9\xa4\x77\x84\xb6\xce\x80\xfd\x9f\xb2\xb6\xd2\x1a\x19\x4e\x6d\xa5\xee\x31\x9f\x9d\xb7\x8f\x41\xa5\x84\xff\xe9\x4b\xfd\xcc\x67\xb2\xe1\xba\xc2\xbe\x76\xf4\x88\xc8\xbf\xbd\x95\x56\x14\xa5\x16\x7a\xcd\x2a\xa3\xd4\x02\x4a\x2d\x6c\x71\xa9\x85\x6e\x5e\xc0\x07\x16\x31\xee\xfc\x27\x2a\x3d\x43\x53\x2c\x19\x3d\x65\xc3\xa7\x47\xdc\xb8\xca\xaf\x9d\x0e\x14\x6b\xf8\xb3\x10\xbd\x52\xee\x58\x8b\x6a\xf9\x86\xa6\x73\x03\x60\x6d\x57\xd9\x67\x43\xec\xd7\x43\x44\xce\x2f\xe1\x70\x4e\x14\xcd\x5a\xd5\xf4\xeb\xf6\xc6\x5f\x91\xa2\x89\xd8\xcb\x73\x6a\xf9\xbc\x7d\x68\x72\x61\xee\x0c\xff\x73\x17\x2a\x98\xf2\x74\x5e\x4e\xdd\x33\x34\x23\xa6\xee\x29\x3a\x41\xc7\x9b\x53\x30\x89\x67\x0c\x94\x2e\xfd\xc8\x7d\x34\x27\x7b\x59\x16\xe0\x1a\x5f\x4f\xd4\x11\x2f\x65\xf3\x86\xbe\x26\x4c\xd3\xca\x5a\x36\xa7\x96\xab\xb1\xc0\x67\xc2\xec\x7d\xfd\xb4\x4b\x36\x75\x79\x3d\x11\xde\x5f\x17\x05\x2c\x68\xd9\x69\xbb\xc5\x94\x68\x31\x36\xce\xcf\x90\xa5\xbc\xaa\xb4\x45\xd5\x87\xb6\x65\xfb\x9f\xa5\x39\x39\x02\x29\x3a\x2d\x46\x60\x92\x8e\xd2\x61\xdf\xba\x17\x66\xa1\x35\xbe\x40\xa8\xbd\xc1\x40\x73\xf0\xea\xe0\xb7\xfd\x30\x3b\x68\xbe\xed\xf2\x52\x8e\xf8\xa7\xe6\x6a\x15\xf1\xa5\xf5\x51\x41\xf8\xcd\xbb\xdc\xa3\x74\x20\x68\xaf\xee\x35\x50\x13\xe6\x49\x5b\x36\x56\xa9\x79\x4a\xd3\xd9\xaa\xd5\x5d\xd3\x83\x85\xb5\x1d\x76\xdc\x4d\xee\xb8\x3f\xd9\xd7\x4a\xa3\x71\x5e\xee\xa1\xf9\x17\xc0\xde\x43\xb7\xd3\x08\x05\x94\x27\x6e\xcc\x46\xb9\x6d\x4f\xa3\xf6\x2a\xf5\xde\xdd\x6e\x23\xf4\x58\xc3\x3b\x5e\x2f\x6b\xb4\x5f\x2c\xa0\xb6\xc4\x16\x61\x97\xdb\x6b\x96\x10\xbb\x5c\xec\x72\xb7\x78\x97\xdb\xd1\x75\x6a\xe0\xb6\xb5\x7d\x0b\xd9\xf4\x69\x3a\xc9\x1e\x8b\x4e\xda\xdb\xd3\x87\xaa\xd4\x14\xd5\x67\xdf\x0a\xea\x0a\xf6\xf9\x18\x1d\x97\xbb\x32\x7d\x45\xc9\xc4\xad\x11\x11\xaf\x51\x8d\xcb\xb6\xb0\x66\x94\xb9\x6d\x2c\x38\x99\x89\xde\x11\x63\x3f\xdd\x4f\xf7\xf2\x93\x93\xee\x73\x6d\xbc\xfc\xa0\xd8\x97\x55\x81\x64\xd9\xd0\xa2\x56\x50\x63\x43\xfc\xf7\xc5\xea\xd3\x2d\x7c\xec\x1c\xd8\xe2\xdd\xd7\x65\x9a\x96\xb3\xfa\x04\x1d\x17\xb3\xfa\x10\x1d\xa0\x84\xef\xac\x16\x9d\x63\x43\x19\xe7\xae\x36\xeb\x48\xcd\x07\x4f\xe7\x59\x36\xed\x97\x4a\xdc\x77\xc8\x2c\x36\xe3\xba\xd1\xf0\x3f\xec\xac\x37\x4c\x0f\x5b\x1b\x33\xff\x91\x8a\x99\x87\x74\x7c\xb0\x52\x7c\x85\x3a\x55\xb5\xec\x68\x66\xb4\xb0\xde\xc0\xce\xab\xc9\x9d\xd7\x87\xfb\x5a\x64\x30\xe6\xe4\xa6\x8b\x7f\x4c\xed\x4d\x57\xc7\x6d\x8f\xd8\x6f\x35\x66\x7b\x9a\x30\x35\x35\xe6\x2a\xf6\x2b\x7b\xea\xd9\x9e\xbb\xcd\xaa\xfe\x4a\x85\xb9\x19\x91\x7f\xed\xbc\xb5\xb9\x48\x8b\xb4\xe0\xb6\x36\xb1\x14\x9d\x6e\x62\x8b\x33\x2d\xee\xff\x82\x58\x31\x18\x30\x3d\x41\xa6\xe7\x6b\x21\x12\x11\xc4\xe1\x2f\xda\x1d\xf4\xf1\xd0\x45\xb7\xc0\x2e\x5f\x8c\x18\x52\x24\x17\x59\x51\x57\x65\x00\xa0\xad\xb6\x75\xa2\xcc\x4c\x81\xb3\xb8\x45\x97\x69\xe1\x8b\xde\xa2\x9a\x53\x44\x47\x98\x1a\x3b\xb7\x09\x92\xba\x7b\x7b\x08\xcc\x39\x99\xbf\x7e\x5d\xcd\x72\x23\x57\xb8\xe9\x44\xe6\x39\xd1\x58\xf9\xc2\xa8\x29\xbd\x13\xe2\xc3\x48\x4e\xe7\xdf\xd0\x92\xaa\xe7\xb5\xac\x2d\x21\x74\x3e\xa7\x22\xe8\xd1\xea\x8d\x35\x83\xdf\xa4\x5b\xb6\xa8\xf0\x33\x
ad\x07\x32\x2f\xb2\x2a\x15\xe8\x56\x13\x71\x79\x9f\x72\x8f\xe0\x71\x8f\x51\x26\x6e\x61\x41\xdc\x81\x87\xa4\x30\xf5\x3b\x21\x7a\x31\xc4\x5e\x08\x85\x9f\xb7\xbb\xf9\x3d\xa1\x69\x57\xa0\x6f\xa9\xa0\x2a\x86\x6a\x87\x0b\x2c\xe8\x5a\x49\xc9\x89\x01\x90\xcc\xa3\x22\x62\xd0\x7a\x16\x27\x52\x98\x8f\x52\x22\x7e\x24\x1e\x59\x92\x03\x22\x7b\xa7\xa4\x16\xb3\x6a\xb1\xec\x44\x9e\xa8\x11\x4d\x2f\x5d\x51\x8a\x56\x04\xa3\xbe\xa6\x8e\xaf\x2a\x05\x2b\x52\x2b\x2a\x7f\x8d\x46\x56\xf3\x45\xa5\x90\x7f\xa3\xb5\x17\x5a\x51\x23\x4a\x56\x68\x53\xb5\x71\x29\x55\xcc\x3a\x61\x40\xb2\xf1\x21\xc3\x39\x49\xc6\x46\xc5\x23\x33\x79\x31\x9d\x5d\x37\xae\xe9\xb5\x4f\xe6\xe8\x81\xcb\x32\x34\x4b\xbc\x6c\x5a\xf9\x4a\x3c\xba\x47\xde\xcf\xb4\xf5\x20\x15\xa1\x90\xa9\xb7\x0e\xd0\xb3\x03\xec\xcd\x03\xe1\xef\xdb\xbb\xf4\x2f\xf4\x3f\x65\xc6\x4e\xf0\x57\xea\x8a\x76\x23\x92\x53\xf4\x15\x25\xc7\xf7\x70\x85\x82\x2a\x2c\xb1\xf3\x5e\xa8\xfa\xaa\xa6\x5f\xe7\x7d\xe1\x79\xa7\x17\xaa\x2e\xee\x7f\xa3\x62\x3e\x5b\x93\x51\x6e\x7f\xf8\x3c\xcd\xaa\x99\x7c\xd6\x09\x82\x12\xf1\x34\x72\x0f\x69\xf5\xae\x21\x7c\x5a\xe2\x57\x2b\x62\x26\xee\xea\x46\x2b\xd6\xc2\x16\xe1\xda\x51\xb0\x95\x17\x8b\x47\x92\x99\x8c\x5a\x92\xdf\x63\xb7\x71\x18\x92\xcf\x30\x14\x19\x33\x07\xbe\x72\x62\x18\xc7\x23\x43\x29\x25\x73\x8d\x7f\x6d\x8a\x59\x7e\x94\x08\x3f\x15\x07\x55\x75\x9c\x0c\x70\x32\xa7\x7e\x65\x23\xd6\x13\xac\xd8\x2d\x1d\x8f\x0c\xcd\x6a\xba\xea\x6a\x36\x92\x51\x8c\x8c\x92\xe5\x4f\x6f\xf6\x8f\x0c\x38\x16\xed\x19\xd2\x7a\xd5\x34\xb8\x6a\xb7\x11\x8f\xde\x59\xaa\x9e\x37\xee\x75\xc2\x73\x7d\x34\x2f\xd7\x09\x33\x34\x25\xd6\x09\x7c\xbd\x70\xac\x89\xcf\x87\x84\x87\xb4\xc8\x5b\x9b\x60\xe7\x68\x8e\x76\xca\xbe\x55\xb3\x9b\x6d\x33\x68\xe1\xd0\xd8\xca\xa0\x81\x0f\x7f\xe0\xe2\xe1\x6a\xc4\x7b\x8d\xb2\x8b\xed\x90\x23\x42\xa9\xf7\xed\xae\xb7\x76\x88\xd5\xb0\x5c\xff\x0d\xcc\xb0\x60\x08\x1d\x5d\x50\x80\xd8\xf6\xda\x32\x06\xc4\x16\xc4\x76\x8b\x89\x6d\xa7\xd8\x56\x20\xac\xed\xdc\x0e\x33\x7d\x94\x0e\xb3\x83\xd1\x09\x1b\xbd\xde\x53\xa1\x2d\x72\x4e\xed\x80\x9c\xe8\x87\x47\x68\xdc\xca\xd1\xe3\x23\x70\x51\x8b\xd9\x92\x96\x2f\xda\xaa\x16\xf6\xc5\x61\xf6\xd5\x90\x93\xa2\xe7\x7e\x0f\x68\x3a\x63\x9d\x13\x7b\x80\xff\x5a\x9d\x12\xc7\xfe\xb9\xc5\xa0\xf4\x55\x74\x42\x4e\xa6\xc3\x74\x50\x4c\xa6\x38\x8d\x52\xcc\x77\x32\xf1\x27\xe0\x5f\x0b\xfb\x76\x36\x4b\x48\xcf\x05\xcf\xa1\x61\x36\x28\xa7\x88\x73\x51\x0f\x2f\x71\x34\x4a\xe1\x0f\xee\x74\xfa\xf8\x21\x6f\xe2\xe9\x74\x73\xc4\x3c\xa0\x43\x3d\x9d\x3a\x4d\x27\xe9\xb1\xaa\x4f\xf5\x86\xba\x1a\x1f\x67\xe0\xcd\x26\xf1\xe6\x4f\xf6\x6d\xf6\x35\x4f\x49\xae\x79\x9c\x8e\x39\x5c\xb3\x73\xa6\x42\x00\xcd\xc6\x4c\x45\x75\x76\x2b\x6f\xa3\x11\xfb\xde\x1d\x8e\xa9\xd8\x63\xee\x36\x1d\xdb\xf0\x90\xfc\x4b\xa7\x4c\x03\x90\x24\x90\x24\x90\x24\x90\x24\x90\x24\x90\x64\x6f\x22\xc9\x40\x4e\xd8\xd2\xaf\x7f\xea\x2b\x2f\x71\xbe\xfe\xc3\x01\x88\xd1\x59\x15\x3c\x58\xf2\x4a\x56\xda\x86\x45\x01\xb0\x62\xaf\x2d\x45\x80\x15\x81\x15\xb7\x18\x2b\xb6\x9d\x04\x05\xf2\xc4\x56\x9a\xf8\xf4\x61\x3a\xc8\x26\xa2\xfb\x6d\x62\xf8\x72\x37\x31\xb4\xcf\xb9\x25\x44\x9e\x3f\x1a\x93\x44\xd2\x15\x7a\x27\x15\x9d\x25\x2d\x6b\xa8\x99\x35\x3d\x5f\xbe\x29\x7e\xc9\x3b\xc2\xce\x3f\x1d\x61\xff\xa6\x9f\x5e\x66\x47\x4d\x48\x77\xdb\x23\x1e\x60\x72\x41\xac\xea\x45\x23\x72\xf1\x11\x8b\xba\x23\xeb\xc4\x89\x35\xc7\xb4\x98\x52\xe6\x36\xb4\xc4\x71\x44\xca\x12\x7a\xd7\xdc\xdd\x66\xa1\xe5\x33\xc1\x13\xf5\x18\x3b\xe2\xa3\x52\xae\xba\x95\xda\x54\x38\xdf\xdd\x59\x33\x2e\x43\xde\x30\xb3\x76\x68\x1e\xad\x8a\xa5\xeb\xc4\xe8\xa4\x2e\xd0\xe3\x74\xae\x6a\xb5\xb0\x99\xe1\xc1\x72\x01\xa0\xb3\x49\xd0\xf9\x1f\x37\xb6\x19\x0a\xb6\x14\x4f\x48\xee\x99\
xa6\xb3\x0e\xf7\xdc\x32\xe3\x23\x30\x68\x63\xc6\xc7\x4f\xab\x11\x60\x8d\x62\xef\xdd\x53\x63\x7c\xc2\xb6\x7e\xb3\xd6\xde\x3c\x22\x7f\xeb\xa8\xb9\x01\x2d\x05\x2d\x05\x2d\x05\x2d\x05\x2d\x05\x2d\x6d\x37\x2d\x0d\xfe\x9a\x6f\x9c\x96\x6e\x7e\x85\x10\x4c\x4b\xdb\xba\x43\x49\xfd\xeb\xdd\x35\x8b\x84\xfd\x01\x14\xb5\x76\xe9\xb0\xb7\x22\xd0\xbe\xdd\x2b\x07\x20\xd5\x5e\x5b\xaf\x00\xa9\x02\xa9\x6e\x31\x52\xcd\xf9\x5a\xf2\xd6\x7e\x84\xac\xef\x42\x5b\xad\x7e\x20\xbf\x4d\x9f\xa4\xc7\xd8\x64\xf4\xa8\x8d\x4e\x1f\xa8\x8a\xb0\xaf\xbc\x42\x07\xa4\x9a\x7f\x3b\x42\xf3\xc1\xd1\xef\xf5\x2b\x2d\x56\x84\xc3\xbf\x67\x84\x7d\x7f\x80\xee\xf6\x8a\x57\x08\xff\x6c\x23\x75\x17\x17\x35\x3e\xc3\xba\xa0\xf0\xe2\xe0\x0d\x9f\x60\x09\xc7\xb1\xd9\xd2\x50\x89\x5e\x2a\xc3\x78\x29\xf8\x35\x9c\x64\x47\x1b\xd4\x45\x57\x0b\xa2\x51\x9e\x11\xe5\x19\x51\x9e\x11\xe5\x19\x51\x9e\x31\xa8\x5b\x50\x9e\x11\xe5\x19\x51\x9e\x11\xe5\x19\x51\x9e\x11\xe5\x19\x51\x9e\x11\xe5\x19\x5b\x5d\x9e\x31\x41\xe3\x6c\x2c\xba\xcf\xc6\x1d\x7b\xdc\xb8\xc3\x3b\x18\x75\xfb\x89\xcb\x50\x9f\x11\xf5\x19\x51\x9f\x71\x1b\xd5\x67\x7c\x7e\x84\x8e\x5a\xe1\xe9\x75\xc1\x66\x49\xcb\x96\xd5\xeb\x25\xb1\xef\xb1\xf8\xe6\xbf\x1c\x61\x7f\x36\xe0\x84\x46\x7c\xa2\x11\xa4\xb9\xa0\x65\x2f\x9a\xed\x74\x05\xd9\x94\x4b\x84\xea\x28\x0d\xd7\x5d\x82\x69\x36\xc1\x34\x1f\x0f\x66\x9a\x31\x36\x5c\xad\xcd\x76\xf5\x3a\x18\x26\x18\x26\x18\x26\x18\x26\x18\x26\x18\x26\x18\x26\x18\x26\x18\x26\x18\x26\x18\x26\x18\x26\x18\xe6\x56\x32\xcc\xba\x79\xf5\x5c\xbb\x37\xa0\x4c\xa0\x4c\xa0\x4c\xa0\xcc\x8e\xa2\xcc\xef\x87\xe8\x21\x29\xdf\xcc\xe6\x8d\x8c\xc6\x0d\x72\x75\xcd\x22\xf6\xc5\x10\xfb\xc3\x10\xdd\x69\x1f\x60\x07\x24\xdc\x93\x53\xcb\x95\x2b\x01\xf9\x1c\xb1\x87\x72\x6a\x79\xda\x3a\xdc\x0c\x3c\x48\x2e\xcc\x59\x9b\xcf\x16\xa6\x71\x69\x59\x35\xdf\x12\x3d\x29\x89\xe0\x79\x9a\x17\x44\x70\x96\xa6\x29\xd5\x5c\x35\x5f\xeb\x39\xe7\xf3\x46\x20\x16\x64\x7f\x19\xa3\x73\x8d\x96\x8f\x6a\x44\x3f\xfb\x93\x31\xf6\xce\xba\xe5\xa4\xee\xf1\x48\x40\x60\x55\x27\xf2\xaf\x23\xd5\x26\xcd\x6a\x53\x19\x52\x2b\xd2\xed\xb6\xa2\x86\xd4\x4a\x30\x79\x3d\xc5\x4e\x34\xa9\x26\x35\x53\x0e\xfc\xb7\xba\xc5\xa3\xc2\xde\xd9\x07\xc4\xb8\x8c\x05\x54\x8d\x6a\xe7\xd0\x6c\x38\xa5\x6a\xcd\xd8\x20\x8a\x06\x99\x06\x3a\x94\x52\xb5\xd6\x2e\x6c\x3c\xa5\x6a\x2b\x6d\x8b\xc8\x25\xd0\x98\x6d\x69\xb6\x3e\x9d\xb8\xc3\xd8\x47\xeb\x16\x87\x7a\xa9\x9d\x5c\x40\x98\x93\xd1\xfa\x55\xa1\xda\x6a\x4d\x90\x57\x00\x79\x05\x90\x57\x00\x79\x05\x90\x57\x00\x79\x05\x7a\x33\x0b\x6b\x63\xcb\x81\x7a\x3b\x89\x56\x54\x84\xfa\x57\x75\x2b\x42\xed\x0d\x48\x34\x20\x96\x11\xfb\xea\x96\x82\x6a\xd7\x2a\x02\x39\x06\x7a\x6d\xed\x82\x1c\x03\xc8\x31\xb0\xbd\xd2\xb6\x6e\x7c\x0b\x19\x98\xb6\xb5\x03\x80\xaa\x37\xb4\xf6\xec\xff\xbe\x44\xfb\x24\x6e\x35\xca\x9a\xae\xe4\xd4\x6a\xc6\x6a\xfe\x39\x53\x50\x0c\x43\x35\xd8\x3b\x2f\xb1\x9f\x19\xa4\x3b\xcc\xbf\x3a\x45\x13\xa5\x5c\x43\x37\x1d\xcd\xd6\xda\xdb\x12\xec\x2e\xc9\xc3\xa7\x78\x23\xb1\x07\xf9\xb1\xe6\x5f\xcc\xcf\xa3\xfb\xf7\x2e\xd7\xca\xa6\xa0\x23\x85\x8e\x34\x05\x1d\x29\x74\xa4\xd0\x91\x42\x47\xda\x33\x3a\xd2\x54\xd7\xe8\x48\x5b\x7e\x27\x4d\xeb\x48\x53\xd0\x91\x42\x47\x0a\x1d\x29\x74\xa4\xd0\x91\x76\x5e\x47\x9a\xea\x69\x69\x65\x0a\xd2\xca\xf6\x49\x2b\x53\xdd\x2e\xad\x4c\x6d\x43\x69\xe5\x46\x0b\xea\x58\x38\xca\x02\x87\x6e\x44\xd4\x88\x90\xf0\xea\x83\xde\x10\x71\x07\xbb\x8d\x0f\x1d\x5d\xbd\x1c\xcc\x10\x1f\x63\x93\x26\x43\xac\x84\x63\x26\x38\x74\xdf\x52\x2d\x40\x8c\x7e\x64\x57\x2d\x27\x7b\xb9\xfc\x2c\x45\x94\x4a\x24\x66\x7e\xad\x3a\x00\xc5\x24\xc3\x3a\x47\x73\x74\xa6\xca\x6b\x73\x84\x0e\x35\x35\x18\x70\xd9\x40\xd0\xd6\xa4\xa0\xed\xcd\x21\x3a\x2b\x6d\x42
71\xb4\xca\x5e\x4d\xdb\x67\xb7\xdb\x72\xb5\x67\x6b\x1d\xa0\x5c\xa0\x5c\xa0\x5c\xa0\xdc\xde\x41\xb9\x98\xb6\x05\x4c\xdb\xba\x87\x75\x63\x5b\xb3\x8e\x6c\x6b\x06\x97\x02\x5c\x0a\x70\x29\xc0\xa5\x00\x97\x42\x4f\xbb\x14\xb0\xbb\x25\x76\xb7\xc4\xee\x96\xed\xda\xdd\x12\x1e\x3b\x78\xec\x7a\xd5\x63\x97\xca\x0a\xb4\x3c\xce\xd1\x72\xe8\xe2\x63\xec\x04\x4d\xd2\xb1\x26\xe0\xe5\x62\x51\x2a\x96\x82\xd1\x72\xcc\x1f\x2d\xdf\xcb\x76\x88\xaf\xd9\xb1\xb4\x6d\xc3\xcc\x9b\x71\xf3\xbc\xd7\x86\x68\x5c\x84\xc8\xe7\xd5\x8c\xec\x6c\x59\x65\x0a\x66\x68\xa5\xbc\xf1\x8e\xd3\x39\x49\xd7\xad\x40\xf9\x77\xb2\x17\x86\xd8\x9f\x0f\xd0\x56\xe3\x8a\xab\xe5\x44\xf8\x93\x7d\x66\x27\x73\x09\x65\xe4\xad\x49\x8c\x15\x14\x7f\x49\x94\x34\x6d\x94\xd4\x12\xa9\x8c\x11\x67\xd5\xc2\x57\x66\xe6\x1c\xd7\x58\xf8\x38\xcb\xb1\x41\x0f\x68\x70\x95\x1d\x8f\xbd\x91\xdf\xee\x82\x9a\x91\x97\x12\xee\xca\x75\x79\x38\xfe\x86\xec\x64\x55\x7b\xeb\x49\x5e\xb1\x35\x6e\x73\xec\xea\x69\xe6\x27\xe5\x6e\x7f\x8f\xe4\x2e\xb4\x34\x90\x7c\x00\x2d\x0d\x78\xac\xe0\xb1\x82\xc7\xaa\x87\x3c\x56\xd0\xd2\x80\x96\x06\x3c\x05\xf0\x14\xc0\x53\x00\x4f\x41\x57\x78\x0a\x52\xc7\xe8\x08\x3b\x14\x3d\x60\xa3\x8d\xfb\x39\xd3\x30\x6b\xe1\x5e\xbe\x45\x07\x8c\x23\xed\x64\x16\x90\xe1\x00\x56\x85\x0c\xc7\xe6\x49\x84\x60\x7f\x1c\xa2\xd7\x7b\xd5\x78\xd9\x6f\x84\xd8\xaf\x87\x1c\xd1\xdd\x37\x66\xe5\xa2\x77\x34\x16\x95\x8d\xdd\x97\x95\x8b\x42\x0b\x77\x6a\x61\xce\x5a\xf6\xb5\x30\xcc\xb6\xaa\xa0\x89\xaa\x82\x26\x1a\x29\x28\x55\xa0\xc7\x05\x95\xbb\x40\xf3\x9c\xca\xcd\xd2\x0c\x25\x9b\xa0\x72\xae\xe7\x6c\x64\x77\x2b\xf6\x6b\x43\x34\x6a\xed\x24\x26\x98\x6d\x0d\x61\x13\xc3\xa6\x29\x69\x59\x67\xdf\x3d\xc4\xfe\xed\x80\xb3\xcf\x98\xde\x98\xe0\xf1\xa2\xb8\xbc\x45\x2a\xc7\x62\x6a\x23\xf6\x2d\x73\xa2\xa8\xcd\x7b\x18\xcf\x0d\x0a\x5b\x4d\x61\x1b\x11\x1f\xbe\x3e\x17\x4c\x62\xf7\xb2\x47\xad\x3d\xce\x78\x7b\x93\x8f\x0e\x72\x34\x0a\xf6\x0a\xf6\x0a\xf6\x0a\xf6\x0a\xf6\x0a\xf6\x0a\xf6\x0a\xf6\x0a\xf6\x0a\xf6\x0a\xf6\x0a\xf6\x0a\xf6\xda\x69\xf6\xba\xf9\xc2\xca\x80\x68\x81\x68\x81\x68\x37\x11\xa2\x7d\x75\x88\xf6\x7a\x29\xa2\x95\xe5\x9a\x2f\x96\xd5\x5c\x69\xd5\x09\xfe\xfc\xe4\x10\xfb\x8a\x0b\x1f\x7e\xaa\x91\xe0\xcf\x05\xbb\xb0\x25\x5e\x58\x57\x04\x80\x3e\xe8\x62\x90\x95\x15\x04\x7e\x6c\x22\x08\xf4\x52\x30\x7a\x1c\x63\xa3\x0e\x60\x14\x10\xb2\xb2\xe9\x11\xff\x09\x06\x09\x06\x09\x06\x09\x06\x09\x06\x09\x06\x09\x06\x09\x06\x09\x06\x09\x06\x09\x06\x09\x06\xb9\x91\x0c\xf2\x04\x4d\xb2\x63\xd1\x23\x36\x83\xdc\xe9\x8e\xff\xac\x5c\xc2\x21\x06\x14\x80\x11\x80\x11\x80\xd1\x05\x18\xff\x57\x88\x76\x59\x31\xa0\x8a\x26\x67\x15\xe3\x8b\xe4\x74\xc4\x4c\x39\x67\xff\x39\xc4\x7e\x2b\x44\x77\x57\x1c\x0e\x87\xb3\x5c\x8c\x64\x45\xd5\x56\xed\x06\x95\x22\x1c\x22\xc5\xc2\x3c\x3a\xd4\x73\xfe\xd4\xc2\xdc\x59\xe3\x58\x17\x86\x88\x2a\x74\x41\x30\xbb\xb3\x74\x86\x33\xbb\x53\x74\x82\x26\x9b\x0b\x11\xe5\xcf\x18\x18\x1b\xfa\xa5\x61\x3a\x2b\x1a\x3d\xad\xaa\x5a\x46\xc9\x7b\x5a\x3c\x28\x60\x94\x4b\xff\xd8\xd8\xf7\xfb\x86\xd9\xff\x1a\xa0\xbb\xdd\x05\x5d\x2d\x27\xc2\x2f\x34\x82\x7f\xe7\x8d\x92\xba\x82\xf9\x46\x4d\xe6\xeb\x3c\x84\x3b\xfe\x94\xd7\x13\xe8\xb7\x09\xf4\xfb\xe6\x60\xf4\x7b\x88\x1d\x30\xd1\xaf\x4f\x67\x34\x59\x30\x7f\x01\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x00\xc0\x1b\x09\x80\xf7\xd3\x38\x8b\x47\x47\x6c\x00\xbc\xc3\x0d\x80\xf9\xba\xad\x9a\xfa\x22\x70\x15\x5c\x19\x5c\x19\x5c\xb9\x8d\x5c\xf9\xe5\xa7\x68\x52\x20\x4e\x21\xe0\x3c\x56\x4e\xd4\xe2\x99\x05\x35\x93\x51\x74\xad\xc4\x49\xc1\x72\x29\x93\x95\x8b\x3a\xfb\x8e\xa7\xd8\x0b\x7b\x69\xbb\xb8\xf8\x6a\x39\x11\x4e\x88\x19\xac\x66\x8e\xbd\x96\
x71\xb6\x23\x59\xd5\xcc\x8c\x5d\x48\x92\x17\x12\x1b\x36\x2e\x11\xea\xcf\x6e\x9a\xe8\x73\x6a\x97\xb3\xc5\x24\x60\x1b\x60\x1b\xf6\x87\x03\x6c\x03\x6c\x03\x6c\xeb\x1d\xd8\xd6\x45\xdb\x9f\x75\x0d\x6c\xc3\xbe\x5c\x80\x6d\x80\x6d\x80\x6d\x80\x6d\x1b\xb1\x2f\x57\x4f\x53\x28\x6c\x1c\xd4\xcb\x1b\x07\x25\x37\x21\x85\x4a\x5d\xa3\xf3\x22\x26\x6b\x96\x66\x78\x4c\xd6\x49\x3a\x4e\x13\x35\x77\xd0\x37\xb7\x47\x2b\x27\xe2\x3e\x8c\xa8\x11\xd5\xc7\x3a\x1b\xe9\x0b\x15\xc0\xb7\x04\xc7\x63\x1d\x66\x07\x7d\x74\xff\x44\xd5\xac\xd4\xdc\xea\xda\x51\xf4\xff\xde\xee\xc6\x65\x3b\xc5\x58\x14\x91\x7c\xc9\xd8\x3e\x71\xb4\xb3\x6c\x4c\xa0\xac\x79\x4a\xd1\x39\xf7\xde\xf9\x91\xbe\xd8\x51\x3a\xdc\xdc\x3b\xc1\x0e\xd2\x41\x3b\x48\x7f\x30\x44\xdf\x1b\x62\x1f\x08\x85\xbf\xcb\x6e\xa0\xbf\xea\xe3\x73\xad\xf3\x52\x5e\xca\xca\x9a\x58\x35\x08\xcf\x8f\xae\xab\x69\x85\x0f\x84\xf6\xfc\x5c\xe2\x0b\x1c\x55\x8b\x18\xf3\xa4\xe2\x2d\x7b\x66\xb0\x2a\xdd\x30\xea\x5f\xbc\x26\xeb\xb2\xf5\xed\xbb\xf7\x6b\xb6\xf6\x75\xe6\x56\x87\xcf\xc5\x55\x2d\x92\xd8\x7f\xd4\x38\x57\x93\xd2\x1c\xf1\x18\xcb\x58\xf1\xa5\xf3\xf9\xb2\x31\xb5\x91\x94\xbc\x70\x03\xf1\xf9\xa8\x73\x2e\x5f\xf6\x9a\x00\xca\x58\x45\x59\xd6\x3b\xab\xe6\xa4\x7c\x36\xae\x6a\xd9\xb1\xc2\x8d\xec\x58\x29\xaf\xa4\xd5\x8c\x3c\xb6\x7b\x4e\x5f\x30\x4a\x89\x47\xef\x72\x3f\xab\x9b\xfa\x3f\x17\xa2\x39\x61\x1b\x92\x74\x9a\xdb\x86\x09\x6a\xba\x1f\x8a\x08\xe2\x04\x8f\x20\xde\x3a\x2d\x66\x81\xeb\x2a\xef\xa2\x51\xde\x7e\x76\x8e\x66\x69\x9b\xd8\xac\x71\x9d\x05\x36\x6f\xb8\x0a\x6a\xe3\x86\xab\x51\x63\xe5\x36\x70\xb1\xbf\x1e\x71\x1b\xae\x21\x73\xd3\x48\xd7\x0e\x9c\xea\x8a\xaf\x11\x3b\x24\xce\xb4\x8c\xd8\xb4\x7d\x41\x27\xcd\xd9\x65\xba\x44\x0b\x6e\x73\x16\x4b\xd2\xe9\x26\x82\x7e\x67\xf8\xb3\x5c\xe4\x15\xd5\x81\x7a\x81\x7a\x81\x7a\x81\x7a\x7b\x08\xf5\x62\x22\x17\x30\x91\xeb\x1e\x16\xfe\xe5\x10\xbd\x16\x62\xaf\x86\xc2\x5f\xb4\x5f\xd5\xc7\x43\x97\xdd\xab\x4f\x25\x1f\xd1\xc5\x0a\x32\xb2\x2c\xaf\x08\xc7\xb1\x4d\x69\x9c\x21\xcb\xfc\x4a\x78\x63\xb9\xa6\x8c\x79\x35\x3f\x9a\x97\xb3\x12\x7f\x25\xe6\x02\xd4\x3d\xb5\x14\xbc\xd6\xee\x0c\xe6\x7c\x41\x59\x5d\x95\x33\xc6\xe4\x35\x77\xcb\xf1\xe8\x3a\xa6\x5d\xc9\x8d\x98\xeb\x52\xb1\x13\x76\x56\x93\xd2\xbc\x9f\x28\x6a\xc6\x1e\x78\x9c\xc1\x81\x3b\xcb\xad\xf7\x52\xd2\x8d\x4a\xba\x1b\x4a\x32\xae\xb4\x1e\xc8\xbc\xc9\x8a\xf8\xde\xac\x22\xe2\xa2\x9e\xab\xb2\x94\xf7\xad\x63\x94\xf1\x2a\x2c\xf0\x1a\xf8\xad\xb7\xe1\x72\x80\xcb\x01\x2e\x07\xb8\x1c\xe0\x72\xe8\x69\x97\xc3\xe7\x43\xf4\x4a\x88\xbd\x1c\x0a\xbf\x64\x8f\xb6\xcf\x87\x66\x5c\x19\xbb\x05\x9e\x0f\x6c\x7f\xfa\x0b\x9a\x5a\x90\xb2\x7c\x1c\x16\x2b\x63\x4f\xc0\x91\xf5\xba\x9d\x94\x5f\xe3\x85\x27\xe2\x47\xe2\x91\x45\x61\x47\xc4\x20\x59\x90\xf3\x46\x37\x75\x46\x11\x39\xa2\x6a\x85\x6b\x52\xde\x0a\x80\xd2\x4a\xf2\xd8\x8a\x94\xb3\x66\xff\x51\x71\x34\x1a\x59\x51\xf2\x52\x4e\x79\x87\x65\xbe\x97\xe5\x88\x94\xe1\xfc\x5e\x1d\x13\x38\x37\xe3\x4c\x2d\x45\xe1\x83\xba\x73\x91\x98\x6f\xc7\x23\x67\x14\x6e\x92\x5c\x15\x57\xb5\xea\x27\x73\x7c\x26\x45\x31\xdd\xe7\xb3\x3f\xb5\x78\x2d\x1e\xbd\x47\xd4\x67\xc6\x7a\x10\x6f\x24\xd5\x7b\x07\xe8\xb9\x01\xf6\xee\x81\xf0\x37\xed\x70\xba\x2f\xf4\x5f\x31\xed\xa0\xd1\x45\xaf\xa9\x37\x23\x59\x49\x5b\x96\xb2\x1e\x10\x61\x4f\xd4\x64\x6d\x45\xd5\x56\x8d\xb6\xf0\xad\xe9\xc5\x8a\x9b\xd7\xae\x28\x9f\xd6\x58\x73\x12\x01\x41\x8c\x1e\x91\x91\xd3\x4a\xc6\x99\x58\xf3\xb1\x91\x2f\x73\xec\xd6\xd5\x79\xf6\x3d\x3f\x6a\x8d\x7e\x71\x57\x33\x5a\x76\xd3\x76\x54\xd8\x41\x74\xde\x9b\xc5\x23\x82\x2e\x71\x5b\xec\x9e\xad\x0e\x8a\x67\x18\x34\xd6\x3c\xfc\x2f\x6f\xc7\xd0\x27\x23\x83\x49\x29\x7d\x23\xab\xa9\xa5\x7c\xc6\x38\x8b\x47\xaf\xf1\x93\x2a\x1a
\x4e\x4c\x56\xcc\x19\x90\xb7\x10\xeb\x09\x96\xed\x92\x26\x23\x83\xb3\xaa\x26\xbb\x8a\x8d\xa4\x25\x3d\x2d\x65\x8c\xa7\x37\xdb\x47\xc4\x2b\xf2\xf2\x74\x31\x9d\xae\x2a\x70\xc5\x2e\x23\x1e\xdd\x51\xa8\xec\x37\xee\xb9\x0d\x3c\x7a\xf0\xe8\xf5\xa8\x47\x2f\x95\xa5\x79\x41\xc0\xcf\xd0\x34\x27\xe0\x27\x68\x92\x8e\x35\x01\x2f\x17\x8b\x52\xb1\xa4\x07\x32\xe6\xf6\x31\xe4\xeb\x31\x7f\x7c\x7d\x2f\xdb\x21\x0c\x85\x63\xc4\x69\x13\x66\xbf\xb0\x8f\x0d\xd3\x81\x06\x37\x93\x4b\x6b\x72\xd1\x16\x07\xf9\xea\x10\xfb\x3d\x97\x26\xf4\xcf\x34\x22\x0a\xb2\xc8\x8b\xe8\x0a\x55\x90\x07\x6b\xec\x46\x67\x54\xb0\xcb\x43\xf6\x53\x37\x6a\x7e\x0e\xad\xd7\x09\xb1\x3e\xb0\x73\xc1\x1f\xd8\x1e\xb6\xbb\x52\xe8\x59\xb4\xa7\xf7\x7b\x0a\x12\x16\x81\xfc\x07\x32\x12\x20\xff\x01\x37\x15\xdc\x54\x70\x53\xf5\x90\x9b\x0a\xf2\x1f\x90\xff\x80\x7b\x00\xee\x01\xb8\x07\xe0\x1e\xe8\x0a\xf7\x40\xea\x00\x25\xd8\x58\x74\xd4\x86\x19\xcc\x2d\xff\x21\x56\x76\xd0\xff\xd8\xec\x9c\x16\xfa\x1f\xd0\xff\xd8\x5c\x99\x17\xec\xb5\xa7\xe9\xb8\xad\x2b\x2d\x3f\x53\x94\xf3\xfc\x6d\x39\x1a\xc7\xcb\x72\x51\x4a\x8c\xa5\x4b\x7a\x51\x5d\xb5\x0a\x74\x21\x30\xf6\xfc\xd3\xec\x3b\x06\xe9\x3e\xcf\xd5\x57\xcd\xcb\xc2\x87\xeb\x8b\x81\x4c\xf3\x52\xad\xd5\xe8\x8c\x5d\x6a\x6c\xdc\xb8\x6e\xca\x5d\xe4\x92\x28\xb1\xd6\x15\x5d\x4e\x19\x21\x0c\x02\x0c\x07\x61\x10\x60\x38\x60\x38\x60\xb8\x1e\xc2\x70\x5d\x14\x0c\xdd\x35\x18\x0e\x51\xba\xc0\x70\xc0\x70\xc0\x70\xc0\x70\x10\x06\x41\x18\xe1\xed\x83\xa7\xba\x3e\x8c\x70\x53\x0a\x83\x7c\xb0\x8f\xfe\x99\x88\xc2\x2a\x92\xc6\xa3\xb0\x72\x74\x9d\xae\xd5\x8a\xc2\x72\x78\xd1\xa8\xbd\x5e\x77\x02\xb2\xbc\x84\xcb\x64\x54\xf1\x5a\x48\xa9\x25\x3a\x22\x6f\x0f\x8e\xf4\xba\xc0\xe6\xcd\x48\x2f\x3f\x02\x67\xc6\x7e\xd5\xaa\xa4\x3b\x1a\x8c\x3f\x0e\x45\x7f\xf4\xce\x5a\x34\xee\x11\x5b\x6b\xa4\x26\x78\xdb\x2f\x4e\xd9\x10\xf4\x26\x48\xd9\x3b\xe8\x19\x2a\x57\xe8\x8e\xac\x50\xa6\x13\x6f\x1c\xc9\xad\x50\x29\x69\x52\xa5\xe4\x5f\xf7\xd3\x3f\x17\xd2\x22\x25\xd2\x1d\x69\x91\x6b\xd4\xa1\xae\x4b\xef\x14\x42\x24\x65\x2a\xba\x84\x48\x3a\x77\xfb\x9a\x31\xb4\x37\x85\xf5\x2e\x50\x9e\x5b\xef\x8e\xd5\xc8\x32\xd0\x9d\x37\xc0\x41\x12\x2d\xb1\x1f\x19\xad\x65\xa0\x47\x7d\x35\x55\x6a\x1a\xeb\xe3\xe2\x74\x5f\x63\x6d\x97\xd0\x21\xb3\x0d\x7d\x15\x10\x73\x10\x73\x10\x73\x10\x73\xe8\xab\x40\x5f\x05\xfa\x2a\xd0\x57\x81\xe7\x06\x9e\x1b\x78\x6e\xe0\xb9\x81\xe7\xa6\x25\x9e\x1b\xe8\xab\x40\x5f\x05\xfa\x2a\xd0\x57\x81\x63\x14\x8e\xd1\x4d\xae\xaf\xd2\x18\x93\xae\x02\xcb\xeb\x80\xd4\x6b\xd2\x5d\x69\xb9\x2a\xca\x4f\xef\xa4\x79\x91\x63\x90\x96\x35\xd3\x15\x2c\x57\xa7\x18\xb8\x8e\x29\xd9\xbc\x92\xcf\x5a\xd3\x2c\x53\x25\x65\x4c\x2a\x14\x34\xb5\x2c\xe5\xd8\x1f\x3f\xc8\xbe\xd2\x4f\x6f\x70\x5f\x61\x33\xf4\xa3\x9a\x2c\x65\x22\xd6\xa9\x56\x77\x71\x66\xd2\xd3\xce\x45\x8b\xe2\x36\x97\xc4\x6d\x62\xfc\x4a\xd7\x61\x9b\xa5\xd7\xba\x62\xca\xbc\x49\x6b\x51\x7a\xaa\x44\x6f\x15\xdd\x71\x91\xde\xc4\xbb\xe3\x63\x34\x47\x67\x6b\xea\xc9\x7b\xda\xd4\x76\x8b\xd4\xaa\x73\x8b\x3a\xa7\xd9\x17\x7d\xde\xa7\xd5\x15\x6b\x56\xa0\xda\x61\xb2\xcb\xbf\x73\xde\xc1\x06\xb2\x72\x91\xc2\x7f\xb9\xbd\xc6\xab\x9e\xd4\xe4\x42\x4e\x4a\xcb\x4d\xbd\x6d\xeb\xe2\x8d\x7f\xe1\xc9\x27\xe9\x6d\xf4\x96\x0a\x97\x77\xeb\xde\x38\x90\x22\xbc\xda\x4d\x7a\xb5\xff\xa0\xaf\xad\xa6\xe8\xaa\xf0\x98\xbf\x99\x96\x1c\x8f\x79\x47\x6d\x5d\x6d\xc3\x53\x28\x35\x1c\xbc\x53\x6b\x9c\x6e\xc6\x36\x26\xff\xeb\x5d\x35\x6c\xdd\x4c\x41\xd2\x8a\x0a\xf7\xb4\x89\x35\x7a\x53\x46\xef\x58\x81\x0b\x75\x6d\xa8\xc9\x7b\xb0\xb2\xa0\x51\x5e\xa9\x7d\xc6\x9f\x13\x3b\xdd\x07\x57\x65\x2d\x2b\xbb\x8f\xee\x75\x1f\x35\x96\x20\x45\x39\xab\xa4\x47\xab\xce\xf3\x94\x62\xfc\x7d\xcb\x3c\x6a\xd4\x32\x7
9\x81\xe6\x29\x55\x61\x6e\x27\xe8\x68\x13\xf3\xbd\x05\x9e\xeb\x05\xfb\x1a\x60\x5f\x3f\xd6\x4f\x2f\xf4\xb3\x8f\xf6\x87\x3f\x6c\x63\x86\x67\xfb\x7b\xc7\xbe\x56\x40\x56\xa3\x9d\xb9\xf3\x9a\x3b\x02\x8c\xce\xe9\x24\x7d\x0e\xd5\xe8\xb8\xc3\x9c\x96\x88\x58\x07\x29\xc7\xaf\xcc\xab\xf9\x51\x71\x35\x3f\x83\x7b\x6a\xf4\xc8\x50\x4a\x57\xf3\x0b\x22\xfb\xf1\xbc\xf1\x59\x98\x7f\x2f\x5a\x1f\x8b\xf3\xe3\x70\x6d\xc3\x9f\x7c\xa5\x8f\x5e\xee\x63\x2f\xf5\x85\x3f\x63\x2f\xd9\x3e\xd2\x37\xab\x1a\x0b\x7c\x45\x8f\x64\x55\xde\xec\x6a\x24\xba\x62\xfc\x14\x8d\x4c\x79\x9e\x82\xaf\x75\x85\xbf\xa7\xa4\x3b\x5e\xeb\x51\x29\xcd\x1f\x9c\xc7\x02\xe4\x94\xb4\x09\x72\xe4\x5c\x46\x8f\xa8\x37\xcd\x36\x15\xbe\xf2\x82\xac\x16\x72\x72\x3c\x22\xee\xc8\xd3\x55\xad\x77\xc9\xd7\x9c\xbe\x0d\x60\xdf\x3e\xba\x85\x57\xcb\x13\x47\xda\x73\x73\xea\xda\xc2\x80\x05\x21\x0c\x38\x4b\x33\x2c\x19\x3d\x6d\x6b\x4b\xec\x71\x0b\x52\xd4\xbc\x53\xb5\x46\x45\xcb\x57\x8b\x7f\x31\x48\xbb\xc5\x6a\x51\x24\x3f\x3b\xeb\x44\x53\x52\x53\xfc\xcc\x7e\x69\x90\x7d\x62\x80\xb6\x8b\x7f\x5d\x2d\x27\xc2\x6f\x37\xbd\x15\xf9\x8c\x52\x56\x32\x25\x29\xe7\xd1\xcf\xb4\x3d\x83\x5c\x89\xb1\x25\xb2\x99\xf1\xd8\xde\x9b\xb6\xb6\xa3\xbe\x94\xe0\xff\x9f\x57\xf4\xe2\xac\xaa\x4d\xe5\x72\xb6\x12\xa6\xde\xe5\xd9\xe9\x1b\xa2\x81\x79\x29\xf8\x9b\x18\x63\xa3\xe6\x37\xe1\xe9\x09\xe6\xd7\xc0\x4b\xf3\xaa\x61\xee\xf6\xef\xf4\x77\x31\xe2\x0f\xcb\x43\xb2\xa1\x88\x89\x54\x7c\x28\x62\x22\xb0\x10\x81\x85\x08\x2c\xec\xa1\xc0\x42\x28\x62\x42\x11\x13\x01\x5d\x08\xe8\x42\x40\x17\x02\xba\xba\x22\xa0\x0b\xd2\x93\x90\x9e\xdc\x2c\x21\x2c\x90\x9e\x6c\x87\xf4\xe4\x27\x9e\xa4\x61\x01\xfa\xf4\xa2\xaa\x49\x59\xd9\x45\xfa\xca\x6a\xae\xb4\x2a\x4b\xc5\xa2\x94\xbe\xb6\xca\x71\xdf\xb7\x3d\xc9\x7e\x6c\x2f\x91\x79\xea\xd5\x72\x22\x1c\xaf\x2f\x2e\xb9\xc4\x8b\x98\xb2\x8b\x88\xed\x32\xce\x5f\x14\xd7\x2f\x25\x2a\x0f\x77\x39\xa3\x83\x82\x24\xb0\x15\x14\x24\x81\xad\x80\xad\x80\xad\x7a\x08\x5b\x75\x51\xba\x67\xd7\x60\x2b\xe4\x21\x02\x5b\x01\x5b\x01\x5b\x01\x5b\x41\x41\x12\x89\x52\xb7\x0f\x65\xea\xfa\x44\xa9\x4d\xa9\x20\xb9\x42\x8f\x89\x10\xa6\x19\x4a\xf2\x10\xa6\xe3\x54\x27\xd8\x37\x6e\xa1\xa8\x72\x22\x5e\x49\x88\x5a\x22\x08\xf9\x54\x70\xd8\xd3\x24\x3b\x66\x86\x3d\x79\xb9\x98\x19\xf7\x54\x59\x2d\x4f\x08\x54\xf4\x27\xb7\x7b\x18\xd9\x03\xb6\xe4\x63\x15\x0e\x7b\x58\x1c\x6a\x3f\x10\x13\xfc\x2a\x45\xe7\x68\xb6\x22\xea\xfa\x30\x1d\x6c\xe6\x45\x20\xe2\x1a\x19\x2d\x4d\x66\xb4\x7c\xb3\x8f\xce\x0a\x73\x70\x9a\x4e\x72\x73\x70\x94\x9a\xec\x85\x22\x69\x34\xc1\x93\x46\xed\xf4\x95\xe6\x4b\x3b\x2f\x04\x1c\x67\x69\xc6\x25\xe0\xd8\x7c\x71\xcd\xdb\x29\x2e\x52\xd8\x6e\x3b\x15\xfb\xf2\x88\xc7\x4e\xed\xf1\x55\x3e\xac\xb2\x59\xc3\xe2\x34\xdb\x66\x39\x32\x87\x6d\xb6\x5e\x90\x37\x04\xce\x05\xce\x05\xce\x05\xce\x85\xbc\x21\xe4\x0d\x21\x6f\x08\x79\x43\xb8\x15\xe0\x56\x80\x5b\x01\x6e\x05\xb8\x15\x5a\xe2\x56\x80\xbc\x21\xe4\x0d\x21\x6f\x08\x79\x43\x78\xed\xe0\xb5\xdb\xe4\xf2\x86\x8d\xa1\x63\x37\x0d\x5e\x13\x46\xde\x58\x2d\xc3\xff\xe7\x69\x9a\x16\x41\xeb\x52\x66\x55\xe1\xd1\xc3\x9a\x9c\x55\xb8\x18\x91\xa2\xe6\x5d\x21\xec\xab\xa5\xa2\x64\x58\xf3\x9b\xf2\xf2\x35\x55\xbd\xe1\x41\x84\x3a\xfb\x9f\x57\xd9\xbf\x1a\xa4\xfb\x7d\x0b\xb9\x5a\x4e\x84\x27\xea\x07\xb7\x9f\x37\x0b\xbf\x22\x0a\x9f\x76\x17\x1e\x3b\x62\x5c\x3b\xe5\x57\xf2\x52\xa2\xde\x85\x08\x81\x47\x08\x3c\x42\xe0\xe1\x33\x81\xcf\x04\x3e\x13\xf8\x4c\xba\xc6\x67\xd2\x3d\x2e\x01\xb0\x6a\xb0\x6a\xb0\x6a\xb0\x6a\xb0\xea\x9e\x66\xd5\x80\x69\x80\x69\x3d\x0a\xd3\x36\x65\x08\xfc\xbb\xfa\x48\x16\x04\xf0\x29\x7a\x82\x13\xc0\x25\xba\x4c\x97\x6a\xc6\x95\xfa\x93\xad\x72\x22\x5e\x0f\x1e\xb5\x24\x3a\xfe\x99\x60\x74\xf8\x
38\x5b\xb4\x36\x42\xa9\x03\xe0\x4c\x78\x58\xaf\xc2\xde\xb8\xf9\x2f\x52\x1d\x18\xb7\xc7\x8e\xa2\xaf\xcb\xdd\x8e\x89\xd3\x36\x92\xbc\x09\x50\x96\xa1\x65\x7a\xba\x22\xd6\x7e\x81\x2e\xb4\xf6\x85\x23\x96\x0b\x51\xf8\x4d\x46\xe1\xff\x56\x88\x24\x61\x90\xde\x4a\x6f\xe6\x06\xe9\x12\xb5\xbc\x7f\xd2\x8a\x88\xcf\xbf\x4a\x4f\x3a\xf1\xf9\xed\xb8\x4f\x56\x44\xee\x3f\x4d\x4f\xb9\x22\xf7\xdb\x71\xa3\xf5\xc6\xf4\x6f\x98\x75\x8d\x7d\x62\xb4\x8e\x75\x4d\xf8\xc6\xfe\xd7\xb5\xb4\xd3\xe2\x92\x1a\x96\xd6\xc9\x0a\xe8\xa0\xcd\x45\x86\x00\x68\x37\x68\x37\x68\x37\x68\x37\x32\x04\x90\x21\x80\x0c\x01\x64\x08\xc0\xeb\x02\xaf\x0b\xbc\x2e\xf0\xba\xc0\xeb\x82\x0c\x01\x64\x08\x20\x43\x00\x19\x02\x70\x6a\xc2\xa9\xd9\x85\x4e\xcd\x8e\x67\x08\x6c\x18\x88\xde\xd8\xdc\x81\x9f\x1d\xa4\x21\x91\x3b\x90\x56\x55\x2d\xa3\xe4\x2b\x52\x06\xf8\xd6\x9d\x63\x7c\x14\xd7\xd9\xb7\x0e\xb2\xbf\xef\xa7\x37\xb8\xcf\xb4\x37\x83\xde\x5d\x3f\x39\x60\xde\x28\x21\x36\x62\x9c\x34\xed\xba\xdc\xdc\xea\x99\x1f\xdd\x6c\x3b\x54\xca\x94\x12\x3d\x74\x9a\xa6\x78\x0f\x9d\xa4\x63\x74\xa4\xf6\xfe\xad\xee\xf6\xb5\xf6\x6f\xe5\x0f\xde\x12\x37\xf5\x93\xc1\xfd\x77\x82\x1d\xb5\xf6\x73\xad\x7e\xd7\x66\xb7\xe5\x15\xaa\xde\xbb\x15\x3b\x54\x22\xcf\x01\x3b\x54\xc2\xf3\x03\xcf\x0f\x3c\x3f\x3d\xe4\xf9\xc1\x0e\x95\xd8\xa1\x12\xc4\x1d\xc4\x1d\xc4\x1d\xc4\xbd\x2b\x88\x3b\x76\xa8\xc4\x0e\x95\x9b\x85\x31\x62\x87\xca\x76\xec\x50\xf9\x4f\x43\x74\xc4\xdc\xa1\x32\x7d\x4d\xce\x94\x72\xc6\x6c\xcd\x51\xf8\xe0\x17\x8c\x15\x34\x45\xd5\x94\xe2\xad\x74\x4e\xd2\x75\x59\x1f\xfb\x67\x79\x69\x55\x7e\x27\xfb\xd5\x21\xf6\xf5\x01\x7a\x9d\x73\xe1\xd5\x72\x22\xfc\x62\x9f\xd9\xfc\xce\xd3\x4a\x79\xcb\xa0\x59\xf8\x6e\xc1\x2c\x70\xda\x28\x30\xee\xf2\xd0\x4d\xd8\x93\x8c\x41\x5e\xcc\xa0\xb3\x82\xf6\x2c\x89\xd4\x82\x6c\xc7\xdf\xe8\x45\x59\xca\x8c\x38\x53\x18\x3e\x4d\x33\x07\x3c\x63\x16\xe4\xcc\xcd\x06\x3d\xd3\x61\x57\xd9\xf1\xd8\x43\xfc\x76\x8b\xf6\xa3\x2c\x25\x3c\x75\xec\x76\x78\x78\x83\x16\x04\x3c\x9c\xa3\xb3\x1c\x1e\x4e\xd1\x29\x3a\xd1\x04\xde\xe6\xdd\xf5\x4c\xb9\x11\x7d\xf5\x87\xfd\x11\xe2\x36\x76\x07\xaf\xd8\x5a\x05\xd6\x2b\xbb\x9f\x49\x10\x3d\xaf\xc1\x43\xba\x01\x11\x01\x11\x01\x11\x01\x11\x01\x11\x01\x11\x01\x11\x01\x11\x01\x11\x01\x11\x01\x11\x01\x11\x01\x11\x3b\x0c\x11\x27\xe8\x28\x3b\x1c\x3d\xb8\xad\x8f\x0d\x14\xa4\xe2\xb5\xf0\x03\x3c\xdd\xdb\xac\x85\x67\xfd\x16\x1d\x30\x0e\xc5\x4c\xc3\xfd\x03\x00\x90\x00\x90\x00\x90\xbd\x0d\x20\x7f\x77\x98\xc6\x4d\x00\xe9\xd1\x46\x1e\x2b\x27\xc6\xca\x5c\x18\x59\xb2\x85\x91\x6d\xf2\xf8\xa3\xc3\xec\xbd\xfd\x9e\xdd\xf5\x76\x6b\xb2\x94\xa9\x58\xb6\x56\xed\xad\xb7\xcb\x38\xa9\x43\xbb\x81\xa6\x96\x5b\xb7\x0d\x63\x6b\xe4\xa8\x9b\xde\xc9\xf0\xfa\x2e\x7f\xd2\x77\x07\x1b\xc8\xca\x45\x0a\xff\xd9\x36\xcf\xab\xd8\xab\xc9\x85\x9c\x94\x96\x83\xde\x46\xc4\x3c\x0f\xdb\xb3\xde\x86\x29\xbc\x10\x86\xa9\x21\x0c\xf3\xef\xbb\x76\x7b\xd6\xee\xb5\x32\x85\x52\x91\x62\x1f\xb8\xc7\xbb\xed\xb3\x99\x65\xe3\xb7\xed\x73\xc5\x16\xaa\x9b\x72\xe3\x54\x58\x98\x00\x0b\x83\xd4\xfc\x8e\xa4\xe6\x23\x27\x13\x39\x99\xc8\xc9\x6c\x57\x4e\xa6\xb1\x64\x6b\xd9\x74\xa0\xc3\xfb\xab\x47\xfc\xc7\xeb\xed\x6c\xab\x68\x9c\xb6\xef\xb0\x9e\xfc\xf6\xbb\x3c\x53\x82\xb1\x82\xa4\x15\x15\xee\x6a\x14\x6b\xd4\xa0\x15\xc8\x43\x05\x1e\xf4\xd1\xfe\x79\xc2\x83\x95\x05\x8d\xf2\x3b\xef\x33\xfe\x9c\xd8\xe9\x3e\xb8\x2a\x6b\x59\xd9\x7d\x74\xaf\xfb\x28\xcf\x0d\x94\xb3\x4a\x7a\xb4\xea\x3c\x4f\x29\xc6\xdf\xb7\xcc\xa3\x46\x2d\x93\x17\x68\x9e\x52\x15\x6b\x9f\x09\x3a\xda\xc4\x2c\x65\x81\xbb\xe2\x31\x3b\x09\x98\x9d\x7c\xac\x9f\x5e\xe8\x67\x1f\xed\x0f\x7f\xd8\x36\xf1\xcf\xf6\xf7\xce\xfa\xa7\xc2\x7b\x61\xb4\x33\xf7\xc6\x73\x0f\x9b\xd1\x39\x9d\
x98\x9c\xa1\x1a\x1d\x77\x98\x8f\x54\x22\x78\x43\xca\xf1\x2b\x8d\x29\x99\xb8\x9a\x9f\xc1\xa7\x47\x7a\x64\x28\xa5\xab\xf9\x05\x11\x9c\x72\xde\xf8\x2c\xcc\xbf\x17\xad\x8f\xc5\xf9\x71\xb8\xf6\xc2\x2c\xf9\x4a\x1f\xbd\xdc\xc7\x5e\xea\x0b\x7f\xc6\x86\x67\x1f\xe9\x9b\x55\xb5\x34\x9f\xb9\x65\x55\xde\xec\x6a\x24\xba\x62\xfc\x14\x8d\x4c\x79\x9e\x82\x63\x51\x31\xc9\x2a\xe9\x8e\x1b\x7e\x54\x4a\xf3\x07\xe7\xc1\x0d\x39\x25\x6d\x0e\xa2\x72\x2e\xa3\x47\xd4\x9b\x66\x9b\x0a\xe7\x7f\x41\x56\x0b\x39\x39\x1e\x11\x77\xe4\xd1\x44\xd6\xbb\xe4\x78\xd2\xb7\x01\xec\xdb\x47\xb7\xf0\x6a\x79\x22\x63\x3a\x08\xa0\x6a\x87\x82\x15\x9a\x09\x05\x5b\xf3\x48\x90\x3a\x41\x93\xec\x58\xf4\x88\xed\x82\xd8\xe9\x76\x41\x54\x5e\xda\x7e\x2f\x04\xfb\xa7\x41\x1a\xad\x41\x35\x45\x0a\xb4\x08\xac\x4c\xeb\x4a\x46\x53\x8c\x27\x61\x9f\x1b\x64\xff\x71\x80\xee\x76\x86\x33\x91\x07\x5d\x32\xfd\x7c\xf9\x8c\x52\x56\x32\x25\x29\xe7\x09\xac\xb4\x7d\xea\xd3\x8b\x73\x33\xbc\xa0\x96\x04\x53\xc6\x63\xc2\xbd\x68\x8f\x8c\xbc\x2e\xf6\x3d\xe6\x15\xbd\x75\x63\xe3\x6d\x14\x10\xb9\xdb\xff\x2b\xb8\x8b\x11\xaf\x98\x48\xac\x7e\x22\xf8\x4b\x38\xc6\x8e\x54\x65\x4c\xd7\xfb\x34\xec\x17\x83\x90\x48\x84\x44\x22\x24\x12\x21\x91\x08\x89\x44\x48\x24\x42\x22\x11\x12\x89\x90\x48\x84\x44\x22\x24\x12\x21\x91\x08\x89\x44\x5e\x35\xc2\x1a\x11\xd6\x88\xb0\xc6\x8e\x85\x35\x7e\x75\x88\x4e\x0a\x00\x28\x15\x0a\xba\x93\x4a\x9d\xb7\xf5\x08\x45\x2c\x23\xff\xfb\x9d\x63\x86\xb5\xd6\x54\xc3\x56\x1a\x4b\x07\xfe\x62\xd9\x8b\x43\xec\x3f\x0d\xd0\x56\xe3\xfa\xab\xe5\x44\xf8\x5f\x34\x48\x02\xed\x92\x2e\x99\x25\xb5\x08\x09\xee\xe3\x17\x4c\x15\x0a\xfa\x52\xc2\x56\x55\xcc\x54\xdf\x0e\x74\x70\x1d\x74\x70\x29\x98\x0e\x1e\x60\x09\x4b\x36\xb4\x50\xd0\x2d\x04\x58\xf5\x1a\x90\x2a\x0d\x2e\x08\x2e\x08\x2e\x08\x2e\x08\x2e\x08\x2e\x08\x2e\x08\x2e\x08\x2e\x08\x2e\x08\x2e\x08\x2e\xb8\x91\x5c\xf0\x69\x7a\x8a\x3d\x11\x7d\xab\x1d\xa7\x34\x61\xde\x59\xc4\x01\x1a\x13\x9c\x52\xf1\x5a\x44\x4f\xab\x05\x79\x24\xa2\x97\xd2\xd7\x8c\xd6\xe7\x33\x58\x59\x5a\x15\x1c\xa3\xa0\xa9\xfc\x49\xa3\xdb\x6d\x7e\x80\x84\x6a\x90\x47\x90\x47\x90\x47\x4e\x1e\x3f\x30\x48\x51\x41\x1e\x05\xbc\xa9\x8c\x3c\x14\xbf\xb2\xbf\xdc\xcb\xfe\x4b\x3f\xbd\x5e\xfc\xab\xd1\x6d\x57\x38\xe9\x8a\xed\x35\x4e\xe2\x7f\xea\x66\x68\x20\xff\xc7\x66\xdb\x70\xe5\x2a\xcd\x08\x08\x78\x82\x26\x39\x04\x3c\x44\x07\x28\x51\x33\x3e\xd6\x6c\x4d\x6b\xab\x15\xfe\xc8\x2d\xd9\x6a\xe5\x4a\x30\xf3\x3b\xc8\xf6\x9b\xcc\xcf\xf3\x4e\x4d\xf8\x27\xf8\x23\x36\x59\x01\xf4\x03\xf4\x03\xf4\x03\xf4\x03\xf4\x03\xf4\x03\xf4\x03\xf4\x03\xf4\x03\xf4\x03\xf4\x03\xf4\xdb\x68\xe8\x07\x24\x07\x24\x07\x24\xd7\xc3\x48\xee\x8f\x86\x44\x36\xf0\x58\x39\x51\x2b\x00\x50\x97\xd3\x9a\xec\x08\x1c\xfe\xbb\x21\xf6\xfb\x21\xda\x9a\x56\x35\x21\x76\xe5\xa3\x6e\xb8\xc8\xaf\x88\x85\x8d\x43\xd3\xaa\x26\xbb\xa3\xf2\xc4\xb1\x16\x0b\x1a\x3e\x5e\x93\x74\xd1\x84\x00\x69\x07\x28\xc1\x41\xda\x3e\x1a\xa6\xc1\x3a\x3b\x17\x6b\x3c\xcb\x5c\x54\xd2\xa2\x64\xe7\x82\x29\xd8\x1e\xb6\xdb\xa4\x60\xd1\xa8\x89\xbe\x44\x19\x6b\x52\x2b\xfc\xe0\x36\xa7\x5d\x77\xfa\x4b\x15\x9a\x4d\xbb\xcb\x3c\xda\x89\xd6\x4d\xf2\xfd\xc8\x2b\x14\x3a\x1a\x6f\x45\x08\x72\x40\x90\xb0\x49\x41\xc2\x0f\xf6\xb5\xe5\xbb\xa6\x53\x42\x9c\xf0\x28\x1d\x76\xc4\x09\x9b\x31\x0c\xf5\x65\x01\x5b\x67\x37\x62\xff\xfb\x6e\xc7\x30\xdc\x6d\xab\x0b\x9a\xb6\x60\xa7\xf8\xa1\x23\xa6\x00\x82\x82\x10\x14\x84\xa0\x20\x04\x05\x21\x28\x08\x41\xc1\x35\x0b\x0a\x7e\xba\x4f\xc8\x02\x8f\x73\x59\x60\x63\xd4\xe6\x33\xcb\x26\x86\x8f\xc5\xa2\x54\x2c\xe9\x74\x49\xa8\x0a\x3e\x46\x73\x2e\x55\xc1\x75\x96\xb9\x7e\x69\xc1\x96\x0d\xfb\xc9\xdf\xbb\xd3\x19\xf6\xf7\x04\x28\x08\x9a\x93\x81\x07\xb9\xaa\x55\xbb
\xe7\x02\x10\x0d\xec\xb5\x19\x08\x44\x03\x21\x1a\xb8\xc1\xa2\x81\x8f\xaf\x6b\xcd\x17\x64\xd7\x5b\x07\x79\x82\x14\x07\x53\x07\x28\xc1\xc6\xa2\xa3\x76\xa4\x2d\x73\x2b\x02\x8a\x12\xab\x75\x00\x37\x5f\x78\x2e\xfb\xfa\x30\x25\x2d\xb6\x58\x37\xc5\x58\x93\xed\x2f\xc6\xc9\x36\xb6\x81\xe3\x27\x87\xd9\xb7\x6e\x71\x06\xc2\x5f\x6c\x64\x17\xe7\x4b\x4e\x89\x4e\xfe\x69\x57\xec\xe6\x1c\xbb\xe9\x37\x40\xfb\xd6\xb7\xdb\xa3\x14\xbb\x72\x67\xe7\x35\x85\x2c\xda\xdf\xb1\x6f\xfb\x23\x4f\x19\x21\x8b\x08\x59\x44\xc8\x22\x42\x16\x11\xb2\x88\x90\x45\x84\x2c\x22\x64\x11\x21\x8b\x08\x59\x44\xc8\x22\x42\x16\x37\x32\x64\x31\x49\xa7\xd9\xc9\xe8\x71\x1b\x84\x3c\xe2\xa6\x27\xbe\xeb\xb8\xdb\x01\xa6\x20\xb0\x12\x81\x95\x08\xac\xdc\x44\x81\x95\xff\x7d\x90\x76\x79\xe1\xa7\xf1\x7d\x29\x69\x59\x4a\xa7\x8d\x29\x90\xce\x3e\x3b\xc8\x7e\x69\xc0\xe1\x9a\xef\x68\x4c\x44\x71\x51\x94\x32\x25\x4a\x69\x91\x80\xe2\xa8\x0b\x4a\x7a\x6f\x30\xaf\xe8\x9b\x2e\x7b\xba\x6b\x25\x14\x17\x82\xd9\xe4\x28\xdb\x57\xed\x63\x70\xbf\x10\x40\x49\x40\x49\x40\x49\x40\x49\x40\x49\x40\x49\x40\x49\x40\x49\x40\x49\x40\x49\x40\x49\x40\x49\x40\x49\xe4\x51\x03\xf7\x01\xf7\x01\xf7\x6d\x08\xee\xfb\xce\x61\x3a\x24\xa4\x0d\x97\x39\xec\xab\x9d\x4e\x9d\xd6\xd4\xfc\x75\x75\xd9\x0e\x6f\xfc\x9d\x21\xf6\xff\x86\x68\x1b\xbf\xec\x6a\x39\x11\x0e\xfb\x24\x54\x4f\x6b\x6a\x3e\xa5\x2e\xc7\x76\x1a\xc7\x92\xc6\x99\x9e\x8d\x4e\xc4\xd1\x16\xe7\x54\x5f\xa9\x9d\x7b\x79\x5c\xe0\xb5\x43\x74\x80\xe3\xb5\x51\xda\x47\xc3\x35\xe3\x70\xf9\x83\xc5\xcb\x89\xb8\x59\xcd\xe0\xe4\xc9\xac\x5c\xa4\xeb\xf3\xc1\xac\x6c\x98\x0d\x9a\x5b\x8c\x88\x92\x3d\x68\xcc\xa4\x68\xfc\xee\x14\xfe\x89\x6d\xae\x16\xde\xe5\x9f\x5a\x6d\x35\xf2\xc3\xe6\xe1\xce\xb4\x73\xf2\x14\x9d\xa0\xc9\x8a\x54\x86\xb5\xb4\x27\x72\x17\x90\x5f\xdd\x64\x7e\xf5\x0f\xf6\xad\xef\x53\xa6\x29\x91\x48\x3d\x41\x47\x9d\x44\xea\x35\x16\x11\x04\xd6\x03\x72\xac\x1b\x33\x13\x1e\x63\x50\xdb\x66\xc4\xbe\x71\xb7\xcb\x4c\xdc\x63\x27\x5a\x5b\x96\xe1\x21\xf1\x4b\x87\x0c\x03\x72\xad\x91\x6b\x8d\x5c\x6b\xe4\x5a\x23\xd7\x1a\xb9\xd6\xc8\xb5\xf6\xcf\xb5\x6e\xe9\xf0\x9f\x7c\xf5\x4e\xd7\xf0\xbf\x37\x20\xe1\xda\x9a\x14\xec\xe2\x59\x7d\xed\x9f\x13\x20\xe7\xba\xd7\x66\x22\xc8\xb9\x46\xce\xf5\x06\xe7\x5c\x5f\x59\xe7\x02\x31\xc8\xc2\xb7\xd4\x7e\x07\x67\x5e\x1f\xa4\xfd\x6c\x3c\x1a\xb7\xe3\x7e\xef\x75\xc7\x0e\x9b\x45\xde\x0e\xd1\xc2\xec\x6b\x43\x74\x58\xe0\xc8\xbc\x5c\xbc\xa9\x6a\xc6\x07\xef\xec\xb6\x62\x06\x24\x2a\xf9\xac\x26\xeb\x7a\x3a\x27\xe9\xba\x6c\xf3\xc8\x5f\x1a\x62\x5f\x1d\xa0\xd7\x39\xd7\x19\x83\xe1\x27\x1b\x49\xba\x9e\x13\xe5\x4d\x1b\xe5\x75\x45\xae\xf5\x2e\x7e\xbb\x0b\xf6\x93\x2c\x25\xdc\x55\x44\x18\x63\x13\xe9\xd5\x4f\x06\x7f\xb0\x13\xec\xa8\xf9\xc1\x56\xf5\x3d\xf3\xe3\x75\xbf\x05\xc4\x33\x22\x9e\x11\xf1\x8c\x88\x67\x44\x3c\x23\xe2\x19\x11\xcf\x88\x78\x46\xc4\x33\x22\x9e\x11\xf1\x8c\x88\x67\x44\x3c\xe3\x46\xc6\x33\x1e\xa3\x23\xec\x50\xf4\x80\x8d\x3c\xee\x77\x83\x12\xf7\xf2\xad\x9a\x96\x20\x14\x12\xa1\x90\x08\x85\xec\xe1\x50\xc8\xef\x1a\xa6\xc9\xda\xec\x51\xec\xf4\x5c\x0f\x40\xfe\xd6\x10\x7b\xcf\x16\x62\x1e\x00\x29\xb6\x80\xde\x9c\x14\x32\x5a\x45\x21\xf9\xd3\x6c\x2e\x14\x59\x33\x16\xb4\xe5\x8c\xd2\x42\x91\xcb\xc1\xa4\xf1\x14\x3b\xd1\x24\x69\x14\xdb\x50\x07\xe2\x4e\xf0\x48\xf0\x48\xf0\x48\xf0\x48\xf0\x48\xf0\x48\xf0\x48\xf0\x48\xf0\x48\xf0\x48\xf0\x48\xf0\x48\xf0\x48\xf0\x48\xf0\x48\xf0\x48\xf0\xc8\x4d\xc0\x23\xbf\xfa\x00\x1d\x11\x3c\x52\x2f\xaa\x9a\x94\x95\x5d\x81\x90\x65\x35\x57\x5a\x95\xa5\x62\x51\x4a\x5f\x33\x86\x6b\x0b\x42\x8e\xe9\x3c\x43\x81\x7d\xfa\x01\xf6\x03\xfd\x44\xe6\x85\x57\xcb\x89\xf0\x28\x4f\xcf\x16\x87\xad\x16\x76\x46\xea\x25\x5e\xde\x94\x5d\x5e\x6
c\xb7\x71\xfa\xa2\xb8\x7c\x29\x51\x79\x58\xe4\x41\xb4\x38\x71\x7b\x99\xce\x0a\x26\x77\x9a\x4e\x72\x26\x77\x94\x0e\xd3\xc1\x9a\x31\xbb\x56\xa3\x94\x13\xf1\xca\xea\xad\x23\x39\x93\xe7\x70\x3f\x15\x8c\xf0\x26\xd9\x31\x41\xea\xaa\xee\xed\x93\xcc\xed\x7d\x7f\x14\xfe\xbb\x6d\x9e\x57\x33\x6e\xe5\x75\x37\xfc\x76\xf6\x9a\x57\x74\xf4\x05\x25\x53\x74\x8e\x66\x2b\x12\x29\x9a\x7c\x43\x48\xa2\x40\xf2\x77\x93\xc9\xdf\xff\xbe\xaf\x75\x76\x62\x5e\xe4\x81\x9f\xa1\x69\x27\x0f\xbc\x7d\x56\xa7\x31\xb3\xe2\x6f\x33\x1a\x30\x36\x01\x29\xe7\xc9\x0f\xdd\xe5\xb1\x3a\xc7\xaa\xf2\xc4\x1a\x36\x3f\x8f\xf2\xb4\x84\x8e\x19\x1f\x64\x90\xf5\x9a\xf1\x43\x06\x19\x32\xc8\x36\x38\x83\xac\x83\xb3\xd1\xa0\xf4\xaf\x76\x0f\x1c\xa9\x13\x34\xc9\x8e\x45\x8f\xd8\x94\x62\xa7\x9b\x52\x54\x5e\xda\x7e\x52\xc1\x7e\x7c\x88\xc2\x15\x1b\x70\xaa\x19\x27\xd0\xe2\xd9\x21\xf6\xeb\x2e\x01\xfa\x9f\x6e\x24\xba\xe2\x82\x9a\x91\xbb\x22\xaa\xe2\x6e\xf7\x3e\x9a\x6a\x46\xee\xfe\x10\x8a\x2e\xcc\xe6\x9a\x0d\xfe\x20\x76\xb3\x47\x7c\x56\x62\xb6\x38\xbd\xd1\xf4\x08\x93\x40\x98\x04\xc2\x24\x10\x26\x81\x30\x09\x84\x49\x20\x4c\x02\x61\x12\x08\x93\x40\x98\x04\xc2\x24\x10\x26\x81\x30\x89\x4e\x87\x49\x24\x68\x8c\x8d\x46\xf7\xd9\x00\xe2\x1e\x37\x80\x30\x96\x6a\x08\x8f\x40\x78\x04\xc2\x23\x10\x1e\xe1\x0a\x8f\x78\x57\x8c\xce\x89\xf0\x08\xa1\x43\xe9\x82\x85\xfe\x02\xf6\x05\x35\x93\x51\x74\xad\xc4\x17\xf3\xcb\xa5\x4c\x56\xb6\xc3\x26\xd8\x27\x87\xd9\xb3\x5b\x68\xbb\x28\xe9\x6a\x39\x11\xfe\x85\x46\xa0\xe2\x82\x9a\x99\xb1\x4b\x4c\xf2\x12\xbb\x82\x31\xc6\xf8\xed\x84\xb8\xa6\x5b\xdb\xd1\xa7\xbe\xc0\x8f\x4d\xe0\xc7\xb7\x04\xe3\xc7\xc3\xec\xa0\x89\x1c\x45\x9f\x32\xb1\xa3\xcf\x1b\x80\x90\x14\x88\x24\x88\x24\x88\x24\x88\x24\x88\x24\x88\x24\x88\x24\x88\x24\x88\x24\x88\x24\x88\x24\x88\xe4\x46\x12\xc9\xd3\x74\x92\x1d\x8f\x4e\xd8\x44\xf2\x61\x37\x91\xf4\x59\xc5\xdd\x0e\xea\xdb\x40\xa0\x40\xa0\x40\xa0\x9b\x08\x81\xfe\xe9\x20\xed\xad\x95\x21\x26\x50\x68\x5a\x57\x78\xe8\x24\xfb\xd4\x20\xfb\xe9\x01\x4f\xfc\xbf\x6e\x8e\xb6\xf9\x8c\x52\x56\x32\x25\x29\xe7\x61\x9d\xf6\xcc\x76\x7a\x71\xae\x65\xb1\x93\xf1\xd8\x03\x37\x3d\x99\x03\x66\xe1\xf3\x8a\xbe\x09\x40\x64\xe7\xa5\xa4\x76\xfb\x83\xc8\xbb\x18\xf1\x8a\x19\x8d\x4c\xd7\x1f\x0f\x86\x91\xfb\xd9\x78\xbd\xe0\x60\xf3\x2d\x00\x44\x02\x44\x02\x44\x02\x44\x02\x44\x02\x44\x02\x44\x02\x44\x02\x44\x02\x44\x02\x44\x02\x44\x02\x44\x6e\x24\x88\x04\xe4\x03\xe4\x03\xe4\xeb\x61\xc8\xf7\xee\x7e\x8a\x04\xc9\xd2\xb3\xff\x12\x62\x7f\x1c\xf2\xd5\x9e\x7f\x63\x56\x2e\x7a\xc7\x5b\xf1\x24\xb1\x48\x56\x2e\x56\x89\xbb\x4f\x2d\xcc\x59\x6b\xbc\x16\x0a\x78\x54\x15\x34\x51\x55\xd0\x44\x23\x05\xa5\x0a\xf4\xb8\x60\x6d\x17\x68\x9e\xb3\xb6\x59\x9a\xa1\x64\x13\xac\xcd\xf5\x9c\xf3\x8a\x1e\x18\x12\xc8\x3e\xf7\x04\xc5\xc4\x4b\xe0\x9b\xb9\x8e\x95\x13\xb5\xa2\x4c\xaf\xab\xcb\x3a\xfb\x91\x27\xd8\x57\xf6\xb8\xf6\xe4\x7e\x44\x4c\xcf\x35\x73\x62\x61\x8d\x3c\x56\x1c\x69\x4a\x5d\x8e\xdd\x6f\x9c\x52\xb5\x1b\x77\x2b\x77\xe2\x6e\x0f\x18\x4d\x02\x14\x02\x14\x26\x01\x0a\x01\x0a\x01\x0a\x01\x0a\x7b\x06\x14\x26\xbb\x06\x14\xb6\xbc\x26\x4d\x83\xc2\x24\x40\x21\x40\x21\x40\x21\x40\x21\x40\x61\xe7\x41\x61\xb2\xa7\xb9\x5e\x12\x5c\xaf\x7d\x5c\x2f\xd9\xed\x5c\x2f\xb9\x09\xb9\x5e\xea\x0a\x1d\x17\x2c\xeb\x10\x1d\xe0\x2c\x6b\x94\xf6\xd1\x70\x4d\x81\x49\xce\x92\xe2\xe5\x44\x3c\xa5\x2e\x37\x82\xac\xae\x3f\xe4\x1f\x3c\xb6\x95\x6d\x11\x71\x63\xe7\x82\xe3\xc6\xf6\xb0\xdd\x66\xdc\x18\xbf\xbb\x19\x2e\x96\x52\x97\x3d\xa1\x62\xd1\x5f\xde\xe6\x42\x5d\x77\x89\x81\x26\x22\x71\xaa\x15\x16\xff\x6a\x2b\xd7\x12\x18\x6a\x92\x8e\xd1\x91\x0a\x8d\xde\x41\xda\xd3\x50\x7b\x42\x90\x17\x6a\xe4\x4d\xaa\x91\xff\x4e\x1f\x9d\x10\x22\x
e2\x87\xe9\xa0\x23\x22\x3e\x4c\x8d\xf6\x3d\x3a\x69\x5c\xbe\x9f\x1d\xa1\x43\xb4\x6d\x2a\x6d\x18\xf1\xb5\x5d\x5f\x33\x2c\xf5\xa8\x30\x2f\x09\x1a\xe3\xe6\xa5\xf1\x22\x83\x2d\x48\x41\x6d\xa9\x05\x89\xfd\xc2\x88\xcb\x82\xdc\x2f\x26\xa5\x91\xb4\x9a\x33\x96\x98\xe6\x90\x61\x58\x93\x47\xc5\x11\xd3\x9a\x4c\xdb\xc7\xdb\x69\x57\x2e\xd3\x25\x5a\x70\xdb\x95\x58\x92\x4e\x37\xe1\x71\x98\xe1\x55\xbf\xc8\x29\xa8\x0e\x5e\x0a\x5e\x0a\x5e\x0a\x5e\xda\x43\xbc\x14\x53\xac\x80\x29\x56\xf7\x00\xe5\x2f\x87\xe8\xb5\x10\x7b\x35\x14\xfe\xa2\xfd\xaa\x3e\x1e\xba\xec\x5e\xc2\x29\xf9\x88\x2e\x96\x61\x91\x65\x79\x45\x78\x5f\x6d\xd4\xe1\x0c\x59\xe6\x57\xc2\x1b\xcb\x35\x99\xcb\xab\xf9\xd1\xbc\x9c\x95\xf8\x2b\x31\x57\x71\xee\x49\x9f\x80\x9e\x76\x67\x30\xa7\x03\xca\xea\xaa\x9c\x31\xa6\x95\xb9\x5b\x8e\x5b\xd4\x31\xed\x4a\x6e\xc4\x5c\xdc\xf1\xc7\x8d\x64\x35\x29\xcd\xfb\x89\xa2\x66\xec\x81\xc7\x19\x1c\xb8\xc7\xd9\x7a\x2f\x25\xdd\xa8\xa4\xbb\xa1\x24\xe3\x4a\xeb\x81\xcc\x9b\xac\x88\xef\xcd\x2a\x22\x2e\xea\x29\xb6\x2d\xf0\xa9\x63\x94\xf1\x2a\x2c\xf0\x1a\xf8\x2d\x5a\xc1\xed\xc1\xed\xc1\xed\xc1\xed\xc1\xed\x7b\x9a\xdb\x7f\x3e\x44\xaf\x84\xd8\xcb\xa1\xf0\x4b\xf6\x68\xfb\x7c\x68\xc6\x95\x6c\x5b\xc8\xc9\x92\x2e\xdb\x9f\xfe\x82\xa6\x16\xa4\x2c\x1f\x87\x85\xc2\x9f\x27\x6a\xc7\x7a\xdd\x4e\xb6\xae\xf1\xc2\x13\xf1\x23\xf1\xc8\xa2\xb0\x23\x62\x90\x2c\xc8\x79\xa3\x9b\x3a\xa3\x88\x1c\x51\xb5\xc2\x35\x29\x6f\x45\x11\x69\x25\x79\x6c\x45\xca\x59\xb3\xff\xa8\x38\x1a\x8d\xac\x28\x79\x29\xa7\xbc\xc3\x32\xdf\xcb\x72\x44\xca\x70\x08\xae\x8e\x09\x26\x9a\x71\xa6\x96\xa2\xf0\x41\xdd\xb9\x48\xcc\xb7\xe3\x91\x33\x0a\x37\x49\xae\x8a\xab\x5a\xf5\x93\x39\x8e\x87\xa2\x98\xee\xf3\xd9\x9f\x5a\xbc\x16\x8f\xde\x23\xea\x33\x63\x3d\x88\x37\x1c\xe9\xbd\x03\xf4\xdc\x00\x7b\xf7\x40\xf8\x9b\x76\x4c\xda\x17\xfa\xaf\x98\x76\xd0\xe8\xa2\xd7\xd4\x9b\x91\xac\xa4\x2d\x4b\x59\x0f\x67\xb0\x27\x6a\xb2\xb6\xa2\x6a\xab\x46\x5b\xf8\xd6\xf4\x62\xc5\xcd\x6b\x57\x94\x4f\x6b\xac\x39\x89\x90\x02\x34\x7a\x44\x46\x4e\x2b\x19\x67\x62\xcd\xc7\x46\xb1\xbb\x91\xd5\xba\xc6\xe0\x64\x8e\x1c\xd6\xe8\x17\x77\x35\xa3\x65\x37\x6d\xda\x6f\x47\xa2\x79\x6f\x16\x8f\x08\xa6\xc4\x6d\xb1\x7b\xb6\x3a\x28\x9e\x61\xd0\x58\xf3\xf0\xbf\xbc\x1d\x43\x9f\x8c\x0c\x26\xa5\xf4\x8d\xac\xa6\x96\xf2\x19\xe3\x2c\x1e\x02\xc6\x4f\xaa\x68\x38\x31\x59\x31\x67\x40\xde\x42\xac\x27\x58\xb6\x4b\x9a\x8c\x0c\xce\xaa\x9a\xec\x2a\x36\x92\x96\xf4\xb4\x94\x31\x9e\xde\x6c\x1f\x11\xf4\xc7\xcb\xd3\xc5\x74\xba\xaa\xc0\x15\xbb\x8c\x78\x74\x47\xa1\xb2\xdf\xb8\xe7\x36\x70\x8b\xc1\x2d\xd6\xa3\x6e\xb1\x54\x56\x6c\x70\x39\xce\x37\xb8\x0c\x5d\x7c\x8c\x9d\xa0\x49\x3a\xd6\x04\xbc\x14\x3b\x3a\x06\xba\x9c\x62\xfe\xc0\xf8\x5e\xb6\x43\x7c\xcd\x8e\xa5\x6d\x21\x3c\xde\x84\x42\x3b\xec\xc5\x41\xba\xd7\x67\x83\x33\xf6\xc3\x83\xec\x03\xae\x9d\xcd\x0a\x8d\x49\x74\xb4\x50\x9f\xe3\xde\x8a\xdd\xc9\x36\x87\x32\x47\xe7\x25\x82\x1b\x52\xe6\x68\x6c\x97\x32\xdf\x9d\xc9\x20\xc5\x81\x08\x7b\x48\x71\xc0\x63\x04\x8f\x11\x3c\x46\x3d\xea\x31\x82\x14\x07\xa4\x38\x40\xea\x41\xea\x41\xea\x41\xea\xbb\x82\xd4\x43\x8a\x03\x52\x1c\x9b\x85\x4d\x42\x8a\xa3\x1d\x52\x1c\x1f\x18\xb2\x54\x20\x2a\xb7\x1c\xf3\xdb\x5b\x8c\xbd\x36\xc8\x7e\x7f\xc0\xbd\xa9\xd8\xbf\x6c\x8c\xe7\xb5\x6b\x5b\xb1\x78\xec\xa0\x67\x63\x30\x9f\xfb\xcc\x2b\x7a\x71\x56\xd5\xa6\x72\x39\x3b\xa6\xb6\x75\x3a\x20\xb7\x11\xff\x6b\x6c\x07\x30\x37\xc5\x6b\x74\x37\xb0\x86\xd0\x22\x90\x20\x90\x20\x90\x20\x90\x20\x90\x20\x90\x20\x90\x20\x90\x20\x90\x20\x90\x20\x90\x20\x90\x20\x90\x20\x90\x20\x90\x20\x90\x20\x90\x60\xa7\x90\xe0\xf7\xc5\xe8\x7c\x05\x12\x14\x92\xbc\x66\x58\x9f\xbf\x48\xac\x1f\x2e\x14\x27\xbc\x93\xfd\xea\x30\xfb\x9e\
x2d\xf4\x7a\x9b\x1a\x0a\x19\xdf\x5f\xe8\x33\xdf\x8a\x0b\x18\xe6\x2d\x3b\x67\xe9\xc8\xb6\x0b\x1c\x8e\x38\xf3\x1b\x3e\x87\x33\x47\x43\x63\x8a\xe4\x4c\xdc\x06\x3d\x73\x65\x57\xd9\xf1\xd8\x98\x07\x3b\xf2\x07\x72\x52\xf6\x7d\x2a\x0d\xe2\x58\x4d\x1c\x9f\x0a\x26\x8e\x93\xec\x58\xa3\x94\xd1\x45\x26\xf9\xeb\xa0\xeb\x0f\xfb\x63\xc7\x6d\xec\x0e\xfe\xe0\x40\x8e\x40\x8e\x40\x8e\x40\x8e\x40\x8e\x40\x8e\x40\x8e\x40\x8e\x40\x8e\x40\x8e\x40\x8e\x40\x8e\x40\x8e\x9d\x46\x8e\xa7\xe9\x24\x3b\x1e\x9d\xb0\xd3\x1d\x1f\xe6\x79\x8e\x66\x2d\x7c\xd6\x7a\xd1\x01\xe3\x04\x77\x3a\xe3\xe6\x4b\x98\x04\x16\x05\x16\x05\x16\xdd\x44\x58\xf4\x4f\x86\x69\x56\x60\x51\x81\x7f\x2a\x37\x2c\xab\x4b\x47\xc5\x25\x16\x0f\xfd\xee\x61\xf6\x37\x03\xf4\x7a\xf1\xa3\xcd\x43\x5f\x68\x84\x87\x72\xb2\xd6\x15\x04\xf4\x91\x9b\x36\xe9\xd3\xab\x08\x28\xff\x19\xcc\xb3\x9a\x79\x06\x31\xc9\xeb\x57\x82\xa1\xe8\x41\xb6\xdf\x84\xa2\x9e\x9e\x68\xb2\x51\x51\x8f\x2a\x1a\x0a\xd8\x09\xd8\x09\xd8\x09\xd8\x09\xd8\x09\xd8\x09\xd8\x09\xd8\x09\xd8\x09\xd8\x09\xd8\x09\xd8\x09\xd8\xd9\x61\xd8\xb9\x9f\xc6\x59\x3c\x3a\x62\xa3\xca\x1d\x6e\xd8\xc9\x17\x6f\xc0\x9b\xc0\x9b\xc0\x9b\xc0\x9b\x1d\xc5\x9b\x5f\x1e\xa4\x87\x05\xde\x94\x0a\x05\xdd\x49\x03\xcf\xc8\x85\x9c\x7a\xcb\x18\xff\x75\xf6\x1f\x07\xd9\x8b\x03\xb4\xd5\x38\xe1\x6a\x39\x11\x2e\x37\x96\xfb\x3d\x63\x97\xd0\xa2\x94\xef\x61\x7e\xc1\x54\xa1\xa0\x2f\x25\x9c\xc2\x91\xe7\xdd\x18\x81\xbc\x10\x0c\x18\xf7\xb1\x61\xc1\x12\x9d\xd6\x25\x9f\xc4\x6f\xa3\x1f\x20\xb9\x1b\xf0\x11\xf0\x11\xf0\x11\xf0\x11\xf0\x11\xf0\x11\xf0\x11\xf0\x11\xf0\x11\xf0\x11\xf0\x11\xf0\xb1\x2b\xe1\x23\x30\x1f\x30\x1f\x30\x5f\x0f\x63\xbe\xaf\x85\x68\x97\xc0\x7c\x69\x55\xd5\x32\x4a\x9e\x17\xe2\xc4\x32\x8e\x31\xde\xa1\xe9\x6e\xf7\xe1\xab\xe5\x44\xf8\x8d\x59\xb9\xe8\x1d\x69\xc5\x33\xc4\x76\x65\xe5\xe2\xb4\xeb\xe4\xa5\xc4\xd4\xc2\x9c\xb5\xb4\x6b\x1d\x81\x9b\xa8\x2a\x68\xa2\xaa\xa0\x89\x46\x0a\x4a\x15\xe8\x71\x01\xdd\x2e\xd0\x3c\x87\x6e\xb3\x34\x43\xc9\x26\xa0\x9b\xeb\x39\xe7\x15\x3d\x90\xbc\xb1\xf7\x5c\xa5\x19\xd1\xf6\x19\x45\x4f\xab\xc6\x80\x53\x19\x44\x5a\x2b\x7c\x34\x9f\x29\xa8\x4a\xbe\xa8\xe7\x94\xb4\xac\xb3\xff\xf0\x14\xfb\xf4\x5e\xda\x61\x97\x62\x87\x90\xee\x13\x13\x77\xcd\x9c\x72\x58\x63\x92\x1d\x3c\x6a\x16\xb3\x68\x14\x13\xe3\x27\xcf\x58\x65\x54\x87\x70\xba\x4f\xee\x72\x90\x9a\x04\x5f\x04\x5f\x4c\x82\x2f\x82\x2f\x82\x2f\x82\x2f\xf6\x0c\x5f\xec\xa2\x0d\xf6\xbb\x86\x2f\x62\xe7\x77\xf0\x45\xf0\x45\xf0\x45\xf0\xc5\x8d\xd8\xf9\xbd\xa7\x71\x20\xb6\xa6\xee\xe5\xad\xa9\x93\x9b\x10\x07\xa6\xae\xd7\x84\x56\x74\x51\x40\xb2\x73\x34\xcb\x21\xd9\x69\x3a\x49\xc7\x6b\x41\xb2\xb8\x03\xb4\x4c\x14\x15\xf7\xe0\xa3\x79\x45\x2f\x5a\xf1\x67\x0f\xf9\xc7\x8b\x6d\x65\x5b\xc4\x16\xc3\xcb\xc1\xf1\x69\xa7\xd8\x89\xaa\xfc\x56\x33\x26\xad\x92\xac\x59\x49\xb1\xee\xda\x50\xf4\x73\xdb\xfd\xe8\xd9\x1b\xc5\xf0\x14\x91\xf2\x15\xa4\x6c\x54\x1c\xe8\x34\x2b\x13\x68\xeb\x02\xcd\x53\x8a\x0d\x2c\xab\x99\x5b\x61\xfe\xdf\x48\x5f\x6c\x82\x8e\x36\xfb\x2a\x92\x3f\x1a\xa2\x1f\x0a\xb1\x1f\x08\x85\xbf\xdf\x36\x52\xff\xa7\xef\x8a\x31\x05\x30\x47\x85\x11\x1e\xd1\x99\x36\xd6\x7e\xc2\xaa\x78\x56\x22\x6e\x3a\xb5\x2c\x47\x0a\xc6\x7b\x30\xa6\x7f\xf1\xc8\x54\x3e\xa2\xe4\x05\x03\x50\xb5\x48\x29\x6f\x63\x84\x4c\x24\xa3\xdd\xba\x54\xca\x47\x32\x8a\x26\x1b\x9f\x92\x6c\xaf\x4d\x8d\x21\x86\x8f\xe1\xe6\xe2\xdc\x9a\x8a\x9b\xcb\xa1\xc8\x4a\x49\xe3\xd3\xbd\x82\xa6\xa6\x65\x9d\x0f\xb6\xe6\x97\x6b\x0e\x0b\xf1\xc8\x12\xbf\x23\x9f\x46\xf3\xd1\x71\x22\x32\x1a\x99\xca\xe5\x26\xf8\x50\x9a\xd1\x6e\x45\xb4\x52\xde\x58\xa6\x19\xdf\x9e\x35\x7f\x30\x8b\x93\x33\xd1\x3b\x44\xd5\xdc\x86\xe6\x83\x21\xfa\xde\x10\xfb\x40\x28\xfc\x5d\x76
\x03\xfd\x55\x1f\x9f\x7d\x9d\x97\xf2\x52\x56\xd6\xc4\x3a\x42\xc4\xde\xeb\xba\x9a\x56\xf8\xd0\x68\xcf\xd8\x25\xbe\xe4\x51\xb5\x88\x31\x73\x2a\xde\xb2\xe7\x0a\xab\xd2\x0d\xa3\xfe\xc5\x6b\xb2\x2e\x5b\xd6\xc0\xb0\x50\x16\x4b\xe1\x38\x61\x59\x8e\x70\x3b\xc4\x67\xe7\xaa\x16\x49\xec\x3f\x6a\x9c\xab\x49\x69\x0e\x7d\x8c\x85\xad\xf8\xf6\xf9\x0c\xda\x98\xec\x48\x4a\x5e\x78\xe8\xf8\x0c\xd5\x39\x97\x2f\x84\x4d\x24\x65\xac\xab\x2c\x7b\x9e\x55\x73\x52\x3e\x1b\x57\xb5\xec\x58\xe1\x46\x76\xac\x94\x57\xd2\x6a\x46\x1e\xdb\x3d\xa7\x2f\x18\xa5\xc4\xa3\x77\xb9\x9f\xd5\xed\x90\xf9\x8e\x10\x3d\x26\x8c\xc2\x0c\x25\xb9\x51\x38\x4e\xeb\xe8\x89\x22\xf6\x35\xc1\x63\x5f\xb7\x4e\x8b\x99\xe1\x3a\x4b\x7c\x93\x51\xe2\x7e\x96\xa2\x73\xb4\x6d\x2a\x6d\x8c\x45\xeb\x2e\x32\x28\x9c\xb6\xb6\x39\x2b\xa8\x8d\x9b\xb3\xb5\x58\xaf\x2a\xd3\x17\xfb\xfa\x88\x9f\x39\x8b\x8a\x79\x75\x24\xad\xe6\x8c\x55\xb2\x39\xea\x79\x2d\xdb\x11\x71\x4e\xa5\x65\x9b\xb6\x2f\xe9\x8c\x8d\xbb\x4c\x97\x68\xc1\x6d\xe3\x62\x49\x3a\xdd\x84\x47\x66\x86\x3f\xcd\x45\x4e\x79\x75\xf0\x60\xf0\x60\xf0\x60\xf0\xe0\x1e\xe2\xc1\x98\xdb\x05\xcc\xed\xba\x07\x98\x7f\x39\x44\xaf\x85\xd8\xab\xa1\xf0\x17\xed\x57\xf5\xf1\xd0\x65\xf7\x12\x55\xc9\x47\x74\xb1\xcc\x8c\x2c\xcb\x2b\xc2\xbb\x6c\xa3\x1c\x67\xc8\x32\xbf\x12\xde\x58\xae\x59\x64\x5e\xcd\x8f\xe6\xe5\xac\xc4\x5f\x89\xb9\x4a\x75\xcf\x36\x05\xd4\xb5\x3b\x83\x39\x57\x50\x56\x57\xe5\x8c\x31\x9f\xcd\xdd\x72\xdc\xbe\x8e\x69\x57\x72\x23\xe6\xe2\x95\x3f\x6e\x24\xab\x49\x69\xde\x4f\x14\x35\x63\x0f\x3c\xce\xe0\xc0\x3d\xea\xd6\x7b\x29\xe9\x46\x25\xdd\x0d\x25\x19\x57\x5a\x0f\x64\xde\x64\x45\x7c\x6f\x56\x11\x71\x51\xcf\x55\x59\xca\xfb\xd6\x31\xca\x78\x15\x16\x78\x0d\xfc\x16\xe5\xf0\x4b\xc0\x2f\x01\xbf\x04\xfc\x12\xf0\x4b\xf4\xb4\x5f\xe2\xf3\x21\x7a\x25\xc4\x5e\x0e\x85\x5f\xb2\x47\xdb\xe7\x43\x33\xae\x64\xe4\x42\x4e\x96\x74\xd9\xfe\xf4\x17\x34\xb5\x20\x65\xf9\x38\x2c\xf6\x78\xf1\x44\x25\x59\xaf\xdb\xc9\x66\x36\x5e\x78\x22\x7e\x24\x1e\x59\x14\x76\x44\x0c\x92\x05\x39\x6f\x74\x53\x67\x14\x91\x23\xaa\x56\xb8\x26\xe5\xad\x28\x29\xad\x24\x8f\xad\x48\x39\x6b\xf6\x1f\x15\x47\xa3\x91\x15\x25\x2f\xe5\x94\x77\x58\xe6\x7b\x59\x8e\x48\x19\x0e\xf9\xd5\x31\xc1\x7c\x33\xce\xd4\x52\x14\x3e\xa8\x3b\x17\x89\xf9\x76\x3c\x72\x46\xe1\x26\xc9\x55\x71\x55\xab\x7e\x32\xc7\xb1\x52\x14\xd3\x7d\x3e\xfb\x53\x8b\xd7\xe2\xd1\x7b\x44\x7d\x66\xac\x07\xf1\x86\x5b\xbd\x77\x80\x9e\x1b\x60\xef\x1e\x08\x7f\xd3\x8e\xb9\xfb\x42\xff\x15\xd3\x0e\x1a\x5d\xf4\x9a\x7a\x33\x92\x95\xb4\x65\x29\xeb\x81\x10\xf6\x44\x4d\xd6\x56\x54\x6d\xd5\x68\x0b\xdf\x9a\x5e\xac\xb8\x79\xed\x8a\xf2\x69\x8d\x35\x27\x11\x1b\xc5\x18\x3d\x22\x23\xa7\x95\x8c\x33\xb1\xe6\x63\x23\x5f\xe6\xd8\xad\x6b\x0c\x4e\xe6\xc8\x61\x8d\x7e\x71\x57\x33\x5a\x76\xd3\xf6\x66\xd8\x91\x76\xde\x9b\xc5\x23\x02\x36\x71\x5b\xec\x9e\xad\x0e\x8a\x67\x18\x34\xd6\x3c\xfc\x2f\x6f\xc7\xd0\x27\x23\x83\x49\x29\x7d\x23\xab\xa9\xa5\x7c\xc6\x38\x8b\x87\xb8\xf1\x93\x2a\x1a\x4e\x4c\x56\xcc\x19\x90\xb7\x10\xeb\x09\x96\xed\x92\x26\x23\x83\xb3\xaa\x26\xbb\x8a\x8d\xa4\x25\x3d\x2d\x65\x8c\xa7\x37\xdb\x47\x04\x35\xf2\xf2\x74\x31\x9d\xae\x2a\x70\xc5\x2e\x23\x1e\xdd\x51\xa8\xec\x37\xee\xb9\x0d\xdc\x7e\x70\xfb\xf5\xa8\xdb\x2f\x95\xa5\x79\x01\xc5\xcf\xd0\x34\x87\xe2\x27\x68\x92\x8e\x35\x01\x2f\x17\x8b\x52\xb1\xa4\x07\x02\xe7\x98\x3f\x70\xbe\x97\xed\x10\x5f\xb3\x63\x69\x3b\x02\x9f\x37\xa1\x36\x11\xfb\xb3\x10\xdd\x2f\x02\xf1\xf5\xf4\x35\x39\x53\xca\x19\x4b\x37\x33\x12\x9f\xfd\xa7\x10\xfb\xb5\x10\x91\x73\x24\x1c\xce\xf2\x61\xc2\x18\xac\xec\x5e\x24\x45\x78\xab\xc5\xee\xcb\xca\xc5\x45\xfb\xd4\xa9\x85\xb9\xb3\xc6\xcf\x5d\x98\xf5\xa0\xd0\x05\xd1\x4d\xcf\xd2\x19\xd
e\x4d\x4f\xd1\x09\x9a\x6c\x2e\xeb\x81\x3f\x63\x60\xba\xc3\xfb\x1e\xa4\x69\xa3\x95\xc7\xca\x35\xf3\x1a\x34\xd9\xae\xb8\x31\x3f\xd6\x8c\xae\xab\x59\x32\xd9\x63\x3a\xff\x20\xd8\xaf\x84\xd9\x8f\xf5\xd3\xd6\xb4\xaa\xc9\x57\xcb\x89\xf0\x7e\x4d\x96\x32\x11\x71\xcc\xfa\x9e\x9d\xa5\xce\x25\xa7\xc4\x69\xbb\xc4\xd8\x98\x71\xcd\xb4\xaa\xc9\x4b\x2e\xa7\xad\xef\xa9\xe2\x23\x6c\xad\x7b\x23\x95\xa9\xed\x65\x9f\x13\x2f\x25\x49\xa7\xf9\x4b\x99\xa0\xa3\x74\xb8\xa6\xaf\xca\x68\x02\xe3\x15\xf8\xd6\xdc\xb2\x0f\xbb\xfc\xed\xc3\x1d\x6c\x20\x2b\x17\xd7\xa8\x2f\x1d\x8d\x9a\x46\xc0\xf7\x8e\x1e\x4d\x98\xf0\xbb\xb6\x3b\xef\xe8\x90\xf1\x62\xa5\xb4\xbc\xc6\xd7\xb4\xdf\xbc\x6c\xc3\xde\x54\x72\x9e\x52\x74\xae\xc2\xd9\xde\xf4\x1b\x01\x8e\x85\xab\xbd\x49\x57\xfb\x8b\x7d\x2d\xb4\x0c\xc2\xf2\x27\xb8\xe5\xb7\x1d\xed\xeb\x29\x2f\x68\x8a\x52\xdb\x04\x15\x4a\x6d\x34\x41\xc9\xe7\xef\x72\x4c\xd0\xc9\x82\xa4\x15\x15\xee\x5c\x14\x58\x62\x8d\xb6\x68\xbc\x60\xcc\x27\x37\xc4\x12\x3d\x58\x59\xd0\x28\xaf\xcb\x3e\x3e\xfc\xef\x74\x1f\x5c\x95\xb5\xac\xec\x3e\xba\xd7\x7d\xd4\x58\x66\x15\xe5\xac\x92\x1e\xad\x3a\xcf\x53\x8a\xf1\xf7\x2d\xf3\xa8\x51\xcb\xb5\x86\x1c\xd5\x99\x2c\x2c\xf0\x6c\x2d\xd8\xc1\x00\x3b\xf8\xb1\x7e\x7a\xa1\x9f\x7d\xb4\x3f\xfc\x61\x1b\xa5\x3c\xdb\xdf\x3b\x76\xb0\x02\x24\x1b\xed\xcc\x1d\xf4\xdc\xd9\x61\x74\x4e\x27\x6d\x73\xa8\x46\xc7\x1d\xe6\x44\x48\xc4\x73\x48\x39\x7e\x65\x5e\xcd\x8f\x8a\xab\xf9\x19\xdc\x1b\xa5\x47\x86\x52\xba\x9a\x5f\x10\xf9\x8b\xe7\x8d\xcf\xc2\xfc\x7b\xd1\xfa\x58\x9c\x1f\x87\x6b\x1b\xe8\xe4\x2b\x7d\xf4\x72\x1f\x7b\xa9\x2f\xfc\x19\x7b\x59\xfa\x91\xbe\x59\x55\x4b\x73\x47\x59\x56\xe5\xcd\xae\x46\xa2\x2b\xc6\x4f\xd1\xc8\x94\xe7\x29\xf8\x7a\x5e\xf8\xb4\x4a\xba\xe3\x99\x1f\x95\xd2\xfc\xc1\x79\xbc\x43\x4e\x49\x9b\xb0\x4a\xce\x65\xf4\x88\x7a\xd3\x6c\x53\x11\x0f\x50\x90\xd5\x42\x4e\x8e\x47\xc4\x1d\x79\xc2\xa9\xf5\x2e\xf9\xba\xda\xb7\x01\xec\xdb\x47\xb7\xf0\x6a\x79\x22\x41\x5b\x39\x15\x5d\xc7\x2e\x28\x85\x26\x76\x41\x59\xc3\x10\x91\x4a\xd2\x69\x76\x32\x7a\xdc\x5e\xad\x3e\xe2\x56\xdf\xf5\xbd\xfe\x76\x50\xe3\x65\x5f\x1f\xa6\xa4\xb5\x16\xab\xbf\x8d\xbb\x69\x5c\xf3\xc5\xb2\x9a\x2b\xad\xca\xe9\x9c\xa4\xac\xda\x1b\x17\x7d\x72\x98\x7d\xeb\x16\x67\x8c\xfd\xc5\x86\x76\x70\xb7\x4b\x5c\xe2\x25\x4e\x1b\x25\x76\xc5\x0e\x46\xb1\x9b\x7e\x43\xbc\x6f\x7d\xbb\x3c\xff\xfd\x36\xd8\xca\xc8\xfe\x88\x7d\xdb\xdf\xfb\x11\x23\xdb\x1f\xd9\xfe\x50\x13\x45\x74\x27\xa2\x3b\x11\xdd\xd9\x3b\xd1\x9d\x50\x13\x85\x9a\x28\xa2\xea\x10\x55\x87\xa8\x3a\x44\xd5\x75\x45\x54\x5d\x7d\x98\xe2\xbb\x8e\xbb\x1d\x60\x0a\x34\x4f\x11\xed\x04\xcd\xd3\xcd\x23\x72\xc0\x5e\x1c\xa4\x98\x08\xf7\xc9\xcb\xc5\x9b\xaa\x76\xc3\x15\xee\x53\x4e\x58\x3f\xf2\xb8\x48\x45\xd6\xd9\x73\x83\xec\x6b\xfd\xf4\x3a\xe7\xdc\xab\xe5\x40\x65\xcd\x0b\xe2\x64\x11\x19\x19\x1b\x35\x4e\xbe\x60\x5f\xbf\x94\xf0\x1c\xde\x6c\x9b\x14\xad\xd4\x8e\x5e\x59\x53\x3a\xb8\xab\xf1\xcb\x89\xb8\xa7\x49\xd6\xa2\x0f\xf1\x54\x30\x55\x9c\x64\xc7\x4c\xaa\x58\xf5\xc2\x4d\xc8\xe8\xb9\x3b\xe0\x22\xe0\x22\xe0\x22\xe0\x22\xe0\x22\xe0\x22\xe0\x22\xe0\x22\xe0\x22\xe0\x22\xe0\x22\xe0\x22\xe0\xe2\x46\xc2\x45\x60\x3b\x60\x3b\x60\xbb\x1e\xc6\x76\xef\xdf\x4a\x87\x03\xf2\xc7\x0a\x6a\xc6\xce\x16\x33\x06\xb1\x15\x55\xbb\x29\x69\x19\xf6\xbb\x77\xb0\x9f\x0c\x39\x71\x8a\xb1\xb4\x9a\xcf\x1b\x56\xeb\xec\x99\xcb\x0e\xb6\x29\xaa\x11\xd7\x35\xc6\x43\x2c\xa8\x99\xd8\x90\x79\xae\x08\x0b\x3c\x2b\x17\x5d\x91\x81\x6a\x66\xc1\xb9\x60\x7f\x7f\x6c\x2c\x26\x08\xd9\x11\xda\x29\x40\xd8\x7d\x74\x2f\x07\x61\xaf\xa3\x3b\x3f\xd1\xb7\x8d\xcc\x1e\x12\x18\x49\xd7\x64\xa0\x9c\xa8\xce\xac\xa8\x8e\x29\x7a\xe7\x61\x59\xd7\x1f\xf1\x67\x69\xc4\xb6\x99\x4f\x49\xd1\x
7f\xe7\x6a\xa7\x7d\x56\x3b\x2d\x5c\x5c\x0c\x6c\xa8\x61\x4f\x43\x2d\xa8\xfa\x6d\xde\x52\x01\xde\x2e\xbf\x5b\x6c\x84\xb7\xeb\x2d\x74\x85\x3d\x1e\x5e\xb4\xbe\xf2\x23\xf3\xe6\xfa\xc8\x78\x83\xfc\x55\x5a\xaf\xf1\x92\x15\x65\xef\x9a\xb3\x5d\x91\x97\x17\xd5\xf4\x0d\xb9\xa8\x47\xb7\xf0\x0b\xdc\x46\x84\xbd\x6f\x88\x1e\xf5\x06\x11\xfb\x47\x0b\xb3\x2f\x0d\xb2\xdf\x1b\x70\xba\xd5\xb7\x9a\x61\xc2\x4a\x3e\xa3\x94\x95\x4c\x49\xca\x79\x22\x86\xed\x25\x5c\xfb\x22\x85\xe3\xb1\x83\xae\x58\x5f\xdf\xfb\x18\xed\xb4\xd9\xc8\xfc\x46\x44\xfd\x36\xb2\xdd\x3f\x22\x7f\x01\xe7\x01\xe7\x01\xe7\x01\xe7\x01\xe7\x01\xe7\x01\xe7\x01\xe7\x01\xe7\x01\xe7\x01\xe7\x01\xe7\x01\xe7\x01\xe7\x01\xe7\x01\xe7\x37\x3d\x9c\xff\xef\x21\x7a\x50\xc4\xd4\x5a\x88\x88\x97\x62\xab\xe8\x7d\x2e\xc4\x7e\x33\x44\xaf\xf3\x1c\xac\x2b\xa4\x77\x7f\x56\x2e\x4e\xb9\xcf\x86\x96\x9e\x4b\x4b\xef\xb3\x4f\xd0\x6e\x33\x84\x59\xcd\xc8\xae\xe0\x65\xad\x94\x37\x7a\x56\x3a\x27\xe9\xba\xac\xb3\x1f\x7f\x82\x3d\xb7\x97\xb6\x1a\x27\x71\xc7\x47\xfd\xa8\xe5\x4b\xe2\xe2\x69\xe3\xe2\xd8\xb7\xf0\xa0\x65\x35\x23\x2f\x25\xdc\xbf\x77\x39\x03\xc5\xce\xff\x20\x82\xd8\xf9\x1f\x44\x10\x44\x10\x44\xb0\x87\x88\x60\x17\x6d\x64\xd4\x35\x44\x10\x3b\xec\x80\x08\x82\x08\x82\x08\x82\x08\x62\xe7\x7f\x6c\x01\x72\xfb\x00\xbc\xae\xdf\x02\x64\x53\xee\xfc\x7f\x95\x66\x04\xc3\x3a\x41\x93\x9c\x61\x1d\xa2\x03\x94\xa8\x9d\xbc\xad\x66\x84\xde\xab\x0b\x0d\xcd\x2b\x7a\x70\xbc\xd8\x9a\x42\xc1\x5c\x70\xcb\x92\x74\x75\xdd\xcf\x1b\xb1\x19\x90\x27\x1e\xfd\xdf\xdb\x1c\x0e\x76\x9f\xb5\xb3\xbf\x17\x79\xdd\x2f\x7e\x6e\x23\xf4\x12\x8c\x6a\x9a\xa6\xe8\x54\x85\x9e\xf6\x18\x8d\xae\xa9\xb1\x21\xa2\x8d\xcd\x04\x9a\xdc\x4c\xe0\xaf\xfa\xe8\xb4\xf8\xd6\x8f\xd1\x11\xfe\xad\x27\x68\xad\xdd\x8f\xce\x88\x3d\x04\x4e\xd2\x71\x67\x0f\x81\x26\x8a\x99\x15\x3b\xf4\x9f\xa2\x13\xae\x1d\xfa\x9b\x28\x67\xbd\xdb\xf2\xb7\xcb\x2c\xc5\x7e\x7b\xc4\x31\x3b\x8f\xf8\xee\xc0\xef\x31\x41\xe6\x26\xfd\xc2\x04\x39\xdb\xee\xb7\xd1\x18\x61\xaf\x7d\x10\x58\x10\x58\x10\x58\x10\x58\xec\xb5\x8f\xbd\xf6\xb1\xd7\x3e\xf6\xda\x87\x27\x00\x9e\x00\x78\x02\xe0\x09\x80\x27\xa0\x25\x9e\x00\xec\xb5\x8f\xbd\xf6\xb1\xd7\x3e\xf6\xda\x87\xa3\x0d\x8e\xb6\x1e\xda\x6b\xbf\x5d\x44\xb9\xf5\x9b\x01\xfe\xe7\x10\xbd\x4e\x04\x93\x0b\xfb\x34\xc6\x7e\x29\xc4\x3e\x1d\xa2\x3b\xc4\x3f\xeb\x86\xe9\xef\xc8\xca\x45\x61\x93\x10\x9f\xef\x8a\xcf\xff\x99\x3b\xe9\xc0\x1a\xb4\x8a\xe4\xb2\x22\x7a\xd2\xdf\x53\xf4\x15\x97\x52\x8a\xe9\x90\x8c\x58\xc7\x45\xcb\x2f\xa8\x99\xd8\x23\xe2\x48\xd5\x66\x85\x6a\xe6\x8c\x79\x6e\x8b\xdd\x04\xb3\x34\x43\xc9\x0a\x9f\xe5\x7e\x1a\xaf\xe9\xa5\x31\x47\xe7\x72\x62\x59\x2e\x4a\x89\xb8\x55\xab\xd4\xdf\xf5\xd1\xb4\x78\x19\xc7\x69\x82\xbf\x8c\x83\xd4\x44\x39\x74\x4e\xf8\x9f\xa6\xe8\x94\xe3\x7f\x6a\xae\xa4\x39\xe1\x82\x4a\xd2\x69\x97\x0b\xaa\xb9\xa2\x82\xec\xc7\x62\xb0\x4d\x18\x67\x71\x61\x00\xec\x42\x5d\x1f\x3f\xbf\x1d\x99\x36\x43\x54\xa2\x9e\xe3\x9b\xbb\xb6\x52\x00\x93\xf5\xc1\x64\x0a\x8e\xe2\x1a\x8e\xe2\x43\x74\x80\x25\xa2\x63\xb6\x3a\xd5\x1b\xdc\x0a\x57\x56\xff\xbc\x1d\xb6\x70\x61\xdf\x36\x48\x6f\xb4\xec\xb5\x31\xdd\x55\xd2\xb2\x94\x4e\xab\xa5\x7c\x51\x67\x5f\xd9\xcb\xfe\xa4\xdf\xb1\xc9\x23\xf5\x73\xa8\x16\xc5\xd5\x53\xe2\xea\xd8\x90\x71\xb6\xb0\xd2\xde\x23\x9b\x4d\x5b\x4a\x12\xae\xfa\x71\x3e\x76\x1a\x66\xfb\x08\x1d\xa2\x03\x81\xfb\x41\x7b\x1f\xba\xa1\x08\xa1\x85\x60\x23\x39\xca\xf6\x55\x8a\x45\x79\x6f\xb4\xa6\xd0\x20\xa8\x48\x21\x67\x0c\x2a\x52\x88\x58\x40\xc4\x02\x22\x16\x7a\x28\x62\x01\x2a\x52\x50\x91\x82\xa7\x18\x9e\x62\x78\x8a\xe1\x29\xee\x0a\x4f\x31\x54\xa4\xa0\x22\xb5\x59\x7c\x63\x50\x91\x6a\x87\x8a\xd4\xb7\x49\x74\xce\x54\x91\xca\x18\x53\x59\x45\xcd\
x6b\x72\x56\x31\xbe\x4b\xb7\x9a\x94\x09\xc2\xc7\x56\x4b\x45\xc9\x98\xa6\xdc\x94\x97\xaf\xa9\xea\x0d\xcf\xc2\x4f\x67\x9f\x7a\x9a\xfd\xe8\x20\xed\xf2\x2d\xe9\xaa\x59\x44\x78\xa2\x3e\xcd\x3b\x6f\xde\xe1\x8a\xb8\xc3\xb4\xfb\x0e\xb1\xe3\xc6\xb5\x53\x7e\xc5\x2f\x89\xd2\xeb\x5d\xdd\xe5\xbc\x0f\x3a\x4a\x60\x62\xd0\x51\x02\x13\x03\x13\x03\x13\xeb\x21\x26\xd6\x45\x49\x2a\x5d\xc3\xc4\x90\x3d\x01\x26\x06\x26\x06\x26\x06\x26\x06\x1d\x25\x84\x77\xdf\x3e\x08\xab\xeb\xc3\xbb\x37\xa5\x8e\xd2\x73\x7d\x74\x5d\x04\x4a\xa5\x49\xe2\x81\x52\x6f\xa3\xb7\xd0\x95\x9a\x81\x52\xfe\xa0\xcb\x8a\x2c\xad\x47\x90\x1a\x0a\xa6\x0a\x08\x7d\xba\xfe\xce\xe0\x60\xab\xb7\xb2\x37\x9b\xc1\x56\xf5\xa0\x9c\x19\x86\x55\xaf\xc2\xd5\x91\xac\xd1\x7f\xa2\x20\x40\xb7\xc7\x96\x6a\xaa\xcb\xe2\x4e\x8a\xd3\x36\x9c\xc6\x09\x78\xa6\x50\x96\xe4\x8a\x60\xe9\xc7\x69\xb1\x0d\x9d\x00\xb2\x03\x90\x81\x6a\x32\xba\xf7\x4f\x42\x35\xad\x07\xad\x08\x13\x76\x95\x9e\xe4\x26\xec\x0a\xb5\xa7\xf7\xd2\x0d\x11\xc5\x9f\xa1\x65\x27\x8a\xbf\x6d\x37\xcb\x89\x40\x7f\x99\xd2\xae\x40\xff\x76\xdd\xcd\x32\xc0\x1b\x6b\x60\x83\x92\x03\x62\xbf\x32\x1a\x64\x80\x13\xbe\xa2\x55\x75\x8d\xf1\x39\x71\x49\x3d\x63\xec\x48\x5b\x75\xd0\x2c\x43\xea\x0a\x90\x1c\x90\x1c\x90\x1c\x90\x1c\x52\x57\x90\xba\x82\xd4\x15\xa4\xae\xe0\xac\x81\xb3\x06\xce\x1a\x38\x6b\xe0\xac\x69\x89\xb3\x06\x52\x57\x90\xba\x82\xd4\x15\xa4\xae\xe0\x0b\x85\x2f\xb4\x87\xa4\xae\x36\x96\x71\xb7\x5e\x0d\xe4\x6f\x88\xf6\xd7\xd9\xcc\xba\x9c\x18\xd3\x4b\x7c\x38\x94\xd2\x69\x59\xd7\x35\xb9\xac\xc8\x37\x75\xf6\xf3\x14\xfd\xa9\x2d\x74\x8f\xe7\x9a\xab\xe5\x44\x78\xa7\xed\xc2\x5c\x14\x97\x4d\xf1\xcb\x2e\xf1\xcb\x62\x43\xa6\xe7\xd2\x7d\xd5\x52\xc2\xe7\xcc\x16\xc3\xf0\x45\x7a\x13\x5d\xac\xf0\x51\x9e\xa2\x13\xb5\x5d\x21\x9e\xa6\x30\x3a\x52\x75\x15\x53\x3f\x18\xa2\x05\xd1\x4d\xe7\xe8\x2c\xef\xa6\x53\xb4\xde\x42\xe9\x71\xe1\x24\xba\x40\xf3\x8e\x93\xa8\x05\xc5\x2e\x09\x77\xd0\x45\x3a\xef\x72\x07\xb5\xa0\xdc\xf5\x6e\x45\x92\x0d\xfe\x9a\x66\x58\xd2\xfa\x9a\x7c\x3a\xa8\xa5\x88\xe2\x53\x37\x8f\x90\x1c\x30\x1e\x84\xa1\x9a\x73\x1d\xb7\xdc\xe2\x7e\x31\x4c\x67\x85\xc5\x95\x9f\x29\xca\x79\x3e\xa8\xdb\x69\x5e\x35\x04\xf4\x94\x7c\x56\x33\x5e\x9d\xad\xa2\xa7\xf3\x91\x8d\xfd\xeb\x30\xfb\x50\x3f\x31\xa7\x20\xdb\x95\xf9\xa8\x26\x4b\x99\x88\x38\xcd\xea\x3d\x0e\xbe\x98\x13\xe5\xc5\x46\x8c\xb3\xce\xd8\x57\x9b\x0e\x4b\x47\x63\xcf\x3c\x4f\x8c\xa3\xad\x35\xca\x6b\x55\x59\x72\x9e\xd1\xf6\x49\x9b\xb5\x0b\xb4\x42\xbb\xfc\xad\xd0\x1d\x6c\x20\x2b\x17\xe9\xfa\x52\xb0\x11\x3a\xc0\x12\xa6\x11\x72\xaa\x61\x9a\x1e\xab\x12\x55\x63\x75\xf8\x6f\xb7\xf9\xbe\x99\x41\x4d\x2e\xe4\xa4\xb4\x1c\xf8\x72\xc6\xcc\x13\x37\xe6\xfd\x24\x8d\x51\xed\x4c\xc5\xa0\xd9\xdc\x0b\x82\x13\x05\x81\x3b\x4d\x5a\xdf\x9f\xed\x6b\x99\x95\x78\x4c\x4c\xad\x66\x28\xe9\x4c\xad\x36\xc0\xe4\x14\x4a\xed\x33\x39\xc9\x0f\xde\xe5\x6b\x72\xc6\x0b\x92\x56\x54\x78\x58\x80\x00\x8a\x81\xb6\x67\xb4\x60\xac\xfd\x3a\x6f\x79\x1e\xac\x2c\x48\x54\x64\x1f\x97\xc4\xdd\xe9\x3e\xb8\x2a\x6b\x59\xd9\x7d\x74\xaf\xfb\x28\x5f\x6c\xc9\x59\x25\x3d\x5a\x75\x9e\xa7\x14\xe3\xef\x5b\xe6\x51\xa3\x96\x49\x63\xe6\x9d\xaa\xb0\x7a\x13\x74\xb4\x89\xc5\xe7\x02\xcf\x35\x85\xe9\x0b\x30\x7d\x1f\xeb\xa7\x17\xfa\xd9\x47\xfb\xc3\x1f\xb6\x99\xe7\xb3\xfd\xbd\x63\xfa\x2a\x3c\x3e\x46\x3b\xf3\x48\x1a\xee\x95\x34\x3a\xa7\x93\x74\x3e\x54\xa3\xe3\x0e\x73\x74\x2b\x02\xaf\xa4\x1c\xbf\x32\xaf\xe6\x47\xc5\xd5\xfc\x0c\xee\x36\xd6\x23\x43\x29\x5d\xcd\x2f\x88\xec\xeb\xf3\xc6\x67\x61\xfe\xbd\x68\x7d\x2c\xce\x8f\xc3\xb5\x6d\x72\xf2\x95\x3e\x7a\xb9\x8f\xbd\xd4\x17\xfe\x8c\xcd\x8f\x3e\xd2\x37\xab\x6a\x69\xee\xd1\xce\xaa\xbc\xd9\xd5\x48\x74\xc5\xf8\x29\x1a\x99\xf2
c0\x94\x5f\x74\x73\xcb\x89\x18\x8f\x7c\x96\x38\xda\xcb\xde\xd2\x90\x7d\xc0\x36\x67\xf8\x0b\xe1\xd2\x29\x31\xbf\x84\xf9\x76\x12\x2b\xa4\xa6\x1f\x94\xe6\x1d\x4f\xc6\x9c\x82\x32\x1d\x9c\x73\x8a\x12\x5b\xb7\xf8\x51\xcb\x9c\x73\x3d\xa7\xe8\x3e\x23\x41\xba\x59\xca\x0c\x5f\xf0\x84\x06\xb9\xc1\x9b\x8f\x81\x5b\x3e\x78\x4f\x18\x5f\xc4\xd1\x6c\xdb\x1c\x77\x61\x39\x6b\x13\xf7\x83\xea\x27\x8b\xbd\xca\x88\x83\xe9\xf0\xb1\xf9\xd1\xbc\x6d\x5d\xc5\xe7\x33\x26\x1f\x24\x11\xbc\xca\xbe\xb1\x8b\xbc\xa1\xcb\x78\xb6\xab\xfb\xfb\x2a\x82\xf9\xc5\x6d\xe7\x04\xda\xc5\x3e\x29\xe6\xd6\x15\x9c\x60\xd6\x29\x50\x33\xe7\x17\x8b\x14\x94\x70\xfc\x5d\xd0\x60\xce\x0f\x16\x98\x2c\x6a\xce\xf4\x74\xc5\xcd\xeb\x4f\x14\xd6\xb3\x5c\x8c\x25\xfe\x54\x2e\x5b\x28\x39\x37\x1f\xc3\xd6\x80\x80\x42\x10\x41\x49\x97\x39\x7a\x02\x1f\x94\x18\xa7\xad\x89\x51\xa2\x63\xca\x95\x53\x71\xcb\xe4\xcd\x6c\x73\x24\xc7\xbc\x3e\xd8\x80\x75\xe5\xd0\xc3\x9f\xa1\xc7\x1c\x10\x2f\x3e\xb9\x30\xc2\xc3\x66\x4f\xd6\xc9\x5d\x60\x1b\x8d\x97\x67\x67\x41\xc0\x10\x4e\xaa\x10\x1c\x87\xa4\xc5\xd2\x4f\x0e\x22\x9f\x60\x56\x8d\x74\xd8\xec\x39\xee\x07\x54\x1b\xd6\xcc\x39\x61\xce\x61\xdb\x86\x94\x0f\x0f\x11\xc3\x78\x21\xd7\x5e\x55\x03\xce\xa9\x31\x6c\xeb\xea\x52\xe5\xba\xd1\x0d\x83\x8f\xa4\xc8\x34\xdb\xbe\xf7\x19\x0f\x92\x09\xb2\x93\x4b\x83\xe6\x61\x33\x3f\xb4\x8a\x4d\x64\x26\x72\xa2\x72\x58\xdf\xd8\x38\xc1\x8d\x8d\x71\x32\x0a\xc6\xc6\xda\x6e\x23\x6d\x06\xb3\xb6\xcd\xb0\xcb\xd8\xc1\xc5\xb4\x4e\x66\x43\xf6\x67\x2e\xaf\x63\x36\x64\x4a\x4e\x10\xb9\x10\x8d\xe4\x2e\x70\x03\x3f\x65\xa0\x54\x87\xf3\xa9\xc3\xb6\xc4\x0d\x95\x03\xf1\x89\xdc\xc9\xfe\x1c\xbe\x51\x3f\xb8\x40\x83\x02\xd5\x8f\xee\xd1\x8f\x86\x51\xe0\x44\xb4\xe0\xe6\x06\xaa\xce\x4b\x8c\xc2\xfe\x5e\x16\x47\xd9\x2c\xb3\xa7\xc8\x09\x32\x59\xe1\x21\x0d\x93\x83\xab\x58\x1d\x53\x10\xb7\x47\x0b\xa6\x89\x05\xf3\x81\x6d\xe4\x7d\xdb\x8c\x17\xb6\x75\xff\xa2\xda\x06\x9e\xdb\xb6\x75\xbc\xa5\x8a\x50\x07\x93\x33\x84\xee\x21\x1c\xc7\x16\x67\x9c\xc0\xd3\x5b\x67\xe1\xf6\xc1\x6e\xc6\x33\x3d\x9c\x22\x5c\xc9\xcc\x36\x7e\x35\x9c\x01\x26\x54\x68\xf6\x4e\x86\xbe\x37\xc5\x33\x59\x4e\xb2\xcf\x42\xfc\x3d\x23\x3f\x96\xf8\xc7\xbe\xfa\x6e\x5c\xf6\xa5\x14\x79\x31\x65\x7c\x2a\xd5\xfd\x09\x85\xd2\xbd\x27\x75\xdc\x0f\x72\x60\xdd\x15\x7c\x10\xbb\x6f\x5a\x73\xec\x27\xcb\x1c\x49\x3c\x05\xe0\xaf\xdc\x10\x2b\x87\x71\xcc\x7e\xc0\xc9\xc1\x83\x43\x26\x44\xd1\xcd\x89\x8d\x96\x16\xf3\xa1\xe9\x2f\x09\x99\xf2\x4c\x81\x12\xf5\x4b\x45\x6a\x9b\xfc\x8e\x90\x7a\x24\xdf\x25\xe0\xa0\x35\x05\xa0\x6e\x6f\x6d\x87\x69\x25\xd2\x68\xd6\x01\xb8\xba\xa5\xf6\xe6\xb0\xd3\xb8\x0c\xe6\xb7\x4e\x7b\xc3\xe4\x41\x72\x8f\xb1\xdf\xda\xa7\x68\x50\x5f\x09\x9f\x95\xf8\xc6\xb5\xab\xac\x2e\x76\x40\xa7\x37\xbd\xf8\x08\x54\x8d\x4f\xf5\xc4\xd5\xd5\x3c\xa7\x93\x27\xbf\x19\x1f\xe8\x31\xfe\x73\x57\x5c\x5d\xdd\x62\x43\x17\xc8\x12\x6c\x53\x8a\xe6\xed\xa2\xa1\x4b\x40\xcf\x0e\xc1\xb8\x98\x84\xd9\xc6\x24\xcc\xfb\x9b\x7f\x4b\xb7\x1b\x56\x5c\x98\xcd\x3f\x1a\x3e\x01\xec\xde\x82\x49\x96\x98\x64\x89\x49\x96\x98\x64\x89\x49\x96\x98\x64\x89\x49\x96\x98\x64\x89\x49\x96\x98\x64\x89\x49\x96\x98\x64\x89\x49\x96\x98\x64\x89\x49\x96\x98\x64\xb9\x11\x49\x96\x2f\xf6\x92\xc1\x24\x94\x57\x27\x9f\x92\xfb\x78\x0b\x4e\x29\x34\x7e\xba\xd7\x78\x41\x43\xf9\xca\x2d\xb6\x4f\x81\x01\x4e\x3a\xa5\x36\x21\x7d\x96\x86\xf4\xc5\xb1\x33\x75\x17\x6c\x99\xb2\x06\x9c\xef\xc1\xe6\x38\x5f\xaf\xb1\xa7\x12\xe7\x53\xb2\x47\xac\x0f\xb1\x3e\xc4\xfa\x10\xeb\x43\xac\x0f\xb1\x3e\xc4\xfa\x10\xeb\x43\xac\x0f\xb1\x3e\xc4\xfa\x10\xeb\x43\xac\x6f\x23\xb1\xbe\x8b\x2f\xd1\x08\xd1\x44\x44\x13\x11\x4d\xbc\x88\xd0\xc4\x9f\xdd\x46\xee\xe4\x25\xdb\x73\x45\x7f\x89\xa9\
xfb\xc0\x2f\xda\x0a\x3e\xa8\x6c\xd6\x6c\x7c\x3b\x6d\xfc\x5d\x9a\xdc\xa8\x9d\x3c\x22\xcf\x55\xdd\x99\x5f\x59\xa0\x51\x72\x9b\xe7\x8f\x97\xc9\x14\x68\x74\xbc\xc6\x95\xa2\x7d\xf2\xc8\xd4\x84\x74\x32\xdb\x97\xf0\x37\x5c\x35\xd0\x70\xd5\x40\xc3\xad\x0c\x34\x59\x22\x0f\x71\xec\xef\x14\x39\x01\xd8\xdf\x71\x32\x46\xb2\xab\xc0\xfe\xb4\xe7\x6c\xa5\x35\x8b\xf1\xfd\x1e\x62\xf3\x77\xe4\xf9\x79\x5a\xbb\x7f\x76\x50\xf6\xd8\xca\xcc\x15\x9d\x30\xa4\xa1\xf1\xd9\x1e\xe3\xe3\x5d\xe4\x72\x76\xbe\x7a\x2d\x4f\xb7\x86\xfa\x4e\xf3\x91\x46\xd9\x48\x6d\x02\x7e\xb9\x4d\x75\xca\xcf\x53\xf1\xa6\xf5\x7b\x20\xec\xbb\x06\xd8\xf7\xb1\xe6\xb0\xef\x21\xe3\x80\x80\x7d\xb5\xd5\x23\x93\xa3\xb5\xf7\x50\xdd\x41\x1b\x71\x60\xc4\x81\x11\x07\x46\x1c\x18\x71\x60\xc4\x81\x11\x07\x46\x1c\x18\x71\x60\xc4\x81\x11\x07\x46\x1c\x18\x71\x60\xcc\xf9\x44\x94\x16\x51\x5a\x44\x69\xd7\x0b\xa5\xfd\xea\x1e\x72\x95\xcc\xf9\x64\x5f\x96\x9b\xa3\xa1\xf1\xd9\x3d\xc6\xc7\xb5\xc6\xd8\x77\x34\x6e\x8c\x3d\xc3\x2f\xcb\xdc\x1a\x37\xc5\x16\x3f\x5d\x6c\xb5\xd6\x2b\xa4\x28\x90\x0d\xb1\xc5\xd3\xb6\xa5\x19\xf6\x44\x73\xd8\x6d\x8f\x71\x3b\xc7\xd8\xc4\x7d\xb1\x11\x36\x62\x6c\x88\xb1\x21\xc6\x86\x18\x1b\x62\x6c\x88\xb1\x21\xc6\x86\x18\x1b\x62\x6c\x88\xb1\x21\xc6\x86\x18\x1b\x62\x6c\x88\xb1\x21\xc6\x86\x18\xdb\x06\x63\x6c\x5f\x49\x93\x57\xf2\x2c\xbb\xbc\x1b\xe6\x7c\xa6\xf4\x64\xaa\x9d\xf1\x7b\x69\xe3\x77\xd2\x64\x97\x3a\xd0\xdd\x5d\x00\xba\xeb\x39\x3f\x58\x50\x42\x74\x4c\x80\x75\x32\xd7\x16\x68\x34\x26\xcf\x1c\x99\x9a\xb8\x9f\xfd\xba\x09\xf3\x19\x5d\x72\x8a\xe3\x68\xf7\x93\x71\xc0\xd1\x8e\x92\x23\xe4\xf0\xea\xf2\x19\xe1\x19\x9b\x26\x32\x7e\xb7\x97\x9c\x58\x41\xb2\x29\xcf\x6c\x2c\x05\xae\x1f\xb8\xd1\x72\x91\x2e\xd2\x62\xc2\xbf\x0e\x8d\xf7\xf4\x1a\xef\xdc\xde\x24\x1b\xf5\x3f\xb4\xd8\x88\x7c\x4a\xdc\xe7\x04\xbb\xcf\xa8\x7e\x9f\x36\x65\x41\xde\x07\x17\x34\xc8\x7f\xad\x3f\x03\xcc\x91\x5c\x43\x8e\xe4\xeb\x9a\x83\xb5\x0f\x1b\x67\x05\x24\xdb\x68\x61\x8a\xa4\xc9\xfa\xaf\x09\x53\x28\x11\xde\x45\x78\x17\xe1\x5d\x84\x77\x11\xde\x45\x78\x17\xe1\x5d\x84\x77\x11\xde\x45\x78\x17\xe1\x5d\x84\x77\x11\xde\x45\x78\x17\xe1\x5d\x84\x77\x37\x0e\xde\xfd\x78\x2f\x19\x6e\xde\x9b\x3c\x51\x52\xad\xf5\x23\x37\x7e\xa8\xd7\xf8\x5f\x5d\xe4\xfa\x5a\xcd\xe4\x38\xcc\xb8\xd4\x62\x75\x75\xdc\x58\xa8\x4d\xb0\xe2\xbd\x4b\x75\xda\xd3\xf1\x4a\xeb\xf8\x7e\xd8\x57\xa7\x8d\xa0\xa2\xdb\x1c\x54\x3c\x6e\x8c\xad\xad\x47\x15\x42\x88\x08\x21\x22\x84\x88\x10\x22\x42\x88\x08\x21\x22\x84\x88\x10\x22\x42\x88\x08\x21\x22\x84\x88\x10\x22\x42\x88\x08\x21\x22\x84\x88\x10\x22\x42\x88\xeb\x0b\x21\xfe\x59\x9a\x5c\xc7\x21\xc4\x30\xf2\x03\xa7\xa0\xa8\x18\x8d\x8f\xa7\x8d\x8f\xa6\xc9\x0e\xf1\x73\xc3\xec\x50\xa3\x40\xa3\x19\x7e\x1e\xe6\x86\x6a\xb9\xa1\x5f\xdd\x1d\xb7\x35\xaa\xd3\xd0\xa8\xe4\xe7\xc5\x0f\xaf\x1f\xa4\x4f\xd3\x9c\xf1\x2b\xbb\x8d\xb7\xa4\xe3\x0a\xf8\xdb\x72\xbe\xe7\xb1\xbd\xe0\xfe\xf1\x33\x31\x18\x06\x7e\x3a\xcd\x41\x8a\xa7\x9f\xcf\xdc\x2a\x4e\xe2\x25\xf0\xf7\xd3\x28\xee\x44\x34\xe5\xe7\xc7\x9f\xa6\xb9\x7d\xdb\x32\x83\x19\x0e\x3f\x1e\x20\x37\x72\x31\x5c\x47\xae\x01\x31\x5c\x41\x76\x7f\x28\xb5\x93\x88\x2f\xad\x29\x94\x78\x6b\x6d\x28\x91\x18\x3b\xc5\x2c\xc8\x53\x53\xcd\x81\xc4\x01\xe3\xce\xca\xc6\x3d\x62\xaa\xa7\x01\xf3\x49\x50\x37\x12\xeb\x27\x34\x91\xdc\x2e\x45\x32\x75\x7a\xa6\xbe\x4c\xac\x84\x4c\xa6\xfc\xf0\xe2\x10\x4a\x53\x51\x68\x65\xf6\x65\x12\x1a\xaf\xed\xf6\xa5\xae\x99\x1e\xf5\x17\x16\x98\x56\x70\x43\xe1\x27\x2c\xf8\x11\x33\x6e\xf9\xaf\x42\x3e\xe5\x88\xda\xcc\x8a\x5e\x34\x9d\x20\x70\x96\x6d\xf3\x94\x1f\xc9\x03\xdc\x8d\x61\x36\x9a\x19\xce\xd3\x62\xd1\xb6\x76\x88\xab\x75\x3d\xfc\xf6\x14\x79\x5b\xca\xf8\x89\x54\xf7\x8f
\xc5\x7a\x78\xd4\xf7\x22\x87\x7d\x2e\xcc\xc2\xe3\xa8\x43\x7c\x3f\x61\x63\xc3\x40\x49\x0f\x11\xdc\x8a\x5c\x7c\x2d\xe8\xe4\x00\x60\x2a\x38\xe4\x7b\x54\x3f\xcc\xed\x91\x92\x9f\xb7\xad\x5d\xea\x67\x7d\x6a\x87\xc9\x21\xe3\x80\x75\xb7\x22\x17\xef\x06\x56\x71\xa1\xea\x93\x42\xb5\xba\xd8\x31\x9d\x37\xbc\xf3\xcc\xe4\xaf\x25\xbe\xb1\xd0\x7d\x41\x4a\xed\xd4\x34\xcd\xbb\x01\x15\x3e\x76\x18\x39\x5e\xde\x09\x24\xec\x15\x6b\x7d\xf1\xcc\x62\x8f\x73\x43\xb1\xb3\xe9\x72\x04\x47\xc2\xba\x2c\x8c\xf2\x34\x08\x12\x18\x6b\xc5\x2d\x4f\xd7\xbe\xa5\xeb\x31\x0b\x6f\x45\xb7\x9c\x73\x8a\x21\xb5\xad\xed\x61\x94\x77\xbd\xc4\x2d\x03\x52\x32\xbc\xee\x62\x93\x5b\x0a\xab\x72\x95\x8f\xe9\x97\xa3\xc4\x3d\x9f\x26\x8b\x46\xd4\x1d\xc8\x7b\x3e\x7c\xe6\xcc\x6b\x60\x35\x05\x65\x0a\xe1\xab\x9c\x13\xc5\x3e\x01\xb3\x61\xa5\x67\xe2\x14\x8b\x3e\x44\xab\x94\x09\x01\x4a\xa4\xee\xf3\x6e\x63\xf6\xae\xbe\x81\xfe\x60\x1f\xb9\xa7\xe6\x06\xaa\x5a\xd9\x2d\xfa\xc5\xf2\x02\x75\xa2\xc8\xc9\xcd\x33\xd7\x55\xea\x7b\xe3\x13\xbd\xc6\xf7\xba\x08\x11\xd7\x31\xd5\xf6\x5f\x45\x5e\xbf\x1e\x66\xf3\xa4\x37\x20\x59\x4f\xce\xc2\x78\x23\x6a\xbc\xb6\xc4\xdc\xfa\x63\x08\x00\x60\x0e\xe1\x30\xba\x11\x5d\x88\xb1\x8d\x9e\x04\x9c\xa4\x8d\x6d\x67\x6e\x86\xdb\x09\x23\xe0\xec\x50\xe5\x1c\x31\x26\x57\xbd\x67\x3c\xd1\x7c\x43\x38\x6c\x1c\x12\xfa\x3e\xb9\xb8\xc4\x8e\x59\x29\xe5\xc4\x46\xf1\xd4\x2d\xb5\xf7\xa4\x9d\xc6\x65\xf0\xe0\x18\x84\xc3\x20\x1c\x06\xe1\x30\x08\x87\x41\x38\x0c\xc2\x61\x10\x0e\x83\x70\x18\x84\xc3\x20\x1c\x06\xe1\x30\x08\x87\x41\xb8\xf5\x0e\xc2\x1d\x21\x87\x8d\x43\xd6\x01\x05\x3c\xdd\xa8\xa3\x56\x95\x3e\x5e\x35\x6e\x85\x31\x3c\x8c\xe1\x61\x0c\x6f\x0b\xc7\xf0\xbe\x78\x9e\x8c\x73\x08\xd2\xc9\x33\x4b\xd8\xf5\xbd\x80\x16\x5c\xf6\x5d\x26\x2b\x01\x06\xc1\xd9\x74\x98\x89\xb3\x44\x67\xe7\x7d\xff\x42\x05\xf7\xc8\x9b\xcf\x1b\x3f\xd1\x43\x5e\x55\x73\x98\x27\x17\x87\xba\xef\x6d\xcc\xc7\x7c\x56\x0d\x7f\x8e\x0f\x9f\x60\x92\xc8\x1c\x62\x57\x8f\xd4\x1a\xfb\xec\x50\xe3\x4b\x37\x39\x7c\x98\x45\x24\x0d\x91\xb4\x2c\x22\x69\x88\xa4\x21\x92\x86\x48\xda\x96\x41\xd2\xb2\x9b\x06\x49\x6b\xfb\x4c\x56\x8d\xa4\x65\x11\x49\x43\x24\x0d\x91\x34\x44\xd2\x10\x49\x5b\x7f\x24\x2d\xbb\xa5\x91\xab\x2c\x22\x57\x9d\x43\xae\xb2\x9b\x1d\xb9\xca\x5e\x84\xc8\xd5\xe4\xb3\x29\x52\xe0\x79\x57\xe7\xc9\x13\x90\x77\xf5\x30\x39\x4b\xce\xd4\xed\x7b\x55\x1b\xdf\x5a\x1c\xb2\x1b\xc3\x47\x6d\xe9\x8f\xf5\x4c\xf3\x74\xad\x73\xc6\x43\x22\x5d\xab\x11\x10\x27\x93\xb7\x1a\x4e\x39\x99\xfe\xfc\x4d\xd2\x00\x94\xeb\xe1\xdb\x9a\xe9\x34\xc3\xdf\x0e\xf3\x13\x37\x16\x81\xe3\x80\xd9\x1c\xc9\x93\x59\xa3\x6b\xd6\xcf\x2f\x77\xc3\x7f\xcd\x54\x66\x9a\x4c\xb5\xfb\xc5\x67\xdf\x95\x26\xef\x48\x1b\x6f\x4f\x77\xbf\x4d\xa9\xc4\xef\xa4\xce\x31\x83\x43\xec\x41\xfd\x95\xe9\xa1\x09\xbf\x47\xc7\xc2\x66\xa9\x59\x62\xef\x84\x19\x9b\xb6\x39\xe2\x99\xae\xc7\x11\x07\x3f\x30\xcb\x9e\x02\x2d\xf2\x66\x3e\x58\x9e\x2e\x7b\x26\x4f\x7a\x65\xdb\xb1\xf4\x84\xd9\x86\x06\x16\x83\x80\x02\xa4\xe1\x2f\x9c\x2f\x73\xae\x1c\x80\x71\x59\x0a\xfc\x1c\x0d\x61\x6b\x17\x7a\x42\x6c\x42\x36\x7f\xc3\xdc\x68\x87\xbd\x78\xd8\x1c\x30\x47\x8a\xc5\x61\xd8\xb8\xf3\xc1\xb2\x19\x94\x3d\xe6\x14\xb2\x2f\x5d\x5a\x2b\x62\x38\x9a\xb7\x2e\xe3\x53\xd3\xd5\xda\x4f\xa5\xc9\x5b\xd3\xc6\x5b\xd2\xdd\x6f\x56\x02\xfa\x9b\x14\xd8\x7a\x27\x1d\xcf\x29\xd0\x80\x7b\x2d\x3c\x07\x3a\x0c\xfd\x9c\xeb\x44\xba\x7f\xe0\x80\x83\xe5\x07\x26\xb3\xd3\xa2\x65\x65\x99\x2c\x38\x17\xd8\xfc\xa3\x79\x1a\x52\xa9\x7b\x98\x3e\x94\xc8\x0d\x80\x17\xb3\xd4\x04\xad\x07\xbe\x80\x1f\x98\x43\xfb\x0e\xb2\x73\x03\x27\x07\x10\x13\x73\xa3\xb9\xa6\xd1\xf3\xc4\x79\xe8\x0a\xec\xe1\xf8\x5c\x70\xbb\x05\x00\xc6\xbc\x38\xb9\x7b\x14\xfc\xa
2\xe3\x15\x6c\x3f\x28\x0c\x96\x2e\x14\x06\xcb\x9e\x9b\xf3\xf3\x74\xf0\xb6\x89\x70\x8a\x8d\x62\x5b\x97\xeb\xcf\xaa\x47\x2a\x5e\x4e\x93\x1c\x57\x4d\x8f\x91\x47\x40\x35\x9d\x21\x1d\x58\xa1\x64\x9e\xdd\x64\xc8\x70\xc8\x93\x64\xc7\x28\xb7\x4f\x3b\x74\x27\x97\xdd\x69\x9f\x31\x4b\xce\x93\x9d\x23\x39\xb6\x53\x76\xec\x56\xab\xd7\xb5\x25\x7f\x83\x75\x6d\xe6\xd7\x07\x1a\xe8\xda\xbb\xb8\xb3\x60\xe6\xfc\x22\x73\xfd\xc5\x56\xde\x44\xef\x8e\xf3\x8b\xea\xe8\xdd\x51\x35\xd4\xba\x6a\x60\x78\xf1\xba\x06\xce\x64\xc9\xb1\x55\x24\x3c\x8f\xc1\xb3\x89\xa2\x0a\xc4\xc0\x11\x03\x47\x0c\x1c\x31\xf0\x2d\x84\x81\xa3\x85\xd9\xc4\xc2\xdc\x3c\x41\x82\xaf\xa7\xc9\x57\xd3\xc6\x5f\xa4\xbb\xbf\xa4\x5e\xd5\xaf\xa6\xcf\xe8\x6e\xb9\xeb\x99\x21\x77\xad\xcd\x59\x3a\xc7\x23\xea\x0a\xbe\x8a\xb7\x2c\xf1\x95\x80\xb0\x34\x5b\xd6\xf3\xbd\x01\x8f\x16\x1c\x78\x25\xc2\x33\xd7\x6d\x5e\x0e\x64\xab\xc5\x20\x4c\x09\x77\x61\x81\xe6\x99\x55\x5d\x5c\x8e\x43\xdd\xb1\x6a\x77\x8b\xfd\xc2\x61\x87\xc7\x35\x0b\x81\x93\x83\x75\xe2\xfa\x71\xcd\x5a\xbc\x39\x40\x16\x81\x7c\x2f\xe5\x90\x56\x14\x5c\x3a\xec\x4a\xf9\x40\xe2\x26\x73\xfc\x7b\x93\x43\xd8\x7c\x9e\x0b\xd4\xf1\x6a\xce\xd1\x32\x60\x0a\x53\x30\x83\x5a\x40\x04\xc6\x62\x30\x16\x83\xb1\x18\x8c\xc5\x60\x2c\x66\x4b\xc7\x62\x3e\x9f\x26\x2f\xa5\x8d\x17\xd3\xdd\x9f\x52\xbb\xed\xbb\xd3\x63\x5a\xa5\x72\xa9\x48\x9d\x90\xaa\x4f\x7f\x2a\xf0\x4b\x4e\x01\xf6\xe1\x29\xbf\xe8\xe6\x96\x13\x99\x58\xf2\x75\xc7\xa5\xce\xec\x85\x0f\xd9\x07\x6c\x73\x86\xeb\x11\xbe\x49\x96\xa8\xc7\x96\x69\xbc\x8b\x50\xd3\x0f\x4a\xf3\x8e\x27\x33\xc3\x82\x32\x1d\x84\x5a\x6e\xae\xb2\x2d\x7e\xd4\x32\xe7\x5c\xcf\x29\xba\xcf\x48\xf5\x3d\x4b\x4d\x27\x0f\x81\x0d\x7f\x90\xe3\xdc\xf9\xd8\xb4\xe4\x83\xf7\x84\xf1\x45\xdc\xde\xb6\xcd\x71\x17\x54\x92\x36\x71\x3f\xa8\x7e\xb2\x38\x98\x14\x71\x73\x1f\xac\x3f\x3f\x9a\xb7\xad\xab\xf8\x7c\xc6\xe4\x83\x24\x53\xcc\xde\xd8\x45\xde\xd0\x65\x3c\xdb\xd5\xfd\x7d\x95\x67\xf8\xc5\x6d\xe7\x84\x1e\x64\x4b\x74\xde\x5f\x32\x0b\x4e\x30\xeb\x14\x12\x18\x85\x32\xd4\x68\x30\xe7\x07\x0b\x4c\x16\x35\x67\x7a\xba\xe2\xe6\xf5\x27\x0a\x66\x8d\xb4\x49\x4a\xfc\xa9\x5c\x66\x2f\xe4\xdc\x7c\x6c\x58\xc3\xde\x08\x6e\x8e\x92\x6e\x08\x34\x32\x70\x54\xee\x7e\xb6\x26\x46\xa9\x37\x55\x04\x47\x65\x17\x26\x6f\x66\x9b\x1c\xc2\x02\x5d\xac\x5b\xab\x3d\xfc\x19\x7a\x98\xcf\x03\x7f\x25\x17\x46\x78\xd8\xec\xc9\x3a\xb9\x0b\x85\xc0\x2f\x7b\x79\x76\x16\xa4\xf5\xc1\x49\x15\x82\xe3\xc6\x8a\xb0\x80\x92\x83\xc8\x27\x98\x55\x23\x1d\x36\x7b\x8e\xfb\x01\xd5\x86\x35\x73\x4e\x98\x73\xf2\xec\xe9\x85\x7c\x78\x22\x27\x8c\x17\x72\x73\xba\x6a\xc0\x39\x35\x86\x6d\x5d\x5d\xaa\x5c\x37\xba\x6d\x83\xa1\x4e\x0c\x75\x6e\xd1\x50\xe7\x64\x81\x9c\xe0\xd0\xfc\x38\x19\x05\x68\xfe\x08\x39\x4c\x0e\xad\x02\xbc\x9c\x89\x9c\xa8\x1c\x36\x85\xab\x33\xb5\xe1\xea\x6b\x8c\xab\xf9\xd7\x1c\x6b\xda\x0d\x85\xae\xdb\x5e\x09\x64\xfc\xb6\x4d\x5e\xdd\x52\x65\x01\xef\x31\xd0\xa4\xbc\x40\xf2\x9e\x7c\x73\xc0\x78\x73\x17\xb9\xa9\x1e\xc8\xce\x7b\x0f\x0c\x05\xd4\xc9\x57\x38\xb7\x4d\x70\xf6\xfb\xd8\x25\x75\x50\x76\x18\x74\x3d\x01\xf6\xc9\xe7\x52\x3c\xb6\xb3\x17\x62\x3b\x6c\x95\xbe\x86\x9c\x23\x0f\xad\x38\xd6\x02\x33\x5f\x6b\xc0\xe5\xa6\xda\x2b\xf8\x32\xa3\xab\x40\x23\xf2\xd4\x0f\x34\x5f\xb4\x8f\x19\x8f\x74\x62\xd1\xf2\x7e\x00\xdd\x2f\x90\x66\xeb\x61\x7f\x40\x4b\x45\x27\x47\x57\xb6\x24\x46\xc4\x55\x9b\x65\x55\x64\x2f\x10\x97\x14\x2a\x02\xdf\x9d\x5a\x15\x88\x4d\x62\xf4\x7b\x95\xd1\xef\x7f\x48\x91\x05\x1e\x98\x9e\x23\xf9\x38\x30\xbd\x01\x1a\x6c\xfd\x94\x68\x73\x5d\x59\x2a\x6f\x02\x5d\x99\xf9\xdf\x57\x35\xd3\x95\x3d\xc2\x69\x69\x9a\x11\x74\xac\x61\x64\x7a\xfd\xb5\x63\x67\x82\xd2\xa8\x06\x9b\xa8\x41\x0c\x8c\xac\x
4b\x60\x04\x11\x31\x44\xc4\x10\x11\xeb\x14\x22\x36\xf9\x91\x54\x9b\x61\x81\x69\x9e\x30\xf7\x20\x99\xd0\x12\xe6\x3a\x0c\x35\x98\xb5\x8d\x8f\x5d\xc6\x0e\x2e\xa6\x56\xed\x8f\x2a\xb3\xa1\x6d\x06\x49\xf6\x4d\x57\x34\xb3\x3f\x8e\x94\x9c\x20\x72\x21\xcb\x88\xc7\x27\x56\xe6\xb4\x1d\x2d\x39\x51\x6e\x7e\x33\x18\x25\x37\x54\x0e\x34\x00\x53\xbb\x13\x68\xc8\x6f\xd4\x0f\x2e\xd0\xa0\x40\xf5\xa3\x7b\xf4\xa3\x30\x7d\x5a\x70\x73\x03\x55\xe7\x25\x46\x61\x7f\x2f\x8b\xa3\x6c\x96\xd9\x53\xe4\x04\x99\xac\x70\x17\x87\xc9\xc1\x55\xac\xbf\x29\x28\x5c\x47\x53\xa8\x89\x29\xf4\x81\x6d\xe4\x7d\xdb\x8c\x17\xb6\x75\xff\xa2\xda\x4f\x9e\xdb\xb6\x75\x3c\xc2\x8a\xf8\x32\x93\x33\xe4\xed\x41\x0e\x04\x5b\x9c\x31\x83\x45\x6f\x9d\x85\xdb\x07\xdb\x22\x4f\xf3\x74\x8a\x70\x25\xb3\xff\xf8\xd5\x70\x06\xd8\x62\xa1\xd9\x3b\x19\xfa\xde\x14\xa7\x72\x38\xc9\x3e\x0b\xf1\xf7\x8c\xfc\x58\xe2\x1f\xfb\xea\xbb\xaa\xd9\x97\x52\xe4\xc5\x94\xf1\xa9\x54\xf7\x27\x14\x5a\xfd\x9e\xd4\x71\x3f\xc8\x81\x99\x58\xf0\x41\xec\xbe\x69\xcd\xb1\x9f\x2c\x73\x24\xf1\x14\x00\xf3\x73\x8b\xae\x1c\xc6\x09\x7b\x03\x4e\x0e\x1e\x1c\xd2\x20\x8b\x6e\x4e\xec\xd8\xb4\x98\x0f\x4d\x7f\x49\xc8\x94\xa7\x09\x96\xa8\x5f\x2a\x52\xdb\xe4\x77\x04\xee\x0d\xf9\x2e\x01\x6e\xaf\x29\x00\x75\x7b\x6b\x3b\x4c\x2b\x51\x14\xb3\xa9\x80\xc3\xfa\x24\xc2\xf0\x24\x1b\xef\x0e\x4f\x3e\x48\x26\x8c\xfb\xad\x71\xc5\x9e\x95\x49\xb0\x67\x35\x1c\xad\xf3\x5c\x5a\xc6\xff\x49\x93\x6e\x81\xa0\x97\x23\x3f\xcc\x39\x45\xd7\x2b\x0c\x2e\xee\x63\x93\xdf\x37\x68\xfc\x65\xda\xf8\x72\x9a\x5c\xa3\x1d\x7b\x52\x1c\xeb\x7e\x65\x81\x46\xc9\x7c\x13\x5e\xa6\x95\xb9\xb5\x40\xa3\x91\xf8\x82\xb3\xfc\xfc\x91\xa9\x09\x99\x6c\xdb\xbe\x0e\xb8\xed\x6b\xbe\x51\x22\x0f\xf1\x35\xcd\x76\x34\xb6\xa6\x8f\x93\x31\x92\x5d\x5d\xf3\x0d\xf9\x9c\xad\x94\x75\x19\xdf\x54\x2f\x20\xe7\xfb\x41\xde\xf5\x12\x91\x0b\xe3\x33\x69\xe3\x93\x69\x72\xb9\x7e\xac\x61\xab\x13\xf6\x56\x46\xb5\x93\x37\x75\xbf\x93\xba\x98\x5a\x9b\x1b\xa1\x48\x59\x7f\xa6\x97\xd8\xb2\xdf\x09\xe7\xbe\xaf\xd3\xf5\x84\x7a\xf9\x92\xef\x7a\x51\x68\xfc\x74\xaf\xf1\x42\x57\xdc\xdb\xa3\xdc\x5a\xc7\xe9\x71\x79\x7d\x9b\xfa\x4d\x5b\x70\x01\x6f\x15\x12\xb7\x09\x51\x77\x61\x0b\x6d\x93\x53\x50\x6d\xde\xae\xd2\x0f\x36\xdf\x20\x7a\x8d\x3d\x95\xcd\x60\x94\xec\x93\x11\x4e\x24\xda\x42\xa2\x2d\xa4\xac\xc7\x22\x23\x2c\x32\xc2\x22\xa3\xad\x53\x64\x84\x94\xf5\x48\x59\x8f\xc5\x1d\x58\xdc\x81\xc5\x1d\x58\xdc\xb1\x29\x8a\x3b\xd6\xa1\x57\x22\xb2\xda\x23\xab\xfd\x45\x92\x30\x8f\xac\xf6\x9d\x60\xb5\xff\xbd\xeb\xc9\xbd\x1c\xb8\x9d\xe5\x6d\x34\xeb\xf6\x50\xce\x05\xbe\xf7\x94\x3f\xab\xfa\x28\x87\x10\x6a\x37\x9e\xbb\xde\xf8\xff\xd2\x64\x27\x5c\x0d\x7d\x83\x21\xa3\x9c\x1f\x94\x42\x8e\xf7\xf4\xd1\xc0\xf7\x26\xfd\xd9\x8c\xc5\xce\xca\xb2\x6b\x74\x28\x50\x1c\xe5\x41\xfc\x36\x27\x8a\x9f\x23\xf7\x72\xe4\xee\x6e\x72\x17\x20\x77\x03\xe4\x4e\xd2\x57\x37\xdc\x03\xcf\x63\x2f\x0e\xd9\x62\x4e\x4d\x51\xba\x13\xcd\x01\xb8\x3e\xa3\x47\x00\x70\x30\xba\xc0\xe0\xe4\x0d\x12\x5d\x25\x1b\x67\x92\x77\xff\xda\x4e\x4d\xe2\x3d\x32\x67\xbb\x99\xd0\xef\x10\x27\xae\xa7\xdc\xb3\x80\x7a\x57\xc4\xd6\x57\x22\x78\x0c\xa6\x63\x7a\xf5\x2a\xd3\xab\x9f\x4f\xd5\x8f\xcd\xac\x4d\x19\x8c\xf0\xac\xed\x61\x72\x30\xce\xda\x5e\xd9\x10\x2d\xa6\x41\xb7\xa6\x55\x6a\x74\x2e\xaf\xa5\x61\xb2\x7f\xb7\x5b\x53\x1b\x7b\xab\xd2\x87\x9a\xe9\x8f\xdb\x20\x18\xbd\x5e\xda\x03\xb3\x82\xb6\x9a\x22\xc3\xac\x20\xcc\x0a\xda\xe0\xac\xa0\x4e\x5b\x89\x4d\x13\x7d\xda\x6a\x46\x4e\xee\x27\xfb\x8c\xbd\x96\xad\x00\x84\x6b\xf4\xac\x1d\x71\x45\x75\x7a\xce\xc5\x07\x3b\x18\xff\x9c\x26\x37\x72\x37\x86\xc7\xa0\x2b\x6b\x66\x8d\xff\x9e\x36\xbe\x90\x26\xaf\xe0\x47\x55\x0a\x6d\xdd\xec\x9f\x1b\x0b\x34\
x82\xe8\x7c\x28\x72\x61\x31\xf1\xa7\x76\xe2\xcf\x37\x76\x11\x4b\x26\xa3\x54\x3a\x8e\xaf\x1f\x94\x09\xf6\xc6\xc7\x76\x75\x7f\x67\x5b\x9c\x80\x72\xa7\x74\x59\xe4\x09\xd5\x46\x87\xb2\x2f\x32\x37\x8b\x93\x2b\x12\x47\x8e\x8b\x4b\xdb\xec\xaf\x1c\x23\xf7\x91\x7b\x2b\x76\xfd\x7e\x92\xa9\xab\x02\xd8\x23\x31\xc1\xa9\x79\x4d\xbe\x33\x45\x8e\xf0\x97\x71\x0f\xd9\x0f\x2f\xc3\x26\x2b\x1a\x81\x64\xb9\x79\x79\x98\x1c\x8a\xcd\xcb\x95\x8e\xb1\xfa\x5a\x65\x30\x3c\x5b\xcb\x27\xe1\xaa\x27\xbe\x69\x0d\x43\xd4\xb2\xc8\x24\x5a\x3e\x8d\x2d\x9f\x49\x74\xe1\xea\xb8\x70\xf7\x90\xfd\xc6\x3e\x6b\xaf\xda\x8a\xae\xd3\x37\x30\xb5\xee\xd6\x23\xc3\xb4\x8f\x4c\xb7\xd6\xfd\x91\xa7\xe4\xb5\x48\xd2\xf0\x4b\x7d\xc6\x4b\xdb\x1b\x30\x21\x7f\x22\x25\xc0\x4c\x2d\x3f\xcf\x93\x01\x88\xd6\x1a\x42\xb6\x25\x79\xaf\x3f\x8e\x41\x42\x9c\x55\x44\xac\xdc\x88\x2e\xc4\xab\xb0\x27\x11\xcf\xd6\xc6\xb6\x33\xc3\x4b\x0d\x8a\x4d\x2e\xea\xae\x94\x1b\x92\x12\x58\xdf\x8c\x5c\xe2\x66\xe4\x46\xf2\xa3\x60\xf6\x20\x66\x0f\x62\xf6\x20\x66\x0f\x62\xf6\x20\x66\x0f\x62\xf6\x20\x66\x0f\x62\xf6\x20\x66\x0f\x62\xf6\x20\x66\x0f\x62\xf6\xe0\xfa\x66\x0f\x6e\xea\x92\x5d\x4c\x14\xc4\x44\x41\x4c\x14\xbc\x88\x12\x05\xdf\x98\x21\x0f\xad\x80\xa4\x96\xa3\xa0\x0b\xe5\xa8\x39\x06\xfa\xfe\x3e\xe3\x8b\xdb\x9b\x91\xdd\xfc\x6e\x2b\x40\xe8\x49\x71\xbb\x4d\x0b\x83\x1e\x69\x04\x83\xc2\x83\x36\x7a\x04\x44\x42\xab\x91\xd0\xd7\x37\x07\x3a\x1f\x31\x1e\x5e\x03\x4f\x53\xa3\x17\x42\x9a\x02\xb1\x08\x86\x22\x18\x8a\x60\x28\x82\xa1\x08\x86\x22\x18\x8a\x60\x28\x82\xa1\x08\x86\x22\x18\x8a\x60\x28\x82\xa1\x08\x86\xae\x33\x18\x3a\x41\xee\x37\xc6\xad\x51\x05\x86\xf6\xea\x60\x68\x23\x1f\x0f\xa1\x50\x84\x42\x11\x0a\x45\x28\x54\x83\x42\xff\x2d\x4d\x6e\x16\x50\x68\xc9\xa5\x4f\x47\xd4\x83\xb7\xa5\xe5\x80\x1a\x7f\x9a\x36\xfe\x38\x4d\xae\x4a\x1c\x7f\x72\xb1\x41\xc1\xc1\xcd\x05\x1a\x8d\xe8\x67\x9f\xc5\x92\x83\x3a\x25\x07\x3f\xf5\x38\xd9\x5f\xbb\xd6\xa3\x2e\x11\x26\x9c\x67\xfc\xf9\x63\xc6\xbf\xde\x41\x76\xa9\x1a\x90\xee\xdb\xb8\xb9\x1f\x08\x43\x45\xee\x64\x12\x4e\x06\x04\x32\x73\x03\x3b\x49\xd6\x82\x68\xb4\x95\xec\x97\x4d\x8e\xc8\x66\x11\x7d\x44\xf4\x31\x8b\xe8\x23\xa2\x8f\x88\x3e\x22\xfa\xb8\x65\xd0\xc7\xec\xa6\x41\x1f\xdb\x3e\x93\x55\xa3\x8f\x59\x44\x1f\x11\x7d\x44\xf4\x11\xd1\x47\x44\x1f\xd7\x1f\x7d\xcc\x6e\x69\x84\x2f\x8b\x08\x5f\xe7\x10\xbe\xec\x66\x47\xf8\xb2\x17\x21\xc2\x37\xf9\x08\x39\xca\xb1\xad\x83\xe4\x1e\xc0\xb6\xf6\x12\x9b\xf4\xd7\x65\x5f\x10\x38\xd4\xe2\x90\x0d\xa8\x50\x2b\x28\xd6\x53\x37\xd7\xce\x1c\xdb\x61\x6c\xe7\x0d\x3d\xa6\x9b\x27\xb6\x0d\x1a\x03\x22\x8f\x2d\x01\x84\xc9\xde\x1e\x90\x3f\xa7\x57\xe6\x5a\x5f\xde\xa9\xa3\x5f\x57\xf2\x6d\xc7\x74\x3c\x81\x74\xdd\xc4\x7f\xe8\x34\xd6\xc5\xa1\xa9\xfb\xc8\xbd\x64\xb8\x82\x67\x23\x43\x7a\x5b\x95\x31\xb2\x69\x21\x2d\xe0\x2a\x39\x25\xbe\x94\x22\x87\xf9\xd7\xbd\x9f\xec\x83\xaf\xbb\x9f\xac\x60\xe5\x91\x63\x9c\x9d\xe5\x10\x39\x10\xb3\xb3\xac\x6c\x84\x11\xde\xf2\x74\x98\x1c\xd4\x5a\x9e\xae\x6c\x88\xd5\xab\x97\x92\xdf\x21\xf5\x92\xf9\x4c\xbf\xae\x5e\xba\x45\x07\x5b\xad\x1d\xb0\x3f\x27\x34\x8d\x68\x9d\x2e\x35\xcd\xa8\x3a\xa5\xb3\x3a\xa7\x33\x8d\xcf\x11\x5f\x45\x7c\x15\xf1\x55\xc4\x57\xb7\x0e\xbe\x8a\xb6\x57\x13\xdb\x6b\xf3\x00\xd0\x5f\x4f\x93\xaf\xa6\x8d\xbf\x48\x77\x7f\x49\xbd\xaa\x5f\x4d\x9f\xd1\x5d\x3e\xd7\x33\x43\xee\xb6\x99\xb3\x74\x8e\x47\x6b\x15\x34\x12\x6f\x59\xe2\x2b\x01\x61\x69\x56\x9e\xe7\x7b\x03\x1e\x2d\x38\xf0\x4a\x84\xd7\xa7\x5b\x83\x1c\x24\x55\x8b\x41\xd8\x04\xee\xc2\x02\xcd\x33\x7b\xb3\xb8\x1c\x87\x51\x63\xd5\xee\x16\xfb\x85\x33\xc8\x5b\xef\x17\x02\x27\x07\xeb\xc4\xf5\xf3\x6a\xe3\x89\x37\x07\x88\x50\xcb\xf7\x52\x0e\xd9\x24\x75\x41\x39\xec\x4a
\xf9\x40\xe2\x26\x73\xfc\x7b\x93\x43\xd8\x7c\x9e\x9c\x8c\xb4\xc6\x1c\x2d\x03\xa6\x30\x05\x33\xa8\xe5\xe4\x22\xce\x8f\x38\x3f\xe2\xfc\x88\xf3\x23\xce\xbf\xa5\x71\xfe\xcf\xa7\xc9\x4b\x69\xe3\xc5\x74\xf7\xa7\xd4\x6e\xfb\xee\xf4\x98\x56\x77\x5c\x2a\x52\x27\xa4\xea\xd3\x9f\x0a\xfc\x92\x53\x80\x7d\x78\xca\x2f\xba\xb9\xe5\x44\x96\x8f\x7c\xdd\x71\xe1\x32\x7b\xe1\x43\xf6\x01\xdb\x9c\xe1\x7a\x84\x6f\x92\x25\xea\xb1\x65\x1a\xef\x22\xd4\xf4\x83\xd2\xbc\xe3\xc9\xac\xa3\xa0\x4c\x07\xe7\x9c\xa2\xb4\xfe\x2d\x7e\xd4\x52\xec\xbb\x81\xba\x95\x93\x07\xd0\xdc\x1f\xe4\x18\x6a\x3e\x36\x2d\xf9\xe0\x3d\x61\x7c\x11\xb7\xb7\x6d\x73\xdc\x05\x95\xa4\x4d\xdc\x0f\xaa\x9f\x2c\x0e\x54\x44\xdc\xdc\x07\xeb\xcf\x8f\xe6\x6d\xeb\x2a\x3e\x9f\x31\xf9\x20\xc9\xf4\xa5\x37\x76\x91\x37\x74\x19\xcf\x76\x75\x7f\x5f\xe5\xb0\x7d\x71\xdb\x39\xa1\x07\xd9\x12\x9d\xf7\x97\xcc\x82\x13\xcc\x3a\x85\x04\xd8\xa0\x0c\x35\x1a\xcc\xf9\xc1\x02\x93\x45\xcd\x99\x9e\xae\xb8\x79\xfd\x89\x82\x59\x23\x6d\x92\x12\x7f\x2a\x97\xd9\x0b\x39\x37\x1f\x1b\xd6\xb0\x37\x72\xce\x72\x29\xdd\x10\x3a\x9d\xc3\x51\xb9\xfb\xd9\x9a\x18\xa5\xde\x54\xd1\x01\x95\xb9\x96\xbc\x99\x6d\x72\xb0\x08\x74\xb1\x6e\xad\xf6\xf0\x67\xe8\x61\x3e\x0f\xfc\x95\x5c\x18\xe1\x61\xb3\x27\xeb\xe4\x2e\x14\x02\xbf\xec\xe5\xd9\x59\x90\x32\x06\x27\x55\x08\x8e\x1b\x2b\xc2\x02\x4a\x0e\x22\x9f\x60\x56\x8d\x74\xd8\xec\x39\xee\x07\x54\x1b\xd6\xcc\x39\x61\xce\xc9\xb3\xa7\x17\xf2\xe1\x49\x82\x30\x5e\xc8\xcd\xe9\xaa\x01\xe7\xd4\x18\xb6\x75\x75\xa9\x72\xdd\xe8\xb6\x0d\x86\xd1\x30\x8c\xb6\x45\xc3\x68\x93\x05\x72\x82\x83\xd6\xe3\x64\x14\x40\xeb\x23\xe4\x30\x39\xb4\x0a\xf0\x92\xf7\x69\x69\x0a\x20\x67\x6a\x03\xc8\xd7\x18\x57\xf3\xaf\x39\xd6\xb4\xad\x82\xc9\x35\xc8\xc1\xeb\x03\xcb\x17\x63\x13\x82\xff\x94\x21\x59\x9e\x98\xee\xd1\x68\xc9\x0f\x2e\x30\x57\xad\x69\x72\xba\xeb\x15\x02\x1a\x86\x8a\x34\xdf\xf8\xbd\x3e\xe3\x7b\x69\x72\x45\x3c\x06\xa0\xe9\xd0\x56\x2d\xe9\xf7\x4e\xf0\x2b\x33\xb7\xb2\x63\xa7\xd4\xe9\x7a\xe4\x4e\x9c\xd2\xe6\x5e\x6a\x8f\xf1\x08\xc6\x5e\x88\x60\xb0\xb5\xb8\x8f\xec\x25\x76\xdd\xd8\x85\x26\x8d\xc5\x21\x5b\x4c\x69\x0d\x04\xf5\x05\x1a\x91\xa7\x1e\x6e\xbe\xe6\xee\x36\xee\x12\xeb\xac\xea\x7d\x88\xb5\x26\xe7\xa2\x2f\xcd\xee\x4f\xef\xac\x14\xfe\x4d\xb2\x5d\x41\x6d\xf9\xdf\x2e\x0e\xaf\xe3\x2b\xc8\x8e\x91\x2c\x39\x56\x11\x3e\x5d\xf1\x3b\x40\x20\x0f\x83\xa8\xab\x0c\xa2\xfe\x7c\xaa\x1d\x3a\xe0\x38\x0f\xa5\x1e\x25\x47\xe2\x50\xea\xfa\xea\x12\x68\x76\xd1\x9a\x2e\xa9\xb1\x7f\x35\xd3\x2b\x99\xb7\x5e\x55\xa9\x4b\xc4\xf6\xc9\x96\xb9\xd4\x1f\xb7\xf1\x9f\xd6\x53\x7d\x74\x26\x12\x8a\xea\xa4\x89\x3a\x41\x34\x7e\x5d\xd0\x78\x84\x61\x10\x86\x41\x18\xa6\x53\x30\xcc\xe4\x47\x52\x64\x9a\x67\x30\x3d\x48\x26\xb4\x0c\xa6\x0e\x79\xa4\xed\x75\x7c\xa5\x49\xd0\x31\xff\xe1\x29\xb3\xb6\xb1\xb1\xcb\xd8\xc1\x5f\x00\xc9\x7e\x77\x77\xa5\x55\xb0\xa7\xaa\x19\x6b\x6d\x57\xc3\x82\x36\x81\xeb\x64\x29\x60\x07\xd6\xad\x66\x9f\x60\x07\x56\xec\xc0\xba\xc1\x1d\x58\xd7\x05\x5b\x6a\xda\x86\xb5\x63\xbb\x43\xe3\x96\xac\xe2\x8a\x4b\xa2\x25\xeb\x0f\xf6\x91\x01\x8e\x86\x86\x91\x1f\x38\x05\xaa\x41\xa1\xe2\x97\x5c\xd1\xd1\x81\xcf\x97\x7a\x8d\xef\xa4\x09\x11\x07\xd9\xb6\x78\x4b\x0d\xd4\x73\x86\x1f\x1e\x65\xd7\x66\xae\x67\x27\x88\x5f\xce\x0e\xe9\x87\xda\x0c\x79\x3e\x41\x46\xf9\xb2\xbc\x97\x0c\xc3\xb2\xdc\x4f\xf6\x91\xbd\x75\x97\xa5\x7c\x64\x30\x3b\xe2\x49\xad\x15\xf4\x7c\xa4\xf9\xb2\x3c\x60\xdc\x2d\x96\x65\x52\xec\x62\x4d\x26\x66\x93\x80\x3d\x5f\xdc\x99\x10\xbd\x55\x1b\xf3\x4c\x48\xff\x46\x71\xce\x3a\xbc\x80\x2c\xb0\xcb\x54\xd8\x02\xab\x78\x03\x68\x03\x20\xe4\xb9\x4a\xc8\xf3\x3d\xa9\xf6\xe8\x80\x07\x38\xe8\x39\x42\x8e\xc6\xa0\x67\x67\xb4\x49\x07\xd5\x45\x1
3\x48\x35\xf3\x6f\x57\x26\xb4\xc9\x75\x12\xf5\x4c\x2a\x90\x1b\xf8\xcf\xeb\xa1\x3f\x10\xf1\x44\xc4\x13\x11\x4f\x44\x3c\x11\xf1\x44\xc4\x73\xc5\x88\xe7\x0b\x6d\xda\xfa\x27\x38\x6c\x9a\x25\xc7\x34\xd8\xb4\x33\x7b\x7f\x53\x14\xb2\x93\xd6\x41\xf6\x5f\x76\x27\xb6\xff\x3b\x9b\xc0\x9b\x09\xa3\xa0\x1b\x7c\xf0\x0e\xdb\x04\x88\x6d\x6e\x35\x4b\x04\xb1\x4d\xc4\x36\x37\x18\xdb\x5c\x27\x10\xa9\x29\xba\xd9\x41\xcd\x3f\x79\x88\x1c\x30\xee\xb6\xee\x52\x48\xe5\xab\x74\x7c\x53\xbf\xac\xf3\xc4\xea\xc6\xcb\xbd\x64\x68\xd0\x29\xb9\x83\x8b\xb2\x21\x5d\x9d\x1c\xcc\x92\x9f\x8f\xe8\x42\x09\x0a\x8f\x8d\xff\xbb\xd7\xf8\x40\x17\xd9\x91\xf3\x03\xd8\xbb\x96\x44\x7d\x91\x97\x77\x17\xdd\x7c\xd9\x29\x26\x7a\xd0\xa9\x5a\xbe\x29\x3f\x7f\x46\x0c\xd1\x96\x56\x73\x76\xe6\x76\xb8\x60\xd4\x0f\xa8\x1e\xe5\xd3\xee\x73\xc2\x0d\x37\x3b\x03\xf1\xe4\x85\xfa\x61\xdd\xb6\x37\x8b\x93\xeb\xff\xb6\xda\xeb\xff\x72\x83\xc0\xc4\x38\xbf\xce\xc9\xe6\xdf\x40\xc6\xe8\xe5\xab\x5d\x93\x39\xa9\x91\x05\x66\x59\xd8\xe7\x0d\x99\x96\xb1\xcf\x1b\x32\x81\x20\x13\x08\x32\x81\x6c\x25\x26\x10\xec\xf3\x86\x7d\xde\x90\x81\x01\x19\x18\x90\x81\x01\x19\x18\x36\x05\x03\xc3\x45\x98\x28\x85\xed\xdf\xb0\xaa\x1d\xdb\xbf\x5d\x3c\xe4\xd0\xc6\x73\x7d\x64\x84\x27\x36\xf2\x50\x63\x2b\xe8\x62\xde\x0d\x83\x32\xf8\xfe\xb3\xe5\x7c\x81\x46\xa1\xf1\xd1\x5e\xe3\x0f\xbb\xc8\x2e\x3e\xc4\x93\x8b\x43\xdd\x3f\xd0\x32\xce\x38\xa6\x06\xcb\xc2\x60\x6d\xc2\x1b\x6d\xb8\x80\x07\x3f\x2b\x10\xc7\xca\x3b\x5e\x1c\xc8\x63\xdb\x01\xc6\xa6\xc8\x7b\x4b\xc8\xe3\x6b\x9a\x23\x8f\xf7\x18\xfb\x05\xb6\xc8\xd7\x07\x51\x38\x64\xe5\x9b\x48\x82\xef\x88\x42\x22\x0a\x89\x28\x24\xa2\x90\x88\x42\x22\x0a\x89\x28\x24\xa2\x90\x88\x42\x22\x0a\x89\x28\x24\xa2\x90\x88\x42\x22\x0a\x89\x28\x24\xa2\x90\x88\x42\x5e\x32\x28\xe4\x0b\x8f\x92\x2b\x64\x6e\xa3\xe7\xe7\x69\x68\xfc\xf4\xa3\xc6\x97\xef\x88\xf3\x16\x2d\x6e\x5d\x06\x62\x5f\x94\x8a\xd3\x9f\x33\x2f\xb8\x5e\xde\x3c\xe5\xe7\x69\xe6\x15\xec\x1c\x91\x61\xe8\xe7\xe9\x26\x87\xf4\xb2\x08\x6f\x21\xbc\x85\xed\xb6\x10\xde\x42\x78\x0b\xe1\xad\xad\x03\x6f\x6d\xa2\x6e\x52\x9b\x06\xde\xc2\x36\x47\x08\x6f\x21\xbc\x85\xf0\x16\xc2\x5b\x1b\xd1\xe6\x68\x4b\x63\x45\xd8\x87\x65\x2b\xf7\x61\xc9\x5e\x84\x58\xd1\xe4\x39\x72\x2f\x4f\x85\xba\x9b\xdc\x05\xa9\x50\x03\xe4\x4e\xd2\x57\xb7\xee\x38\xe7\x07\x50\x74\x7c\xca\xcf\x43\x8d\x69\xd3\xb4\xa7\xe3\xcd\x33\x9a\x6e\x33\x6e\x8d\xab\x25\x79\x36\x13\x1b\x3e\xc9\x29\x55\xbf\x69\x39\x64\x4e\x59\xff\x65\x67\x0c\x6f\x5d\xc1\x77\x19\xd3\xe1\x48\xd6\x55\xfc\x9f\x1d\xc0\xb2\x38\xf4\x74\x98\x1c\x22\x07\x2a\xe8\x1a\x7a\xc8\x1d\x2d\xc9\x10\xb9\x19\x90\x73\x6e\x95\x9c\x73\x5f\x48\x91\x83\xfc\xd3\x1d\x22\x83\xf0\xe9\xf6\x91\x56\x97\x1d\x39\xc2\x89\xe6\xee\x21\xfb\x63\xa2\xb9\x15\x5c\x7e\x1f\x27\xab\x39\x40\xee\xd6\xc8\x6a\x56\x70\xfd\xc6\x6b\x8d\x92\x1f\x46\x24\xf3\xfe\xfe\x58\x6b\x5c\x2f\x18\x86\x34\xba\x26\x7f\x8e\x6b\x90\x6e\x7e\x88\x6b\x90\x51\x75\xbc\x03\xba\xa4\x33\x54\x74\x88\x8b\x22\x2e\x8a\xb8\x28\xe2\xa2\x5b\x07\x17\x45\xb3\xaa\x89\x59\xb5\x79\x80\x63\xa4\x01\x5d\x17\x1a\x50\xc4\xe7\x11\x9f\x47\x7c\x1e\xf1\x79\xc4\xe7\xb7\x34\x3e\x8f\x6c\xd0\xc8\x06\x8d\x6c\xd0\x9d\x62\x83\xc6\xf0\x17\x86\xbf\xb6\x6a\xf8\x6b\xb2\xb0\xce\x6d\x1a\x33\xb5\xc1\xdd\x6b\x0c\xd1\x47\x39\xd6\xb4\xa4\x5d\x80\x72\xfb\x79\x68\x7f\xe7\x2a\xd2\x9d\xc8\xd5\x16\x1d\xaf\x06\x4b\x81\xff\xf4\xb2\xf1\x73\x57\x19\x6f\x4a\xc7\x18\xf5\x1d\x39\xdf\xf3\x98\xa1\x70\xff\xf8\x99\x38\x93\x39\xf2\x4d\x38\x5b\xe1\xd5\xd7\x8b\xd3\x38\x60\x7d\x3f\x8d\xd8\xaf\x53\xec\x94\x7d\xdb\x32\x83\x19\x4e\x82\x70\x80\xdc\xc8\xdf\xca\x75\xe4\x1a\x78\x2b\x57\x90\xdd\x1f\x4a\xed\x24\x62\x72\x4d\x31\xfa\xe9\xe6\x22\x
1d\x34\x06\x6a\x89\x14\x66\x22\xb0\xe9\x24\x5e\x7f\x6b\xed\x57\x4a\x8c\x9d\xe2\x89\x48\x77\x4d\x71\x4c\x3d\xd4\xba\x38\xa6\xca\x1d\x11\x47\xf3\xa9\x77\x42\x62\xd6\x8f\x6a\xe2\xd8\xa3\xc4\x71\x7a\xa6\x91\x3c\xba\x93\xf2\xf0\xc3\x0d\x5c\x1f\xb5\x39\x73\xeb\x3c\x79\x0b\x42\xce\xfc\x84\x26\x90\x5e\x29\x90\xb1\xf1\x13\xe3\x67\xc6\x1b\x88\xe4\xc6\x84\x48\x78\xf4\xe4\xd2\xf9\x68\xf6\xfd\xa4\x26\x94\x3e\x29\x94\xd3\x53\x67\x26\x4e\x9f\x9a\x69\x20\x95\x9b\x12\x52\x11\x37\xbf\x74\xc4\x32\x5c\xf3\xe3\x79\x60\x7c\x64\xac\xe5\x8f\xe7\x01\xea\xe4\x2f\x1d\x6d\x92\x7d\x8b\x26\x90\x1e\xa5\x4d\x46\xce\x8c\x3e\xd0\x40\x22\x37\x24\xd5\x09\x74\x21\xde\xb0\x35\xd2\xc2\x53\xc6\x22\x69\x41\xcc\x93\x47\xc8\x61\xe3\x90\x75\x40\xd5\x6b\xde\xa8\x93\xd6\x57\xde\xa7\x9a\xb8\x7e\x96\x9c\x37\x9e\xe8\x7e\x4c\x1a\x0c\xd9\x29\x27\xe2\x48\xd0\x3c\x35\x1f\x9a\x3e\x61\xb2\x31\xc1\x08\x0c\xa9\xb2\xe2\x72\xe5\x20\x60\xde\x32\x17\xb3\xb4\xff\x23\xdf\x64\x06\x82\x6d\xc1\x3c\x74\x1b\xe2\xe7\x7b\xc9\x60\x05\xeb\xd4\x2c\x8d\x1c\x49\x3d\x55\x93\x63\xea\xef\x7b\x8c\xaf\x77\x91\x57\x28\x8e\x29\xb8\x60\xe3\x89\xa6\x0e\x25\x88\xa6\x60\x52\x75\x38\xa6\x8e\xfb\xc1\x48\xb1\xa8\xa8\xa8\xda\xd7\xff\xe5\x12\xe2\x9c\x7a\xa2\xf9\x27\x73\xd8\x38\xb4\x0a\x3a\x29\x78\x31\xa4\x25\x4e\x2b\x24\x9e\xc2\xca\x3c\x24\x9e\xc2\x0c\x14\xcc\x40\xc1\x0c\x94\x2d\x94\x81\x82\xc4\x53\x48\x3c\x85\x91\x7f\x8c\xfc\x63\xe4\x1f\x23\xff\x9b\x22\xf2\x8f\xb4\x50\x48\x0b\x75\xb1\xc4\x3a\x91\x16\xaa\x13\xb4\x50\x1f\x7f\x5c\xc2\x84\xc1\xac\x93\xb3\x25\x5c\x04\x43\x89\x16\x9e\x83\x8b\x43\x83\xb9\x62\x39\x8c\x98\xcb\x52\xa4\xa1\xf1\xa6\xc7\x8d\x17\xf6\x90\x6b\xd9\x05\x23\xfa\xf9\x10\x48\x68\xcc\x22\x35\xca\xc7\x99\xf6\x8b\x34\x03\x84\x53\xd3\x95\x83\x9c\x1d\xd2\xce\xd9\xe4\xf8\x1d\x12\x4c\x21\x8c\x85\x04\x53\x08\x63\x21\x8c\x85\x30\xd6\x16\x82\xb1\x36\x51\x9d\xd0\xa6\x81\xb1\xb0\x80\x05\x61\x2c\x84\xb1\x10\xc6\x42\x18\x0b\x09\xa6\x30\xc3\xfe\xd2\x41\x9d\x36\x7d\x86\xfd\x45\x49\x30\xf5\x04\x19\xe5\x79\x4f\xf7\x92\x61\xc8\x7b\xda\x4f\xf6\x91\xbd\x75\x69\x62\x00\x9a\x5a\x1c\xb2\x35\x64\xa8\x25\x9e\xa9\x26\xfc\x50\x4f\xe5\x9b\xa7\x42\x8d\x18\x47\x45\x2a\x54\x5d\x7c\x4c\x64\x47\x69\x93\x4b\x66\x63\xbf\x6d\x57\x1d\xac\xec\x5a\x45\x49\xa5\xc3\x62\xb7\xf3\x5f\xd7\x09\x18\xe3\x38\x56\x96\x1c\x23\xf7\x55\xb0\x55\xd9\xa4\x7f\x25\x2f\x04\xd9\x15\x90\xb4\x6a\x95\xa4\x55\x7f\x95\xaa\x5f\x24\x74\x94\xeb\x89\x83\xe4\x1e\xd0\x13\x7b\xc9\x0a\x97\x25\x19\xe3\xa4\x56\x47\xc8\xe1\x98\xd4\x6a\xe5\xa3\x8c\x73\x6e\xab\xfb\xc8\xbd\x1a\xb7\xd5\x8a\x87\x69\xae\x97\x80\x81\xaa\x45\xbd\x54\x23\x7d\x79\x25\x3a\x2a\xf3\x4f\xfd\x75\xf4\x92\x59\x93\xf4\x4a\xd7\x51\x36\x3f\xa3\x96\x8e\x52\xd7\x74\x4e\x5b\x21\x1f\x16\xc2\xb8\x08\xe3\x22\x8c\x8b\x30\x2e\xf2\x61\x21\x1f\x16\xf2\x61\x21\x1f\x16\x86\x13\x30\x9c\x80\xe1\x04\x0c\x27\x60\x38\xa1\x2d\xe1\x04\xe4\xc3\x42\x3e\x2c\xe4\xc3\x42\x3e\x2c\x8c\xd6\x61\xb4\xae\xed\x7c\x58\x6d\xa5\xbd\x6a\x1a\x02\x5b\x09\x1f\x56\x6b\xb0\x73\xe3\x68\x57\x53\x20\xba\xfd\x6c\x59\xdf\xee\x21\xd7\x2b\xb6\x2c\x45\xfd\x20\x28\xb3\x8c\x97\x7b\x8c\x2f\x6a\x04\x26\x37\x06\xd4\xc9\x57\x38\x9e\x8a\x2f\x22\x73\x0d\x3b\x2a\x7a\xc2\xc8\x1f\xdb\x0b\x5e\x4f\x3e\xcc\xdb\x6c\xec\x85\x36\x1b\x6c\x01\xd8\xa4\x9f\x64\x9a\x77\xc8\x90\xd3\x69\xfa\xc6\x1f\x6c\xfe\x16\x7b\x8d\x3d\x0d\x29\x96\xd4\xbd\x9e\xba\xa9\xf6\xf2\xb9\xcc\xe8\x2a\xd0\x88\x74\xbf\x4b\x6b\xb0\x73\x4b\x40\x4b\x45\x27\x47\xeb\x0a\xf7\xdf\x89\x13\x3a\x2a\xdf\xec\x31\x72\x1f\xb9\xb7\x22\x94\xb9\x22\x01\x23\x2c\x86\x81\xcc\x55\x06\x32\xdf\x99\x5a\xeb\xc7\x9d\xe5\xd1\xca\xc3\xe4\x50\x1c\xad\xdc\x18\x05\x51\x5f\x29\x24\x08\xb5\xea\x2b\x88\x52\
x39\x22\x99\x7f\xbc\x32\x56\x10\x86\xb0\x47\x1d\x4d\x27\x5c\xa7\x37\xd1\xe9\x94\x4a\xe8\x4c\xbc\x10\xd5\x44\x13\x35\x81\x98\xf5\xba\x60\xd6\x08\x56\x20\x58\x81\x60\x45\xa7\xc0\x8a\xc9\x8f\xa4\xda\xec\xb1\x4d\xf3\x54\xa2\x07\xc9\x84\x96\x4a\xd4\x61\x2f\xd0\xac\xbd\x4b\xef\x32\x76\x70\x31\x91\xb6\x1a\x05\xd9\x3f\xde\xad\xd1\xad\x96\x9c\x20\x72\x21\x6d\x83\x03\xbe\x75\xfd\x83\x6b\x4b\xcc\x27\xef\x98\x29\x70\x43\xe5\x40\x03\x70\xbf\x3b\xd9\x9f\xc3\x37\xea\x07\x17\x68\x50\xa0\xfa\xd1\x3d\xfa\xd1\x30\x0a\x9c\x88\x16\xdc\xdc\x40\xd5\x79\x89\x51\xd8\xdf\xcb\xe2\x28\x9b\x65\xf6\x14\x39\x41\x26\x2b\x3c\x93\x61\x72\x70\x15\x6f\x1d\xe8\x34\xd1\x00\x69\x66\x80\x7c\x60\x1b\x79\xdf\x36\xe3\x85\x6d\xdd\xbf\xa8\xb4\xf8\x73\xdb\xb6\x8e\x9f\x52\x11\x70\x63\x72\x86\x44\x26\x08\x0a\xb3\xc5\x19\x57\xa5\xf7\xd6\x59\xb8\x7d\xb0\x19\xf1\xbc\x37\xa7\x08\x57\x32\xab\x8b\x5f\x0d\x67\x80\x05\x14\x9a\xbd\x93\xa1\xef\x4d\xf1\xf2\xec\x93\xec\xb3\x10\x7f\xcf\xc8\x8f\x25\xfe\xb1\xaf\xbe\x03\x95\x7d\x29\x45\x5e\x4c\x19\x9f\x4a\x75\x7f\x42\xc1\x77\xef\x49\x1d\xf7\x83\x1c\x18\x67\x05\x1f\xc4\xee\x9b\xd6\x1c\xfb\xc9\x32\x47\x12\x4f\x01\xb8\x27\xb7\xa3\xca\x61\x9c\xc1\x34\xe0\xe4\xe0\xc1\x21\x2f\xac\xe8\xe6\xc4\x3e\x49\x8b\xf9\xd0\xf4\x97\x84\x4c\x79\xde\x54\x89\xfa\xa5\x22\xb5\x4d\x7e\x47\xa8\xa7\x97\xef\x12\xf0\xc7\x9a\x02\x50\xb7\xb7\xb6\xc3\xb4\x12\x89\xee\x1d\x47\x7b\x6e\xa9\xad\xd9\x77\x1a\x97\xc1\xec\xda\xab\xd8\x27\xef\x21\xfb\x8d\x7d\xd6\x5e\xc5\x7b\x7b\x5d\x82\xf7\x56\x5e\x53\x4d\x78\xdb\x76\xcc\xef\x87\x7b\xc9\x5e\x4e\x5b\x11\xe6\xe6\x69\xbe\x5c\x74\xbd\x82\x46\x57\x21\x38\x6e\x03\xd7\x0f\xdc\x68\x39\x57\x74\xc2\x90\x86\xc6\x17\x7a\x8c\xdf\xef\x22\x57\xc4\x57\xb0\xfd\x69\xb9\x45\x76\x5b\x31\xd6\x28\x1b\xab\x4d\xbc\xb6\x16\x5c\x30\xa3\xa6\x73\x76\x28\x71\x97\x13\x6e\x18\x6d\x72\x02\x8c\x0d\x21\xb0\x6d\x85\x60\x76\x65\x2c\xb7\x55\x6b\x48\x12\xde\xea\xaf\x23\xf9\x25\x20\xf3\x07\x32\x7f\x20\x81\x2d\xa6\x8c\x63\xca\x38\xa6\x8c\x6f\x9d\x94\x71\x24\xb0\x45\x02\x5b\x4c\xd5\xc5\x54\x5d\x4c\xd5\xc5\x54\xdd\x4d\x91\xaa\x8b\x04\xb6\x48\x60\x7b\xb1\x24\x27\x22\x81\x6d\x27\x08\x6c\xbf\xf6\x38\xe9\xad\xd9\xe7\xaa\xe4\xe7\x43\x9a\x2b\x07\x6e\xb4\x0c\x47\x5c\x1a\x1a\xff\xcf\xe3\xc6\xfb\xf7\x54\x35\xb8\x1a\x6c\xcc\x59\x3b\x05\x71\x77\x18\x88\x87\x07\x39\x73\x6d\x65\x3f\xaa\xe4\x39\x9b\x1c\xb8\x43\xe6\x5a\xc4\xaf\x90\xb9\x16\xf1\x2b\xc4\xaf\x10\xbf\xda\x42\xf8\xd5\x26\xaa\xe8\xdf\x34\xf8\x15\x96\x9a\x23\x7e\x85\xf8\x15\xe2\x57\x88\x5f\x21\x73\x2d\xd6\xc2\x5e\x3a\x70\xd3\xa6\xaf\x85\xbd\x28\x99\x6b\x57\x96\xf0\x64\x8b\x24\x7a\x81\x34\xd9\x55\x30\x51\x5b\x68\x6c\x1f\x6b\x9e\xeb\x74\xc8\x38\x50\xd5\xa9\xbb\x4e\x8b\xef\xe4\x04\x89\xf5\x3b\xbb\xaa\x00\xb3\x6e\x45\x5c\x5b\x8d\x8d\x09\xfa\xda\x75\x42\xc7\x38\x98\x75\x9a\x9c\x24\x0f\x56\x64\x56\x37\xc8\xa7\x6f\xfa\x56\x30\xb5\x1a\x4b\x40\x57\x59\x02\xfa\xa3\x69\xf2\x6a\x5e\xc3\x39\x49\x1e\x88\x6b\x38\x1b\xd6\x77\x34\x5d\x8f\xab\xa8\x19\x69\x3e\x66\x5d\xca\xdd\x95\x94\xbb\x34\xbd\x8d\xd4\x61\x1d\xd5\x51\xcd\x08\x75\x33\x7f\xdb\x5f\xa5\xc3\xf6\xd4\x24\xb9\xad\xd6\x67\x82\xea\x36\xa1\xcf\x62\x92\xdb\x4e\x6b\x36\xa4\xba\x45\xdc\x17\x71\x5f\xc4\x7d\x11\xf7\x45\xaa\x5b\xa4\xba\x45\xda\x00\xa4\xba\xc5\xf8\x03\xc6\x1f\x30\xfe\x80\xf1\x07\x8c\x3f\xb4\x25\xfe\x80\xec\x31\xc8\x1e\x83\xec\x31\x48\x75\x8b\xe1\x3d\x0c\xef\x6d\x21\xaa\xdb\x8e\xc2\xd1\xed\xa7\xbb\xf8\x83\x34\xb9\x9c\x27\xb9\xcf\x02\xb3\x85\xf1\x5b\x69\xe3\x37\xd2\x64\x3b\xfc\xab\xbb\xbb\x00\xda\x9a\xed\x19\xea\x65\x3a\x26\xcc\x35\x73\x55\x81\x46\x59\x76\xd6\xc8\xd4\xc4\xfd\xec\x97\xf6\x01\xd4\x55\x03\x0d\x57\x0d\x34\xdc\xca\x40\x93\x2e\x39\xc5
\x17\xca\xfd\x64\x1c\x16\xca\x51\x72\x84\x1c\x5e\xc5\x42\x91\xcf\xd8\x6c\xa9\x18\xef\xbd\x8e\x1c\xae\x47\x19\x0c\x7f\xbf\x7e\x90\xe9\x55\x37\xe6\x11\x1e\x2c\x05\xfe\xd3\xcb\x83\xaf\x2b\x39\xd1\xfc\xeb\x8d\xbf\xb9\xd6\xf8\x25\x8d\x54\xb8\x37\xe7\x7b\x1e\xb3\xd7\xee\x1f\x3f\x13\x27\xcc\x47\xbe\x09\x17\xb1\xd7\x31\xc3\x47\xcb\xd8\xe2\x4c\xce\x72\x75\x3f\x8d\x14\x69\x4a\x5e\x9c\x32\xc5\x2e\x39\xe7\x46\xf3\x53\x4e\x34\xbf\x6f\x5b\x66\x30\xc3\x99\x36\x0e\x90\x1b\xb9\x90\xae\x23\xd7\x80\x90\xae\x20\xbb\x3f\x94\xda\x49\xc4\x2a\x69\xfa\x75\xdc\x5a\xfb\xeb\x20\xc6\x4e\x31\x25\xf2\xd4\x43\xcd\x3f\x8a\x7d\xc6\x5e\xbe\xea\xf5\xd9\x8a\xb0\x42\x2d\x1e\x68\xcb\x22\xdd\x35\x25\x35\xf5\xd0\x8a\x24\x35\x55\xbe\x08\x25\x55\x49\xaa\xd3\x4c\x66\xd6\x0b\x9a\xa4\xfa\x94\xa4\x4e\xcf\x34\x11\xd5\x60\x52\x54\x7e\xb8\x7e\xb2\xea\x8c\x20\x5a\x78\x05\x99\x5f\xd1\x64\x75\xa7\x94\xd5\xd8\xf8\x89\xf1\x33\xe3\x8d\xa5\x35\x94\x90\x16\x0f\x8c\x5d\xfa\xf2\xda\xf7\x01\x4d\x5e\xfd\x52\x5e\xa7\xa7\xce\x4c\x9c\x3e\x35\xd3\x58\x60\xfb\x12\x02\x13\xf7\x5f\x81\xc4\xea\x06\x9a\x1b\x8a\x72\x65\x12\x5b\xa9\x52\x6a\x41\x62\xc3\x35\xbf\xc6\x07\xc6\x47\xc6\x56\xf2\x35\x3e\x40\x9d\xfc\xa5\xbf\xba\xb2\xbf\xac\xc9\x2a\xa3\x34\xd7\xc8\x99\xd1\x07\x1a\x0b\x6b\x6f\x52\x75\x31\x43\xe5\xd2\xd7\xf3\x93\xc7\xc8\x7d\xc6\xbd\xd6\xb0\xa2\x51\xbb\x45\xa7\x51\xab\x71\x75\x35\xa1\xda\x79\xf2\x84\xf1\x98\xf5\x88\x1a\x61\x58\x00\x46\x3c\xe1\xc6\xcb\x9b\xec\x71\xcd\x30\xe7\x97\x68\xbf\x19\x96\x73\xf3\xa6\xc3\x5d\x8e\x88\x3a\x0b\xdc\x7f\x2f\x05\x3e\x80\x2f\xd6\x2e\x65\xf5\xe8\x77\xb8\x9b\xdc\x65\x0c\x59\x83\xea\x0e\xd7\xb2\xff\xca\x5c\x3c\xe9\x9b\x58\x70\x4c\xbf\xec\x73\x69\xf2\x07\x69\xe3\x33\xe9\xee\x4f\x2a\x97\xef\x7d\x69\xf6\xe2\x00\xee\x9c\xa7\x66\xc9\x09\x00\xb0\x7e\x68\xfa\x84\xc0\xe8\x04\x36\x68\x0a\x8b\xcb\xa4\x5e\xbe\xe4\xbb\x5e\x14\xb2\x99\xcf\xcd\xb9\x4f\xd3\x90\x23\x7b\x5a\x4c\x9f\xb9\x49\x21\x55\x7e\x4e\xae\x1c\x04\xd4\x8b\xc4\xfa\x92\x1e\x72\xe4\xcb\x31\x81\xd2\xcf\xa4\x4f\x3b\x0b\xa5\xa2\x80\x94\x96\xe6\xfd\xa2\x82\xd2\xd9\x6c\xc0\x8f\x8c\xa2\xd2\xf0\xe0\x60\xd1\xcf\x39\xc5\x79\x3f\x8c\x6a\xd8\x88\x6c\x35\x0c\x84\xcb\x61\x44\x17\x62\x1b\x91\x16\x9d\x30\x72\x73\x21\x75\x82\xdc\xfc\x40\xd1\x2f\x14\x5c\xaf\x30\xf8\x24\xff\xf7\xd1\xd7\x1e\x29\x87\x34\x18\xbe\xe0\x2e\xe4\xe6\x97\x6d\x53\x4a\xa3\xf6\x61\x2e\xd2\x44\x7f\x8b\x5e\xb2\x5f\x70\xdd\x45\x7e\xe0\x14\x68\x4c\x74\xc7\x2b\x5d\x39\xdb\x5d\x2e\x74\xf3\x81\xcb\xd6\x99\x6c\x7d\xf1\xeb\xbd\xc6\x3f\x75\x91\x2b\xc5\x55\x2a\xf3\xe5\x83\x29\xe1\x11\x6a\x3c\x77\x9e\x44\x1c\x65\xd1\xeb\xe8\xcc\xc4\x18\x8c\xd6\x16\xae\xbb\xfe\x38\xbe\x00\x31\x14\x81\x46\xbb\x11\x5d\x88\x03\x27\x3d\x89\x90\xa0\x36\xb6\x9d\xb9\x91\x33\xe5\xf1\x07\x91\x49\x38\x72\x86\x9b\xbc\xd4\x76\x43\x38\xf2\xea\xd3\x42\x2e\x71\x5a\xc8\xd6\x1c\x60\xc9\x8f\x97\x58\x77\xb2\x97\xb8\x94\x3f\xa9\x72\x93\x91\x1d\x0f\xab\x8b\x91\x1d\x0f\xb3\xcc\x30\xcb\x0c\xb3\xcc\xb6\x50\x96\x19\xb2\xe3\x21\x3b\x1e\x66\xf7\x60\x76\x0f\x66\xf7\x60\x76\xcf\xa6\xc8\xee\x69\xcc\xd7\xaf\x1c\xb8\xce\xf3\xf5\x23\xab\x1e\xe6\x41\x20\xab\xde\x45\x54\xe6\xfc\xd1\x7f\xd9\x4b\xbe\x92\x26\x77\x68\x95\x7e\x79\x37\xcc\xf9\x4c\x6b\xda\x8b\x43\xf6\xb8\xc0\x68\x1f\x70\xbd\x28\x34\x3e\x9c\xb6\x1e\x4c\xfc\x62\x96\x02\x7f\xd1\xcd\xb3\x55\x0c\xff\x14\x0f\xc5\xb4\x23\x5b\xd6\xa0\x68\xf9\xe9\x9a\xf3\xcf\xbe\xfb\xf2\x02\xcd\xdb\x00\xe2\x73\x1d\xf9\x99\xd4\x9f\xa5\xc8\x17\x53\x64\xe7\x9c\x1f\x3c\xe2\x7b\x34\x34\x3e\x93\xb2\xf2\xf2\x1f\x89\x9a\x0d\x6a\x3e\xe3\x7b\xb4\x37\xec\xe3\x68\x40\x83\x1b\x80\x59\xeb\x0b\xb4\xc3\x8c\xfc\x92\x5f\xf4\x0b\xcb\xa6\xb3\x04\x68\x8e\x5f\x66\x86\x9a\xfd\xa
1\xd4\x0e\xb2\xdd\x09\x02\x67\xf9\x37\x53\x43\x64\x90\x0c\xd4\x2d\x81\x4c\x08\xe6\x38\x9f\xda\xf7\x52\xb7\x91\x7f\x97\x00\xd8\xd8\x12\x1c\x88\x96\x4b\xd4\xd8\x65\xec\x70\x22\x7f\xc1\xcd\x11\xf2\xd1\xcb\xc9\x80\x36\x12\x7d\x3a\xa2\x1e\x7c\x16\xaa\xa0\xf2\x81\x33\x67\xa6\x26\xbc\x42\x40\xc3\x70\xca\x89\xe6\x8d\xff\x74\xb9\xe5\x57\xfc\x16\x17\xd8\xb2\x65\x01\x30\xbd\x70\x2f\x67\x9d\xdc\x05\xea\xe5\x6d\x73\xc2\xcb\xf9\x0b\x4c\xfa\xe5\xa0\xc8\xcc\x9d\x28\x37\x2f\xca\x6c\xf9\xf9\x60\x23\xfa\xc1\x92\x13\xf0\x64\x4a\x95\x36\xc7\xae\x7e\x3e\xb5\x43\xfc\x99\x78\x31\x3f\xb4\x9b\xfc\x7c\x8a\xc8\x43\xc6\xdb\x53\x64\xb8\xae\x88\x6a\x3c\x98\x78\x80\x2c\xbf\xdc\x7a\x44\xfc\x21\x50\x9b\x50\x98\xc2\x42\x9f\xe4\xab\x82\x03\x6c\x92\xdc\xfd\x85\x1d\x33\x70\xe6\xe6\xdc\x9c\x32\x48\xf4\x67\xb1\xc9\xbb\xd2\x04\xb6\x3f\xe3\x6d\x69\xeb\x87\x55\x30\x02\xa4\xc0\xcc\x9c\x82\xe3\x7a\x62\x9f\x06\x61\xc0\xa7\x0e\x2e\x35\x88\x4c\xd5\xdc\x8c\xf2\x20\x43\x71\xd9\x74\xb9\xb1\x20\x2b\x8b\xb5\xda\xe3\xbc\x1b\x02\x2c\xa6\xe7\xa1\xe6\x7c\x8f\x99\xe7\xbc\x85\x92\xc5\x6e\x61\xa9\x28\x88\x03\x91\x87\x64\x11\xf2\xf4\xf1\x51\xf3\xae\x43\x07\xef\xe1\x91\x82\x50\x16\xae\x14\x5c\x4f\xbe\xd7\x9e\xc1\x1e\xdb\x84\xea\xa5\xb2\xa7\xcc\xdd\x7e\x6e\xac\xc0\x25\x70\xef\xca\x27\xe0\xc6\x8a\x78\x6c\x5b\x8f\x94\x7d\x78\x07\xd9\xc9\x2e\x3c\xc3\x96\xe7\x0b\x3b\xac\x77\xec\x98\x12\xff\xd2\x37\x24\xf6\x2c\x00\x69\xb2\xed\x3d\xa1\x2d\x41\xa4\x72\x55\xf1\x69\xc3\xc5\xc2\x83\xf2\x3d\x65\x73\xcc\xf9\x4c\x3a\x6c\x52\x3c\x1b\x74\xd8\xcc\x98\xe3\x4f\x3b\xb9\x68\xd8\x04\xc5\x2e\x6e\xc3\x84\x02\xef\x82\xb2\x63\xc5\x65\xdb\xcc\x98\x53\x01\x9d\x73\x9f\x8e\xcf\x9b\x75\x98\x63\xe6\x7b\x42\x86\x70\x7a\x09\xce\x31\xc3\x52\xd1\x8d\x98\x28\x41\x50\x27\xe5\x72\x77\x43\x62\x9a\x79\x98\x8d\x27\xbf\x14\x2a\xe1\xe3\xe5\xf8\x4f\x27\x74\x43\xdb\x1c\x49\x9e\x00\x6b\x31\x14\x51\x2c\x62\x2a\x9f\x99\x7b\xc6\xd2\x11\x83\x4b\xd4\xed\x21\xaa\x31\xd8\x63\x86\xb4\xe4\x04\x4e\xe4\x07\x6c\x54\xcd\x8c\x76\x88\xc9\xc5\x06\x1b\x01\x7f\x02\xe6\x41\x80\x67\xcd\xfe\x0c\x41\x55\xf2\x19\x0c\x2c\xb9\x21\x95\x4f\xe8\xcf\x99\x25\x21\x53\x62\xaa\x21\xd9\x08\xb6\x79\xca\x8f\x84\x1f\xe3\xce\xc5\xe8\x87\x7c\x0e\xf1\x22\x4a\xe2\x2b\x70\xc0\x58\xe7\xcb\xc0\x94\x07\x13\x17\xb8\x5e\x62\x7c\x80\xda\x14\xf2\xcc\x67\xdf\x4b\xed\x82\x6d\x0e\xce\xf9\xfe\xe0\xac\x13\xc8\x87\xa2\xa1\xfa\x69\x70\xd6\x79\x86\x83\x59\x0a\xd5\xe4\x57\xca\x13\x66\x9d\x67\xfa\x6c\x92\x31\x27\x24\x9e\x0f\xeb\x6b\x46\x20\xe9\xc3\xe6\x44\xf3\x75\x07\xf6\x3f\x73\x90\x89\x09\x07\x85\x82\x11\x0d\x9b\x92\x03\x87\xb0\x34\xd9\x06\x1a\xf1\xdd\x82\xef\xa8\xfc\x35\x51\xb5\x80\x99\x44\x02\x71\x1a\x7b\x6a\xe9\x7c\x15\x61\xff\x98\x12\xaf\x22\xe0\x2b\x98\x8b\x14\xda\xa1\xd9\xa4\xf2\x76\x3c\x4e\x20\x1a\xb0\x69\x9e\xaf\xfc\x64\xc5\x75\x09\x7c\xa5\xb6\x2c\x12\x1f\xee\x57\x76\x91\x63\x9a\x9a\x9d\x2b\xfa\x4b\x4c\x29\x05\x7e\x51\xe9\xd9\x13\xcc\xc8\xa1\x79\xd9\xca\xe8\x04\x5d\xa4\xc5\x51\x1d\xd0\x35\xde\xb1\xcb\xfa\xeb\x54\xd3\xd3\x94\x5f\xcd\x2d\xd3\xc8\x37\xe7\x1d\x2f\x1f\x87\x50\x35\x38\x31\x2c\x0b\xff\xc6\x17\x26\x16\x98\xa7\x4e\x3e\xcf\xde\x07\xfb\xc2\x97\xb8\xef\x4a\xc3\x61\x62\x66\xcc\x07\x98\x45\x10\x68\x03\x71\xab\x0d\x1c\x0b\x3e\x1b\xb3\xc8\xa6\xc3\x07\xa3\xf9\xa3\xec\xa2\x73\xec\x66\x5a\xad\x1f\xfb\xaa\x41\x3b\x26\xa7\x43\x9f\xce\x51\x9a\x8f\x01\x86\xa3\x89\x2d\xec\x67\x77\x90\x7f\xde\x4e\x5e\xc5\x5c\xff\x80\xe6\x47\x7d\x8f\xc7\x91\x73\xcb\x33\xf3\x4e\x40\x43\xe3\x4f\xb7\x1b\xdb\x5d\x2f\xba\x6b\x9f\xf5\xbb\xdb\xcf\xd7\x3b\xeb\xbc\xd9\x3b\x32\x3a\xd3\xa7\x50\x72\xa1\xc2\xe8\xd3\x34\x57\x06\xc1\xc1\x8d\x35\xb0\x56\xfa\xe4\x02\x84\xd2\x81\x2f\x31\x
75\x7f\xae\xe6\xf3\xf3\xba\x54\xee\xee\xd0\xa7\x69\x99\xb7\xb2\x03\x3f\xb8\xe0\x2e\x32\xb7\xcc\x5d\xa0\xb6\x69\x8e\x8c\xce\xa8\x6a\x47\x87\xb9\xaf\x2e\xd4\x3a\xf2\x1b\xf1\x9a\x00\x15\xf0\xca\xc5\x8f\x23\xa6\xd5\x3b\x33\x7a\xa2\x0f\xaa\x04\xdc\x45\xa8\x12\x70\x16\x7c\x61\x2b\x68\x27\x0f\x88\x75\x56\xa4\xf9\x8a\x69\x82\x2e\x2c\x05\x3e\x5b\xdf\xec\xf1\xb9\x35\xe1\x06\xa6\x10\x60\xe2\x96\x21\x88\x50\xd8\xe3\xa5\xc0\xcf\x97\x73\x42\x7e\xb5\xce\xe6\xe0\x5c\xef\xc8\xe8\xd9\x3e\x73\x60\x60\xa0\x9e\xf8\x2a\xe4\x94\x2b\xc7\x72\x02\xd7\x9a\x5d\xca\xd6\x18\x75\x72\xf3\x3c\x39\x22\xf9\x04\xc3\x84\x98\xda\xff\x46\x46\xcf\xf6\x16\xfb\xcc\x23\x66\x8e\xba\xc5\x5e\x73\x66\xf4\x84\x99\x61\x32\x66\x3f\x0e\x9a\xbd\x66\x58\x5e\x78\xb4\x52\x04\x17\x1e\x87\x33\x2e\xf4\x99\x7d\x66\x1f\x21\xb3\x6e\xa1\x40\x03\x31\x57\x78\xbf\xf0\x8e\xa8\xe3\x29\xa0\x89\xbb\xaf\xea\x69\x23\xbd\x39\x63\x24\x56\x54\x89\x7a\x21\x6c\xa5\x7c\x83\xe0\xb1\x89\xa9\x13\x7d\x09\x48\x6a\x1e\x74\x99\xac\xc3\x50\x61\xa8\xbb\xf6\xda\x1f\x4a\xed\x22\xd2\x31\x21\xbf\x90\x22\x57\xc0\x0b\x9f\x16\x70\xab\xf1\xd6\x46\x16\x5c\x5d\xd5\x22\x2f\xb7\x1e\x3a\x9f\x18\xee\xbc\x66\x9c\x83\x0f\x1f\xf9\x66\xde\xaf\xf5\x8d\x32\x55\x2c\x0a\xab\xf9\xdb\x62\xbe\x8f\x5b\x98\x8f\x4c\xcf\x5f\x22\xff\x7b\x17\xb9\x73\x05\x7c\x23\xc6\x1f\xec\xb2\xde\x9e\xaa\xfa\xd9\x2c\x30\x0b\xdd\x13\x4b\x6b\xd6\x2d\x02\xe9\x8c\x6f\x2e\x38\x17\xaa\x34\xd8\xdc\x1c\x15\xb8\xb8\x1c\xc3\x1c\xf5\xbd\x88\x3e\x2d\x11\x1b\x59\x79\x14\x7b\xe2\xec\x2b\xe3\x78\xa3\xb0\x09\xd9\x77\x36\x56\x51\x04\xb5\x6f\x28\xe9\xd9\x7c\x6c\x07\xf9\x58\x9a\x10\xa7\xe4\x0a\x4f\xcc\xf8\x40\xda\xfa\xd9\xf4\xc8\xd4\x84\xf4\xba\x75\x5b\x58\x38\xcc\xcc\x14\xce\xcd\xd3\x05\x47\x29\x89\x80\x0a\xf8\x28\xce\xfd\x96\xd9\x1c\x31\xf6\x2a\x34\x25\x98\xa2\x81\x16\x52\x94\xa3\x29\xf7\x53\xc4\x98\xc0\xce\x63\xf6\x2a\xac\x1f\xee\x55\x6a\xf1\xca\x44\x9d\x3b\xb7\xe4\x6c\xf3\x24\xe0\xf1\xde\x9c\x3f\x1c\xf3\xe8\xb8\x91\x4c\x56\xc9\xf9\x0b\x0b\x65\xcf\x8d\x96\x07\x61\x19\xb9\xb3\xe5\xc8\x0f\xc2\xc1\x3c\xfb\x5e\x06\x43\xb7\x30\xe0\x04\xb9\x79\x37\xa2\xb9\xa8\x1c\x50\x89\x1a\x08\xbb\x39\xb4\x17\xf2\x0a\x3a\x08\xf5\x0d\xf0\x37\xd2\xa4\x0b\x92\x15\x3e\x98\xb6\x7e\x2e\xfd\xa0\xeb\xe5\x85\x51\x03\x87\x55\x88\x47\x48\x48\xfa\x3c\xd3\xe3\x90\x52\x2b\x30\x25\x90\xa2\x80\xe4\xd4\xa9\x61\x12\xb7\x76\xbd\x39\x55\x86\x25\xed\xfa\xd8\x15\x89\x23\x87\x61\x79\x16\x30\x05\x2d\xe9\xcd\x36\x47\x1d\x4f\xac\x6f\x81\x14\x32\x87\xcc\x1c\x75\x16\x68\x71\xd4\x09\x69\xc7\x25\x07\xb6\x05\xe4\x74\x24\x64\xf7\x85\x14\xd9\x29\x83\x1f\xc6\x27\x52\xab\x4a\x49\x39\x0d\x62\x3b\x49\x23\xc7\x7a\x2e\x35\x13\x39\x5e\xde\x09\xf2\x71\xe5\x9d\xaa\x1d\xeb\xf4\x23\xca\x1b\x91\xa7\x48\x17\x33\x54\x8c\xd9\x35\x90\xb2\x31\x53\xcb\xba\x95\x0d\x93\xf8\x02\x45\x7d\x18\xf5\xa0\xcd\x6b\xde\xfe\x5e\x6a\xb6\x79\x12\xcd\x51\xe3\xc8\x80\x99\x2c\x1b\x31\xcd\x7a\x85\x23\xa6\x59\x95\x4a\x43\x5e\xba\x96\xf4\x68\x73\x97\xa9\x38\x72\xf2\x22\x27\x0a\xac\x5c\xe3\xe7\xae\xb5\xfe\x47\x4a\xff\x45\xa1\x49\x2a\xf7\x4e\xa6\x2b\xf0\x80\x1a\x74\x46\x65\x2a\x43\x0c\x0b\x3f\x73\x43\x65\x4a\x70\x5c\x78\xd1\x59\xbf\x58\x5e\xa0\xa1\xf4\xe6\xf2\xcb\x9e\xb3\x20\xac\x60\x80\x79\xb8\x5a\xb2\x09\xd1\xef\xcc\x0b\xfd\x38\xd9\x82\x4a\xe8\x3c\xcc\x77\x6c\x0d\x83\x96\xf7\xe5\x13\x71\x72\x39\x3f\xc8\x8b\x26\xbf\x34\xca\xf1\x84\x13\xcf\x8c\x57\x19\x74\xc6\xb5\x9f\x4f\xed\x8e\xef\x1c\x24\x94\xea\x7f\xbd\x9a\xfc\x78\x8a\x90\xf8\x49\x8d\x67\x53\x16\x9d\x8a\x1f\x7c\xde\x2f\xe6\x6b\x4a\x83\x87\xbe\xd4\xa8\x5c\xd7\x4b\xbd\xc9\xf9\xee\x16\x85\x24\xa4\xea\x4d\xcc\xde\x7e\x6f\x2a\x99\x80\xaa\xcd\x8b\x7c\x27\x45\xae\xe6\x57\x67\xd9\xb6\
xe8\x15\x4e\xfa\x79\x6a\x7c\x2d\x65\xfd\x51\xea\x6c\xe5\xcf\xda\xc6\xc9\x2c\xed\xca\x17\x31\x5a\x74\xdc\x85\x50\xb3\x7d\xb5\x97\x20\xd2\x9c\xca\x5e\xde\x36\x15\x40\x40\xa3\x7e\x33\x71\x93\x09\xc9\x13\xc1\x43\x26\x4c\x1f\x25\x43\x5a\x10\x0e\x9c\xf7\x21\xd1\x87\xf9\xb1\xa1\x9e\x79\x25\x61\xb3\x79\x2a\x46\x8d\x1b\xd8\xca\x1c\xa3\x84\x8f\x82\x7b\xdc\x1a\xf7\xb8\x4b\x52\x4f\xff\x6d\x8a\x5c\x0e\xe1\x6d\x91\xe2\x6c\x7c\x29\x65\x7d\x2c\x35\x56\x5b\xb7\xd4\x50\x46\x35\x3f\x41\x3d\x99\x45\x85\xec\x43\x6a\xea\x37\xea\x37\x01\xac\x78\xd4\x0a\x7c\xab\xdf\xb4\x42\x7f\x2e\xb2\x1e\x07\xd0\x84\x27\x4c\xc1\xb5\x03\xa6\x0a\xbd\x03\xce\x70\x56\xb0\xe8\x84\xee\x42\xa9\xb8\x6c\xce\x39\x6e\xd1\x74\xe7\x00\xd8\x02\x0d\x05\x57\x26\x20\xe3\x8a\x5c\xf4\x53\x44\xd7\x58\xc6\x51\x6b\xdf\x94\xa6\x6a\x92\x30\x36\x24\x3e\x4a\x98\x26\x3e\x2b\xf1\x51\xfd\x74\x8a\x5c\x11\xd0\x1c\x53\x05\xc2\xee\x7d\x43\xca\x7a\xba\x03\xe2\x83\x8f\x4f\xbb\x4f\x12\xa2\xe0\xd5\x33\x89\x99\x79\xe4\x5a\x40\x40\xf9\x9d\xc6\x9f\x2e\x39\x00\xfc\x1a\x67\xad\x07\x46\x6a\xfc\xce\xbe\xda\xa5\x50\xe5\x11\xd4\xd8\x13\xa0\xf0\x9b\xeb\x4e\xe6\x02\x39\x5e\x1e\xfc\x18\x11\xac\x20\x3f\xb2\x8d\x5c\x2d\x20\xd7\x33\x1c\xc9\x77\x69\x68\xfc\x53\xda\xfa\xe5\xf4\xb4\x9e\xe3\xe2\x31\xcd\x1a\xa9\x33\x44\x9c\x6b\xb1\xb5\xbd\xcd\x1c\x67\xae\xa2\x98\x44\xa9\x58\x2e\xb8\xb1\xe2\x62\xe6\x9e\xbf\xe4\x69\x39\x00\x2a\xa2\x20\x33\x41\x65\x04\x66\xc4\xe3\x99\x07\xa6\x98\xe9\xb2\x4c\x98\x39\x43\x83\x05\x99\x8d\xe2\x70\x2f\x25\x90\xd9\x7c\x6a\x34\x99\xb2\x03\x39\xa0\x6d\x57\xd8\xf1\xd2\x3d\x44\x0e\x90\xbb\x9b\xb6\xa8\xaf\xf5\x08\x68\x8e\xaf\xde\x1c\xff\x5e\x8a\x36\xb7\x26\xb3\xc6\x31\x65\x4d\x56\x24\x65\x4b\xab\x52\x37\xc2\x6a\x1a\x94\x6f\xdb\x4f\x0e\x6a\xef\xd3\xc9\x2f\xb8\x90\x57\x1c\xd0\x82\x1b\x46\x1c\xec\x53\xe6\xe5\xc9\x32\xdb\x72\xbd\xc2\x39\x3a\x3b\xef\xfb\x17\x8c\x3f\xb9\xcb\x7a\xa2\xe2\x37\xcd\xc6\x74\x3c\x53\x0d\x67\x2e\x89\xc3\x95\x5c\x11\x3c\x9c\xa9\x4a\x04\xd8\x07\x24\x5c\x69\x78\x3d\xcf\xa7\x20\x1b\xe1\xf9\xd4\xe5\xfc\x1d\x72\x10\x32\x61\xe3\xbd\xbc\x8f\xbc\x2b\x45\x12\xc7\x8d\x37\xa7\xc8\x64\xdd\x35\xdb\xf8\x19\xc5\x73\x8c\x6a\xc3\x59\x47\xf5\x7f\xa9\x4f\x5d\x00\x9f\x62\x05\x30\x8d\x1d\x27\x86\xb1\x21\x6c\x73\x5a\x60\xbc\xe4\xfd\x29\x72\x05\xdb\x26\xca\x81\xa0\x0e\x35\xde\x99\xb2\x7e\x34\x75\x5c\xff\x29\x31\x6e\xc2\xdc\x80\xec\x51\x6d\x6d\xc7\x52\x55\xab\x9c\xe9\x6a\x8e\xc0\xe6\x25\x3d\x06\xd5\x19\xe1\xcc\x09\x9e\x02\xef\x07\x26\xbb\x69\x05\xb4\x0c\xc7\x12\x7a\xfb\x2d\x3b\x89\x11\x50\xd7\x5b\xf4\x73\x1a\x97\x85\xf1\xdd\x1d\xd6\xb7\x76\x54\xff\x9e\x00\x77\x68\xcc\x51\x22\x5f\xba\x16\x74\x75\x00\x23\x5c\x28\x17\x23\xb7\x54\xe4\xc9\x2d\x80\xb7\xc7\x21\x30\x91\xbd\xa4\x3d\x24\x7b\x0c\xfe\x92\xcc\x91\xea\x27\xb3\x4e\xd1\x45\x1a\x58\xb0\x8e\xac\x89\xb9\x53\x94\xe6\x69\xde\xb2\x09\x81\xdf\x87\x79\x79\x8f\x98\x08\x6c\xdb\xe2\x73\x97\x53\xe1\x95\x01\xcc\x82\xf4\x72\x14\x72\x96\x1a\x4e\x81\x10\x79\x8f\x1a\x43\xc7\xc3\x3a\x91\x59\xa4\x4e\x18\x81\x55\xe0\xe4\xf3\xae\x88\xf8\x01\xe4\xa8\x3d\x6f\xc5\xfb\x54\xf7\x91\xd1\x1a\xa1\xfa\x66\x29\x80\x96\xf9\x05\x37\x12\x99\x3e\x32\x49\x8b\x29\x78\x0e\xfd\xc5\xc3\xf0\x3d\x29\xd4\x72\xb6\x64\x8e\xa2\x9c\x2d\xcf\x7b\x10\x6b\x5d\xec\x0c\xa1\xc8\x08\xe0\x2a\x97\x27\xe3\x67\x16\xca\x61\x94\x11\xb9\x66\x0b\x25\x3f\x02\x72\x44\x11\x29\x97\x1c\x82\x5a\x0a\x31\x5d\x56\x79\xc6\xc5\x65\x35\x5f\x1e\x85\x1a\x36\x33\x15\x18\xad\x26\x97\x78\x49\xa9\xfa\x03\x95\x1e\x07\xa0\x1a\xe0\x81\x10\xfe\x63\x22\xb5\xcd\x0c\x93\x50\x9d\x01\x62\x56\x45\x49\x9e\x98\xe4\x6f\x14\x96\x3f\x9f\x76\xbf\x14\x8a\xf4\x51\x6b\xdc\x99\x0d\x7e\x41\x46\x87\xd9\xbd\x97\x12\x92\xe3
1e\x22\x8a\x4d\x00\x2d\x9b\xcb\xaf\xb1\x85\xba\xe1\x9e\x09\x61\x04\x69\xc6\xa3\x26\xf9\x76\xa8\x47\x8a\x4b\x40\x4e\x29\x0d\xd1\x30\x1a\x52\x9b\xf9\x74\x1f\xbb\x52\x25\xeb\xdd\x21\xfa\xa9\xfb\xae\x7d\xfe\xab\xf7\x99\x4f\x8a\x1c\x7d\xe7\xe5\xb3\x81\x60\x07\x05\x24\x00\xe3\xca\xb5\xc8\x90\xb2\x26\xc8\x02\x1e\x82\xf5\x1a\xe4\x06\x85\xe6\xc2\x20\xd1\x8d\xda\x60\xfd\x51\x35\x63\x35\x8a\xb0\x2b\xef\x75\xea\x4d\x61\xb7\x01\x99\x30\xea\xd7\x1b\x71\xe7\xe0\x7d\x71\x24\xfc\xaa\xfd\x2c\x11\xeb\x41\x1c\xf9\xd3\xdb\x69\x92\x9e\x34\xdd\x1e\xbd\x70\xec\xf6\x70\x88\x4d\x83\xf5\xaf\x2c\xf1\x0a\x5a\x05\x66\x07\x3d\x3d\xf2\xeb\x74\xe0\x53\x95\x03\x25\xab\x94\xf7\xcc\x78\x93\x16\xb5\xf2\x1f\x14\x97\x24\x84\xc4\xe9\x42\xd0\x06\xa2\xdd\x85\x70\x58\x16\x97\x19\x4c\x51\x01\xd2\x25\x68\xb4\x55\xc5\x41\xa6\x7c\x13\x89\xbc\xa4\x89\x0d\x43\x2c\x18\x2d\x81\xe3\xb1\xcb\xd3\x14\x00\xfc\x34\xa8\x1f\x4d\xf0\x02\xba\x2d\x89\x12\x0e\xe3\x69\x34\x22\x1e\x05\x23\xc5\xd0\xa4\xfc\xb1\x35\x12\x36\x6b\x32\x6a\x00\x41\x4b\x63\x01\xa9\x38\x40\x95\xe4\xaa\x33\x49\x47\x58\x4b\xde\x77\x80\x99\x07\x09\xb2\x36\xe4\xd0\xc1\x52\x41\x6e\xf4\x92\x20\xeb\xe3\x19\x0a\x9e\xa4\xfb\x0f\xf8\xb7\x96\xfe\x62\x1b\xb5\x06\x5e\x19\x4d\x17\xfb\x5b\x7b\x7e\xa6\x5d\xfb\x99\x36\x95\x9b\x29\xf2\xfd\xa0\xc4\xcb\x44\x43\x45\xd3\xb0\x7d\x6b\xc5\x56\xcb\x62\xcf\x67\xb4\x0b\x9f\xd1\xef\x99\x3e\xa3\xff\xb1\x4b\x9f\xd1\x0f\x7f\x93\x5c\x46\xaa\xa2\x4b\x4e\x6b\x6b\x7f\xd1\xac\x3b\xa3\xfd\x45\xb8\xb4\xb4\xcf\xa8\x54\xae\x94\xe2\xb3\xbc\xfe\x4a\x76\xdd\x30\x20\x4d\x62\xd8\x5e\x8e\x43\x71\x02\x03\x4d\xdd\xbf\xbe\xc2\x7f\x5d\x65\xf0\x79\x9e\xa7\x0c\xa9\x68\xf9\xef\x55\x8d\x03\x41\xd7\x1d\x2f\xc8\xea\x9e\x94\xe1\x0d\x70\x57\xa8\xec\x66\xeb\x23\x75\x6f\x21\x3c\x64\x0d\xe3\x45\xc8\x3c\xcd\x66\xea\x6d\xb6\x63\xeb\x12\xb1\x11\x70\x8f\x80\x62\xea\xde\x5c\x0e\x3b\x1c\x44\xde\xc6\xe1\xfa\xe1\x1b\xe5\x3f\x5a\x7c\x03\xa9\x17\xa0\x93\x36\x4c\x87\xca\x6e\x38\xec\x0d\x76\x0e\xab\x85\xe3\x35\x8a\x91\xfd\x33\x41\xe4\x17\xb0\xe9\x60\x15\x47\x8e\xd4\x2f\x3a\x13\x09\xc2\x8d\x5a\x12\xf9\x83\x93\x7b\x12\x79\x0f\x66\xec\x9b\x27\x18\xdb\x86\x5c\xfc\xc1\xdd\x82\x8c\x7d\xe7\x08\xfa\x16\xf6\x51\x87\xa9\x1d\xe0\xfe\x9a\x33\x82\x32\xbb\x0c\xae\xd7\x7f\x91\xa3\xc1\x7a\xe5\xca\x8e\xc2\xfe\x4e\xe5\x03\x19\x5c\x84\x67\x14\x44\x33\x42\x71\x43\xe4\x39\x9b\xc9\x81\x92\xc0\x46\x99\xf3\xe9\x02\x50\x66\x9d\xbd\xc8\x61\x93\x29\x8a\x93\xd4\x7d\xae\x7f\x2b\x89\x96\x94\xcc\x17\x86\xb4\xa3\xfd\xa4\xa0\x07\x33\x85\x21\x8c\xc4\x72\xb0\x30\xb6\x4f\x25\x6e\x8d\x0b\xd5\x79\xc9\xd9\xd8\xfa\x28\x58\x71\xef\xd4\x47\xc1\x50\xe9\x96\x23\x76\x0d\x88\xb8\xd2\xa3\xe1\x97\xf6\x6f\x1b\xf9\x17\xf1\x38\x9e\xb7\xdf\xff\xac\x53\xfe\x5b\x21\xae\xd3\x48\x5d\xcb\xa9\xc0\x72\xd8\xda\xe5\x60\xbd\x2d\x4f\x8d\x4d\x5c\x06\x3a\x44\x8e\xa2\xe2\x75\x82\x81\x0a\x82\x93\xd2\x5f\xa7\x4d\x29\x6c\xd1\x2a\xc8\xfc\x1e\xdc\xf9\x37\x11\x32\x1d\x31\xfa\x79\x98\xca\xc3\x23\x94\xd7\x13\x05\x6c\x03\x61\x0d\xcd\xd4\x06\x05\x21\xf8\x21\xaa\x2d\x15\x05\xdf\xf4\x2b\xc7\xd9\x6f\x8c\xb1\x6b\xb0\x24\x03\x58\xe1\x9d\x63\xdb\x49\xcb\x2c\x41\x5e\xf0\xff\xa9\x52\x2c\xcc\x4e\xc1\x07\x07\xb7\x85\x8b\xd9\x10\x69\xca\x93\xbe\x65\x16\x18\x4c\x76\x50\xcc\xb4\x60\xc2\xc7\xa8\x5e\xea\xbf\x19\x8f\x58\xbc\xae\xe7\x38\xbc\x30\x0b\x2a\x70\x63\x76\x65\xe1\x4c\xb0\x4e\x9e\x0d\x4a\x49\x83\x5b\x99\x6e\x2a\xc4\x15\xe0\x97\x88\x70\x81\xc2\x74\x44\xd5\x68\xc3\x28\x1b\x93\x32\xe3\x18\x39\x0a\x38\x9e\x02\xd0\xb8\x76\x1c\xc5\x09\xf6\x50\xd1\x8a\xc3\xea\x42\x5b\x52\x73\x78\xc3\xd9\x19\xf6\x98\x62\x4a\x09\x64\x9d\x5d\xef\x4f\x2d\x1a\x86\xb6\xc1\x41\xd5\x43\x6e\x99\x33\xee\xb7\x89\xd8\x4a\
x30\xfa\x0c\xf3\xe8\xa7\xbe\xa9\xe6\xd1\x3f\xbd\x92\xbd\xa3\x60\x1e\x7d\xfd\xa8\x6c\xc2\x62\x5f\xca\x6c\xa4\x4f\x7d\xb8\x6c\xa4\x7b\xf6\xd0\x3d\x7b\xe8\x9e\x3d\x74\xcf\x1e\xba\x67\x0f\xfd\xf7\x60\x0f\x7d\xc3\x04\x3b\x7e\x59\xa8\xaf\x18\x02\xf6\xb9\xfd\xfe\x77\x0c\xfd\xf5\xa2\x33\x41\x61\x0a\xd6\x69\xf6\xc0\x7e\xf6\xfc\x0a\xdb\xdf\x14\x51\x20\x9a\xee\xd7\x1d\xff\xcb\xce\x1c\xfc\xdb\x24\x6a\xaa\x43\x84\xa8\x72\xa0\x72\x44\x45\xdd\x54\x0a\x28\x7e\x6b\x12\x07\x13\xd0\xdd\x42\x0b\x03\x18\x54\x78\x84\xdc\xc4\xf2\x17\x98\x98\xa6\xae\xa5\x85\x71\xa7\xe4\x9a\xb7\xf0\x58\xdb\x1c\x31\xd5\xba\x41\x04\x87\x77\xa4\xe3\xb9\xa4\xb4\x52\xef\x19\xad\xaa\x7b\xd4\x7a\x29\x3e\xe9\x12\x93\x51\xdb\x67\xf2\x66\xc8\x47\x36\xff\xc2\xe7\x2a\xec\x6a\x21\xf7\x02\x0c\x36\xb2\x07\xfc\x6e\xc5\xff\x40\x65\xde\x7e\x08\x17\xe8\x48\x07\x88\x2a\xde\x04\xa0\x54\x44\xba\x80\xb8\x01\xb1\x50\x9a\x2b\xc0\xec\x12\x71\x35\xb6\x45\xe3\xbc\x5a\x81\xf2\x26\x9a\x88\xb0\x9f\x2f\xc5\x2c\xf6\xd6\x45\xa6\xaf\x55\x98\x0d\x25\x3c\x4e\xbf\x49\x7d\x31\x88\x7a\xc2\xd8\x1d\x76\xf1\x14\xd1\x07\xe9\x97\x01\x86\x8f\x05\x19\x9e\x52\xc8\x61\xdd\x10\x55\x6f\xf9\xc4\xcc\xac\xc6\xa2\x42\x27\xbe\xc7\xe1\x0e\x86\x27\x92\x88\x80\xcd\x46\x3e\xc0\x20\xb9\x34\x93\x82\xc1\xf4\xc4\x43\x56\x37\x1e\x06\x10\xa2\x3c\x80\x94\x6a\x09\xe8\x67\xea\xc0\xe6\x15\xff\xe4\x32\x7e\x61\x2d\x31\x18\x0f\x23\x6e\xaf\x0f\x38\x7e\x50\x10\x84\x84\xea\x20\x9b\x84\xd6\x8e\x55\x7c\x83\xa9\xe5\xed\x3e\xd5\x7f\x8a\x31\xd5\x39\x98\xfe\xa8\x35\x4c\xdf\x56\x69\x81\xea\xa5\x6c\x2f\x92\xbf\x65\x56\x9c\xd1\xb6\xc0\xd8\xdc\xf7\x32\x7f\x6e\xe4\x1b\x05\x18\x99\x5c\x63\x56\x80\x6d\x78\x40\x5f\x74\x26\xd6\x79\x26\x36\x79\xff\xa2\xb3\x1f\x8f\xd2\x8b\xce\x01\x4c\xb7\x28\x1a\xcf\x3e\x74\x80\xb5\xd8\x35\xdd\x24\xce\x30\xb7\x7f\x2e\xee\xf0\x20\x72\x97\xfd\xf9\xd5\x82\xe6\xa7\xea\x58\xd2\xef\x7a\xf8\xb2\x19\x71\x8e\x5c\x53\x1a\x92\xaa\x5e\x30\x20\xe7\x8d\x70\x3f\xb8\x9d\xd4\xd1\x15\xf5\x3a\xa1\x13\xfc\xb8\xa3\x9f\x94\xd8\x08\x28\x3b\x45\x36\x47\x35\x16\x30\x75\xe5\x12\x84\x79\xf2\x52\x01\xf1\xd8\x1b\xd6\x2d\x2f\x0f\x23\x21\x44\xa2\x3c\x0a\xf8\x74\xbc\x1e\x44\x06\xf3\x09\xa2\x7d\xf2\x20\xac\xb3\x7b\x18\x4b\xd3\x70\x1e\xaf\x3f\xee\xb2\x3f\x77\x32\xe4\x00\x6a\x81\xda\xe4\xc1\x66\x90\xc2\x3e\x5c\x59\x39\x6d\xa8\x92\x58\x48\xd6\xf6\x4e\xe1\x04\x55\x75\x58\x31\x2c\x27\x6b\x0d\xbd\xd2\x61\x57\xd0\x38\x42\x3a\xf7\xf3\x1d\xbf\xb7\x30\x10\xae\x6a\xaa\x4d\x88\x5c\x4c\x5a\x5a\x7e\xef\x5f\x6d\x07\x8d\xf3\x66\x3a\x5a\x9c\xc8\x67\x91\xf1\x48\xc3\x8f\xca\x31\x28\xfe\x66\x4e\xe2\x33\x74\x9b\x96\xe2\x38\x74\x4f\xfb\xb7\xaf\x1a\x6b\x43\xb1\x81\xc8\xdf\x4c\xf2\x3b\x1d\x05\x9e\x2f\x34\xaf\x09\x8b\xc7\x2a\xfc\x2c\xa3\xe5\xea\xce\xfb\x37\x15\xd7\x5f\x41\x37\xe4\xa9\xb9\xde\x82\x48\xb5\xc1\x2a\xf0\x79\x3a\x21\x1e\xae\x9c\xa9\xdf\x32\x4b\xd5\x43\xc5\xc3\x44\xf0\x66\x0e\x33\x40\xe6\x2f\xd5\x2b\xa5\x09\xab\x18\xbe\x81\x7e\xe5\xc8\x0c\x04\x62\x64\xb6\xe1\x4d\x0e\xdb\xdf\x4a\x57\xfb\x5d\xe1\xbe\xc6\xf1\x7f\xcc\x31\x92\xe2\x21\x38\x2f\x8b\x15\xd1\xc6\x19\xc2\x19\xe0\x66\xb6\x3d\xbc\x33\xa0\x7c\x42\x1a\x22\xad\xcb\x68\x5d\x33\xf0\xce\x5f\xa8\x7b\xbe\xb8\x90\x1d\x93\xba\xe1\x85\x56\x2a\xff\x13\x65\xad\xd4\xb7\xe6\x17\x7e\x31\x9b\x38\xc7\x94\xc0\x70\x6f\xf6\xab\xab\x3a\xb9\xb6\xd9\x44\x7a\x35\x5b\x00\xcc\x2c\x2d\xa8\xe5\x6b\x75\xf4\x82\x81\x94\x1b\xfa\xff\x79\x00\xcf\xd6\x9b\x92\x3f\x1f\xdc\x4c\x82\x4c\x4c\x23\x5b\xf0\x50\x1c\x5d\xfd\x63\x2a\x32\xe8\x61\x10\x91\x3d\x03\x78\x84\x52\x5b\xda\x7e\xe8\x00\xfb\xbe\xed\x05\x57\xb9\xaf\x3b\xe0\x2f\x58\x4f\x8c\x00\x4e\x44\xdb\xc0\x1f\x4d\xe6\x3b\x75\x9e\xe0\xf6\x22\xb5\x7d\x29\x6e\xda\x8e\xe1\x4f
\x4d\xc8\xe5\x86\xac\x02\xfd\xcb\x0b\xfd\x02\x1b\xd5\xad\xa5\xbc\x02\x03\x0c\xd7\x3a\x6c\xcf\x2a\xa1\xbe\xe7\x0a\xd9\x73\x85\x7c\xf3\x5c\x21\x9f\x34\x7d\xc4\x1f\x1e\x01\xcd\xb8\x3d\x67\xc8\xfd\xce\xb7\x00\x99\xfd\x25\xa7\xb9\xb5\x7f\x60\xc6\xbd\xbd\x66\xe5\x0e\x7a\xca\x5b\x60\x6c\xf7\x82\x97\xc0\xda\xb8\x8c\xfd\xc3\x24\xf3\x4a\x92\x0d\xe7\xb8\xe8\xc4\xd1\x0a\x65\x47\xff\xce\xa4\x5f\xb3\x9e\xe0\xda\x6c\xc4\xa1\xa2\xa0\x00\xf6\x76\xf9\x82\x14\x54\x43\x62\x58\x7e\x6e\x62\x6f\x95\xef\x45\x42\x10\x13\xce\xde\x81\xb1\xcb\x03\xe3\x07\x55\x34\xd3\x8a\xff\xd8\x1c\xa5\xc3\xdc\x84\xa6\x21\xa7\xce\xaa\xec\xba\xe1\xf9\x56\xc5\x5d\x7f\xc9\xb9\x7b\x6b\xe9\x73\xcc\x3d\x32\x44\xfa\xc8\xf2\xb4\xc0\xb1\x04\x07\x63\x1f\xa9\x58\x5a\x93\x8d\x08\x07\x98\xeb\x2a\xeb\xc4\xfd\xa9\x8a\x7f\x7b\xc9\x73\x4a\x22\x40\xac\x16\xbd\x64\x94\x57\xca\x5e\x06\x17\x9d\x03\xf0\x74\xa9\x48\x4c\xf1\xeb\x0e\x5b\x61\xf9\x6f\xee\x49\xff\x66\x40\xad\x8d\x4d\x76\x68\xe0\x58\x96\x75\x29\xdd\x3c\xa7\xd3\x95\xea\x28\xf5\xbd\x6e\xbb\xe5\xcd\x65\xfd\x74\xff\x8c\x5a\xd1\xea\x3e\x81\x4b\x58\xfe\xf3\xa4\xaa\x5c\xca\x40\xa9\x9f\x66\x02\x90\xad\x32\x91\x74\xa4\xfa\xab\x6f\x69\xb0\x44\xfd\x8d\xc3\xbe\x55\xd3\x57\xae\x60\x37\x18\x03\xd9\x0a\xe3\x4d\x4a\x89\x32\x42\xd1\xa4\x90\x5f\xee\x85\x22\xbd\x3b\xc8\xda\xca\xdd\xed\xbe\xe7\x0a\xff\x35\x63\x43\x7e\x04\x13\x8a\xce\x2d\x82\x2d\x81\x6e\x1c\xed\xf9\x36\xec\x1e\x90\xba\x6c\x64\x9e\x62\x9e\x90\xfc\xa5\x11\x47\x69\xd0\x44\x87\x1d\xf0\x30\x20\x75\x19\x3f\xaf\x84\x3a\x15\x51\x55\x5b\x7c\x4d\xd3\xd5\xc2\x73\x79\x0f\x26\xa0\x13\x43\xf4\xc7\x08\x40\x27\xaf\x24\xbd\xae\xbc\x43\x83\x89\x73\x58\x4f\x74\x06\x85\x6e\x2f\xb0\x60\x36\x75\xba\x05\x58\xff\xa6\xf8\xb4\x9d\x63\x91\xd3\xd3\xa4\xc5\x92\x8c\x86\x43\x41\x53\x6b\x43\xbf\x55\x8d\x86\x66\x61\x7a\xa7\xa6\x30\xc2\x67\x25\x85\xd6\x2f\x3a\x3a\x40\xc1\x5a\xae\xf7\x4f\xb2\x37\x55\xd8\x35\xc5\x32\xdc\x9f\xa8\xf8\x2f\x73\xee\x29\x3e\xbe\xc7\x8e\x6c\x34\xc8\x93\x8c\xa1\xa2\x9b\x25\xa6\x46\xf7\xb1\x35\x16\xcf\x7e\x91\x55\x3a\x48\x70\x9e\xd4\xb4\x60\x42\xab\x4d\xb4\x74\x6e\xf9\xb4\x25\x77\x4e\xb2\x39\x76\x62\xa8\xdc\x29\x5b\xb3\xa5\x8d\xbd\xe4\x3c\x91\x3d\xd6\x92\x47\xb2\x6f\x35\xc8\xf9\x3f\xe0\x4e\xf0\x2c\xee\x04\x0d\xc6\x3e\x5d\x61\x8f\xb2\xc6\xdd\xfd\x68\xc5\xff\xb8\x73\x4f\x52\x32\x38\x69\x18\x34\x2c\xe2\xb0\x87\x61\x68\x72\x12\xfd\x99\x42\x8a\x78\xb1\x49\xf2\xc3\x92\x69\x6c\xf3\xd4\xc0\xab\x04\x1b\xbe\x35\xbc\xb3\x6c\x66\x04\x54\x57\xd9\xf0\x5e\xf6\xd8\x3e\x6f\xcc\x88\x9d\xf9\x6a\xc5\x7f\x61\x45\xef\x8e\x82\xe3\x2a\x8a\x93\x0e\x0f\xc1\x08\x56\x55\x48\x6e\x72\xc0\x7a\x51\x06\x94\xef\xc8\x9a\x9d\xfb\x71\x21\x21\xaa\x81\xe0\x64\x6b\x71\x2f\x03\x29\x92\xe4\x00\x88\x65\x1b\x4c\x65\x8a\xc1\x2c\xd6\xbd\x19\x9a\x4e\x9c\x37\x04\xf8\x4b\x71\x7b\x83\xf8\x01\x43\xc5\x71\x23\xc5\x9f\xd0\xc6\xe8\x87\x5e\x64\xff\x04\xf6\x3b\xe2\x42\x53\xbb\x54\x6c\x88\xa4\x9f\xd3\xcf\x19\xe8\x43\xf9\x74\xdc\xc4\x6e\x18\x81\xe8\x56\x36\x1d\x3a\x1a\x68\x5b\x73\xf0\x52\xc6\xbe\x73\x18\x6d\x48\x57\x34\xdc\x7f\x38\xe0\xdf\xa6\xfe\x28\xa4\xd3\xf3\x9c\x7f\x0c\xc5\x39\x7a\xa2\x82\x54\x9b\x9f\x36\x83\xac\x6d\x47\xc2\xbc\xf1\x00\xfb\x94\xc3\xae\x6c\x98\xe4\x3c\x1f\x76\xd8\xe1\x6d\xf1\x98\x58\xfc\x37\x2f\x73\x16\x5a\x9e\x41\xfc\x0f\x93\xa2\x65\x3a\x4a\x91\x22\x93\x0f\x6a\xee\xf2\x40\x99\x43\x5c\x4d\xf2\x00\x62\xc1\x2a\xd2\xc3\x5b\xe7\x59\xbe\x54\x54\x5c\x88\xb2\x0d\x2b\xa7\x21\x79\xca\x72\x1f\x31\x7b\xa1\xc3\x98\x8a\x2d\x5a\x98\x73\x2f\xf8\xe7\xf3\xf0\x52\x04\x32\xe3\x61\x98\x43\xd0\x21\x86\x99\xc8\x8c\x22\xe4\x1a\xe8\xc4\x1b\x4a\x57\xe0\xad\x4c\x24\xde\xe1\xfa\xe1\xa3\x75\x2f\x15\x22\xd7\x04\xd1\xe3\xad\x9
4\xc1\x1b\x0e\xdf\x7c\xc3\x0d\xe6\x31\x7e\x27\x9b\xe8\xc6\xcd\xd9\x85\xb9\x65\xd0\x41\xf0\x9f\xc5\x18\x8b\x6e\xdc\xf4\x16\x96\xbc\x84\x47\xeb\xc2\xe3\x69\x1a\xac\x47\x39\xee\x97\x1c\x3a\x4b\x33\x78\xb0\xc2\x26\xa9\xcc\xd4\x7d\x63\xc5\xff\x53\x47\xfd\x55\x2c\x57\x95\x99\x96\x16\x0a\xc3\x08\xa8\x8d\xde\x5a\x1f\x4c\x4f\x9e\xf2\x27\x41\x9d\x83\x89\x90\x85\x29\x3e\x94\xb5\x3d\x11\x65\x49\xdf\xcc\x19\xa6\xfe\x40\x37\x69\x20\x17\x32\x50\xb1\x55\x2c\x31\xcf\x30\xe3\xfa\x30\x5d\x08\x65\x33\x04\x90\x4f\xb7\xbc\x85\xa5\x8d\x63\xb0\x75\x17\x96\x36\x6e\x18\xe1\xab\xbc\xe4\x5c\xcb\xbe\xd3\xda\x51\x5d\x59\x7b\x2d\xcd\x12\x9e\x89\xf5\xbe\x3b\xe9\xee\xef\x88\x64\x5d\x30\x30\xd7\x92\xf1\x3d\x59\x98\x73\x53\xbf\x55\x88\xe9\xd0\x63\x43\x66\xd0\x46\x18\xf7\x9a\xca\x5e\x9f\x28\xdd\x10\x2d\xfb\xc7\xbd\x27\x2d\xd1\x0f\x8b\xbc\x23\x6e\x3b\x7e\xf0\xa0\x7e\xa0\x4c\x6c\x8b\x10\x36\x72\x9b\xed\x1e\xda\x9f\x01\x0e\xaf\xfb\x34\xdf\x1b\xdc\x2b\x44\x2c\x84\xaf\x58\xbd\xbe\x8e\x4d\xb1\xef\xdb\x72\x43\xae\xca\x0f\xd9\x27\x1c\xf6\xa8\x5e\x44\xd1\x37\x10\x18\xf4\x41\xc7\xff\x05\xe7\x9c\xf9\x48\xa5\xd5\xa7\x44\x75\xa4\x7e\x09\xc2\x20\xeb\x83\x74\x17\x9b\x14\x53\x77\xa2\xaf\x14\xd4\xaa\x16\x26\x46\x49\x3b\xc6\xe9\x02\xa7\x36\xfc\xef\xc1\x27\x22\x06\x2f\x70\x2e\xd4\x00\xc0\x0e\x68\xb4\x65\xff\x2c\x13\xec\x4f\x8f\x59\x34\xe3\xb6\x03\xe7\x0e\xa1\x39\xab\xdc\x7f\xae\xf8\xd5\x81\xa7\xf9\x45\xc2\x3b\x2f\xfa\x68\x9b\xd7\x60\x6f\x63\xe7\x45\xdf\x92\x86\xff\xbd\xc2\xee\x62\xf2\xa9\x7b\xd6\x3f\x21\xc5\x12\x7d\x63\xf8\x80\xf2\xfb\x02\x62\x23\x1b\xb6\x75\xc0\x39\x54\xef\x9d\x17\xb6\x0d\xfb\x05\x2a\x08\xea\xbf\xf8\xb1\x19\x9d\x46\xce\xa6\x6c\xc7\x43\x19\x6f\xc8\x63\x57\x6c\x1e\x24\x23\x50\x4d\x0a\xf7\x1a\x45\xb3\x1e\x84\xc8\xda\x83\x4f\x84\xff\xd8\x8e\x90\x49\xe5\xee\x74\x67\xfd\xeb\x57\x88\xf9\xd1\xf4\xf9\xac\x10\x14\x1b\x22\xba\xca\x11\x50\xa2\x97\x82\x11\xad\xe9\xf9\x99\x6f\x67\x27\x69\x7a\x64\x6b\x6b\x7c\x7d\x3d\x11\xeb\xc0\x78\xaa\x4d\x13\xbc\x1b\x28\x96\x74\x2b\x8c\x60\x66\x69\x81\x28\x79\xe0\x68\xfb\xa3\xc7\xfa\xff\xcd\xb1\x9f\x0d\x27\x7d\x84\x58\x35\xc5\x22\x6c\xb8\xc1\x34\x99\xb0\xa7\xee\x2b\xe8\x0a\x90\x23\x0a\xab\x57\x39\x3c\xe4\xae\x03\xff\x72\x3f\xee\x61\x00\x1b\x39\xb7\x95\x7f\xad\x21\x92\x0c\xad\xe5\x02\x51\x82\x14\x8e\xd2\x45\xe7\x31\xa0\x38\x2c\x25\x41\x9c\x04\x59\xff\x4c\x10\x05\x9d\x5e\xe7\xa2\x73\x35\xdd\x16\xd5\x0f\xd6\xda\xfa\xda\x63\xd8\xbb\xf6\xb1\xd2\x2f\xdd\xd7\xef\x53\xd0\x21\xcf\xdf\x77\xaa\xf8\x42\xd4\xeb\x28\xf5\xab\x4b\x8f\x51\x24\xa3\x92\x45\xc6\x8f\x36\xdf\xc8\xd5\xa8\xba\xf7\xe4\x60\x1d\xe8\xd8\xd5\x07\x16\x74\x3e\x91\xc0\x52\x5a\x7b\x4b\x24\x14\x7f\x89\x16\xb3\xd4\x93\x4b\xcb\x0b\xe3\x4d\xb3\x80\x38\x12\x29\x1c\x9a\xa4\x81\xa1\xdb\x93\x3a\x9c\x6a\x7b\x0d\x16\xdc\x81\x28\x60\x45\x2b\x2a\x36\x44\xe4\xb5\xb1\x41\xa5\xbd\x43\x1b\x8b\x97\xf6\xe0\xe8\xa0\x26\x6e\xb6\xe3\x50\x35\x74\x5d\xc0\x0e\x6e\xdb\x9d\xc2\xab\x68\x37\x09\x3a\x3c\xe9\x7b\x29\xe0\x21\xa4\x04\x79\x1a\x47\x83\x55\x75\x7a\x1d\xa9\x9c\x36\x85\xec\x2e\x94\x95\x6a\xb0\x98\x2c\x86\x0e\xa7\x99\x37\x75\xe4\x90\xb7\x26\x5a\x72\x33\x1e\x3e\x34\x8d\x75\xa4\xa2\x11\x47\xcd\xd2\x5a\x40\xf1\x0a\xbb\x6d\xbe\x26\x80\xc3\xdf\x83\xc0\xa9\x24\x48\x73\x7b\x80\xe9\x6b\x54\xa6\x2b\x6f\x6a\xe3\x70\x7d\x8d\x27\xaa\xaa\x8d\xc3\xf5\x56\x1c\x4f\x7b\x77\x8b\x6b\x9b\x60\xc0\xea\x74\x84\x54\x5e\xe3\x8e\xc8\xe0\x5e\x12\x06\xe7\xc5\x71\xef\x3a\xd2\x31\x80\xec\x50\x74\x33\x88\x9d\x8e\x60\x0a\xe0\x96\x7a\xf8\xa6\x43\x87\x0e\x21\x5c\x3d\xe7\x2b\x22\xf5\xa6\xce\x76\x45\xb4\xd2\x0e\x5a\x59\xd5\x9b\x13\x81\x7c\x0b\xb8\x08\xa8\x02\x8d\xcd\x4d\x07\xdc\x91\x43\x87\x0e\xa5\x16\xae\xc9\x
c7\x1c\xf6\x6d\x41\x94\x8a\x46\x2f\x11\x2b\xe7\x83\xee\xea\xe9\x95\xbb\x00\x32\xcb\xfd\x65\xc7\x7f\xab\xb3\x50\xf6\x93\xda\x42\xa9\xb7\x7a\x7a\x65\xe8\x3e\x42\xb8\xee\x92\xad\x8b\x5a\xbf\xb6\x36\xa0\xfb\x3c\xcd\x92\x38\x5a\x0f\xa1\xec\x46\xdc\x4b\xf8\xba\x54\xe2\xbc\xa7\xc5\x3d\xb5\x03\x50\xfb\x13\xde\xec\xcc\x89\x5e\xd4\x84\x5c\x03\x08\xe3\xb3\x3d\x7a\xdf\xa8\xb0\x09\xba\xb0\xb8\x7f\x5f\x61\x77\x95\x9e\xab\x3b\x96\x64\x24\xb2\xf2\x78\x82\x57\x56\x14\xd9\xd8\x20\x19\x24\x9e\x26\xf8\xab\x76\xb6\xcd\x2c\x2d\xe8\x3e\x83\xb2\xd4\x03\x0b\x4b\x1e\x29\x4a\x30\x29\xde\xb1\x63\x47\x75\xb6\x84\x51\x47\x14\x84\x55\xdc\x3c\x6a\xab\x0b\x64\x27\x21\xb2\x5f\xcc\x9e\x58\x5a\xc0\x1d\x45\xfb\xd6\xa4\x30\x01\x2e\xb1\x90\xc8\x9b\xac\x09\x10\x18\x35\x6b\x62\xfc\x36\x45\x28\x40\x2b\xd7\xaa\x24\xdc\x07\xb1\xac\xc4\x6b\xb4\xa5\x8e\x87\x2b\xab\xd5\x0b\x5b\x41\x18\x8a\x66\x9d\x3d\x95\x4d\x50\xc5\xee\x19\xff\x07\xee\xca\xdb\xa0\xda\xa6\x9a\x65\x34\x00\x1c\xc6\x69\xdd\xb3\x23\x5c\xfd\x8d\xc3\x96\x9f\xf9\xf3\x13\xac\x28\x7c\xdd\x3f\x98\x50\x42\xf5\xfd\x13\x77\xd9\xbf\xe5\x1a\x10\xec\x49\x29\x12\x88\xc2\x40\xcf\x85\x1e\x21\xb0\x71\x61\x00\x13\x89\x37\xe3\xd0\x5f\x87\x8b\x15\x44\x13\x47\xde\x7d\x22\x89\xb7\x90\x49\x85\x76\x0c\x8a\xa3\xe1\x72\x08\xd9\xc3\x83\x0c\xc2\xf0\x55\x9b\x38\xb6\x88\x34\xc8\x9e\x95\xf4\xd0\xe1\x61\x58\x95\x2a\xec\x1a\x5f\x0b\xfb\x6a\x87\x1f\x3e\x94\x82\xaf\xa3\xc1\x53\x28\x41\xdc\xdb\xe3\xa1\xee\x2d\x89\xd6\x40\xa4\x55\xd3\x2e\xaf\x9c\x37\xb0\x04\x14\x92\x3f\x46\x32\x77\x7b\x99\xc8\x47\x51\x35\x8d\xab\x91\xa2\x85\x5a\x28\x27\x48\x3d\x1f\xf6\x98\x94\x6a\x3e\x04\x5f\xe2\xea\x92\xe3\xc5\xd7\xe4\x8d\x2b\x8a\x23\xf3\x9d\x42\x09\x69\x95\xc2\x31\xa5\x34\x53\x63\x18\x8a\x0b\x41\x23\x5e\x4f\x78\xb7\x8d\x0c\x0d\x75\xcf\xbf\x63\xa0\x04\x29\x4a\x78\x92\x29\x2d\xc1\xdf\x50\x04\xd7\x1c\x22\xaa\xc9\x23\x0f\xf9\x16\x34\xa0\x53\xb2\x07\x1d\xfe\xec\x58\x9f\x77\xd3\xf4\x89\x52\xa5\xc2\x3e\x05\x92\x40\xef\x7c\x38\x0f\x7c\xa9\x41\xf9\x52\x3c\xf8\x98\xfc\x88\xf8\x8f\x76\xa1\x41\x64\x14\x0a\x6b\x27\x15\x18\xea\x86\x41\x19\xad\x20\x91\x0b\xad\xef\x9d\x9a\xf1\x6e\x83\x0c\x0a\xef\x36\x3c\x6e\xbc\xa9\x4d\xb0\xa0\x9c\x9a\x41\x09\xa3\xc6\x87\xa8\xfa\xbc\xb4\xd7\x6a\x05\x17\xf0\x4c\xe5\x98\x7c\x21\x1b\x04\xdf\x4e\x57\x73\x80\x69\x79\xec\xe3\x89\x15\xad\xdb\x9d\xa4\x3e\x5a\x6d\xac\x7b\x33\x91\xda\x85\xaa\x8d\xca\x26\xa4\xc6\xf7\xb8\xb7\x71\xf8\x50\xd5\xdb\x38\x52\xf5\x36\x0e\xcb\xff\x03\x19\x09\x7f\x1d\x92\xff\x3a\x5a\xf5\x36\x8e\x82\xd8\x94\x8f\x8e\x40\x8b\xf0\x3d\xf8\xe7\x91\xaa\xd7\x8a\xe3\xc3\xf8\xbf\x87\x6c\x90\xad\xbf\x03\x86\x32\x14\xef\xee\x9f\x3b\xee\xf8\x5a\x3f\x13\xfe\xdb\x9c\x5c\xe4\x03\x95\xd4\xfc\x19\x4f\x44\x8d\x58\x9e\x6e\xb3\x33\xde\x1a\xfe\x84\xeb\xa5\xb8\x82\x15\x94\x39\x39\x1a\x48\xe8\x5c\x8b\xd2\x47\x0e\x89\x71\x7a\xc1\x6a\xee\x45\xc6\xbd\x4d\x45\xd2\x24\x90\x8a\x16\xc7\x99\x66\x3b\xd0\xf6\x72\x98\x4d\x59\x5b\xdd\xba\xb9\x6e\xcb\x16\x34\xab\x48\xfc\x8f\xfb\xb5\x53\x4a\x63\xb3\xe4\xb8\x22\xd2\x2e\x88\x4b\x53\x34\x7e\xd4\xb1\x00\x7f\x2c\xdc\x3d\x8a\x1b\x04\xed\xfb\x8d\x8e\x7f\x5c\xff\x35\xdc\xb2\x84\x00\xb3\x80\x78\x98\xf6\xd6\xb4\xf5\xd3\x52\x75\xcf\xb1\x15\x36\xa9\xa0\xed\xdc\x53\x4a\x0c\x1f\x19\xc4\xbf\x53\xc1\xa0\xa9\x3e\xa2\x08\x27\x8e\xf4\x24\x6b\xf6\x7f\x64\x9c\x8d\x46\x10\x3c\x52\x5f\x8a\x9b\xa9\x89\x22\xe8\xfe\xfe\x98\x7f\xd1\x29\x3e\x35\x62\x3c\x33\x03\x86\x72\x43\xb1\x8b\x70\xc2\x39\x54\xa3\x20\xd7\x01\x18\x27\xba\xb1\x0e\xe1\xd2\xd8\x95\x30\x14\x64\x0c\x9e\x6a\x99\x87\x14\x24\x53\x61\x94\x67\x5a\xeb\x26\x31\x72\xea\xd4\xba\x22\xa9\xa1\x42\x39\x5d\xbf\xe8\xec\xc7\xaa\x2e\x3a\x13\x54\xa2\x35\x92\x2f\
xa9\xb0\xfb\x1d\x46\xef\xb8\xfd\x11\x8c\x94\x65\xa3\x81\x7d\x5e\xd0\xb8\xaa\xfe\x4d\xd4\x2f\x03\x2c\xd5\x30\x65\xd3\x8f\x6b\x7d\x5c\x55\x60\x3c\xa5\x6b\xb4\xbc\xbe\xaa\x06\xba\xfd\x11\xf0\xf9\xc3\x9b\x01\xdc\x2d\x38\xfe\xfe\x8d\x6a\xf4\xf2\xc4\xb3\x81\x79\x50\x0b\x02\x53\x56\xb0\x6d\xec\xd5\x63\xa5\x80\xf4\x8b\x2a\x8f\x34\x47\x4d\xfe\x9b\x8a\x7f\xeb\xe0\xe3\xbc\x42\x1b\x53\x57\x83\x5c\xe6\x19\xa9\x23\x51\x92\x5f\x5c\x79\x88\xd1\x8d\x9f\x44\xe8\xc6\xc7\xfc\x6b\x15\xba\xb1\x6e\x8a\xd2\x56\x42\x64\x4f\x29\x81\x39\x3e\x5b\x8a\x72\x7c\xf3\x65\x43\x3d\xb2\xc7\xe6\x18\xc5\x56\x76\xc0\xb7\xe9\x08\x6a\xeb\xf1\xfb\xaf\x60\x4f\x18\x4c\x86\x35\x51\x08\xdc\xd7\x5d\xe1\xff\xb8\x63\x65\x02\xa7\x79\x02\x8b\x07\x69\x81\x55\x0d\xfd\x06\xb2\x8d\x34\xb2\x01\x9f\x0d\xe9\x37\x5a\xa5\x6e\xca\xf3\x8d\x7b\xbd\x28\xc8\xf0\xd8\x36\xd1\x0d\xe2\xa4\x24\x67\xd6\x96\x56\x2f\x3f\xb0\x17\xb3\xf2\xef\x22\x49\xfd\x39\x6c\x5f\x02\x1e\xbf\xc4\x3f\x86\xeb\x01\x73\x2e\x55\x7e\xad\xb9\x52\xf4\xbd\xca\x98\x7d\xd3\xa6\x7a\x90\xd5\xd8\xf7\x6f\x95\xcd\x6d\x38\xd7\xd9\xcf\x55\xd8\xd5\xea\x46\x18\xc4\x11\xc0\x77\xbe\x66\x14\x0f\x9e\x2a\x63\xc6\xfe\xc8\xff\x0b\xa7\xf0\x84\xd2\x39\x94\x2e\xa9\xe2\x12\x20\x07\x50\x9f\xc2\xed\x78\x13\xee\x53\xbd\x20\x44\xf7\xe3\xf0\x6e\x82\x9a\x52\x52\x47\x2a\x32\x23\xc3\x05\xbf\x47\x00\x46\x2d\x5d\x14\xfa\x2c\x24\xc7\x00\x29\x1f\x41\xab\x82\x8b\x12\x3f\x51\x1a\x54\x9a\xc5\x9d\xae\x61\x30\xd7\xa5\xec\x45\x8a\xee\x36\xf0\xe7\x92\xf3\xec\xad\x43\x73\x4e\xb9\xf3\x97\x07\x1c\x50\xe4\x22\x78\x2f\x63\xdf\x53\x12\x28\x44\xe0\xc5\x2b\x22\x03\xf5\xf0\xe5\xcc\x3f\x66\x3f\xd2\x80\xd9\xc5\xa8\x62\xee\xe5\x2f\x42\xb4\x03\x29\x11\x96\xd4\xfd\x8d\x03\xec\xfe\x0a\xbb\xba\x13\x44\xcb\x82\x37\xfb\x2b\xa0\x10\xa5\xee\x3f\x6a\x10\xe5\x4f\x3b\x64\xb3\x33\xd4\x45\x54\x9b\x70\xd9\xd3\xfd\xcf\x8b\xc4\xa6\xc1\xe3\x27\x35\xb4\x3c\x3f\x01\x83\xef\xe5\x95\x48\x1e\xf2\x3c\xea\xab\x7b\x3c\xa9\x01\xf2\x40\x4d\x78\xda\x86\x0c\x40\xe4\x30\x23\xab\x85\x8a\x6d\x91\x9b\x41\x25\x36\xeb\x50\x77\x58\x56\x87\xbc\x29\x59\x99\xda\x0e\x65\x1f\xc8\x83\x29\x8d\xe3\x48\xfe\x37\xc8\x30\x13\x88\x37\xfb\xd3\x96\x56\xfb\xe6\x8a\xa1\x2b\xbf\xba\xa2\xba\xff\x6f\x4e\x11\x3d\x3a\x1f\x06\xa5\x47\xab\xcf\x72\x2b\x19\xc7\x64\x64\x34\x68\x36\x03\x20\xc9\xee\x05\x69\x5b\x93\x59\x8a\x0b\xf2\x93\x20\x03\x63\x05\xac\x7f\xe3\x42\x63\x77\xf0\xf0\x8e\x5d\x12\x9b\x71\x72\x3e\x8c\x79\x33\x3d\x98\x4b\x83\xf4\x20\xb5\x52\xae\x8e\xfc\xf1\xc1\x27\x6e\xb6\x79\x56\x0b\xd2\x1a\xaf\x95\xbe\x60\x8d\xd1\xbf\x56\x98\x5e\x45\xee\x57\x2a\xc3\x59\x9a\x47\xc5\x45\xf2\x35\x11\x2a\xb7\x90\xff\xce\x8a\xf6\x10\x61\x1c\x8d\xfc\xd5\xbb\xb7\x27\x92\x3e\x9a\xbf\x73\x2e\x4b\x5a\x4e\xb9\x67\x93\xda\xeb\x35\x30\xb9\x02\x0a\xa6\xc4\xd4\x48\xc9\x0d\x32\xc5\xe5\x2e\xd1\x20\x22\x0b\x89\x5e\x5f\xd8\xd1\x1c\x59\x5a\x15\x9b\x8a\xac\xae\x2d\x80\x96\x3f\xd5\xcb\x44\xa7\x2b\x45\xd9\xb5\x2a\x01\xf6\x21\xf6\x1a\x61\xa1\x07\x9f\x08\xff\xad\xa9\x11\x4f\xd9\xab\x2a\x6c\x52\xd5\xed\xbe\x70\x3b\xec\xaf\x4b\x71\x73\x95\x3e\x80\xdc\x83\x3f\x74\xd4\x9f\x9a\x8d\x00\x55\xa5\xc2\x39\xa7\x7b\x2a\x1f\xeb\xbd\xa5\xb2\x67\xe0\xc2\xd8\x6b\xb5\x82\x06\xa8\x49\x1a\x3d\x5d\x1e\x63\x4d\x91\x01\xec\xf0\xc3\xb9\x6a\x9f\xd8\x8d\x9b\x35\x35\x10\xec\x95\x07\x8a\x6c\x1b\x03\x88\xf6\x5a\xb5\xc9\x6f\xa4\x7f\x36\xe9\xbf\xd4\x19\x7c\x7e\x39\x77\x52\xa4\x0f\xa3\x14\x67\x1a\x4f\xfb\x0a\xda\x0e\xb2\xd4\xb8\x70\x42\x8a\x76\xe4\x2d\x44\xeb\x98\x7e\x03\x9f\xc0\x2d\x14\xaf\x80\x17\x1d\x86\xb5\x2c\x02\xf4\xc4\x95\xd4\x0a\xb8\xba\x59\xd2\xfb\x75\x13\xec\x7d\x0e\xbb\x92\x6f\x88\x84\xaf\x0b\xf8\xdd\x7d\xfb\x4e\xe2\x95\x07\x69\xd9\xfd\x75\xb3\x34\xb5
\x4a\x06\xc6\x01\xcc\x11\xf8\xa2\x4d\x4a\xe0\xf1\x46\x12\xa7\xa8\x0d\x26\x22\x14\x1b\x3c\xca\x70\x17\x4f\xc1\xfd\xe0\x5e\xaa\x67\x9a\xbd\xcc\x61\x56\xcf\xdc\x1f\xde\x5d\xc3\x7f\xc0\x2c\x6c\x64\xc3\xa9\xa5\x85\x06\xd5\xd9\x69\x66\x8c\xbb\x7b\x9b\x7f\x38\xff\xab\x0c\x2a\x43\xdd\xe2\x23\x0f\xae\x07\xc5\xdb\xe0\xd7\x4d\x69\xf9\x37\x0f\x85\xb4\x7c\x5b\x25\x35\xa4\x65\x6e\xea\xac\x29\x33\x5b\x2b\x4e\x3a\x9a\x7b\x08\x15\x78\x63\x69\xa2\x70\xd5\x45\x94\xdd\xec\x31\x63\x3f\x15\x3a\x0c\xd7\xda\x21\x30\xe0\x55\x3a\x43\xbb\x1c\xa8\x89\x79\x8a\xcc\x77\x78\x1d\xe6\xa1\xd7\xe5\x09\xef\x88\x2c\xc7\x84\x50\x64\x15\x64\x19\x03\xde\x5f\x29\x1a\x34\xc0\x84\x7e\xa1\x11\x77\x01\xcd\x19\x1a\xd1\x8b\x40\x53\x7e\x76\x2f\xcd\x8c\x62\x60\x2e\x8a\xd6\xc3\x75\x0e\x26\x5d\x2a\xa7\xce\x7e\xc4\x61\xb4\x93\xdc\x0d\xf6\xe4\x1d\x71\x5f\xcc\xca\xc5\x4b\x1a\x72\x91\x3a\xfb\x3a\x32\xd0\xe4\x34\x12\x20\x2d\x9b\x26\xc2\x01\xa9\xc6\xec\x8b\x13\xec\xa6\xa1\x39\x61\xb4\xf1\x81\x12\x7d\x49\x0d\x57\xaa\x2b\x72\x7f\x6e\xc2\x7f\xee\x16\xef\x58\x44\x3b\x68\x33\x55\x5a\xb9\x89\x45\xa0\x46\x48\xf9\x80\xf3\xab\x7e\x9c\xe4\x96\x8d\x9a\x1c\x79\x50\x64\x68\x4f\x5d\x74\xe0\x1e\x4e\xe0\x37\xb6\x29\x6c\x3f\x9b\xa5\x10\x8a\x5b\xfc\x7a\xd9\xd6\xd0\x57\x71\x15\x65\xac\x0c\x04\x36\x4a\x87\xc3\x0e\xe8\x06\xb8\xef\x76\xfc\xb7\x3b\xda\x4e\x64\x96\x88\x0f\x74\x94\xc6\xb0\xa2\x87\x00\x83\xa0\x73\x13\xba\x47\x77\x2f\x08\xf0\xd6\x35\xa1\x4f\x40\x05\x56\xc0\x92\x1b\xf2\x09\x69\xee\x76\x60\xf8\xe7\x1c\xb6\x0f\xde\x74\xff\xd8\xf1\x3f\xe2\xac\xc0\x47\x46\xe0\x57\xa0\x6f\x51\x04\x9c\x33\x64\x02\xbc\xe2\x04\x60\x77\xe4\x9d\x08\x71\xb3\xac\x26\x68\x2a\xec\x69\xf0\x6d\xe4\x9d\xa9\x63\xa0\xbb\x06\xa2\xa0\x10\x3d\xf0\x45\x98\xeb\x49\x7f\xd1\x84\x95\x95\x5a\x11\x7e\x56\x07\x3f\xe3\xb0\x49\xde\x0d\xc0\xea\xed\x7e\xcc\xf1\x7f\xdd\x99\x59\x5a\xb0\x6c\xe0\x68\xff\x56\xb2\x64\xc4\x04\xc9\xdb\xb0\xf1\x2d\x00\x62\x14\xc2\x14\x75\xc8\x3f\x98\x7e\xd4\xac\x28\x4b\xaf\x94\x19\xda\xe2\x8e\x20\x06\x70\x95\xa0\x88\x99\x20\x69\xd6\xba\x3c\xc9\xfa\x90\x51\x9b\x56\xad\xda\x4a\x81\x62\x66\xc9\xda\x74\x8b\x5f\x57\xb6\x26\x30\xc3\x92\xe1\x6f\x7b\xeb\xf8\xcd\x63\x56\x56\x97\x52\xbc\xe6\xa3\x8d\x93\x49\xdc\xa1\x50\xce\x7f\xa9\xf8\xc7\xad\x27\xc5\xe8\x40\xaa\x09\x45\x37\xe6\x9f\x62\xf0\xe5\x19\xde\xb5\x4d\x9e\xef\xaa\xb0\x86\x0a\x14\x3d\xc3\xbb\xcb\xa2\xe5\xae\x6c\x83\xa9\x5e\x97\x36\x1f\x6d\x50\xa4\xe8\xe3\x57\xdb\x22\xaf\xa5\x10\x51\xc5\x22\xb6\xbf\x9b\x88\x56\x70\xc1\x6d\xfa\x77\xcf\x18\x76\x91\x9c\x1f\x4c\x7e\xd2\x4d\x80\x5e\x1a\xf2\xe2\x79\xa3\x0d\x71\x4a\x34\x61\xba\x68\x33\xf1\x79\xf6\x59\x0b\x73\xf3\x8b\xab\x0b\x27\x17\xe6\x97\xad\x71\x7c\xba\x89\x1d\x70\x66\xdb\xd0\x01\x79\x77\xbe\x63\x35\x0f\x9a\x2a\xf4\xe5\x1d\x33\x6c\xa7\x26\x30\xf7\xab\x3f\xe0\x87\xf9\x9f\x72\x6d\x28\x0b\x18\x45\xf5\x86\xa1\x06\x68\x68\x1a\x90\x5c\xb0\x6a\xdb\x7c\x43\x28\xd7\xa8\x8e\x91\x96\x5f\x28\x94\xc3\x5e\x0a\xb6\x48\x08\x2e\x00\xad\xda\xb6\x85\x7e\xfd\x76\xf6\xb9\x09\xc6\x5a\x41\x04\x98\x3e\x49\xea\x7e\x62\xc2\xff\xf1\x09\x35\x8c\x88\x30\x43\x5e\x69\x43\x89\x07\xb6\xef\x50\x64\x8a\xf9\x0e\x77\x24\xc4\x49\xf4\x89\xc1\x05\x63\x44\x09\xb9\x24\x9f\x49\x63\xff\x76\xe5\x0d\x1a\xc3\x13\x3b\xdd\x38\x02\xb0\x1b\x7d\x0b\x48\x30\x0e\x17\x0d\xa6\xb2\x24\x5d\x8f\xe6\x5c\xc3\xa3\x31\x14\xca\xfe\x9d\x66\xbc\xd3\xb5\xe3\x6e\x70\xff\x47\x35\x08\x96\x90\xe5\x04\x05\xf2\xb6\x1c\xc0\x12\xac\x07\xb2\xce\xa6\x14\x6e\x6a\x38\x94\x64\xd4\xde\x1c\x10\xe3\xf4\xa2\x2c\x09\x04\x83\xbc\xe7\xd5\x3d\xef\x2c\x7a\xc4\x53\x6f\xf1\xec\xaa\x27\x22\xc8\x41\x6f\x7a\x6b\xa2\xc1\x7b\xa9\x00\x65\x46\xde\x2b\x9a\x3d\x39\x7b\x69\xb0\x1e\x81\x11\x4
5\x5e\x6b\x82\xf4\x3c\xd2\x1b\xf6\x1a\xe7\xbd\x7c\x2e\xea\xc6\xbf\xc9\x3e\xdd\xe6\x89\x4a\x8a\xaa\x42\xdd\x1c\x34\x2c\x10\xbc\x5d\x91\x00\xaa\x4a\x1c\x11\x62\x2f\x79\xe8\xf3\xd1\xd2\xc5\x69\x4a\x9d\xbc\x5f\xea\xbe\x6a\xf2\xd5\xc8\x62\x42\xc1\x89\xba\x38\x0d\xb2\x1e\x51\x28\x45\xb4\xe4\x50\x5c\xaa\xd9\x33\xe7\x54\xcd\x33\x3a\xb4\xf3\x8a\x0d\x02\x1b\x48\x91\xe2\x41\xa6\x02\x56\x38\x0c\x0a\x0f\xbd\x29\x3c\x5d\xc9\x66\xa7\xd1\x22\xd1\xf9\x0a\xe9\x06\x20\x8a\xa7\x65\xeb\xe5\x68\x92\xf7\x7e\x78\x43\xb8\xd9\x73\x88\xdf\x30\x9a\x51\x95\xaf\xf7\x42\x85\x0d\xc0\xbd\xa6\xe0\xcd\x30\x6e\x9c\xaf\x7b\x77\x93\x21\x49\xcf\xa5\x8e\x1c\x31\x26\x06\xc2\x08\x12\x01\xa1\x31\x38\xe2\xbc\x13\x47\xeb\xa8\x4c\x76\x52\x11\x6e\x08\x34\x14\xc8\x17\xe5\x61\xb4\xd1\x0b\x23\x91\xa8\xe8\x43\x5d\xa4\x32\xbc\x0e\xa3\xf8\xb9\xec\x58\xe8\xe7\x57\xd8\x7e\xbc\xe8\xbb\x5f\x77\xfc\x4f\x3b\x52\x06\xc3\x6a\x83\x6a\x4b\xed\x18\x05\xa5\x2e\x4e\xd6\x79\x14\xdc\x87\x2e\x42\x79\x67\x5d\x47\x0c\xa4\x29\x54\x5e\x72\xc7\xe1\xb4\x41\xe8\xcb\x29\xc9\x46\xdf\x03\x52\x83\x00\x4d\xfb\xe6\xe8\xfa\x4d\x65\x40\x74\xd3\xa0\x91\xa3\xfc\x42\x2f\x45\x5b\x6d\xbd\x17\x34\x05\xd9\x31\x1e\x28\x8c\x91\x21\xe9\xd8\xd7\x2a\xa4\x4b\xfe\x5d\xc5\xff\x5c\x05\xb4\xc9\x5c\x19\x0b\xee\xed\x21\x32\x1f\xcc\xbf\x09\x52\x5b\x54\xf0\x40\x86\xe2\x21\x4d\x72\xb8\xea\xf1\x90\x02\x4b\x01\x90\xc9\x10\xd0\x39\x8c\xa9\xf2\xf3\x00\x70\x11\xa5\xd9\x49\x95\xc6\xe0\x1d\x6b\x61\xd6\x5d\x37\x89\xbb\x49\xc0\x33\xd2\x72\xe5\xcd\xa1\xc3\x33\x15\xde\xa2\xb4\x60\x8c\x37\x0a\x20\xca\xc7\x20\xad\xc4\xd6\xc5\x20\x70\x3b\xdd\x38\x43\x3c\x5e\x24\x30\x32\x92\x30\xf2\xf3\xae\xd4\xc9\xb4\xd3\xc1\x37\x78\x3c\x07\xe3\x8f\xdf\x32\x66\x6a\xdf\xaf\x1e\xf3\x5f\x38\x96\x6b\xdf\xa6\x63\x00\x9f\xd0\x24\xa0\x74\x81\x73\x3e\x1a\x9c\x2a\x0c\x4a\x81\xa3\x29\x32\x35\x79\x39\x53\x1b\x3c\x14\x39\x7d\xbf\x4f\xfa\xab\x6f\x22\x93\xaf\xf5\x32\xe3\x07\x75\x7d\xe7\x51\x1c\x81\x2f\xd2\x76\x44\xe4\x79\x23\xea\x44\xc5\x78\x4a\x5a\x14\x68\xdc\x23\xa5\x1a\xa4\x64\xde\xa2\x1a\x3a\x3c\x72\x7b\x80\xbe\x37\xa0\x70\x8c\xd3\x1c\x8c\x58\x5d\x34\x31\x1d\x8c\xb1\x5c\x8f\x99\x5b\x5c\x79\xd6\xe9\x99\x13\xf3\xa7\x1f\x92\xb9\xd2\xad\xb3\x26\xe9\xff\x1d\x63\x57\xc7\x9b\x91\x48\xf4\xad\x2f\x75\x3f\x3f\xe6\x7f\x40\x73\xb7\xa9\x66\x36\x41\x07\x33\x6c\x98\xea\x2a\x28\x75\xee\xd3\xa7\xf5\x7b\xa6\x94\x87\x40\xe5\x35\x21\x22\xa5\x32\x54\x2d\xd7\xa9\xea\xf9\x3a\x4f\xd6\xf8\xba\x50\x00\x04\xa4\xc8\x9b\x6f\x06\x16\x5b\x22\x37\xc4\x87\x0a\xe8\x8a\x94\xd6\x61\x9e\xf0\x50\x01\x79\x5c\x63\x3a\xd7\x8c\x2f\x35\xa0\x90\xe1\x16\xc3\x59\xa2\xdb\x10\xe0\xb6\x51\xfa\x5a\x8e\x52\xdd\x41\x8d\x08\xb4\x07\x81\xed\x02\x21\x9e\x7b\xc5\x4c\xd9\x3d\xcf\x66\xd9\xcc\xe5\x78\x4a\xad\x49\xd9\xbe\xcc\xbf\xe4\x7c\x1f\x7b\x7c\xc9\x9b\xf0\x6b\xed\xbc\xe8\xbb\xfb\xdd\xf1\x5e\xd0\x64\xec\x6f\x1c\xb0\x1d\xb5\x4e\x07\xd1\x79\xf7\xb3\x8e\xff\x09\x67\x85\xfe\x42\x8d\xe3\xdc\xf2\xe9\xa2\x23\xdc\x98\xf6\x25\x8d\x3b\x4c\x5e\x41\x05\x1f\xb4\x2c\x78\xb3\x26\xf5\xaa\x3a\x63\x73\xf3\x4b\xcb\xf3\xb3\x33\xab\xf3\x73\xa6\xfd\x02\xe3\xff\xb2\xb8\x2b\x4f\xf1\x2e\x5f\xe7\x79\xe9\x74\xb9\x8e\xbc\xc3\xf5\x23\x87\xc0\xb6\xc7\x09\xc0\x2f\x4f\xe3\x96\x42\x30\xe4\x51\xa4\xf7\xa0\xa1\x93\x1d\xae\x1f\x39\xac\x3e\xb3\x54\xff\x37\x54\xd8\x15\x74\x2b\x06\xc3\xdb\x4b\x2a\xfe\xff\x76\x8a\xe8\x50\xea\xda\x9c\x6b\x38\xb4\xfe\xd6\x44\x28\xcf\xf5\x1c\x95\x36\x48\xf5\x09\x69\xba\x5e\xf2\x13\x00\xc1\x5e\x35\x5e\xad\xec\x81\x21\xb0\x22\x03\xe5\x91\x2a\x4d\x8b\xe6\x05\xb8\xbc\x02\x30\x5f\x1f\xc3\xff\x12\x8d\xb7\x0d\x2a\x85\x8e\x3a\x0b\x52\x8f\xd8\xf9\x62\x2f\x58\x8f\x40\x2c\x40\x6e\x34\x99\xd8\x50\xf9\x97\xba\x
13\xca\x8e\x52\xbc\xbc\xf7\x4e\xb0\x2b\xe9\x4c\x42\x98\xab\xb7\x4d\xf8\xaf\x9b\x38\x65\x3c\x29\xfa\xae\xf1\xe6\x66\x53\x7a\x62\x8b\xaa\x98\x05\x88\x9f\x62\xb8\x86\x3c\x65\x61\x20\xce\x2e\x9e\x7e\x9a\xb7\x70\x12\xde\x86\x52\xb1\xbf\x88\xbc\x98\xa1\xb0\x50\xd0\x69\x25\x19\x69\xb2\xb2\x6a\x6e\x09\xd2\x30\xc0\x24\xf4\xe9\xac\x55\x82\xc5\x80\xd2\x94\xdb\x55\x7f\x86\x66\x45\x1a\x70\x94\xd2\xf0\x09\x60\x8e\xaf\x19\x3c\x85\x14\x38\x4a\x1d\xc0\x38\x4b\x15\xf6\x8b\x6d\xa4\xcf\x21\xb3\x57\x81\x9f\x52\xa0\x21\xd0\x65\xa3\xeb\x3d\x2d\xf4\x37\x77\x47\x23\x60\x64\xd4\x30\x77\x52\x28\xa2\xf5\x1c\xbb\x80\xa2\x3b\xcd\x73\xa7\xc3\xcf\x0b\xe3\x84\xa1\xe6\x51\x38\x22\x05\x73\x33\x36\x3c\x9d\x4f\x6f\x28\x35\x49\xb8\x38\x3d\x71\x21\x48\x33\x0a\x07\xa6\xc5\x05\xe3\x22\x2f\x35\x0a\x7d\xdf\x3b\x76\xe8\x66\xaf\xa6\x02\xee\xf3\x78\x5e\x11\x80\x8d\x84\x5e\x3b\x72\xe8\xb0\x37\x4b\xae\x9c\x38\xf1\xae\x3f\x74\x08\xc7\x92\x90\x18\xd1\xa9\x2f\x6f\x6e\x52\xc7\x36\xe8\xcf\xed\xc5\xd2\x00\x6f\x1c\x89\xdd\x16\xe0\x3f\x2a\xb2\xc4\xa0\x83\x28\x8a\x99\x05\x2d\xa0\x62\x6a\x72\xb8\xd6\xbe\x37\x65\x44\xee\x62\x56\xa7\x2e\x40\xf9\x61\x74\xb1\xcb\xf2\x8b\xda\x0c\xbc\xd5\x16\xbc\x29\x92\xe9\x3a\x63\x33\x80\x95\x90\xe3\x0c\xa8\xdd\x60\x99\x97\x1e\xf6\xe8\x81\x5c\xb1\xeb\x9b\x1b\xf7\x1f\xc6\xd8\xa3\x95\xf2\xa7\xaf\xc2\xee\x9f\x8e\x5d\x3e\xf7\xef\x4f\x8f\xcd\x16\xcb\xc3\x03\x21\xd3\x7f\x0e\xc4\x47\xd1\x6a\x81\x51\xdd\xd4\x97\x48\x75\xce\x73\x9d\x96\xac\x60\x48\xe5\xd0\xad\xf7\x78\xc2\xa3\x4c\xe4\xba\x14\xca\xab\x36\xef\x76\x45\x94\xd6\xc8\xf6\x40\x17\x2b\xf4\xf8\xa4\xa2\xcb\x41\xaa\x68\x38\xc5\xb4\xee\xcd\x52\x2e\x93\x02\x61\xd5\x89\xbd\xb0\x3d\x54\x95\xba\xcd\x38\xdd\xcb\x27\x67\x8f\x1e\x3d\x7a\x33\xba\x33\x08\xa4\x3e\x88\xbc\x73\xab\xb3\x75\xc6\xb6\x71\xb8\x79\x8b\x3d\x82\x17\x97\xaa\xc6\xc3\x1f\x3c\xa2\xa1\x76\xbe\xe2\xb0\xc7\x29\xdb\xc7\xa9\x84\x37\xc4\x92\x48\x02\x60\x12\x82\xa8\x8a\x3f\x74\x14\x03\xf2\xaf\x39\x8b\x03\xd1\x14\x26\x0a\x9d\x39\x45\x52\x60\xcb\xb2\x5a\x3d\x08\x70\x07\xa0\x57\x39\xcc\x34\x07\x41\x2e\x53\xd5\x61\xab\x8d\x32\x6a\x68\x14\xd2\x1e\xce\xff\xa0\x75\x06\x30\xf0\xd2\x18\x7d\xde\xf2\x8a\xa8\x6c\x2f\x69\x3b\x4e\x32\x01\xf8\x91\xf9\xf0\xda\x14\xdc\x57\xb0\x47\x0f\x14\xe8\xbe\xf4\x8a\xcb\x5f\xe3\x9f\x64\x73\x65\x0d\x5c\x3e\x39\xeb\xc1\xaa\xa0\x38\xf1\x26\x09\x9a\x4c\xab\x04\xb0\x90\xc8\x92\xaa\x8f\x19\x54\x6e\x8b\x07\xb8\x1c\x0b\xeb\x5c\xc4\x91\xe1\x7a\xa4\xf5\x20\x29\xeb\x31\x00\xa5\xa8\x6f\x10\x70\x81\x56\xa6\x5c\xd6\x18\xa5\x45\x70\x86\x60\x43\x40\x5d\x18\x56\x3f\x9e\x4a\xba\x69\xf2\x4a\x74\xa1\x8b\xdc\xbc\xb8\xb9\x94\xd1\x6e\x2a\x67\x47\xda\x08\xc8\x4e\x22\xa7\x52\x7f\x1a\xe2\x19\x00\x1a\x0b\x30\xe7\xf2\x46\x5b\xd5\x26\xa5\xf2\xf4\xa0\x14\x35\xce\x99\xaa\x17\x47\x04\xbf\x68\x18\x4a\x94\xcd\x07\xef\x38\xde\x4c\x0a\x4d\x50\xe7\x62\xf1\xc5\x3c\x11\x34\x13\x9d\xb4\x6a\x0d\xd3\x5a\x18\x37\xce\xcb\xb1\x3e\xab\xaa\x29\x5d\x69\x14\x00\xa7\x8f\x77\x03\x9f\x19\x1d\x30\x71\xa2\x24\x4e\xab\x97\xc0\xc9\x15\x44\xa4\x44\x20\x1b\x89\x71\xb9\x0f\x32\xed\x25\x51\x4b\xd5\x2b\xfa\x22\xe8\x05\x35\xca\x90\x5c\xa3\x2f\x1d\x72\x98\x0a\xb8\xfd\x1c\x51\x29\x30\x22\x4d\x99\x05\x00\xc8\x01\x02\xcf\x73\x23\x6b\x10\x79\x47\x0f\xa9\xfd\x8b\xb3\x6c\x61\xa7\xcb\xf9\xc9\x90\x51\x03\xc3\x60\x8d\xf5\xa5\xf6\x31\x24\xd3\xa0\x81\x4d\x29\x4a\x2a\x28\x4a\x5f\xd8\xba\x71\xb3\xee\xcd\xd0\xcc\xf2\xcc\xa8\x14\xf5\x01\xab\x52\x59\x97\xc7\xbd\x36\x4f\x9a\x65\x95\x4c\xad\x2c\x9c\xba\x63\xe1\xf4\xe9\xe9\x81\xea\x50\x75\x85\x5a\x1a\xa1\xe0\x51\xaf\x5b\x35\xed\xbd\xb2\xef\x5a\xb0\xcc\x2c\x2d\x40\xd8\x2b\xfc\x00\xa2\x1b\xbd\x17\x0a\x5c\x13\x88\xd2\x61\xe3\xdb\xd7\
x4a\x39\xa6\x08\x1f\x0d\xfa\x8c\x5e\xae\x34\x11\xb0\x46\xaa\x5e\x2f\xca\x82\x10\xdd\xca\x3a\x1f\x3d\x06\x8f\x19\x19\x5e\x60\x16\xc1\x44\x0a\x36\xb1\x1c\xfe\x3a\x2b\x6c\xb4\x82\xcc\x44\xb5\x95\xce\xa1\x6a\xc9\x66\xb7\xcd\xd4\x96\xe6\xab\x85\xc0\xf0\x33\x68\x7b\x42\xc4\x3a\xa9\x1e\xa9\x93\xe9\x15\xfb\xd8\xd5\x6a\x60\x54\x40\xe8\xbf\x8e\xfb\xff\x38\x0e\xae\x1d\x2e\xd5\x3a\xdc\x8d\xb0\xc0\x0a\xde\xa9\x3c\x5e\xd3\x44\xd8\x32\x4e\xa8\xa2\x71\xd2\x48\x62\x2e\x60\xf7\x8b\x48\x5b\x23\xc0\x02\x81\xe6\xd5\x26\x9e\x3a\xea\x73\xe0\x35\xef\x66\x41\x47\x5e\xdf\x20\xf9\x1c\xc3\x38\x1a\xfd\x2a\x7d\x40\x61\x46\x10\xdb\xaf\x14\xcc\x4d\x30\x6b\xe6\x58\xce\xc0\x83\xa0\x17\x43\xac\x29\xfb\xf5\x45\xd0\xd0\x4e\x20\xc8\x40\xaa\x41\x1e\x12\x1d\x90\xd5\x95\xa7\x6a\x70\x14\x81\x80\x68\x7a\xbd\xa8\x13\x37\x51\x57\x37\xd9\x04\x8c\x44\xcd\x3e\xac\x73\x75\x86\x22\x1e\x01\x1a\xbc\x61\x5f\x34\x7a\x21\x4f\x46\xb7\x6c\x7b\x6a\x0e\x46\xba\x28\x2b\x5c\x46\x3a\x7d\xde\x68\x63\x1a\x64\xfb\x1f\xf6\xd5\x66\x4c\x54\x8d\x4c\x2e\x35\x1e\x35\x6b\x10\x94\x99\x0e\xa8\xc7\xbf\x5c\x61\x63\xbd\xa0\xe9\xbe\xb3\xe2\xbf\xa1\x72\x6e\x61\x4e\x19\xfd\xe8\x9a\x21\x85\x5f\xa0\x12\x60\xe0\x6e\x6e\x66\xa4\x98\x56\x2e\x0c\xc9\xe8\x77\xd1\x1e\x6b\xdc\x9c\xec\x23\x5e\x0a\xc1\x1e\xb0\x21\xc8\xbd\xa9\xad\xb2\xe0\x74\xd5\xb3\x61\x1c\xea\x4a\x21\xcb\x62\xb5\xea\xe2\xc8\x5b\x3a\xb7\x6a\x2a\xb8\xdb\x9b\xa8\xdd\xd8\x6e\x7b\x81\x1d\xe8\xff\x1b\x15\x76\x05\xd8\xbb\xb0\x05\xee\x7f\xaf\xf8\xaf\xac\xcc\xe4\x0f\xc8\x16\xd0\x8b\xd2\x2c\xe9\xc1\x54\x35\xc1\x2b\xab\x0e\xdb\x2e\x00\x6d\xe7\x17\x68\x23\x73\x82\x67\x66\xc8\xc1\x5a\x3f\x77\xf2\x64\x31\x64\xe9\xc6\xf8\x2d\x79\xdb\x88\x70\x83\x27\x6b\x41\x96\xf0\xa4\x6f\x60\x5b\xc2\x26\x50\x9e\x15\x08\xe7\xc4\x40\xdc\xc8\x0c\x0b\x06\x19\x03\x0c\x90\x20\x17\x60\x57\xf5\x21\x5b\x44\x7b\x2b\x76\x38\x6e\xc6\xb8\x8c\xf2\x3a\xbc\xd2\x61\x2c\x37\xf2\xbb\xf7\x6b\xb5\x3c\x9a\xf1\x52\x29\xa6\xe5\x69\x46\x71\xbe\xd6\x75\xca\x60\xc5\xb1\x9d\x04\x99\x81\xe0\x0c\x49\x4c\xdb\x32\xc2\x59\x8a\xf4\xfd\xe3\xec\x51\x64\x48\x85\x28\x8e\xd4\xfd\xa7\x31\xff\xf5\x63\x67\xcc\x47\x72\xfa\x52\x4f\x9e\xb0\xad\x30\xde\xac\x05\x68\x2d\xd0\xe9\xda\x4a\x10\x81\x30\x21\xe2\x30\x4d\x0c\x65\x18\x69\xd1\x9f\x4b\xa5\xe4\x56\xb3\x4e\x9c\x66\x44\x53\xa6\x25\x7e\x3b\xee\xa5\xe2\xbc\x10\x5d\x4d\xe3\x89\xae\xeb\x7c\xb7\xe1\x7c\x46\xd7\x66\x5e\x44\xd7\x45\xd2\xe1\x72\x86\x1b\x0b\xca\x69\x46\xd7\xac\x8e\x0c\xa5\x51\x5f\x9b\x82\x12\x5b\xb5\x8c\xc8\xfa\x21\x69\x75\x39\xb6\xbb\x9e\x0a\xde\xed\x86\x7d\xaf\xcb\xb3\x36\xc0\x33\x78\x7e\x23\xa8\x35\x9a\xbe\xc2\x8c\x30\x47\x03\x2e\x3a\x9b\xbc\xaf\x55\xab\x3c\xd5\x5d\xe1\x5d\xa8\xc6\xc1\x49\x54\x58\x98\xb9\x6e\x60\xd9\x91\x17\xd8\x29\x36\x7f\x19\xd7\x1d\x6b\x6e\xe7\xa3\x2c\xe9\xb3\x97\x4c\x96\x66\x01\xcc\x89\x6e\x18\xf7\x3b\x22\x42\x94\xe0\x2f\x4d\xf8\x53\xf6\x23\x1b\x86\x30\xff\x6d\x08\x40\xf0\xef\xed\xdf\xcb\x0e\xd9\x2d\x2c\x6c\x5b\xc1\xc2\x3e\xcb\xff\x9e\x05\x45\x72\x9d\x19\xe8\x7c\xe6\x2c\x6c\x3f\xe9\x69\x70\xca\xf7\xf2\xe2\x76\x91\x17\xb7\x66\xe4\xc5\xdd\xb5\x3b\x24\xe7\x6f\x1f\x82\xe4\x7c\xc9\x79\xea\xd6\x79\x42\xd7\xbb\x47\x6b\xe5\xa0\xbd\xd6\x3e\x2e\x66\x05\x7d\x6c\x92\x1d\x1d\xc1\x03\x55\x4a\x01\xd5\x15\x0d\xf7\x35\x93\xfe\x5b\x9c\x21\x3f\xe2\xe2\xc1\xf0\xd5\xae\x79\x82\x71\x78\x31\x47\x5e\xb4\xb8\x3c\x0d\xfc\xce\x19\xb3\x15\x33\x79\x52\xba\xdc\xa9\x06\xa8\xe8\xb0\xd7\x94\xc2\x9a\x0a\x3b\xcb\xfa\x2f\xf7\xb3\x45\xb6\x1f\xc1\x20\xdd\x39\x7f\xea\x14\xc2\x42\x9a\x81\x86\xa9\xd7\x8f\x7b\xd7\x26\x88\x7c\x4b\x11\x2a\xa3\x68\xb4\x1e\x70\xd8\xb7\x19\x58\x9f\x79\x23\xdc\x97\x3b\x23\xe0\x3c\x07\x86\x79\xb1\xac\x0c\xff\x4c\xe9\x63
\x23\x8d\xa2\x88\x96\xc5\x6d\x08\x57\x7b\xbc\x65\x63\xdd\x64\xb0\xa5\xaf\x1a\x45\x29\x39\xd0\xd2\x92\x66\xae\x6c\x35\x21\xa3\x1a\x3c\xac\xb1\xb7\xa1\xee\x7e\xa3\x7f\x1d\x68\xee\xa5\x7c\x9b\xf4\xae\x9c\x27\x79\xbe\x5b\x4e\xad\x77\x3a\x6c\x5c\x3e\x74\xdf\xe2\xf8\xaf\x76\xce\xa5\x42\xc7\xb4\x83\xf1\xa5\x64\x9a\xe5\xd5\xbd\x1f\xf7\x74\x2c\xb3\x2f\x3f\xf2\x35\x3d\xba\x8f\xcb\x45\xc1\x88\x04\x29\x85\x76\x89\xa4\x9b\x08\xba\x17\xf9\x77\x03\x6a\x69\xcb\x83\xfa\x36\x05\x29\xa8\xdc\x00\x13\xe6\x51\x9f\x96\x9a\xd9\xda\x8f\x38\x6c\x9f\xb8\x90\x25\xdc\xfd\x80\xe3\xbf\xcc\x99\x97\xff\xf4\x1a\x71\x82\x21\x4d\x4d\x7d\x84\x41\x37\x17\xa2\x56\x5c\x3f\x25\x32\x78\x6b\x6a\x5a\x0a\x8a\xb6\x69\x3c\x31\x10\x50\x65\xb7\x08\x52\x46\xb1\xcf\x04\x51\xb7\xa7\x23\x25\x0c\x02\xb4\x00\x15\x2c\x82\x31\x52\x6c\x00\x6d\x91\x88\xfa\x03\xce\xe3\xd9\xe3\x86\xef\x02\x53\xf5\xfd\xed\x71\x0b\x13\x7c\x4d\xde\x99\x73\x6e\xd5\x24\x8e\x9e\x12\xaf\x51\x86\xcc\x1b\xc6\xfd\x9b\xad\x27\x45\xb3\x80\xc6\x5e\x50\x59\xfb\xdc\x6b\x24\x71\xe4\x3d\x3b\x5e\xb3\xc3\x19\xff\x68\x8c\xbd\xd9\x61\xfb\x79\x23\x0b\x36\x84\xfb\x5a\xc7\xaf\xe5\x30\xea\x94\x28\x87\x8c\x9c\x58\x60\xd8\xf7\x92\x5e\x14\xc9\x79\x7f\x76\xbc\x66\x9f\x9d\xc7\xd8\x91\x6d\xc4\x86\x16\x22\xf7\xb7\x89\xf3\xf1\x5a\x87\x5d\x13\xf2\x34\x5b\x41\x64\x45\x01\xd9\xfb\xcf\x73\x2e\xdf\x92\x7d\x72\xc1\xd8\x16\xa0\x4a\x6e\x92\x59\x55\x56\x83\xb7\x5b\xf9\xd7\xb3\xe3\x35\xf8\x25\xbf\x9d\x4a\xbd\x9a\x5a\xd1\xac\xb3\x57\x39\x08\x2c\xb0\xa2\x7f\x87\xa6\x3d\xf7\xf2\x5b\x76\x62\x07\x2d\xb3\x5a\xd5\x88\x3b\x5d\xb4\xa8\xb3\x9f\x79\x82\x95\xe2\x60\x20\xb4\x80\xfe\x32\x9b\xff\xbd\x12\xac\xcb\xd9\x5c\x46\x71\x00\xe7\xd3\x5f\x7e\x97\x7f\xfb\xc8\x37\x0a\x98\x14\x06\x78\x59\x0e\xe0\x3d\x41\xff\xbc\xe8\x30\x80\x0d\x85\xf0\x01\x6b\xe5\x5d\x7a\x3c\x7b\xc1\x7e\xb6\x1f\x90\x55\x53\xf7\x1b\xfb\xfc\xf7\xee\xc3\x7f\x6b\xe7\xa0\x41\xd4\x29\x6f\xc9\xf4\x6b\x6e\xf1\xa7\x7b\x02\x40\xcb\x36\x2d\x18\x1a\xc6\x96\x95\xfe\x23\x45\x25\x60\xac\xa1\x7e\x64\x0e\x85\x71\x53\xa2\x42\x8f\x7b\x7e\x33\x58\x0f\x32\x1e\xa2\x4d\x56\x2a\x34\x7e\xd5\xf3\x65\xf5\x22\x6a\x04\xdd\xb6\x48\xa4\x3a\x00\xf4\x9f\x58\xa0\x14\x03\x7e\x59\x85\x25\xf0\x38\xe5\x35\x96\x15\x5e\xda\x0a\x05\x97\x43\x35\xde\x05\xc6\x2b\x65\x0e\x4b\xc4\x71\xe6\xf9\x29\x4e\xd6\xd0\x22\xe4\xcc\x21\xaa\x48\xa7\x13\x64\x58\x1b\x2b\x6f\x82\x7c\xc6\xd7\x13\x21\x74\x9b\x78\xc6\x0b\x6f\x31\xcf\x97\xdd\x83\x3a\xa0\xf8\x24\xd4\xff\x56\x6f\x82\xb1\x0d\xbe\x17\x85\x07\x3c\xea\x43\x11\x66\xbf\x0a\x03\x0b\x35\x00\x04\x6b\xde\x31\xd1\xe1\x41\x68\xb0\x99\xc1\xd0\x1c\xec\xc8\xbd\x23\x5f\x0f\xba\xa9\x68\x78\x42\xe3\x5a\xcb\x9f\xf1\x59\xd6\x8b\x22\x11\xe6\x7f\xcb\x13\x01\x3e\xd1\xf6\x6d\xaa\x21\x6e\xa4\x5d\xb3\xc6\x4e\xd0\x48\xe2\x34\x6e\x65\x5e\xba\xde\x00\x5e\x2f\x91\xa5\x0d\xde\x15\xf0\xf7\xa8\x90\xd3\x6d\x09\xb7\xb7\x3a\x6c\x52\xb6\x05\xa2\x2d\x5f\xe7\xf8\x2f\x72\xd4\x5f\xf6\x4e\x33\x63\x6e\xe0\x24\x46\xe3\x2e\xd9\x16\x21\xae\x7f\xd8\xb6\x2d\xb1\x79\xe4\x58\x4e\x5e\x1c\xe5\x26\x37\x30\xb2\x75\x3a\x3d\x70\x95\x59\x6a\xc1\x87\xf5\x41\xfb\x6b\x8e\xff\x52\x47\xd0\x41\x4b\x0d\xc4\x3f\x6d\x18\xa4\x47\xb0\xa1\x3b\x38\x67\x3f\xeb\x68\x4d\xf6\x93\x52\x63\x20\xed\x55\xf7\x84\x00\x3a\x41\xed\x48\xdb\x41\xf7\x11\x1f\xf2\x5d\x2e\xa7\xff\xe5\x30\x25\x7d\xdd\x2f\x2a\x04\xb0\x07\x1d\xe5\x2f\xcb\x49\xf7\x23\xef\xc2\xf5\x87\x6e\xb6\x04\x38\x2d\x7a\xed\x5c\x53\xd9\x8b\x10\xe6\xeb\xcf\xce\x2f\xaf\x2e\x9c\x5c\x98\x9d\x59\x9d\xf7\x96\xe7\xef\x3c\x37\xbf\xb2\xea\x03\x96\xd8\x1a\x45\x7f\x63\x86\x62\xa2\xe8\x8e\x79\xea\x3d\x65\xe5\xec\xa2\x17\x27\xde\xd3\x66\xce\x9c\x46\x8f\x18\x88\x11\x79\xcd\xd1\x79\x89\x61\x1
f\xe0\xff\x6e\x38\xa6\x92\x25\x2f\x03\x08\xec\xe5\x57\x33\xe3\xa0\x71\x9f\x77\xb5\xff\xcf\x57\xe5\x7f\x17\x52\x87\xf3\x23\x04\x5f\xd1\xfe\x62\x48\x39\x0d\xd1\x81\x20\x77\x5b\x9d\x31\x79\xf3\x3b\x88\xfe\x8b\xc4\x14\xf1\x43\xe7\x1d\xfd\x5e\xad\x20\xcc\x70\x9a\x8d\x60\xa2\x1e\x11\xb8\xfa\xf2\x84\xab\xe7\xcd\xbb\x75\x71\xe6\xcc\xbc\x8f\x2f\xa9\x8c\xd2\x3a\x63\x77\x8b\x30\xac\x01\x10\x92\x19\x06\x88\x9f\x29\x89\x7f\xb8\x8e\x18\x80\xb9\xe1\x15\x91\x39\x55\x98\x5b\x0d\xa5\xa9\x7f\x5c\x71\x8e\x97\x9e\x83\x25\x11\xec\x26\x17\x80\xfc\xdb\x2e\xb6\xce\x3c\xcf\x3a\xf1\x10\x60\x0d\x9a\x86\xc6\x65\x41\x22\x3d\xae\x41\x88\xf6\x06\xee\x02\x28\x25\xb7\x20\xd6\xd0\xf2\x99\x54\x55\xed\x74\x96\xd3\x7e\xf1\x1b\x69\xa2\xc4\xb0\x19\x6d\x1a\x44\xc3\x0a\xaa\x33\xef\xc8\xf6\x46\xa4\x46\xa8\xf6\xdb\x18\x19\x8d\x7f\x0f\xf8\xa9\xbb\x1a\x1a\xea\xe6\xc0\xb8\xa8\xde\xe2\xb3\xed\xf7\xb7\x4a\x09\x06\x0f\xd5\xe0\x1d\x2d\x1d\xbc\x50\x64\x35\x52\x67\x7c\x35\x5a\xe5\xea\x4d\xd9\x70\xc1\xb0\x80\x4a\xa4\x0c\x56\x1a\x9b\xd2\x1e\x39\xe8\x47\x23\x8e\x22\x0a\xad\x41\xdc\xdc\xb0\xff\x90\x2f\xb6\x87\x74\xcc\x18\x18\xcf\x34\x58\x5a\x22\x4c\x74\x93\x2c\xc7\x57\xb8\x29\x77\x8b\xe8\x7c\xc5\x83\x78\x73\xaf\xc9\x15\x15\xc1\xff\xde\x77\xd0\x18\xd0\x1a\xb5\xa5\xa6\x24\xcf\xc1\x27\x1a\x72\x90\x04\x01\x63\xb3\xbd\x34\x8b\x3b\x5e\x2e\x4f\x50\x04\xa9\xb0\x4c\x23\xd6\x0e\xac\xee\x38\x6e\x64\x44\x46\x11\xb2\x0a\xf0\x8c\xcd\x20\xc5\xd3\x1b\x4c\x5c\x80\xa7\x04\xcf\xa7\x34\x44\x24\x81\x23\xeb\x17\xe5\x5d\x43\x6e\xb8\x25\x91\x74\x82\x0c\x04\x2a\xd1\xa1\x1c\x27\x42\x6c\x62\xcc\x24\xef\x7b\x33\x48\x95\xe7\x4e\x91\x21\x59\xee\x77\x58\x80\x8a\x54\xa4\x8a\xf9\x59\x18\xaa\x88\x49\xf1\x6b\x41\xb3\x29\xe8\xd8\xca\x71\x9d\xd5\x45\x40\x1d\x5a\x53\x98\xd7\x07\x70\xac\x84\xda\x4e\xb5\xcd\x84\x19\x8e\x0f\x4c\x93\x62\x93\xa6\x20\x25\x4c\x4d\x4d\x84\xec\x1b\x42\x12\x82\x9f\x9a\xda\x8b\x7a\xf6\xf4\x56\xdd\xd2\xad\x2a\x76\xec\xd8\x76\x3a\x66\x5c\x73\x0e\x62\x51\x4d\x72\x11\xe2\xd3\x3a\xf3\xae\xaf\x7b\xf3\x17\xba\x01\xfa\xb7\xcc\xc5\xe2\x85\x41\x4b\x48\x4d\xf6\xb8\xee\x35\xa2\x01\xb4\x82\x0b\x86\xa7\x8b\x0e\x3c\x9d\xdf\x42\x21\x4a\x60\xd1\x68\x76\x82\xa8\xce\xbc\x1b\xe0\x24\x87\x12\x80\xb1\x29\x2b\x9c\x7c\x33\xf6\xa6\x37\xc6\xd2\xd2\x17\x7f\xd2\x41\x3b\xd4\x2b\x1c\xff\x7e\xa7\x17\x34\x6d\x65\x56\x3e\xf8\xe6\xe9\xb2\x3f\x76\x35\x7b\x7c\x69\x6a\x30\x09\x28\xf7\x8b\x57\xf9\xff\x5a\xd1\x7f\x96\x31\x3d\x6a\x61\x46\xe6\x21\x79\xf9\xee\x08\xb2\xc6\xf3\x46\xd6\x93\x77\x2f\xcc\xcf\x92\x53\x06\x01\x4c\xc7\x99\x07\x61\xb0\xc7\x3d\xbf\xd3\x4f\x37\x1a\xf2\xf6\xe1\xad\xf4\xd6\x52\x21\x77\xcc\x33\x98\xe7\x79\xde\x73\xe0\x7f\x3d\x6f\x06\x19\x80\x85\xfc\xe1\x39\x7e\xd0\xf5\x8f\x7b\xfe\xe1\x43\x75\xf9\xff\xeb\x87\xfd\xe7\x56\x3d\xfb\xe1\x91\xfa\x11\xff\xb9\xcf\xac\xd2\xc7\x4b\x71\x92\xe1\x87\x52\x8d\x91\x6f\x71\x79\x6f\xe9\xc6\x89\x3c\xf0\x6e\xba\xe1\xc6\xeb\xa1\x00\xf5\xe3\x9a\xf1\xe3\xd1\x43\x37\x3f\xf7\x99\x50\xcc\x73\xab\xdb\x6c\xd0\xd1\xfa\xd1\x6d\xd7\x7d\xf3\xd1\xa1\x35\xdf\x78\x83\x51\xf1\x33\x6d\xe6\xaf\x2b\xd8\x2f\x8c\xb3\x89\x14\x87\xca\x7d\xcb\xb8\xff\xe7\x63\x86\x0b\x91\x87\xa1\x31\x1f\x79\xac\x02\x39\xbe\xc2\xd0\xa3\x2f\xeb\x79\x2f\x60\xdd\x76\x43\xde\x00\xf5\x56\x1e\x37\xf8\xca\x00\xad\x95\xb7\xb0\x04\x05\xf6\x31\x99\x14\x78\x96\x82\x68\x5d\x8a\x77\x22\x69\x06\x77\x7d\xa7\x17\x66\x41\x37\x14\x80\x41\x9e\x56\x31\xb7\x2d\x6e\x19\x28\xcc\x88\xb5\xc5\x09\xb4\xde\xfe\x4d\xee\xb2\x29\x95\xf8\x0a\x95\x35\xe4\x3b\x60\x98\x34\x92\x1e\x74\xfc\xd9\xb4\x0a\x63\x4b\x81\x58\x9a\x20\x75\xa9\x3d\x98\x15\xdf\x0c\xd2\x6e\xc8\xfb\xa8\xbd\xe7\x65\xa8\x6e\xaa\x7c\xd3\xfc\x17\x68\x78\xdd\x5b\x8c\x
8d\x8e\x85\xa1\xc7\xbb\x5d\xc1\xe1\x0c\x04\x2a\x29\x63\x00\xc1\xa7\x90\x01\x3a\x59\xfe\x54\x91\x04\x4a\xb5\x1b\xeb\xaa\x7b\x2b\xb2\x42\x39\x13\xd6\xb7\x50\x1f\x6d\xfc\xb8\xd3\x4d\x82\x54\x10\x29\x45\xd0\xb0\xef\x41\x47\xd9\x61\x76\x70\x4b\xd3\xa2\xda\xae\xb8\xa1\xf6\xbc\xa8\x7b\x6c\xcc\x7b\x6c\xcc\x0f\x2d\x1b\xf3\x9d\x5b\x3b\x53\xeb\x6e\xb5\x46\xee\x53\x7d\x7c\x16\x3c\xa7\x9a\x21\xd5\xf7\x19\x7b\xef\x01\x76\x9d\xb1\x97\xd3\x2c\x4e\xf8\x3a\x6c\x67\xe4\x9c\x9f\xc9\x32\xde\x68\xeb\xf0\x8a\x1f\x3b\xe0\xcf\x94\xfd\x50\x76\x42\x17\xdf\xd3\xc1\x4b\xa5\x71\x17\x1f\x9a\x60\x1b\x2a\x6c\xa0\xe3\x7f\x5f\x69\xd8\x40\xb1\xc4\x74\xfb\xbc\x77\x23\x3a\xb6\xb7\xd1\x76\xb1\xd1\x7e\xd7\xdc\x68\x1f\xdc\x25\x21\xf4\x0f\x95\x87\x11\xec\xf1\x41\xff\x3b\x39\xb2\x2e\x39\xed\xad\x25\xd4\xbc\x3b\xab\xc3\x3d\xd4\xae\x2c\x60\xc1\x96\x49\x98\x62\xf8\xc7\x57\x2a\xec\xce\x7c\x75\xe5\xb7\x51\xc3\x12\x64\x92\xe2\xe4\x2f\x80\x07\x0e\xae\xec\xca\xf9\x7f\x97\xce\x7c\x74\x2f\x56\xfc\x33\xc3\x7e\xb4\x03\xc9\x8c\x74\x49\xf4\x62\xd3\xed\xcc\xfa\x38\xb5\xbd\xbd\x1f\x70\xd8\xaf\x3a\xec\xea\xb8\x2b\x22\xb9\xac\x8e\xae\xc0\x64\xbb\x6f\x73\x86\xb0\xfc\xec\xbc\x63\x4f\x59\x39\xbb\x88\xa5\x2e\x25\x71\x37\xf5\xef\x28\xd4\xa5\x24\xea\x59\x7c\xec\x6d\x1c\xd5\x64\xd0\xc8\x4e\x29\xfb\x60\x74\x0d\xf4\xc4\xa4\x27\xaf\x7f\x75\xf6\xea\x6f\x67\x33\x3b\xe7\x1f\x2a\x92\xa8\x7d\xfc\x3f\x34\x89\xda\x57\x1e\xf3\x1f\x8e\x90\xea\xab\x06\x21\xd5\x97\x2b\xec\xce\xdd\x12\x52\xed\x71\x51\xed\x71\x51\xed\x71\x51\xed\x71\x51\xed\x71\x51\xed\x71\x51\x7d\xab\x71\x51\xed\x11\xa0\xee\x11\xa0\xfe\x7b\x23\x40\x7d\xfd\x04\xbb\x7e\xa7\x81\xf0\x18\xc7\xfa\xb9\xfd\xfe\x77\x0c\xfd\xf5\xa2\x33\x41\x6e\x25\x4b\xc1\x7d\x60\x3f\x6b\x30\xf5\x8b\xfb\x54\xff\x29\x33\x74\x08\x98\x58\xb2\x40\x27\x85\x18\xbf\xe0\x8c\x01\x59\xab\x92\xdb\xb4\xe3\xaf\xc5\x43\x79\xac\xca\x65\xba\x19\x00\xf2\x91\xa1\x75\x3e\xbf\xc2\xf6\x37\x45\x14\x88\xa6\xfb\x75\xc7\xff\xb2\x33\x07\xff\x96\xb5\xa8\x73\x64\x74\x2d\xf8\x6d\x35\x2f\x1e\xab\x03\xb1\x04\x86\x74\x9e\xb7\x1b\x1b\x02\x9c\x32\xba\x96\x16\x52\x61\x69\x3a\x1a\x23\xde\x18\x93\x9f\xbd\xb8\x1b\x44\x94\x4d\xab\xd9\xa9\xe3\xfc\x3d\xa3\x55\x75\x8f\x5a\x6f\x24\xf4\x67\xd4\x76\x63\xf8\x00\xb8\xcb\x1a\x84\xcf\x55\xd8\xd5\x42\xee\x3d\x98\xcc\xf9\x24\x89\x13\xf7\x77\x2b\xfe\x07\x2a\xf3\xf6\x43\x05\x5b\x4a\x90\x34\x2a\x6b\x0b\xfc\x1c\x02\x5e\x88\x1b\x10\x42\xdc\xf4\x9a\xbd\x44\x59\xb0\xac\x95\xe2\x35\xda\xa2\x71\x5e\xa5\x8c\xca\x8b\x7e\x22\xc2\xbe\xd7\x8d\x53\x84\x5b\x00\x70\xa4\x0c\x20\xcb\xa0\x40\x74\xc4\xea\xeb\x94\xd4\x11\x83\xa8\x27\x8c\x94\x66\xbb\xf8\x54\x91\x08\x78\x69\x37\xc8\x48\x47\x44\x78\x01\xc5\xb1\x57\xf5\x96\x4f\xcc\xcc\x2a\x95\x0c\xa0\x41\x21\x5e\x27\x89\x43\x42\xc1\x13\x11\xdc\xe5\xe4\x03\x74\x20\x61\xc6\x3c\x59\x5c\xb0\x4d\x00\xb8\x03\xb8\x85\x00\xda\x33\x10\x73\x6f\x39\x22\x9f\xa9\xe9\xc2\x56\xfc\x93\x04\xf3\x63\x2d\x31\x18\x0f\x1d\xbd\xb4\xd9\xee\xc3\xfd\x04\x1d\xdd\x9b\x3c\x87\x26\x89\x13\x5a\x3b\x56\xf1\xcf\x1b\x67\xc7\x2f\x1f\x56\xdd\xfd\xd0\x98\xbf\x36\xe2\x77\x23\xfa\x0f\xc7\xc5\xbc\x3d\x4b\x71\x29\x32\xb8\xf6\x52\xce\x6c\x9f\x46\x81\x4e\x93\xed\x40\x9a\xff\xcf\x0a\xbb\xc1\x32\x89\x4d\xf9\xdf\x69\xea\xfb\x05\x8a\x78\xb3\xeb\xaf\x77\xc8\xa8\xfa\x32\xc7\x7f\x9e\x03\x46\xd5\xc2\xdb\xb7\x3c\x82\x26\x4b\xeb\xfe\x13\x12\xb2\x66\xd3\xbf\xbb\x8c\xe8\x7e\xa0\x5d\xbb\x03\x94\xfc\xca\x35\xec\xd8\xe5\x04\x83\xbb\xef\xbb\xc6\x7f\x70\xdf\xd0\x9f\x35\x58\x00\x61\x7a\x41\xaa\x46\xa3\xcd\xa3\x20\xed\x00\x04\xea\x9a\x5c\x1a\x03\x21\x86\x29\x80\x6e\x80\x81\x99\xf2\x76\x47\xc4\x1f\xa2\xe6\xdb\xe6\xa0\xdb\x05\x99\xc7\xd3\x7e\xd4\x68\x27\x71\x14\xf7\xd2\
xb0\xef\xe9\x60\x1f\x0c\xdf\x4b\x7b\x80\x00\x71\x87\x15\x7f\xa4\x6e\x88\xba\x45\x18\xf1\x52\x1a\xec\xb5\x55\x68\x97\x37\xa5\xb1\x0f\x77\x14\x62\x66\x84\xe6\x4c\x63\xc0\x4c\x69\xf4\x94\x8a\x1c\xcf\x3d\xd9\xdb\x8b\x8f\x1a\xdd\x2c\x2b\x78\xcb\x6a\x08\x5b\x55\x83\x53\x88\xfd\xd3\xa1\xa2\x8f\xc0\x20\x59\x6d\xc2\x54\x62\xbd\x76\xec\xd1\x49\xe2\x8e\xd7\xc0\x50\xa7\x28\x8e\x6a\x83\x21\x91\x52\x90\x48\x85\xd2\x92\x20\xff\x7a\x60\xcf\xaa\xbe\xe7\x08\xfe\xe6\xf9\xa7\xee\x30\xdc\x53\xb7\xef\xd2\x0d\xcc\xde\x59\x61\xb0\xc2\xdd\xb7\x54\xd8\xc9\xe1\xf1\x10\x3b\x49\xf7\xf1\xff\xc4\x49\xb7\x93\xd3\xa3\x63\xa4\x75\x40\x95\x02\x24\xa2\x68\x2b\xb2\x80\x1b\x8a\x4e\xd5\xd8\xda\x0a\xa0\x00\x62\xdb\x28\xe3\x5f\x99\xbe\x44\x66\x46\x6d\xd5\xbd\xb3\xa0\xca\xd2\x4b\x48\x5c\x95\x04\x14\xd3\x69\xec\x7a\x8a\xe2\xd4\xf0\xb2\x0a\xf8\xa5\x8f\x38\x08\x75\xd0\xe0\x89\x80\xf6\xeb\xce\x08\xb2\x99\xed\x8f\x17\x72\xf3\x3e\xe0\x90\x2e\x59\xea\x35\x40\x7d\x4f\x2b\xe4\x46\x5c\xa2\xdc\x33\xea\xc4\xd2\xea\x9a\x81\xc2\x68\x0c\xba\x1d\x96\xaa\x82\xf6\x14\x42\x3c\x0f\xc2\x5e\x62\x90\xe8\x9a\x50\x90\x14\xde\x49\xef\xd4\x2f\x39\xc9\xd6\x1e\xb1\xb3\xee\x19\xe5\xb3\x1f\xda\xf7\x61\x3e\x7c\x6b\xf0\xc8\x8d\xc6\x5e\x70\xb5\xa5\x76\xee\x70\x80\xdd\x0f\x5f\xe5\xbf\xd4\xd9\xe2\xa5\x7c\xf0\xf5\x30\xe4\xe0\xb2\x4a\x61\xd6\xe3\x7d\x10\x07\xfb\xa0\x1c\x16\xc2\x01\xc9\xc9\x88\xad\x15\x3e\x24\xa3\xcc\x3a\x53\xde\xf6\x28\xf6\x5f\xaf\x60\x57\x18\xbf\xbb\x97\x18\x99\xb7\x7e\x83\xd9\xf3\xe8\x75\x75\xb0\x22\xda\x30\xa3\x92\xe2\xed\x89\xa6\x8d\xc5\x23\x6f\x46\x2d\x17\x63\xaa\x53\x75\xdb\x28\x03\xce\xdb\x08\x38\x14\x74\x90\x7a\x68\x12\x90\x23\xf6\x9b\x6e\x4e\xb5\x00\x73\x9a\x07\x4a\x12\x06\xaa\x18\x99\x19\x02\x58\x6b\xb4\x80\x8d\xe6\xc9\x21\xed\x77\x85\xe7\xe3\x85\xd3\xa7\x4c\x0f\x0d\x9e\xaa\x6b\x4c\x44\x07\x6f\x0e\x88\x6d\x47\x35\xe6\x61\xf2\x72\x5b\xc3\x40\x15\x5b\x32\xa4\xbe\x93\x30\xb3\xdb\xad\x8f\xb2\xe7\x08\x6d\x16\xad\x06\x10\xd0\x41\xda\xa1\xd1\x71\x72\xed\xc0\x62\xc3\x6c\x7b\x22\xc9\xd2\x89\x2f\x29\xea\x74\x33\x61\x68\x3c\xcb\xd9\x46\x50\x15\x32\x72\x67\x7c\xa4\xfb\xaa\xea\x52\xa3\x98\x60\x53\xd3\x7c\x0d\x2a\x93\x28\xc4\x05\x78\x3a\x31\x9f\x7b\x27\xe6\x97\x35\xa9\xd8\xcc\xca\x62\xfd\xb0\xb9\x67\x3d\x8d\x2c\xe4\xf1\xd4\xa0\xc1\x92\xb7\x5e\x0a\x8c\x39\x06\x70\x01\x27\x67\xaf\x3f\x72\xd3\x21\x0c\xb8\x5e\x8c\xa3\x9a\x6c\xb8\x4a\x0e\x04\x18\x7e\x0c\xfa\xd3\x60\xa2\x06\x7a\xa1\xdd\x15\xa3\xc7\x74\x40\xf4\x22\x65\xa6\x6d\x56\x99\x07\x0a\x23\x60\xfa\x23\xed\x5e\x37\xe4\x11\xcf\xe2\xa4\xef\x65\xe2\x42\x36\xb4\x95\xd7\xd7\x8f\x50\x3b\x6f\x3c\x76\xc3\x4d\xb8\x24\x6d\x04\x71\x5d\xb1\xb1\x25\xf2\xe1\xcb\x45\x7d\x61\x97\xcb\xdd\x6f\x67\xdf\x78\xcd\x58\x20\xaa\x94\x16\x1b\xda\x66\x83\x7e\x15\xb2\xeb\xab\xca\xca\x45\x04\xd6\x0d\x11\x8f\xf7\xf6\xe4\x30\xd2\xa8\xe4\xa0\x4a\x06\x0a\x18\x68\x7e\x1d\xd1\x04\xde\x02\x3b\x9a\x1a\x1c\xde\x06\xe2\xaa\xbc\x11\xb4\x79\xd4\x4c\xdb\xfc\x3c\xc0\x8f\xad\x0e\x9c\x15\x66\x9a\x96\x1c\x16\x3c\x8c\xea\x8c\x3d\x54\x09\x59\x55\x2f\x8d\xe5\xfd\x8b\xd0\xc2\xa4\xe4\x3c\xce\x20\x54\x17\xdf\x9b\x82\x7f\xd7\xe4\xff\x3b\x31\x7f\x6a\x61\xd1\x33\x96\x08\x3c\x85\xdf\xeb\xf5\x7a\xfe\xde\xfc\xe2\x5c\xf9\x5b\xd3\x3b\xb7\xbc\xbf\xa6\xc2\x58\x7e\x0a\xb8\x2f\xac\xf8\xcf\x30\x0e\x05\x4e\x38\xc4\xa4\x7b\x6b\x08\x8d\x3b\x20\xb3\xca\x7c\x31\x11\x9e\xaf\x44\xae\x5f\xd5\x32\x0c\x67\x56\x49\x18\x2b\x30\xf5\x2c\x3b\xc3\xee\xd8\xbd\x62\xa1\xd9\xf7\x2f\x39\xd7\xb2\xc7\x0d\x76\xb8\xc3\xbb\xb5\xf3\xa2\x9f\xca\x4e\xd7\x40\xe0\xb1\x4b\x8e\x37\x74\x68\xf6\xbb\xe3\x1d\xde\x65\xec\xf3\xe3\xec\xda\xad\x98\xee\x4e\xf0\xc6\x79\x11\x35\xdd\x5f\x19\xf7\xe7\xec\x47\x06\xf6\x84
eb\xa2\xd8\x14\x22\x09\x8e\x1c\x39\x72\x04\xa6\xd3\x91\xeb\xaf\xbf\x1e\x03\x01\x1b\xa2\x1e\x75\x06\x1f\x84\xa7\x1e\x75\xf4\x68\x2d\x78\xc2\xec\xc9\x13\x9a\x83\x00\xe0\x84\xb1\x64\xf9\x80\xf3\x72\x5e\x45\x18\x45\x8d\xb6\xef\xde\x25\x47\x0b\xb5\x67\x14\x9f\x0c\x76\xae\xca\x18\x3f\x29\x1f\xb7\x69\x64\x94\xd9\x43\xb1\xc8\x00\x12\x49\xd2\x8c\xa3\x3a\x45\xe5\xa0\x61\xd2\x30\xb2\xc9\xfd\x01\x23\x41\x0b\x05\xab\x2d\xeb\x80\x5c\xcd\x41\x33\x07\xc5\x42\xb3\x2f\x28\x12\x1d\xb2\x36\x60\x61\x66\x0c\x50\xfb\x2c\xe1\x61\xca\xe1\x47\x3c\xcc\x35\x1b\xf4\x13\xe6\x0c\xed\x38\x0a\xcd\xbb\xc3\xbb\xf0\x1b\xe7\x43\x2d\x38\x89\xf0\x92\x34\x60\x7c\x3d\x4f\xe3\x5e\xa1\x98\xab\xd5\x4d\x65\xdd\x29\x88\xd3\x3a\x10\xe8\x17\x09\x26\x6b\xb5\x49\xf3\x18\x2a\x9c\x46\x09\xc5\x29\x0c\xcf\xd8\xf3\xed\xdc\x01\xf6\x3d\x43\x96\xc1\x52\xda\x10\x90\xe8\xf0\xfb\x07\xc2\x53\xea\x87\xda\xf6\x11\x03\x42\x53\xcd\xc6\x31\x58\x2a\x95\x59\x17\xa2\xb2\x80\xa7\x03\x95\x4e\xa1\xf9\x9c\x11\x74\x71\x78\xb2\xc3\x1b\xf6\xef\x1a\x17\x2e\xc2\xb8\xf0\x3b\xb6\x71\xe1\xd7\xbd\x8b\x0b\x89\x7f\xb6\x31\x2d\xb8\x31\xf0\x0f\x62\xe3\x76\x6d\x75\x17\x6b\xab\x5b\x52\x99\x2f\x0b\xe1\x21\x85\x5e\x9e\xc8\x75\x6a\xab\x2e\x53\x6c\x92\xfd\xe7\x6d\xad\xff\x72\x79\x9f\xf3\x96\xb7\x37\x6d\x4c\xfb\x3f\x30\xcc\x51\xac\x04\x48\xd9\x96\xf1\xce\x4b\x58\x75\x28\xb5\x83\xe6\x9c\x98\xd3\xde\xf4\xd5\xae\xa8\xfb\xcf\xbc\x24\xbc\x63\xe4\x5d\x6d\x59\x28\xdb\x92\x20\xbc\xd4\x21\x3d\xc6\x97\xdc\xb3\xe6\x5b\x18\xfb\x50\x85\x1d\x28\x44\xa7\x2b\x47\xda\xff\xc5\xca\x0e\xfc\x22\xcb\x69\x63\x8d\x5e\x00\x43\xd6\xb7\x3c\xf5\x53\x9f\x91\x2c\xff\x87\x01\xd7\x2a\x88\x1a\xd4\x18\x3f\xd6\x05\x19\x55\x80\x25\x25\x4a\xf2\x5e\xb3\x19\xd5\x23\x0a\xc5\x90\xb5\x47\x8d\xac\x21\x0a\xc0\xbc\xa1\x9d\xaf\xe0\x67\x44\x6e\x31\x28\x82\x06\x1c\xf0\x40\x55\x63\x45\x34\x2f\x02\xec\xdb\x62\x19\x9c\xb1\xfa\xd0\x5c\xbe\xb2\x9b\x36\xa6\x55\x97\xb1\x7b\x2b\xec\xf2\x4e\x94\xac\x08\xde\xe8\xaf\x42\x5c\x45\xee\x7f\xd3\x53\x7a\xcc\x17\xbd\x93\x51\x12\x75\x7a\x1d\x2b\x58\x14\xa3\x2f\x50\x17\xc0\xed\x83\x07\x89\xd8\x94\x2a\x2c\x75\x86\x45\xa0\xba\x8e\x74\xb3\x7d\x83\x37\x93\xf4\x65\x21\x51\x61\x73\x00\xd5\x33\x9e\xb7\x21\x1d\x03\x79\x7f\x88\xb1\xc2\xf2\x26\xf2\x0d\x1e\xc5\x7c\x3d\x16\x2e\x25\xc0\x91\xe0\x70\xd7\x02\x8a\x1f\xf6\x42\xc0\x73\x70\x83\x43\x80\x2d\xd9\x3b\x78\xa3\x3f\xe5\x28\x01\x6f\xac\xb0\x03\x6a\xc8\xfc\x1f\xd3\x20\xe1\xdf\xf6\x56\xd4\x38\x0e\x80\xd6\x29\x4b\xa7\x7a\xcd\xd0\xf2\xf3\x00\xf6\x25\x04\x62\xb3\xbd\xb0\x4a\x33\x13\x67\xe5\x2b\x51\x11\x3c\x4d\x64\x29\x08\xa3\x5e\xa2\x75\x28\xb7\x81\x47\x1f\xd0\x99\xb0\xd9\xe6\xc5\x74\x94\x4f\xf3\xe9\xa1\xf7\x9d\x2e\x7a\xef\x04\x3b\xa0\xb0\xac\xfc\x9f\x9b\x08\x5f\x32\xe1\xa0\x51\x11\xda\xd5\x3d\x3d\x91\xf5\x71\x42\xcb\x13\x28\x2e\x16\x9a\x0b\xe6\x0c\xa0\x7b\x15\xc2\x92\x41\x8f\xb4\x0b\x33\x78\x58\x06\x78\xc8\xa0\x52\x23\xf4\x96\x06\xd0\xa2\xa3\xe1\xb2\x5c\x99\x34\xa9\x6b\xe4\x33\x06\x16\x56\xd9\xbd\x8e\x15\x52\xaa\xd7\x58\x95\x28\x41\xce\x7c\x33\xdd\xb0\xe1\x86\x77\x66\xb8\x10\xaa\xca\xe5\x8e\x94\x18\x4e\xed\xa8\x66\x69\x52\xaa\xcd\x79\x8e\xa0\xec\xbd\x8d\x48\x6c\xce\x10\x7c\xc9\xb4\x5c\x3b\xd3\x74\xc4\x9b\xc1\x8f\xcc\x5c\x09\xff\x4e\xab\x11\xc9\xc7\x05\x3f\xfc\xf1\x15\xec\xfb\x2d\x41\x28\xf7\xc8\x46\x2f\x26\x14\xb7\x65\x02\x8d\x47\x40\xad\xf7\x5c\x11\x3e\xdd\xb9\xa2\x77\xed\x0e\xef\x76\xa5\x02\x07\xfa\x97\x85\x35\x8f\xf0\x51\xe0\xf5\x35\xb0\xe1\x04\xac\x4f\x90\xfa\xd0\xfd\xe8\xb5\x45\x8d\x90\x74\x73\x29\x0b\x90\x2f\x97\x9e\x94\x7a\x26\x3c\xe1\x48\xf9\x0f\x5c\xf6\x6f\xd3\x11\xf3\x9c\x0a\xbb\xa2\x9b\x09\x39\x91\xa2\x34\x21\x32\x9a\x6f\x7a\xe1\x1f\x7b\xcb\xa5\xab\x4a\
xfa\xd0\x2f\x29\x24\xd5\x8b\x49\x0b\x17\x1a\x68\xeb\x10\x41\x61\x31\x0b\x10\x6b\x21\xb1\x15\x52\xa9\x40\x4e\xb0\xac\x9f\xb1\xa5\xcd\xb0\x27\xe4\x5c\xef\x25\xf2\xd8\x54\x8a\x4f\x5a\x17\x05\x9f\x06\x7e\xdc\x2a\xc4\x2b\x68\x2c\xb5\xa5\x34\x59\xd6\xb5\xd3\xc5\x50\x68\xc3\xf4\x40\x68\xc3\xdb\x3c\x86\x83\xee\xbf\x5e\xef\x3b\x2f\xf0\xcc\x64\x49\x35\x45\x88\x3d\xe3\x8c\xb8\x05\x33\x66\xbd\xe8\xf1\xd8\x3c\x42\x4c\xf7\x90\x98\x86\x6c\x75\x2a\x5a\x8a\x52\x0b\xdd\x48\x05\xe0\xd7\xe3\xb9\x72\xdd\x47\x19\x6e\x61\x5d\x51\x77\xcf\x8a\xbb\x0a\xee\x45\x2a\xb8\x2f\xf2\x5c\x7a\x84\xff\x1a\xa6\x65\x7a\x84\x24\xe0\xd9\x7a\x54\x64\x3c\xeb\x3b\xd1\xc4\xbd\xbc\xc7\xe3\xb8\xaf\x88\x5c\x72\x04\xad\x05\xf2\x20\x29\x71\x4d\x12\x49\x49\x32\x19\x45\x04\x8c\x0f\x76\x6d\x7e\x66\x82\x1d\xc2\x8c\x49\x5a\x04\xfe\xab\x26\xc2\xe7\x4e\x38\x97\x2c\x2b\xe0\xa6\x4d\x60\xe8\x0a\xc9\xa1\x11\x50\x5c\x61\xd8\x61\x41\xba\x5e\xb0\x7c\xf5\xe6\x48\x56\x50\x4c\x8c\x4a\xfa\x03\xd3\x1c\x20\x5b\xd3\x44\x94\xbe\x48\x22\xb4\xc3\xb3\x33\xf8\xa9\xd3\x4e\xb5\x4f\x03\x40\x31\xae\xfb\xa8\x69\x85\x8e\x0c\x94\x24\x72\x15\x93\xa3\x0c\x0f\x51\x56\x2e\x8c\x16\xbd\x03\x2b\x08\x6a\x3b\xd0\x34\xe5\x16\x5c\x37\x10\x8a\x51\x16\xaa\x6a\x78\xf9\x73\xb6\x31\x68\x54\x27\xb9\x0c\x8e\xbb\x66\x86\x0b\x36\x33\x9c\xf3\x1a\xdb\x9f\x01\x67\xfd\xc7\xe8\x33\xa0\xa5\x1d\x94\xa0\xe1\x9d\x71\x2c\x9f\x0b\x3f\xf3\x30\xf6\x03\xa3\x74\x0c\xf4\x77\xba\x8a\xc6\x4f\x3d\x2c\xfc\x63\x6f\x7e\x61\x79\x65\x61\x6e\x76\x6d\x61\x3e\x98\x46\x79\x8e\xd4\x17\x54\xb2\x9c\x50\xee\xac\x77\xc2\x62\xe5\x7e\x33\x50\xdb\x99\x8d\xa3\x33\xce\x3b\xb5\xe0\x21\xa8\xcf\xec\x6e\x24\xbb\x1b\xc9\xee\x46\xb2\xbb\x91\xfc\x2b\xda\x48\x76\xcf\x60\xbb\x67\xb0\x87\xc0\x19\xec\x9c\x17\x6d\xaf\xcf\x1c\xf7\xe7\x2f\x4e\x9f\x41\xc8\x04\xb6\x75\x05\x3b\xb2\xa3\x79\x3d\xbb\xbc\xa8\x02\x49\xfc\x3f\xbb\x3c\xbc\xdd\xfa\x5d\x0a\x23\x50\x0d\xe6\x46\xa0\x40\x72\x0f\x6d\x09\x94\x6e\xa3\xbd\xf2\x86\xe7\xf0\xd2\x3c\x4a\x5a\xbd\x98\x67\x4b\xf0\x93\x99\x67\xb6\x3c\x90\x69\x52\xd7\x10\xd9\xba\xeb\xa3\xfb\xc2\x65\xec\x19\x8c\x49\x75\xa9\x95\x42\x3a\x75\x1a\xae\x9a\x5f\x6e\xcc\x0b\x00\xc8\xc8\x4e\x73\x82\x01\x48\xd7\xa0\xba\x62\x80\x13\x4c\x54\xcc\xeb\x98\xe4\x71\x3c\x39\x35\x26\xb4\xe2\x66\x22\x5f\x7b\x54\x78\x58\x91\xaf\x81\x52\x15\xf7\x32\x42\xaa\x51\xdf\xd6\x31\x00\xf6\xeb\xaf\xaf\x30\xa7\xe5\xfe\x4b\x2a\xe1\xbf\x78\xf6\x15\xed\x43\xa0\x6b\xc3\xcb\x0c\x70\xae\x72\xc4\xe8\x57\x94\x75\x45\xaa\x22\xfa\xa9\x3e\x40\x3f\xa3\x0a\x4a\xbb\xfc\x9e\x9e\x88\xfb\xa8\xea\x95\xbf\x49\x09\x87\x59\xa6\x32\x10\x4d\xfa\xa5\x8a\x3a\xb5\xc0\xdb\xa3\x42\x74\xa0\x74\x64\xf3\x55\x9f\x80\x4c\x17\xfa\xb4\xe1\xdd\x30\x9b\x86\x9c\xe7\xf5\x22\x0e\xe6\x4e\x2c\xa2\x6e\xd6\xe4\xa5\x0e\xfa\x39\xb9\x74\xe5\xb8\xfb\x6f\xf2\xc2\xe7\x78\xf0\xa7\x3b\xae\x79\xaf\x0b\x6c\x2e\x0d\x28\x2d\xc0\x27\x0e\x23\xa9\x7b\x52\x8f\x7b\xa0\x03\x89\xa2\x0a\x2f\x54\x83\x4d\x5e\xd4\xdb\x55\x32\xe6\x57\x69\x0f\xaa\x06\x5d\xbc\xdc\x10\xb1\x90\x3f\xf1\x5f\x13\xbf\x8f\xaa\x62\x37\x4b\xcf\xf6\xc7\xcd\x86\xdf\xf4\xd8\x5e\x98\x63\xfe\x87\xbc\xf0\x1d\x1e\xaa\xfb\x6a\x4a\x40\x46\x8a\x54\x78\xf0\xf2\xe0\x10\x2e\x80\xcd\x35\xea\x50\x1c\x96\x9a\xaf\x76\x1e\x27\x47\xf2\x4e\x3d\x61\x65\x9b\x6a\xc1\xf1\xd4\x8d\x5f\xaa\xe2\xb4\x96\x5b\x35\x21\x92\x98\x8c\x0e\xd2\x7c\x9b\x76\x80\xd7\x6a\x9d\xc7\xc2\x0d\x46\x69\x90\x26\xf1\x83\xe1\xa9\x33\xa4\x47\xc0\x80\xc9\xbf\x55\xa2\xb3\xae\x05\xad\x95\xe3\x69\x3a\x39\xf0\xa0\x25\x09\x26\x9b\x69\x2a\x17\x93\xf9\xca\x1a\xb3\x16\xba\x7f\x3c\xbc\xc1\x0a\xd8\x31\x50\xa9\x91\x23\x4f\x1c\x01\xa2\x08\xe6\x1d\x35\xa8\xcd\x18\x64\x02\x43\x64\x90\xff\xc4\xf0\x16\xf3\xab\x3c\x75
\x5a\x2d\x91\x03\xfb\x28\x64\x0e\x43\xb9\x43\xd6\xeb\xc8\xf1\xfe\xed\x09\xe6\xe7\x45\x9a\xf1\x96\xa0\xc3\xcd\x6d\x3c\x6f\xfb\xbf\x3c\x11\xbe\x75\x42\x2e\xaa\x36\xcf\xdb\xf6\x1e\x23\x02\x7a\x5a\x49\xe3\xaa\x7d\x08\x2a\x09\x23\x24\x86\x97\x07\x1b\xb4\xbf\xc3\xfe\xa2\x08\xd0\xe9\x5c\x03\x30\x71\xb2\x4c\x0c\xd7\xea\x99\xa8\x41\x0b\x78\x05\x17\x3a\xa4\xd3\xa1\x60\x20\x6d\x59\xdc\xd3\xe3\x31\x6c\x77\x40\xf2\x1a\xe5\xc8\x24\x55\xe8\x7d\x31\xca\x29\x04\xd6\x38\x7f\x92\x80\xc7\xdd\x36\xd7\xa9\x5c\xea\x04\x55\x6f\x43\x88\x71\x9a\xd9\x39\x9c\x4e\x8a\x2f\x8a\x19\xbd\xb7\x6b\x8a\x71\xb5\xa7\x6b\x26\x5e\x0c\xdd\x22\x0a\xe4\xd5\x81\xde\x75\xb2\xc8\xc0\x9f\x82\xc9\x67\x8e\xf2\x00\xba\x73\x26\x3a\x3c\xb2\xd8\x43\xc5\x86\x48\x10\x05\x23\x68\x65\xbc\xd1\x93\x93\xcb\x99\xf4\x2f\xac\xb0\xfd\x34\x16\xfe\xb7\xbd\xf0\x9b\x9e\x1a\x98\x81\x25\x6c\x1d\xe1\xb7\x5d\xc4\xa5\x67\x47\x2d\xe3\x8b\x5e\xc5\x1b\x47\x83\xc3\x9b\x90\xf1\x1a\xc1\xd9\x2a\xe0\x3a\x77\x76\xa0\x0a\x56\xa4\xe2\x24\x99\x25\xa6\x5c\x01\xf0\xdc\x43\x6e\xdc\xf6\x00\x8e\xe4\xd1\x51\x38\x92\xfe\x6f\x5d\x1a\x7e\xca\x1b\x71\x53\x75\x65\x3d\x4d\x9a\x51\xab\x87\x61\xb2\x24\x28\x46\xe2\x5e\x56\x95\x27\xb6\x57\xa4\x1d\x5e\x44\x75\x38\xe1\x76\x78\x02\xe0\x49\x38\x00\xe0\xc9\x42\x8f\x1b\x1a\x0b\xfa\xd6\x4a\xb2\x33\xb5\x31\x4a\xaf\x14\x24\x1a\xac\xf3\x1c\xf9\xc3\x20\xbd\x0b\x48\xdb\x2c\xd6\x10\xd7\x2f\x7f\xee\xe0\xae\x85\x63\x37\x6f\xeb\x3b\x77\x54\xfd\x8a\x7d\x54\xfd\xbd\x8b\x3e\xaa\xbe\xc2\x1b\x09\x38\x4a\x9d\xfb\x1d\x39\xb8\x62\x16\xd7\xb7\xbc\xf3\xa2\x7d\x1c\x29\x94\x20\x16\xe6\x17\xbc\x7c\x64\x54\x8e\x52\x6a\x28\xb5\xab\xa7\x73\xbb\x8c\x18\x7a\xb0\xd9\x78\x6a\xec\xe5\x26\x85\xec\xb9\x1e\xbb\xfd\x7e\xe9\x08\x62\xe6\xcc\x35\x5e\x7d\x61\x21\x44\x0c\xc7\x8f\xb0\xba\xe0\x42\xe8\x58\x35\x7a\x6c\x30\x1e\x89\x76\x10\x85\xf6\x28\x63\x5f\xf4\xd8\xf7\x0e\x09\x73\xba\x6d\x6d\x6d\xf9\x36\xc1\x1b\x22\xf3\xff\xbb\x17\xde\x62\x7e\x3a\x7c\x81\xc4\x77\xde\xc6\x3b\x18\x07\x01\xe6\xb4\x28\x09\xe4\x2b\x52\xa3\x5f\x17\xb9\x3e\x85\x0e\x31\x67\x3f\x91\x5d\x45\x47\xbc\x30\xfc\x77\xa0\xd3\x61\x61\xa8\x67\xc8\x1b\xf6\xd2\xac\x2a\x43\xc3\x95\xe1\x23\x06\x1e\x36\x85\xd3\xd3\xaf\xab\xb0\xff\x34\xa4\x6d\x27\xa2\x4e\x54\x40\xf6\x14\x84\x98\xfd\xa3\x17\x2e\xb9\x97\xf4\xfe\xc2\x83\x4e\x94\xcc\x74\xf8\xd9\xa0\x97\x4b\xad\x12\x80\x07\xe9\xb4\x66\x4e\xb8\x5c\x45\x84\xa4\x09\x0c\x40\x6d\xcb\xdb\x07\x4f\xba\x87\xe9\x97\x79\xec\xc5\x1e\xa3\x3b\xfe\xb3\xbc\xf0\x31\xf0\xd5\x01\x78\x6a\x53\x97\x45\x79\xea\x33\x48\xa8\x94\xe9\x22\x12\x48\x4b\x6b\x9c\x47\xee\xd8\x60\xcb\x65\xd1\xec\x1b\x3f\xe0\xa4\x71\x93\xee\xac\x5d\x2e\x73\xab\x8b\xf3\x59\xb4\x41\x91\x78\xbf\xf6\x03\xe1\xd5\xce\x95\x31\xd1\x77\xfa\x39\x77\x63\xff\x3f\x57\xb1\x8f\xef\x65\x97\xf1\xa2\xe0\xf5\xb6\x4a\xac\xf0\xdf\xb3\x37\x7c\xeb\x5e\xf7\x9a\x43\xeb\x1b\xe5\xb2\x3c\x1d\x17\x0e\xe5\x9a\x10\x73\xa9\x31\xc3\xbb\x26\x2b\x28\x38\xac\x68\x2e\xa2\xc2\xe8\x26\x58\x59\x59\x92\x09\x22\x5c\xee\xad\xc7\x51\xde\xbe\x0b\x8a\x3e\x3c\x25\x05\x71\x3b\x6d\x4c\xa9\x68\x70\xcc\x53\xb0\xb1\x0d\xe8\x53\x0d\x01\xff\x58\xc8\x3f\x4a\x8f\x50\xf9\x6e\xf4\x20\x55\x5a\x9f\xbd\x49\xd1\x22\x56\x10\x13\xee\x8e\x8f\x77\x2c\x10\x19\x30\xeb\x70\x88\xfb\x4f\x8a\x28\xb6\x43\xe3\x23\x55\x0f\xc8\x94\x05\x66\xd0\x6e\x96\xd6\x85\x50\x40\x05\x1d\xa9\xa5\x49\x59\x05\x47\x02\xd9\x64\x71\x16\xd5\x96\x69\x7a\x31\x0b\xea\x29\x00\x1b\x20\x57\x46\x54\xb4\x87\xf4\x31\x84\x96\xc1\xc6\x3c\xbe\xaa\x1a\x4a\x66\x60\x20\xe0\x88\x25\x07\xa0\x30\x58\xc2\x7a\x6e\xac\x60\x86\x62\x7f\xd4\x81\x43\x87\xe4\xeb\xc3\x92\x61\x99\x2b\xd2\xa0\xc9\xe3\x9c\xec\xff\x03\xdf\xd5\x78\x31\x67\xa2\x2e\x64\xb1\x18\x9
e\xc1\xb1\xcf\xcb\xe1\x13\x8d\x1a\x63\xae\x9d\x34\xea\x74\x7a\x00\x5e\xe3\x9e\x83\xbf\x54\x61\x87\x28\x7f\x80\x4c\xbf\x1f\xaf\x84\x1f\xa8\xcc\x93\xf0\xa0\x13\x96\x05\xff\x49\xdd\x4b\x16\x95\x1c\x0f\x75\x90\x8f\xb4\x99\x88\x2c\x6f\x47\x5d\x34\x83\xe8\xe4\x0e\x8d\xf3\x83\x2f\xd2\x60\x23\xdb\x02\x0c\x32\x66\x28\x35\x51\xf8\xda\x8b\x31\x38\xbe\x6a\x55\xcc\x4e\xc0\xe0\x0d\x44\x35\x02\xcf\x48\xc1\xa3\x12\x0f\x25\x58\xb9\xba\x6d\x6d\x17\xe6\xa8\x39\x22\xac\x76\x0a\xa8\x3b\xe0\x39\x25\x1d\x10\x11\xaf\x12\x42\xf6\x81\xd1\xc5\xd5\xe4\x7e\xdf\x41\x26\x19\xd7\xbd\x46\x78\xff\xec\x25\xec\xb2\x6e\xda\x58\x4c\x9a\xe9\xa9\xe4\x24\x00\x90\xbe\xfa\x92\xf0\xf9\x97\x2c\x36\x5d\x07\x90\xfb\xcc\xf9\x48\x0e\xd3\x11\xf2\x38\x64\x6f\xcd\x87\x21\x37\xa4\x9b\x36\x96\x78\x07\x3f\x71\xe7\xe2\x7c\x35\x10\x45\xbd\x36\x15\x34\x7a\x99\xee\x7f\x2b\x15\x11\x26\x38\xd5\x8c\xe6\x66\xb9\x58\x42\x0a\x07\x8d\xb7\xcb\x73\x3a\x11\x75\x30\x8a\xd1\xca\x5c\x82\xd7\xcd\xe2\xa5\x8a\x1b\x4b\x70\xa9\xc9\x3c\xd7\xa0\x50\xf4\x68\x43\x74\xe3\xb4\xdf\x11\x14\x1d\x89\x1d\x75\x07\xe2\x8d\x61\x2d\x00\x1f\xa7\x5c\x3f\x9e\x07\x38\x78\x73\x04\x3e\x42\x06\x06\x59\x89\xa5\xb4\x21\xca\xe2\x52\x2e\x97\x7c\xa0\x9e\x68\x63\xe9\xa6\x49\x0e\x00\x3e\xe0\xea\xe0\x19\xe4\xdd\x51\x34\x25\x64\xf7\xd1\x91\xc3\x85\xb6\xca\x49\x79\x28\xd7\x84\x8c\x1b\xc0\x91\x2a\xdf\xd4\x37\x65\x2d\xd5\xea\x55\xaf\x37\x07\xe6\x44\x6e\x4f\x19\x9a\xee\x98\xe8\x00\x69\x44\xad\x2c\xdd\xc4\x64\x49\x65\x85\x88\xce\x96\x92\x8e\xc2\x7a\x6e\x36\x48\xd2\x47\xbb\x69\x03\xe8\x98\xc2\x63\xf2\x83\x35\xb0\xec\x8e\x7b\x0e\x4c\x6a\xd6\xc3\x98\x80\x37\xea\x8d\x5e\xd4\x08\x8f\xd1\x79\xed\xb0\xbc\x70\xe7\xe2\xfc\xd4\xd0\xa7\x45\xb7\x2d\x3a\x22\xe3\x71\x78\x2c\x08\x65\x0b\x43\x25\x7b\xac\x0d\x23\x09\xf4\x63\x41\x94\xc4\x51\xa2\xee\xb2\x60\x9b\xff\xa1\x32\x04\xab\x9e\x9b\xf5\x8d\x18\x82\x55\xcc\x90\x02\xc9\x1a\xc2\xb4\x0d\x19\x1b\x5f\x47\x3c\x87\x26\x62\x53\x0b\x85\x28\xb1\xb7\xd8\xa3\xb5\xa3\xd7\xa9\xdc\x72\x90\x3a\x99\x9d\x39\x8f\x73\x4c\x65\x1e\x91\x24\x45\xab\xb8\x6c\x70\xb8\x2c\x15\x5c\xc0\x5d\x0e\x31\x6b\x72\xc1\x7c\x18\x2b\x7e\x22\x6a\x8a\x7a\xbf\x1e\x8b\x93\x00\xb6\x04\x3b\x83\x2e\xb6\xce\x93\x20\x16\x7c\x43\xe8\xb5\x11\x34\xa2\x5c\x6f\x47\x33\x69\x46\xd0\x63\x16\x68\x50\x2d\x98\xcd\x4b\x0d\x78\x14\xf0\x2e\x24\x93\x85\xae\xa1\x79\xbc\xea\x7c\x0c\x5a\xa8\x1e\x4a\x13\xcc\x4d\xc3\x1d\x15\x17\x31\xca\x08\xf0\x67\x73\x95\x9c\xa9\x77\x46\xb3\xce\x0d\xb2\x94\xea\x1b\x28\x08\xc4\x73\x94\x3b\x96\x2c\x60\x63\xe0\x72\x63\xee\xc8\x72\x60\x2a\x74\x79\xc6\x3b\xb2\x04\x8d\xc4\x81\xea\xdb\x0e\x77\xc2\x7f\x99\x60\x0f\x53\x72\x75\x45\x74\x51\x4c\xf8\x7f\x31\x11\xfe\xfe\xc4\x4a\xf9\xb2\x23\xa1\x1d\xc1\xb1\xc9\xa5\x96\x76\x7a\x40\xd2\x9c\xa6\x1d\xaf\x2b\xb2\x28\x6d\x90\x75\x0a\xb7\x6a\xb9\xa6\x09\x77\x1b\xa3\x16\xd2\x1c\xa5\x0e\xd9\x4b\x49\x80\xd1\x5e\xa9\x12\x37\xed\xfd\xae\x61\x39\x4f\x51\xee\x32\xb6\x94\x16\xe2\x58\x30\xdb\x84\xae\x96\x7d\x5f\x17\x79\xde\xec\xc9\x85\x13\x15\x11\x8f\x07\x85\x21\x54\x07\xf2\x89\x73\x71\x4f\x0f\x50\xc5\xa5\x6c\x94\x85\x0e\x3e\x4b\xba\x22\x0c\xbd\x45\xfd\xef\x42\x52\x09\x5d\xd9\x25\xb1\x49\x9b\x8d\x02\x53\xb3\xb6\x11\xa4\x87\x97\x2b\x33\xeb\x25\x60\xf4\xd4\x79\x11\x6a\xf0\x60\xbd\x49\x8d\xde\x31\x2a\xc3\xd7\x4d\x9a\x83\xd6\xe1\xe6\x56\x17\x57\x11\x69\x71\xb6\x0e\xd6\xbe\xb5\xf4\x8c\x48\xcc\x5a\x35\xf6\x60\x67\x06\x3c\x73\x1f\xbb\x9c\x96\xfc\x1c\xef\xf2\x7a\x54\xf4\xfd\xbf\xdc\x1b\x7e\x79\x6f\x79\xbb\x2e\x3d\xe4\xcc\x06\xd2\xb7\x07\xf7\x6c\x9c\x19\xe0\xd1\xd5\xfe\x58\x00\x25\xa1\x28\x17\xc7\xf4\x5f\x57\x45\xeb\x02\x07\x76\x45\x65\xbf\x46\xe1\x41\xa9\x27\xb2\x4c\xd9\xfa\x52\x05\xd5\x
f9\x0b\x94\xe4\xba\xa9\xb6\xde\xb6\xa0\x9f\x05\x71\xe6\x95\x00\xdb\xa2\x4e\x47\x34\x22\x5e\x88\xb8\x6f\x2f\x6a\x44\xb4\xa4\x35\x16\x2c\x6a\x84\x2b\x50\x1a\xb2\x74\x23\x92\xca\x9f\x7c\x48\x8a\x49\xec\x0a\x15\x1f\x20\x67\xcb\x3a\x71\xbe\xd0\x4e\x0e\x84\x7e\xfa\x90\x30\xd8\xd8\xb6\x54\x10\x70\x06\x8a\x06\xa0\xec\x07\x79\x8f\xd0\x18\x47\x36\xb8\xc6\xd8\x6c\x0c\x87\x86\x22\xda\x10\x71\xbf\x6a\x17\x4e\xad\xd4\x12\x4a\x27\x9d\xe2\x9a\x82\x60\x83\x20\xcd\x70\x41\xa1\x0a\xa9\x53\x6a\x9b\x31\xe8\xe4\xd0\x92\x0c\x3b\x65\x60\xe4\x6c\xa5\x40\xd6\x1e\x92\x4b\x75\x13\xc6\x48\xa5\xc1\x29\x8f\xb0\x6e\xdb\x4c\xf8\x52\x0f\x0c\x99\xec\xa0\x7f\x34\xca\x2a\x9a\xb3\x04\xbe\xbe\x8f\x1d\x2a\xe4\x62\x59\x21\x1b\xa7\xff\x85\x7d\xe1\x5f\xec\x5d\xb3\x2f\x8d\x16\x7e\x89\x10\x0d\x44\x7f\x98\x54\x50\xa7\x80\x3e\x27\xd7\x3d\x94\xaa\xf2\x8f\xd4\xe1\x4e\x2d\x11\x30\xac\xa5\x10\xbf\x25\xa4\x9c\xe2\x59\x1f\x18\xd0\x44\x52\xd0\x99\xbc\x36\x44\xf5\x93\x5f\x57\xc5\x26\xe7\xa1\xf5\x8d\xd0\xf9\x48\xa4\x49\x3d\x4f\xd8\x5a\x1e\x4a\xb6\x21\x7a\x9b\xfc\xd0\xb1\xa1\xda\x4c\xee\x48\x9f\x1a\x56\x32\x3c\x16\x3c\x9d\x05\x41\x78\x13\xef\x35\x22\x91\xd4\xc5\x2d\x74\x25\x08\x42\x78\x22\x3c\x16\xdc\x04\x7f\xdc\x52\xc5\xab\xe2\x6c\x37\x42\xcd\x5c\x63\xe9\xcb\x67\xcc\x65\xa4\x2d\x90\xd7\x65\x83\x57\x8e\xcf\x5d\x73\xcd\x35\x37\xc2\xdb\xcf\x90\xff\xa9\xd5\x6a\xec\x19\x7a\x2b\xa0\xaf\xca\x27\x01\x63\xc2\x1e\x54\x2b\xea\xce\x38\x90\x00\xbb\xa4\x40\xa8\x7d\x80\x31\x04\x29\xaa\xb2\x93\x14\x58\x72\xb0\x96\xea\xf0\x16\xd4\x89\xf0\x39\x0e\x5b\x0f\xd4\xb5\x5f\x0d\x06\xb7\xd0\x12\xaa\x24\xd1\x4a\x0c\x19\x2d\x7b\xcb\x7c\xe0\xf7\x03\x63\x8e\xba\x91\x5d\xcf\x1e\x35\xd2\x1c\x55\x36\x34\xd9\xbd\x79\xce\xbb\x72\x24\xfc\xe0\x41\x7f\x3f\x2f\xd2\x4e\x54\x67\xec\x97\xf6\xb1\x87\x6f\x0c\xea\x73\xb9\xbf\xb5\x2f\xfc\xf6\xde\x21\x9a\x9e\x09\xdd\xdd\x94\xb2\x16\xfc\xe7\x69\x53\x0b\xd7\x11\x87\x45\x75\x54\x2f\xad\xff\xa6\x31\xd8\xe9\x21\x8d\x72\x47\xfb\x54\x3e\x35\x32\x92\xa1\xfd\xd0\x52\xa5\xd5\x0a\x73\xad\x67\x20\x2a\x95\xc5\x0a\x8f\x42\x96\x66\x29\x15\x37\x2c\xac\xc7\xe3\x60\xf9\xae\x99\xe5\xbb\xe6\x82\x8e\x90\xba\x4e\x94\x77\xb0\x8e\x16\x62\x81\xac\x91\x51\x7f\x69\x9f\x01\x09\xd2\x10\x55\xdd\x72\x4c\x31\xc5\x7a\xd1\xc1\x80\xfc\x9b\x2a\x65\x15\x8c\xfa\xca\x46\x64\x6b\xff\x16\xbc\x4b\xac\xfa\x1a\x1a\x1c\x59\xf9\x76\xfa\x06\xa8\x35\x18\xd4\x55\x0b\x66\x55\x0f\xb7\xd1\xc5\xb6\x2e\x02\xbe\x29\xab\xa2\xbc\x77\xb6\x09\x8f\x0e\x02\xad\x94\xb6\xfd\x96\x28\x08\x2c\x75\x50\x36\x81\x3c\x24\x75\x59\x69\x50\xc7\x15\xc0\xf7\xa0\xf9\xbd\xe4\xba\xd4\xbd\x93\x0b\x31\x24\xdb\x6e\x5a\xca\xab\x56\x54\xb4\x7b\xeb\x3a\xf1\x4e\x9f\x6c\xa6\x01\xc8\x75\x9a\xfa\xb5\xd6\x2e\x3a\xb1\x69\xa6\x5c\xb1\xb6\x9a\xaf\x6a\x84\xba\x5e\x8e\x7a\x3f\xb1\xd6\xc8\xeb\xf8\x93\x10\xca\x86\x00\x7a\x8f\xd5\xcc\x47\xc6\x53\x7c\xde\x63\xb7\x9a\x75\x28\xce\x16\x02\xd1\xb8\xa6\x75\x70\x80\xf1\x64\x39\x0f\xe8\xa5\x7a\xfb\xea\xa9\x25\xff\xe5\x5e\xf8\x12\x8f\x90\x3e\x0c\x92\xb7\x0e\x5f\x87\x3b\x26\xca\x3d\x17\x04\x73\x2a\x87\x57\x47\x13\x1d\x0b\xe4\xbe\x59\x0d\xa2\xa4\xb8\xee\xda\x6a\xd0\x8c\x53\x0e\x7f\x60\x4d\xab\xc1\x93\x9e\xac\x8d\xa6\x4f\x7f\x46\x35\xe8\xf0\xee\x93\xf0\x96\x7d\x1d\x01\x9d\xa3\xb8\xc6\x5e\xb7\x97\x1d\xa7\x96\xc9\xe1\x9a\xe6\xad\x56\x26\x5a\xbc\x48\xdd\x16\x95\x51\xd1\x66\x97\x95\x74\xd3\xc8\xdf\xfe\x67\xf6\x84\x4b\x43\xae\x97\x72\xb9\x73\x00\xae\x43\xa7\xb2\x79\x5a\xca\x7c\x0e\xd6\x98\xa8\x0e\x11\x5a\xa0\xb2\x6f\x79\x00\xea\xbe\xe5\x91\xb3\xc9\x31\x8a\x7f\x75\x82\x5d\x4f\xa0\xef\x33\x61\x08\xe0\xb1\x24\x33\x0a\x0d\x00\x0f\x87\x03\xac\x86\x63\x23\x7b\x85\xc7\x7c\xa0\x8b\xd1\x94\x08\x40\xac\xf3\xf4\x0b\
xa7\x8c\x79\xec\x09\x9e\x17\xc8\x19\xe3\x7c\xd6\x62\x5d\x50\x01\x66\x72\x1a\x93\x05\xb8\x48\x03\x9e\x10\xee\xd2\xe3\x0d\x55\xcc\xc9\xf0\xb1\xb7\xb9\x44\x31\x74\x47\xa9\x41\x72\xd1\x91\x11\x92\xd6\x63\x0c\x9f\xd7\xdf\x72\x5a\xfb\x14\xcd\x2d\xb1\x16\x3e\xee\xce\x24\xba\x47\x9e\x27\xd2\x44\x4c\x6f\xa6\x59\xa3\x6a\xfc\xc6\x65\xc2\x08\xdd\x88\xc9\x7c\x6c\xf9\x4f\xd4\xce\xc0\xe5\x70\x6e\xd5\xf1\xdf\xb9\x40\x90\x66\x30\x82\x39\xdc\x89\xd7\xe0\x6c\x73\x1c\xcd\x7e\x77\x26\x67\x92\x74\xb3\x04\x51\xbc\xdf\xe1\x75\x54\x0e\x99\xe3\xb1\x38\x6b\x4b\x54\xff\x37\xf6\x87\x6d\x73\xd1\x45\xca\x6f\x89\x44\x64\x51\x5d\x6d\x51\x96\x9f\x1e\x0e\xf9\xe6\xe4\x20\x1a\x33\xda\x41\xd0\x23\xc3\x5b\x20\xce\x8a\x3a\x85\x61\x74\xe3\x5e\x2b\x4a\x6a\x5b\xde\x3e\x94\x4e\xce\x7c\xfc\xc8\x3e\x76\x8a\xd1\x0d\x7f\x21\xbc\x61\x5e\x5b\xf5\x8a\x12\x3e\x33\x89\xb6\x22\x05\x60\x6f\xec\xed\x28\x57\x52\xd7\x6e\xfd\x47\x3c\xb6\xaf\x09\xc0\xc8\xfe\xfb\xbc\xf0\xe7\xbc\xe3\x51\x2c\xf2\x7e\x5e\x88\x0e\x4e\x72\xe5\xb3\xb0\xe0\x8b\x00\x32\xc7\x7e\xc6\xc4\x21\xd2\xee\xd9\x06\xdd\x0a\xed\xaf\x49\x2b\xc0\x67\x6b\xc1\xc2\xd9\x9a\xd4\xfe\x8a\x6b\xc3\x6a\x10\x9e\x6d\xe6\xf2\x9f\xa4\x68\xe6\xa1\xbb\x87\x5b\xc5\x37\x00\x28\x0c\x02\x2e\xad\xae\xc7\x74\x10\xa7\x19\xa7\xd8\x7e\xc2\x15\xf2\xe7\xc3\x49\x83\xd1\xb4\x70\xb6\xc8\x8c\x41\x45\x41\x0f\x45\x10\x25\x33\x16\x57\xfe\xd9\x1e\x3b\x20\xd7\xc6\xa9\x24\xee\xfb\x1b\x61\xdb\x14\x39\x5f\x36\x4b\x04\x87\xe5\x83\x33\x80\x8a\x3f\x55\x0b\x56\xe8\xad\x00\x70\xb4\x40\xaf\x47\xb0\x4a\x08\xe3\x50\x37\x73\x51\x14\x44\x1f\x8f\xad\x02\x7b\x68\xee\x9e\x5b\x7e\xaf\xc2\x0e\xe6\xa2\x9e\x21\x38\xf8\x47\x2b\x63\x14\x37\xed\x47\x94\x3b\x5d\x19\x21\xfc\x45\x15\x53\xfd\x55\x55\x1e\x5a\x83\x2d\xa8\x70\x58\x4d\x70\x57\x45\x3f\x58\x01\x5b\xb9\x80\x75\xb9\xe1\xee\xd4\x45\x4a\x67\x96\x54\xc5\xfe\xb6\xa2\x84\xc6\x47\xc3\x60\xe3\x4e\x49\xaa\x58\x13\x50\xc5\x9c\xaf\xd8\xee\x23\xed\x8b\x1a\x5a\x91\xbc\x94\xa6\x82\x0f\x55\x11\xfd\x0e\xfe\xc6\xad\x8c\xcc\xcd\xc3\x6b\xc5\x7e\xe6\x90\xc3\x8a\x91\x88\x82\xf2\xf1\x65\xff\x2d\x26\xad\x4c\xe4\xb9\xff\xac\x43\xe1\x33\x2b\xf4\x03\xf5\x72\x17\xd2\x3a\xeb\xc5\xca\x30\x02\xb1\xbd\x41\x94\xac\x03\xa9\x41\x3d\x4d\x12\x7c\x2c\x47\x13\x18\x57\x1c\x64\x1a\x56\xde\x31\xdb\x12\xa9\x46\x2d\x98\x4d\x02\xf5\x3d\x05\xa8\x4d\x91\x62\xd8\x94\x96\xec\x7c\x3a\x82\xe5\xda\x6d\x18\xf7\xa7\xe1\x13\x20\xc3\x7b\x59\x9c\x57\x83\x38\xe5\x8d\x60\x9d\xc7\x1c\xc6\x35\xe3\xcd\x66\x54\xaf\x06\x68\x83\x94\x3b\xe3\xea\xea\x89\x6a\x90\xca\xe3\x10\xca\x0c\x94\x3c\x1b\x51\x06\x61\xfe\x0a\x32\x5f\x14\x75\xd7\x45\xfc\x52\xb6\x1b\xfb\xb5\x1b\xfb\xb5\x9b\xa6\x74\xbf\x46\x7b\x7d\x41\x61\x76\xff\x8e\xc7\xae\x1e\x29\xdc\x87\x8a\x28\x88\xeb\x7a\x8d\x67\xc7\x7b\x28\xe8\x1a\xad\x01\xcb\x8b\xf4\xf8\x83\x1d\xc2\xc5\xfe\xd0\x44\x70\xfd\x9e\xc7\xae\x3d\xcf\xc6\x61\xac\xd6\x96\xb7\x3a\x34\x58\xeb\xa1\xd0\xc0\x73\xde\x53\xb6\x8f\x07\x7b\xb4\x7f\xa3\x8e\x07\xb3\x1a\x5a\x4a\x58\xa2\x16\x94\x53\xaf\xbf\x74\x90\x85\xa3\x61\xa6\x17\x36\x10\x91\xd3\x7f\xf7\xc1\xf0\x33\x9e\xfa\x15\x08\xf9\x07\x62\x15\xd1\x69\x40\x2e\xf0\x24\x6d\x40\xd4\xaf\x62\x74\xa8\x8b\x0c\x80\x0b\xa1\xcc\x88\x8e\xb7\x39\x6f\x8a\x02\x99\x67\x8a\x8c\x47\x49\xe1\x60\x1f\x95\xf0\x6d\x97\xd3\x46\x2d\x08\x66\x95\xe8\x80\x42\xc1\x2c\x80\x27\x7c\xaa\x88\x0a\x35\xa1\x90\xfc\xf5\x7e\xb0\x7c\x6a\x75\x8d\x8c\x05\xb5\x5a\x6d\x06\x00\x6f\x6f\x92\x75\x95\x7b\xd1\x2d\x33\xea\xad\xdc\xdd\x7c\xfe\x7e\x1f\x7b\xa6\xbd\xfe\x8b\x8b\x5d\xfe\xd7\x98\xbf\x47\x41\x81\x81\xa9\x03\xb6\x42\x59\x27\xd1\xd8\x25\x8b\xbc\xe8\xfd\x2f\x63\x87\x30\xbf\x48\x41\xe1\x73\xf6\xd8\x0b\x18\xc7\x79\xbb\x8c\xf0\x7b\x9d\x9f\x4a
\xdf\xa4\x2c\xf0\xc6\xee\x9e\x7b\x31\x18\x13\x4f\xd8\x5e\xc4\x5d\xe7\x5f\x3b\x8a\x42\x41\x09\xa5\xa1\x39\x98\x7f\xed\x39\xe0\x55\x36\xbe\xe1\x3c\x17\x9d\x34\x51\x14\x3e\xb9\xff\x51\x2f\x3c\x3e\xe4\x3a\x98\x79\xf3\x00\x23\xb6\xd2\xae\x20\x9d\xba\x01\x4f\xe5\xda\xed\x4a\xa9\x0d\xc0\xe0\xe4\x08\x95\x94\x75\xd8\xe5\xc4\xf5\xa7\xe9\xf0\x9e\xb8\x83\x60\x4d\xb7\x1e\xe1\xa4\xa6\xcf\x03\x98\x81\xa8\xde\xd6\x6e\x1d\x8a\x66\x11\x09\x10\x30\x7e\x63\x82\x3d\xc6\x2e\x4e\x64\x05\x1a\x98\x85\x31\xe3\xcd\x99\x8b\xab\x51\x4b\xbe\x47\xe6\x77\xdc\x09\xfd\xfb\x26\x9c\x36\xfc\xc4\x04\x7b\x8d\xc7\x2e\xb1\x8a\xf2\x5f\xe0\xf9\x7b\xd6\xfb\x85\x08\x4f\x2d\x36\xb5\x78\xde\xe4\x79\xc0\xbb\x72\x51\x28\x34\x5f\x2b\x44\x12\x9d\x4f\x31\xa7\x43\x6a\x94\xe7\x3d\xd1\x08\xac\x32\xe1\x2c\x5b\x73\x66\xc6\x8e\x7c\x01\xdf\x76\x59\xff\xbe\xe1\x85\xb7\x18\x1e\x3f\xc2\x5d\xd7\xe7\x34\xaa\x6a\x95\x76\x10\x55\x5f\x1e\x07\x69\x16\x34\x44\x12\xf1\xd8\xb1\xa0\xae\xb1\x15\xb6\x3c\x7a\xac\xce\xab\x73\x75\xad\xce\x79\x93\xec\x3f\x0c\x36\xac\xc3\xbb\xd3\x67\x44\x3f\x97\x8d\x9b\x06\xab\x07\xdb\x86\x8d\xa9\xc3\xbb\x8c\x7d\x99\x0d\x9d\xe3\x90\x75\xb8\x78\xca\x31\x2f\xbd\x8b\x85\xc7\x86\x5c\x77\xed\x4c\x5d\xed\xc5\x08\xe8\x59\xb2\xe9\x6c\x79\xfb\x5b\xbc\x10\x9b\xbc\xbf\xe5\xed\x43\xe3\xc9\x96\x67\x6c\x07\xce\x94\xf9\xd8\x41\x76\x9a\xb1\x3c\x8f\x17\xd0\x3d\xe4\xaf\x84\xf3\xc7\x63\x0e\x5b\x32\x7a\x8c\x66\x28\xb2\x46\x1e\x15\x03\x92\x2d\x75\x15\x28\x57\xb4\x83\xc7\xe1\xa7\xaa\xc6\x64\xc3\xe3\x5c\x38\xa6\x8b\x67\x79\x8c\x61\xd5\x20\xb9\x37\x0f\x9b\x6b\x4e\x92\x34\x59\xca\x78\x8c\x28\x92\x1a\x6e\x13\xd7\xaa\x6a\x9c\xb2\x32\xd1\x76\x5c\xa6\xe3\xb3\x8c\x5a\xc1\x90\x14\xe3\x79\xa6\x3a\xc5\xbf\x11\x99\xd1\xc1\x30\xa5\xd8\xb7\x48\x7d\x54\xdf\x9a\x5d\x5e\x54\x0d\x73\x4a\xf9\xb4\x67\x1b\x61\x7e\xdd\xbb\x50\x23\xcc\x0b\x3c\x63\x7a\xd1\x76\x97\xbc\x64\x78\x69\xa6\x99\xae\x50\x2f\xa7\x78\x22\x74\x1c\x0d\xb5\xc0\x90\xbd\x04\x75\x34\xb9\x83\xa8\x6d\xaf\x1a\x9c\x48\x5b\x51\x52\x8e\xa4\x6d\xf2\x28\xae\xb1\xb3\x96\x6d\x2b\x0e\x9f\xf2\x20\x5a\xb4\x5e\xe9\xb1\x4b\xc8\xc7\x78\x32\x6d\x08\xff\xd9\x5e\xd8\x5b\xd4\x8e\x77\x83\x9f\x62\x62\x46\x30\x41\x6e\xc3\x89\xcf\x59\x17\x52\x2f\xad\x9f\x59\x36\x66\x55\x29\x23\xd6\xda\x51\x62\x5d\x72\x02\x37\xcb\xf7\xec\x11\x7e\x92\xae\xd3\x72\x9a\xc6\xfe\x89\xf0\x31\x6b\xd6\xbc\xa0\x10\x84\x40\xde\x1b\x32\x03\x41\xd3\x28\xc8\x30\xd4\x48\x3b\xbc\xc4\x80\x77\x8a\xd1\x7a\xf4\x17\xc2\x1b\xd6\x4a\x76\x5a\xd5\x48\x9a\xe7\x00\x9c\xa9\x6d\x3e\x51\xa2\xea\xe0\x14\xf8\x26\x63\xb1\x7d\xb5\x17\xbe\xe0\x3b\x65\xb1\xb5\xe1\xfc\xe5\x1d\xa7\x8e\x4d\x76\x85\xe9\x95\x79\xe8\x14\x7f\x25\x5c\x28\x37\x5f\x75\xf1\xb2\xe9\x41\x7c\xd8\x76\x10\xa8\xee\x50\xae\x69\xfb\x3b\x6f\xd8\xc7\x6e\xde\x19\x9e\x36\x5f\x17\xb1\x82\x22\x25\xaf\x7d\x47\x24\x85\xff\xe5\xbd\xe1\xd9\x59\xc2\x38\x55\x98\x9b\x2a\xae\xb1\xa3\xd8\xc7\xcd\x1d\x8c\x06\x52\xd6\x48\x05\xad\xcf\x83\x33\xa2\xaf\xb8\x52\xa9\x1b\xd5\xc3\xc8\x8c\x4c\xa9\xe1\xa2\x6f\xc1\x96\x6a\x96\x82\x03\xea\x0d\x47\x4c\x7f\x78\x0f\x7b\x65\x85\xed\xc3\x67\xfd\xe7\x57\xc2\xaf\x7b\x8a\x60\x93\x40\x90\x32\x0e\x90\xbb\xb6\x22\x9b\x6b\x03\xaa\xae\x45\x94\x4b\x55\x32\xcd\x82\xa5\xb4\x58\x4c\xaa\x26\x6e\x3f\xa7\x22\x54\xde\x74\x92\x26\xd3\x60\xa5\x1d\x5a\xc6\x02\x42\x00\xa5\x59\x30\x9f\x8a\x7c\x29\x2d\xe0\xc2\x98\xe2\xa8\x28\x44\x67\x80\x3b\x84\xbb\x0a\xd9\xeb\x14\xaa\xcd\x15\xf7\x54\x54\x0f\x3a\x22\x6b\x09\x84\x23\x18\xe7\x29\xdd\xf2\x90\xbf\xe1\x95\x5e\x78\xb5\xec\x50\x95\xa3\xa3\x90\x61\x4d\x70\x99\x1e\x35\x8b\xe0\xc5\xd1\x61\x26\xd9\xf7\x38\x1b\x38\x7c\x7b\x5a\xb1\x61\xf9\x07\xfc\x7d\x50\x27\x76\xce\xfb\x7e\xf6\x1f\x87\x3c\x0
9\x77\xa5\x5e\x20\x37\xfc\x33\xa2\xcf\xd8\x4b\x3d\xa6\x47\xd3\x7f\x96\x17\x16\xba\x0b\x9d\x7d\xfc\x8c\xe8\x4f\xe6\xc4\x47\x90\x26\x90\x5e\x00\x64\x16\x10\x9c\xd5\xd4\x43\x89\x34\x24\xaa\x08\xb4\x6d\xcb\x21\xa4\x91\xa4\x21\x91\x33\xca\x1e\x13\x67\x85\xfc\x86\x4b\x51\xa6\x76\xa8\xd9\xa7\xf5\x32\x21\x25\x87\x89\x8b\x70\xb4\x91\x97\xed\x09\xef\xd6\xcf\xb8\xbe\xe4\x00\xae\x07\x70\x43\xf9\x56\x29\xe2\x3e\x31\xb2\x04\x10\x31\x22\xf0\x99\x63\x9c\x94\x3a\x5b\xd7\xb6\x3c\x86\x9b\x1d\xe2\x9d\x1c\xcc\xdb\x3c\x03\x25\xc1\xb5\x36\x4f\x7c\x07\x77\xa9\xa7\x30\xab\x8a\xfe\x72\x38\x67\xfb\xd7\x68\xa7\x76\x25\x01\xf6\x89\xda\x2a\x28\x1e\x07\xb8\x5c\xa0\x27\xee\x10\xfd\x12\xc8\xd9\xe5\xe6\x03\x10\x6c\xee\x9f\x0d\xcf\xa8\xaf\x60\xf4\x79\xea\x38\x42\xce\xf7\x73\x76\x44\x5c\xa1\x88\xcf\xb8\x42\x3a\x6a\xd8\xb5\x79\x24\x33\x63\xe0\x7f\x77\xc8\x56\xe5\x8f\x60\xa9\x94\xdf\xf7\xbf\x0f\x0e\xcd\x46\x34\x79\x6b\xfe\xe7\x0f\x86\xa7\xcc\x4f\xd9\xd3\x16\xa4\x83\x95\xa5\x87\xc9\x2e\xe0\x17\x51\xb1\x3e\x26\x5f\x3d\x09\xb8\xa1\xc0\x71\x8f\x6b\x3f\xb5\x4b\x1a\xba\xeb\x80\xd8\x75\x40\xdc\xbf\x0e\x88\x4f\x2a\x07\xc4\x47\xbc\xf3\xca\x52\x05\xef\xc3\xcb\x3d\x27\x27\x17\x83\xca\x60\x3c\x75\x26\xec\x83\x6e\x94\x5f\xd9\xde\x62\x35\xe3\x4f\x0f\x63\xc6\x30\x8d\x1b\xc0\x40\xf5\x6c\x35\xf3\xfc\x82\xb3\x1e\xb2\x71\x59\x9f\x39\xc4\xa6\xac\x21\x1e\x12\x51\x66\x39\x9a\xfc\xb7\x1c\x0a\x1f\x6d\xfd\x2e\xd9\xcc\x95\xe3\x1a\x43\x20\xc1\xa2\x94\xb7\xf1\x70\x2b\x8c\x4a\x62\x31\x49\xbe\xbf\xc2\xf6\x93\xe7\xdb\xff\x85\xca\x18\xbe\xda\xd1\xd5\xba\x15\x5f\x0f\xbf\xe9\xcd\xea\xcd\x8e\x8a\x44\x36\xee\x98\xb6\x6b\xa9\xa2\x20\xe6\x8c\x92\x31\x88\xaa\x99\x4c\xaa\xec\x70\x80\x4c\x01\x2e\xb4\x59\x9b\xa6\x3b\x6d\x06\x93\x54\xe2\xa4\x54\x7c\x27\xc1\xe9\x3f\xa9\x35\x5c\x2b\x6a\xc1\x8d\xf8\xd3\xb8\x3b\x52\xa9\x83\xe8\x00\x58\x1a\x29\x6f\x90\x5b\x3e\xb3\xad\x6f\x60\xdd\x82\xfa\xcb\x5a\xc6\xa9\xd4\x85\x8b\x94\x4a\xef\x07\xbc\x84\x96\x59\x63\xef\xde\xcb\xae\x88\xb0\x13\x00\xe2\x0e\x36\xee\x37\xec\x0d\x5f\xb1\x77\xb1\x74\x75\x58\x6c\x90\xfd\x8c\x4e\x5b\x32\x30\x3e\xf2\x8c\x66\x9d\x72\x9d\xa7\x4d\x9c\x6e\xe4\xe6\x58\x83\xb6\xa5\x03\x36\xc9\x9a\x67\x4a\x34\x8a\xbf\xf2\x53\x6a\xa8\xdf\xd3\x2e\x61\x02\x35\xab\x06\x88\x81\xa7\x03\xd8\x08\xc8\xd0\x71\x3c\xcd\x60\x7c\x37\x79\xd6\xc0\xe4\x65\x5e\x44\x40\xb9\xde\xaf\xaa\xa0\x68\x5e\x58\xaf\x50\x72\x21\xf0\x4d\xa8\x21\x6b\x45\x1b\x22\x19\xa0\x68\xb1\x33\xc6\xd6\x5c\xd3\xa8\xdc\xcc\x44\x27\x2a\x02\x1e\x6c\xf2\x0c\xec\xc9\x51\xd3\x4a\xa8\xc0\x33\x9f\xfe\x24\xc0\x15\x95\xc0\x8a\xe4\x99\x4c\x75\x0d\x71\x63\x2a\x5d\x64\x76\x79\x51\xa9\x1a\x94\xbc\x46\xfd\x4d\x49\x25\x40\xac\xa2\x60\x6a\xcd\x6c\x33\xa1\x22\x38\x2e\x7a\x6b\x26\xdf\x87\xc1\x7c\xa5\x39\xa3\x42\x9e\x4b\xd1\xe9\x39\x44\xeb\xaa\xc5\x83\x9b\xbf\x0e\x16\xa3\xfe\x18\x16\xa1\x5b\x45\x03\x96\xd2\x81\xdc\x39\x92\xd6\x7b\xba\xa5\xce\x51\xe4\x0b\x1e\xdb\x0b\x2b\xc8\xff\x94\x17\xde\xeb\xcd\x6a\x58\x05\x38\x30\x60\x40\x8d\xe6\x34\x56\xe7\x7e\xd7\xc3\xbb\xd8\xb4\x19\x55\xaa\x88\x81\x86\x54\x94\xb0\x92\xe1\x30\x1e\xc7\x2a\xea\x05\x67\x80\x39\x7e\x94\xc4\x84\x73\xca\x7c\x34\xbb\x71\x74\x6c\xe6\x18\x31\xb4\xd2\x8b\x05\x7b\xde\x04\x9b\x28\xe2\xdc\xff\x3f\x95\xf0\xdd\x95\xb5\x13\xab\x2e\xc8\x53\x2d\x98\x43\xbf\x75\xdc\x77\xa4\xa5\x9d\x61\x98\x1b\x44\x41\xf9\xbe\xbc\x54\x0d\xae\xbd\xf6\x1a\x68\x73\x47\xca\x86\x2e\x04\x69\x76\xd6\xa5\x8a\xa5\xe6\x0f\xf4\xa0\x12\x14\x66\xda\xc9\x0e\x05\x10\x2d\xd1\xd7\xc9\xb2\xaa\x8c\xb3\x06\xec\x09\xce\x04\x10\xf7\xcc\xeb\x75\xc0\x1c\x68\xa9\x9e\x92\x25\x80\xd0\xb0\x12\xfb\xdb\x59\xda\x6b\xa1\xc9\x6b\x75\x69\x11\xaa\xa9\xbb\xa5\xaa\x56\x45\xa4\x42\x98\xcc\x
12\x6a\xf6\xe2\x66\x14\xc7\x26\xad\x18\x9f\xd0\x0d\x5f\x5d\x5a\x74\x46\xe2\x18\xbb\x81\x5d\x77\x01\x23\xb1\x76\x62\x95\xfd\x75\x85\xd9\x0f\x37\xe3\x74\x93\x6a\xa2\x9f\x3e\x1e\xa7\x9b\xab\xa0\xc4\x93\x47\xe5\x83\x95\xf0\x96\xf2\x45\x7b\x9b\x1e\x1a\x78\xc0\x03\xf3\x8a\xbb\xbb\xfd\x95\xc7\xbe\xee\xba\x3d\xbe\xe4\x85\x8f\x3e\x6d\x7e\x9f\x1e\x44\xe7\x74\x8a\x87\xe1\x75\x4a\x37\x7d\x33\xcf\x6e\x1d\xed\x31\xdd\xa6\xb9\x96\x8f\x63\x5b\xcf\xc5\xce\xdd\x20\xec\x17\x0e\x38\x51\x0a\xd9\x3a\xaf\x4b\xb5\x87\xf8\x50\x57\xd2\x18\xc9\x0b\x9f\x7f\x20\x9c\x29\x5d\x1b\x16\x57\x67\x3d\x92\x0f\x25\x27\xfc\xd4\xbe\xdd\x43\xcc\x45\x1c\x62\x9a\xd6\x19\xe6\x89\x17\x47\x4d\xf8\x3d\x63\xce\x2f\xbb\x27\xf5\x8b\x3d\xa9\xb7\x14\x65\xe0\x53\xc2\xef\x5b\x94\x7f\xb8\x52\xc3\x5e\x27\xb6\x80\x38\xc2\x6a\xac\x3a\x52\x40\x0c\x59\x9c\xe7\xbc\x64\xfb\x13\xd3\x1d\xfe\xa2\x3e\x31\x41\x19\xbc\x57\x00\x66\x21\xee\x6f\xa5\x70\xa6\xd2\x32\x2f\x9f\xa6\x5e\x78\x99\x83\xfd\xc4\xbb\xdd\x1c\x83\x3a\x54\x5a\x2c\x1c\x38\xbe\x7a\x28\x5c\x72\x2f\x8d\x44\x35\xb2\x43\xdf\x08\xc4\x4c\x67\xcb\x9b\x22\x6a\x5b\x9e\xe6\x39\xdb\xf2\x34\xb7\xa0\x23\x5c\xbe\x71\x29\x7b\xe3\x04\xfb\xee\x6e\x96\xc2\x8e\x32\x2f\x78\x23\x8e\x12\xa1\xf8\xf3\x9e\xa3\x79\x80\xbf\x51\x91\x7a\x62\x87\x9f\x05\x0e\x3d\x48\xca\x88\x12\x87\x41\x8f\xdb\x79\xbe\x8a\x7d\x5f\x15\xac\xf0\x62\x30\x85\xca\x62\x04\xc0\x7c\xab\x26\x8f\xac\xcc\x56\x5d\x4a\x59\xe1\x96\xbf\xa3\xa4\x27\x88\xbf\xb8\x2e\x0b\xc6\x57\xad\xb7\x88\xac\xdd\xca\x17\x01\xc7\x15\x0f\x96\x4b\x8d\x5c\x38\x5b\x17\xa2\x01\x10\xd1\x90\xa3\xa1\x91\x7a\x7a\x70\x74\xd4\xbe\x59\xab\x42\x04\xd6\x16\x2c\xa5\x05\xe5\x3a\xe8\x06\xda\x99\xf0\x22\x2f\xa2\x0e\x68\xfb\x64\xf5\x87\x14\x1a\xd9\x67\x4e\x27\x45\x39\x66\x4c\x97\x98\xf2\xae\x3b\x72\x24\x77\xd9\x9a\x5e\xe9\x59\xa4\x7e\xcf\xd5\xb8\xe6\xdd\xa5\x01\x0e\x3f\x24\x4b\xbf\x08\xfe\x3e\x87\x05\xb9\xcc\xe0\xe7\x54\xea\xc3\x1e\x7b\x78\x26\xd0\xbf\x78\x5b\x94\x17\x69\xd6\x07\x83\x82\x7f\x9f\xae\xe0\xab\x00\x78\xdd\x30\x0d\xa6\x71\x43\x31\xe6\xad\x8a\x82\xc2\xc5\x91\x4f\x58\x9d\x1b\xe5\x70\x4b\x6d\xf5\x01\x6b\xc4\x11\xb7\x15\xff\xe0\x59\x64\x80\x7f\xe1\x5d\x50\x74\x96\xe3\x6b\x0b\xdf\xe2\x9d\x70\x9d\x6b\x8a\xdc\xa2\x86\xfe\x0b\x39\x1f\xec\x4e\xd8\x6c\xa7\xb9\x40\x78\x79\xb0\x7e\xc0\x6b\x16\x73\x9f\x9a\x96\xe0\x9f\x02\x24\x38\xa0\x90\xb6\x9e\x70\x20\x76\x1c\x5e\x40\x1d\xde\x47\x6b\x1f\xb2\x8e\xd6\x45\x9c\xd7\xd8\xfb\x64\xc3\x95\xe3\xe7\xed\x1e\xbb\x66\x34\xe2\xe0\xa0\xbc\xa2\xf7\xc2\xe3\x6b\xe5\xf5\x81\x37\x54\x22\x0e\x0c\x30\xc6\xd9\x08\xd5\x76\x43\x30\x90\x88\x4d\x68\x51\xed\x9c\x57\x1d\xef\x98\x3a\xe4\x5f\x82\x13\xe5\x0e\xd1\xcf\x19\xbb\xc7\x62\x4a\x15\x17\x40\x94\x7a\x54\xf3\xa4\x0e\x44\x42\xe6\x43\x59\x51\x6b\xbb\xec\xa2\xb0\x5a\x1e\xcb\xf6\xa1\xc4\xf2\xaf\x0b\x0f\x2f\x0e\xc2\x65\x0c\x15\x6e\x8e\xa7\xe9\x0f\x18\x9b\x76\xd1\x2c\x6d\x74\x80\x52\x16\xf6\x46\x24\x36\xfd\xfb\x58\xf8\x56\xcf\xba\x10\xf0\x42\x0e\x3e\x36\xcc\x7a\x5f\x0a\x57\x4c\x59\x07\xcf\x22\xe4\xbd\x81\x6d\x0e\xc5\xf5\xb1\xc0\x2e\x43\x6b\xae\x64\x40\xa8\x13\x84\x1d\x9e\x57\x37\xc5\x7a\x3b\x4d\xcf\xa8\x0c\x78\xf3\x0d\xb9\x92\x31\xb9\x86\xf6\x06\x4c\xf0\xd4\x46\xd1\x2d\x0f\x6c\xcb\xce\x16\xfb\xcf\xfb\xd9\xcf\x9a\xc8\xf0\xd7\x7b\x6c\x76\x1c\xb6\xe7\x36\xbd\x41\x61\xe2\x4f\x30\x51\xe2\xf2\xa4\x89\x7b\x15\x55\x9e\x90\xb5\x21\x7d\x7b\x68\xa0\x89\x8a\x90\x53\x5c\x4f\x56\x17\xee\x86\xfc\xee\x7a\x9c\xbe\x93\x87\xb5\x3b\xac\xc3\xda\x63\x2e\xd2\xdf\xc4\xee\x55\x9e\x9e\xa7\xb3\xc7\x5c\xcc\x92\x93\x3b\xc6\x0d\xa0\x8f\xb7\xd3\xb8\x91\x8f\x00\xce\x55\xab\x4a\x45\xd0\xf3\xb8\x27\xfb\xf5\x9c\x77\x66\xfb\xd3\xc6\x6d\xfe\x71\x1b\x44\xd7\xae\x50\
xe9\xa4\x61\xd5\x6b\x68\x8c\xf1\x9b\xf7\x0e\x75\x5a\x2f\xa7\x99\x0a\xa7\x7d\xd6\xde\x2d\x6f\x4f\x37\xcd\x8a\x2d\xef\x40\x37\x4b\x8b\xb4\x9e\xc6\x8e\xbc\xfa\xca\x1e\xf6\xc5\x0a\xdb\x2b\xb2\x2c\xcd\xfc\xdf\xa9\x84\xbf\x56\x59\x90\x7f\xc2\x61\x04\x20\x42\xd2\xac\xa1\xc2\xc0\xd6\x63\xd1\x31\x71\x61\x0a\x2b\x06\x2c\x6c\x08\x92\x27\xfb\x49\x9d\x4c\xa0\xc4\x20\x6f\x73\xd0\xe4\x3b\xdd\xb8\x6f\x81\xf5\x68\x58\x16\x30\x8b\x1e\x0b\xa6\x83\xf5\x5e\x14\x17\xd3\x51\x42\xef\x51\xc8\x0d\xbe\x6e\xfb\x22\x50\x14\xa3\x24\x54\x30\x10\x52\xa3\xc2\x27\x7b\xb9\x60\x81\x95\x8f\x0d\xe1\x06\x6c\x3a\xa8\xc7\x69\xaf\xa1\x82\x07\x33\x83\x55\xe9\x7c\x0c\x34\x29\xb0\x70\x23\x5d\x04\xc5\x24\x38\x55\x67\x81\x6a\x66\x33\x4d\x6b\x84\x6b\x56\xab\xa7\x9d\x19\xb3\x9a\xec\x29\x1e\x31\xe8\x7d\x9f\x2b\xdd\xe1\x84\x1c\x1c\x4d\x3b\x00\xdc\x14\x5a\x87\x18\xe8\xd5\xb4\xa9\xe0\xe6\xf4\x06\x80\x23\x22\x1a\x10\x06\xe2\xec\xd6\xaf\xf6\x98\x1e\x62\xff\x85\x5e\xf8\x5f\x97\xe9\x87\x21\x39\xa0\xdf\x17\xf2\x29\xa4\x92\xd1\x71\x75\x3a\x24\x4a\x1c\x0b\xc2\xb5\xb9\xe5\xb0\x1a\x84\x77\xce\xc3\x3f\xab\x73\x6b\xcb\xa1\xdd\x07\xcf\xdb\xbf\x43\x36\x22\x9c\xb3\x73\x52\x8f\xf0\x7f\x77\x5f\x78\xd6\xfa\x6d\x58\xef\x46\xc0\x69\xf0\x24\x90\x4b\x80\x76\x4a\x79\x4c\xec\x65\xa2\x4a\x44\x31\x80\x4e\xc6\x73\x41\x88\xb1\xda\xde\x0c\xc3\x9f\x13\xb6\x31\x04\x99\x48\x2d\xc9\xb5\x77\x7e\x6c\x2f\x7b\xcf\x04\xdb\x0b\xee\x02\xff\xed\x13\xe1\xeb\x27\x0c\xdb\x45\x89\x24\x02\x67\x4c\x9b\xe7\x98\x2a\xd4\xc0\x89\x0a\x1f\xa9\x4a\x45\x4b\x4e\x2b\xd0\x3a\xa4\x80\x06\x9f\x68\x2e\xb2\x88\xc7\xca\xce\x10\x9c\x04\x41\x0f\xd4\x36\x41\x23\xc5\x28\xa3\x6e\x9a\x17\xcd\xe8\x6c\xa0\x7d\x30\x52\x19\x4c\x90\xdc\x84\x17\x28\x8d\xe5\xe6\x37\x9b\x65\xbc\x8f\x6d\x91\x27\xa3\xe9\x28\x69\x88\xb3\x52\xd2\x07\xc7\x65\x5d\x71\x17\xe1\xdd\xae\xe0\x99\x93\x66\x4c\x51\x29\x56\xc0\x1d\x54\x3e\x0f\x1a\x78\xf2\x6e\xe2\xdb\x6d\xbe\x01\xd8\xa3\x6e\xd7\xd5\x02\x95\x79\x5d\x63\x6c\x01\x57\x43\x7e\x8c\x05\x41\x08\xf0\x94\xc1\xb4\xe5\x4a\xa2\x4b\x64\x9c\x57\x26\x61\xd5\x77\xf2\x1d\x30\x08\x3d\xe9\xc8\x93\x6b\xdb\xbd\xdc\x8c\xb2\xbc\xa0\x0a\x8b\xa4\xc8\x64\xaf\xd1\xeb\xce\xcc\xfb\x11\x83\x08\x51\x84\xad\xd9\xa0\xed\x62\x42\xd8\xcc\x8b\xca\x52\x0d\x93\xcd\x16\x61\x8a\x81\x09\xeb\xa1\xb3\x6f\x60\x8f\x06\x27\xd4\x34\x8a\x4a\x0e\x1a\xb4\x02\x9e\x36\x91\x50\x0a\x39\xe2\x99\x5e\x98\xcf\x06\x34\xfb\xcf\xbb\x0e\x2a\x0c\x5a\x23\x12\x63\xbe\x79\x01\x4b\x13\x62\xa3\xdd\x35\xa1\x0f\x06\x76\x6d\x7e\x6c\x82\xfd\xe7\x1d\x9d\xf4\xfc\xbf\xa8\x84\xb7\x0d\x5e\xb6\xce\x4f\xed\x74\x73\xa7\xc7\x3d\x67\x35\xbd\xbc\xc2\xde\xec\xb1\x43\xf2\xdc\x1f\x25\xad\x3b\x41\x1b\xf1\x5f\xe1\xb1\x1b\xb6\x3d\x8f\xae\xd8\xaf\x98\xba\x85\x77\xd2\x0d\x05\x68\x88\xae\x29\xc4\x95\x04\x12\x55\x45\xb7\x8f\xd4\x33\x83\xad\x02\xe0\x93\x9b\x03\xa7\xfc\x1a\x6b\x13\x32\xca\xe9\x70\x75\x8d\xd0\x50\xec\x83\x37\xa1\x6f\x84\x2b\x02\x8f\x77\x61\x90\x66\x41\xe8\x94\xe1\x06\x11\xbb\xc5\x3b\xd4\x52\xcc\xc1\x42\x1f\x50\x51\x9c\xe3\xd1\xd6\xbf\xf2\xe3\xd1\xdf\xec\xdf\x3d\x72\xec\x1e\x39\xfe\x8d\x1c\x39\x7e\x98\x4e\x1c\x1b\x63\x02\x7c\xc6\x2e\xe7\x8b\x3b\x6c\xb0\xb7\x18\x43\xc3\x6b\x3d\x76\xd3\x05\xd6\xe1\x81\xb6\x31\x9c\xf3\x9a\xdb\x9f\x89\xe6\xfc\xd9\x8b\x3d\x13\x31\xf6\xfe\x43\xce\xfe\xe6\x64\xde\x3b\xae\x74\xff\xe5\xbb\xf0\x27\x25\xf8\x93\xd7\x30\xf6\x65\x75\x82\xfe\xac\x37\x26\x54\x64\x74\xaf\x3e\xd4\x11\x1b\xbe\x6a\x96\xcb\x17\x3c\x76\xe3\x85\xb4\xf0\xa1\x0f\xdb\xb0\xbb\xbd\xee\x6e\xaf\xbb\x31\xe4\xf7\x67\x0c\xf9\x39\xaf\xbe\xfd\xfe\xf5\x58\xff\x96\xe9\x71\x68\x27\x68\xb0\x0b\x46\x63\xa5\xb0\xbf\xba\xc4\x81\xed\x5a\x87\x3c\x31\x9d\xd8\x9d\xa5\xc9\xed\xe9\xba\xff\xb1
\x4b\xc2\x63\xf4\xf7\x40\x08\x97\xc3\xbb\x08\x21\x5c\x14\x70\x57\xcf\xd2\x24\xf8\xa1\x74\xdd\x15\xf8\x6f\xde\xc5\xbb\xda\x15\x15\xbb\xa2\xe2\xfe\x4d\x37\xf9\x96\x52\xa1\xfe\x76\x1c\xde\xd5\xd0\xb5\x0d\xda\xd3\x07\x40\x7b\xda\x51\xf4\x0f\xd7\xeb\xda\xb6\x72\x82\xba\x8e\x4c\x05\x0f\xfc\xc8\x96\x55\x8f\x4f\x1b\x05\xeb\xa3\xe3\x20\xb1\x86\xb7\x1f\x75\xab\x17\x7b\x73\x96\x36\x85\xc8\xa7\xa6\xa9\x0f\x7e\xbe\xcd\xdd\xdb\xcb\xfe\x6b\xfd\xab\xa7\xc7\xc8\x7a\x68\xad\x09\x1e\xc3\xd6\x32\xf6\x77\x57\x38\x71\xa5\xda\x18\x07\xd8\x2b\xab\x42\x39\x70\x7e\xfb\x8a\xf0\x96\xd2\xb5\x71\xc1\xbb\xaa\xc3\x10\x24\x26\xc8\x45\x51\xdb\xf2\x1e\x41\x4f\x60\x00\xd1\x2a\x4d\x8f\xc6\x96\xe7\xa3\xef\xe1\x64\x94\xe7\xe6\xe2\x23\x68\xb6\x0d\x3c\x7d\x09\x3e\x0d\x41\x11\xce\x56\xf2\xb6\xcb\xd9\xff\x72\x43\x82\xff\xa7\x17\x9e\x58\x71\x6b\x49\xa2\xdd\x44\x20\xa4\xeb\xf2\xb8\x63\x32\x06\x78\xa0\xdb\x39\x99\xbb\x3a\xb5\x13\x23\x7c\x3d\x7b\xd4\x4e\xc2\x57\x54\x59\x0e\xf4\xc9\xf8\x2c\xe7\xfd\xfe\x5e\x82\x3e\xd9\x69\xe6\x34\xfb\x2d\x8f\x5d\x8e\xdd\x32\xab\x1a\xe6\xbf\x47\x07\x8a\xbc\xb1\x14\x11\x95\x00\xb6\x36\x1c\x2b\xad\x48\x10\xc2\xf1\x81\x85\x8e\xc3\x06\x24\xc9\x49\x03\x9d\x51\x83\xa0\xdd\xf6\x63\xea\x6d\x88\x82\xd3\x7d\x7b\x18\xc3\x4b\x20\x50\x4f\xa5\xfc\xc8\xa9\x5d\x2b\x05\xb5\xb8\xf1\x1e\x5f\xf6\xd8\x90\x19\xe1\xff\x86\x6e\xcf\x2f\x8e\x6e\x0f\xcf\x46\xb5\x04\xf9\xad\xe4\x7d\x08\xd4\xea\x75\xbb\x29\x65\x41\x64\xbd\xa4\xf4\xec\xd0\xf5\xed\xa6\xd0\x00\xf4\x79\x3d\x4d\xea\xa2\x5b\xe4\x33\x52\x8b\x92\x27\xe2\x7c\xc6\x44\x0f\xe6\x33\x04\x91\x24\x8a\x19\xa7\x7d\xaf\xf1\x98\x3d\x87\xfd\x17\xe8\x86\xe5\x0f\xea\x38\xc1\xe0\xb8\x81\x69\x7f\xea\xb1\xcb\xea\x69\x1c\x43\x7c\xdd\x1c\x70\xce\xfd\xae\xae\xdd\x07\xbc\x39\xc5\x7e\x0d\xbc\xf3\xfa\xc1\x5c\x03\x49\xe8\xe9\x8e\x51\x95\xfa\xa7\x1d\x54\x09\x5e\x1d\x93\xfb\x02\x10\x44\xa6\xac\x80\x6f\xa4\x51\x03\xac\x0a\x1a\xd8\x1f\xdd\x65\x51\x41\x1c\x21\x45\x4a\xc1\x4c\x26\xe9\x4a\x7d\x3f\x11\x9b\x72\x75\x1b\x82\xcb\x15\x8a\x15\x74\x9b\xf9\x55\x8f\x8d\x10\x46\xfe\xc7\x74\x73\xdf\xb5\xb3\x59\xa6\x27\xf6\xd1\xf2\x70\xe8\xb4\xc2\xef\xd0\x3c\xfb\x7b\x8f\x8d\x10\xa2\xfe\x17\x75\x2b\x3f\x0c\xad\x2c\xd2\x82\xc7\x17\x38\xf3\x0e\x9b\x6d\x1f\x5f\xab\xa7\x59\x26\xea\x45\xdc\x1f\xf1\xc6\xd4\x03\xdb\xec\x8f\x7a\xec\x61\xd8\x92\x3b\x13\x2d\x8a\xfc\x77\xe8\x16\xbf\xf6\xfe\x90\x86\x09\x65\x2e\x3e\x80\x62\x30\x66\x3e\xee\x4d\xa2\xf1\x38\x91\x10\xbe\x90\x7f\x17\x34\xe3\xba\x6b\xc3\x59\x08\x8d\x86\x9c\x2e\x51\x97\x5b\x54\x4b\x3f\x14\xa8\xf7\x94\xe5\xd4\x6c\xc3\xd6\x42\xac\x95\xbe\xf6\x08\xd2\xcb\xcb\x73\x65\x45\x75\xdc\xcd\xdb\xcc\x14\x7b\x55\x50\x59\x56\xdf\xb8\x01\xb0\x97\xb0\x6b\x4b\x96\x61\x13\xe7\xbe\x71\xb4\xb6\x8a\x18\x9a\x2b\xbd\x58\xe4\xb6\x7d\xd8\xff\xd0\x25\xe1\xd7\x2a\xa3\xee\x1a\xe4\x06\x72\x87\xf7\x62\xe4\xea\x27\x43\x2a\x30\x54\x51\x8c\xaf\xc3\x56\x15\x25\x8a\xa5\x95\xb0\xd0\x2d\x40\x3b\x02\x0b\x51\x55\x44\xce\x4f\x6d\x8c\x8e\x1c\x2c\x21\x8c\xf9\x86\x5c\xc5\xbe\xf2\xea\x8b\xb3\x5d\x91\x01\x79\x8e\x0e\xcc\x26\xd3\x39\xf8\xda\x6f\x25\x9a\x0f\xa7\x0b\xa8\xbe\xb2\x43\x91\xa4\x73\x43\x40\xea\x17\x36\x84\xdc\xe7\x39\xc2\x38\x20\x09\x8b\xd4\x55\x72\xde\x04\x57\x39\xcf\xf3\x1e\xd1\x17\x28\x2c\x52\x20\x19\x81\xc8\x71\x4d\xad\x5a\x0d\xc4\x86\x94\xaa\x44\x4a\xa2\xb8\x5c\x4c\x57\xd4\xb6\xbc\x43\xea\x78\x08\x5d\xbd\xe5\x5d\x91\xa4\xc9\x8a\x7b\x89\x99\x17\x1c\x65\xec\x55\x07\xd8\x8b\x2b\xec\x72\xd3\x54\x88\xe7\xf1\xff\xc9\x0b\xbf\xee\x2d\xb8\x17\x61\x14\x28\x24\x20\x4a\x82\x7a\xda\x59\x8f\x12\x0b\x07\x0d\xbe\x84\x8c\x88\xda\xec\xcf\x55\x84\x4e\x5a\x07\x41\xae\xfb\x16\x3a\xc8\x7c\xd5\x82\xb9\x4b\xac\x31\x54\xc9\xc9\x2
e\x47\xe1\xc0\xbb\x9a\x05\xd9\x69\xb4\x22\x42\x5c\x2a\x75\x86\xf2\x5b\x5a\x5d\x68\x9f\x34\xbf\xec\x31\xab\xb3\xfc\x4f\x79\xe1\xaf\x7a\x8b\x66\xe2\x45\x39\x10\xc4\x19\xe2\x21\x9c\x03\x99\x28\x7a\x59\x62\xc5\x5d\x03\xc7\x0b\xcf\x9c\xcf\xe8\xb0\x75\x90\x07\xf2\xf4\x01\x7e\x66\x2b\x98\x04\x8b\x75\xfa\xc0\xe9\x1a\x65\xe9\x77\xee\x0f\xeb\x9f\xdc\x9e\xbd\x4e\x90\xed\xab\x2b\x6c\x60\x7e\xf8\xcf\xaa\x84\xef\xf6\x06\x3a\xaa\x44\xf8\xcd\x95\x27\xc3\x9a\xb1\x40\xc3\x1b\xa7\x9b\xb8\x75\x76\x45\xd6\x4c\xb3\x8e\x5c\x93\x49\x9a\x4c\x6b\x2b\x08\xaa\x19\x58\x4e\xd6\x10\x30\x03\x22\xac\x73\xd4\x4a\xe0\x00\x9b\x14\x55\x18\x18\x92\x0d\x41\xa3\x07\x51\xf0\x05\x24\xb1\x62\x60\x4b\x1e\xad\xc7\x83\x23\x67\x54\xfd\x9b\xd9\xa3\xc7\xd8\xe9\x07\x84\x57\xa9\xb9\xec\x85\x15\xe6\x2e\x24\xff\x1f\xbd\xf0\xe7\xbd\xfb\xaf\x4b\x1e\xe4\xee\x18\x9f\x39\x3a\xd0\x1d\x4e\x5f\x7c\x78\xaf\x83\xcd\x69\x91\xf7\xd7\x88\x91\xbf\x36\x82\xbc\xdf\xf0\xd3\xfc\xcb\x9e\x50\x6c\xf7\xd0\x48\xb2\x9a\x60\xc4\x9b\x48\x5c\x63\x00\x9b\x23\x48\x65\x1a\x43\x5b\xf3\xbf\x27\xd8\xcf\x0d\x67\x9f\x79\xd5\x18\xbf\xd5\x76\xf4\x33\x4f\x18\x2c\xcf\x40\x6f\x5d\x14\x31\x0d\x6b\x99\x28\xa4\x1f\x0c\x4f\x69\x22\x1a\xa9\x7d\x97\x02\x92\xc4\xd9\x6e\xcc\x49\x02\x5b\xe4\x14\x2e\x51\x0d\x64\x17\xe9\x4f\xdb\x82\x6e\x59\x07\x1b\x1d\x0f\x6f\xa4\x34\xa7\x48\xed\xc9\x17\x44\x4c\xb3\xaa\x0d\x3a\x8b\xe1\x4d\xf9\x4e\x88\x69\x82\xc3\xc3\x18\x69\xa6\xec\x42\x6f\xa4\xd0\x9a\xa3\xe1\x23\x01\xad\xcf\x9d\x31\xea\xe0\xaf\x4b\xb4\x5f\xfd\xf5\x4b\x9d\x58\x19\x1d\xf0\x5a\x42\xf9\x9a\x8b\x79\xd4\xf1\x7f\xfc\xd2\x70\x69\xe8\x1d\xec\xfb\x5e\x2e\xb2\x49\x6d\x2b\x45\x25\x31\x69\x04\x75\x78\x02\xa2\x66\x2c\x38\x52\x04\x68\x74\xe6\xe1\xe7\x76\x0d\xe8\xbb\x06\xf4\x5d\x03\xfa\xfd\x6b\x40\xff\x27\x65\x40\xff\xa6\x37\x26\xa8\x66\xec\xba\x07\x43\xfa\x2f\x0e\x42\x37\x29\x23\x7a\xbd\xcd\x33\x5e\x2f\x44\x16\xe5\x45\x54\xcf\x1d\xbc\x5e\x1a\x77\x15\xbd\x01\x27\x4f\xd8\x56\xcf\xfb\xe4\x4c\x70\x9e\x33\x46\x86\x28\x7e\xbe\x2b\xcd\x25\xbc\x02\x12\x27\x67\xcf\xab\x68\x61\xfb\xad\x71\xd1\x3c\xe3\x1b\x8f\x56\xf4\xf7\x7b\xe3\x4d\xc5\x56\x9c\xd1\x8c\x6d\x36\x1e\x90\x78\x28\x0e\x11\x7d\x70\x5a\xea\xb6\x0f\x7c\x47\x9c\xf3\x9e\xbc\xbd\xcd\xfd\x98\x7f\xc3\x30\x8c\xab\xa1\x7d\x52\x0e\x13\x7a\xce\x01\x1b\xee\x6a\x28\xaa\xc4\x72\x16\xa5\x59\x54\xf4\x4f\xc8\x19\x3c\x67\x7b\x56\x21\x79\xfb\xd7\xf7\x87\xa7\xc6\x3f\xa2\x13\x16\x46\x3a\x67\xbb\xf4\x7e\x10\xcb\x02\x94\xca\xe3\xec\x30\xff\xb4\x97\xfd\xa5\xc7\xf6\x03\xe6\x98\x68\xf8\x5f\xf1\xd8\xd2\x79\x61\x63\x9c\xc0\x17\x47\xd7\x34\x7c\x89\x77\x9a\x4a\x3f\x6d\xd5\xb8\x9d\x5a\x31\xa1\xf2\xc4\xd3\xe6\x49\x23\x26\x1e\x7c\x1e\x50\xb1\xe5\x16\x38\x61\xd2\x65\xcc\x55\xe4\x24\x6b\xe8\x30\xdc\xd3\xb2\xb1\x80\x14\x72\x3a\xa4\xf2\xc2\xd3\x35\xf6\xbc\x3d\xa4\x1b\x7c\x7b\x22\xfc\xe6\x84\x7a\x68\x48\xcc\x19\x90\xce\xd9\x9f\x07\x2c\x1c\x43\x00\x02\xad\xa2\xde\x4e\xf4\x1e\x2f\xce\x8a\x7a\x0f\x8f\xff\xc1\x2c\xed\x2e\x69\x33\x38\x1d\x2e\x9c\x95\xb5\x0c\x4f\x07\x1d\xc1\x93\x5c\x21\xdb\x52\x0f\xa8\x9d\xba\xf4\x41\x63\xd1\xd6\x5f\xe5\xf8\xdd\xe0\x30\x9e\x5f\x7b\xd8\x7b\x89\xd8\x10\x59\x70\x4f\x4f\xf4\x44\x63\x0a\x7a\xa1\x91\xc2\x9b\x0d\x51\x48\x49\x64\xb6\x24\x4d\x43\xdd\xe1\x0d\x61\xd9\xb1\x8a\x94\x40\xb2\xdd\x2a\xe4\xe5\x66\xe8\x9e\xb4\xdb\x71\x98\x4f\x6d\xdb\x96\xa7\xf2\x4c\x3c\x75\xa0\xff\x30\xb7\xfe\xf0\xfa\x14\x52\x77\x5b\x69\x23\xa0\x39\xd1\xcc\xb1\xc8\xb3\xf3\x72\xc5\xc5\xd9\x7a\xdc\xcb\x81\xc6\x1b\x35\x94\x81\x4f\xd7\x14\xc9\x70\xc3\xc5\xae\x3d\xcd\xbe\xcb\x91\x01\xbd\x04\x1c\x4b\x8b\xfe\xe3\xa6\x83\x46\x24\x15\x46\x88\x77\x4b\xb3\x63\x88\xd3\x12\x50\xa2\xc2\x74\x91\x4e\x5b\xf7\xc5\xad\x
fd\x63\x40\xcb\x4c\x95\x3d\xa6\x66\x2f\x63\xe7\x2a\xec\xae\x8b\xa3\x24\x9d\xeb\xe5\x45\xda\x51\xc7\xac\xbb\x90\x78\x5a\x2a\x82\x6f\xa8\x84\x27\x47\xdd\x74\x71\x2e\x36\xcc\xf5\x8e\x28\xda\x29\x65\x10\xbb\x2f\x97\x62\xe8\x7f\xcd\x63\xbf\xe2\xb1\xcb\xd3\xae\x48\xa4\xb2\x79\x0d\xc2\xdf\xf8\xf7\x79\xec\x49\xa3\xc4\xc3\x85\x12\xae\x62\xd1\xcb\x59\xda\xcd\xc3\x3b\x4a\x1f\x54\x07\x81\x53\x78\x39\xd8\xb8\x46\xa9\xb6\x16\x2f\xa3\xd5\x3e\x38\xe4\x66\x3d\xa4\x8a\x78\xdf\x15\xec\xe8\x36\xd2\x4b\xb5\x7e\x19\x48\x37\xe4\x21\xd6\xbf\xf7\x8a\xf0\x8d\x13\x83\xd7\x09\x37\x20\x13\x28\x27\x70\xda\x13\x60\x16\x4e\x5d\xad\x2c\xaa\x85\x50\x0d\xa4\x9e\xac\x14\x4a\xba\x3a\x99\xcb\x8d\x62\x5d\x91\x0b\x07\x05\xcf\x5a\xa2\xb0\xa0\xde\x66\x83\x21\x1f\x57\x1f\xe2\x03\x5f\x29\x09\xbd\x63\xb0\x14\xb9\x0d\xc1\x87\x80\x57\x30\x11\x44\xb6\x9e\xeb\xb2\x0a\x9b\x2f\x42\x2e\xc0\x11\x6f\xf1\x6e\xf4\x38\xb9\x09\x8e\x7a\xb3\x3e\xf2\x4d\xad\x8e\x0f\x7f\x13\x16\x7e\x63\x6a\xe8\xab\x1a\xc3\x77\xe8\xbb\xb5\x2d\x6f\x2f\x34\x66\xcb\x3b\xa8\xab\xb7\xe5\x1d\x74\xf5\x7f\x35\x97\xdf\x79\x29\xfb\x43\x8f\x99\x07\xfd\x4f\x7b\xe1\xcb\xbd\xd3\xfa\x77\x09\x4c\x0a\xbe\x27\x47\x4d\x4e\xb8\x16\xb6\x5c\x9d\x6d\x14\x1a\x07\xe2\x72\x87\x57\x85\x66\x64\xe2\xb8\xf4\x02\x18\x75\x49\x33\xaa\xea\xdd\x0a\x31\x17\x62\xca\x29\x72\x24\xd3\x28\xb8\xee\x6d\xb0\xa6\x72\x51\x30\xf6\xee\x0a\xbb\x94\x80\x08\x57\xeb\x69\x57\xf8\x6f\xae\x84\xaf\xaa\x9c\xb6\x2f\x0d\xdd\xe4\x52\x82\x75\x28\x03\x3b\x5a\x80\x17\x52\x49\x35\x90\xca\x87\x31\x83\xaf\xcd\xbb\x5d\x91\xe4\x81\x88\xa0\x1c\xc5\x67\xed\x64\xaa\x11\x9d\x82\x7e\x17\x08\x06\xec\x50\x6e\x9c\xfb\xd8\x77\x66\xc0\xa7\x4c\x26\x92\x41\x83\xec\x44\x45\x81\x05\x20\x6e\x75\xa1\x0c\xa9\xa7\xcd\x8b\xa7\x6d\xc5\x40\x99\xc0\xb8\xa5\x1f\xc4\x08\xa3\x69\x99\x35\xff\x71\x82\x31\x53\x80\xff\xe7\x13\xe1\xdb\x26\x9c\x12\x5d\x98\x31\x5c\xab\xd6\xdc\xa4\x4d\x5c\x8e\x53\xbd\xd0\x53\xd5\x25\xf5\x02\xdf\x93\xd6\x7b\xf8\x40\x29\x7a\x0a\x29\xbd\x85\xfa\x54\x2e\x64\x83\x50\x67\x79\x3f\x78\x31\x58\x46\x9a\xc1\x12\x1e\xf2\x7c\x78\x55\x58\x0b\x2c\x78\x19\x67\xd2\x26\x7d\x2b\xeb\xd5\x94\xb6\xde\x43\xfb\x39\x8c\x1f\xe1\x7b\xba\x0d\x7a\xaa\xbe\x4d\x93\xe4\xa9\xee\x2c\xc9\x05\x4e\x86\xd2\x0c\xc4\x01\x6a\x12\xb0\xff\x14\xe9\x73\x50\x61\x9b\xe3\x14\xdd\xf9\x5a\x8f\x2b\xcd\x62\xb4\xa5\x5f\xe4\x82\xf9\x64\x85\x19\x71\xe1\xff\x6a\x25\xfc\x17\xef\xb4\xfe\x3d\x4a\x22\x18\x81\x76\x38\xaa\x89\x5a\x35\x88\xd3\x4d\x91\xd5\x79\x8e\x59\xc2\xdd\xb8\x97\xf1\x78\x0a\x1c\x1b\xb0\xfa\xe9\x38\x58\xb5\x89\xdf\x6a\x01\xe0\x41\x52\x5a\x6f\x35\x78\x52\x10\xaa\x90\x7c\xa0\x86\x48\x1b\x22\xa7\xf3\x52\x18\x3c\x59\x65\x06\xea\x2e\x1a\x2f\x81\x4c\x05\x1f\x6c\x01\xf4\xdb\x1e\x43\x91\xec\xff\xba\x17\x3e\xdb\x3b\x0d\x7f\x8f\xea\x47\xdc\x88\x76\x26\x54\xf5\xb3\x0f\x6a\x73\xfe\xe6\xe1\xec\xfb\xec\x70\xfd\xb4\x01\xe7\xe1\x95\x5e\x52\x44\x70\xe4\xcb\x73\xff\x93\x0f\x0f\x3f\x31\x61\x5f\xd1\xa6\x00\x85\x37\x9a\x36\x2d\xa8\x95\x0c\x9f\xb4\x12\x9c\x29\xa5\x8d\xa6\x37\xda\xf7\x9d\xf2\x22\x83\xe6\xd9\x10\x98\x7e\x21\x2c\xe4\x58\xb7\x5c\xeb\xe1\xac\x97\x04\x9c\xd0\xad\xe0\x21\x72\x27\x42\x54\x82\xfd\x05\xf2\x43\x76\x78\xd2\xe3\x71\xdc\x77\xf3\x4a\x00\x85\x38\xcd\x34\xc0\xad\x61\xe0\xce\xaa\x14\x63\x42\x14\x3c\xba\x29\xcb\x69\x43\x9e\x4c\xb1\x29\x16\x81\x57\x26\xf2\x6e\x9a\xe4\x91\xd4\xd7\x9b\x29\x62\xe5\xc6\x1b\xda\xd6\x66\xd5\x08\x20\x77\x0d\xa7\x32\x81\x7c\xd9\x1e\x7b\x68\x84\xc1\x54\x25\xc3\x75\x35\x90\x22\x67\x27\xc6\x02\xd3\x29\x33\xd4\x75\xd3\x30\x5a\x33\x5b\xde\x7e\x3c\x84\xba\x24\x22\xcf\xbb\x62\xd7\x06\x7b\xb1\x36\xd8\x4f\xef\x61\xaa\x6f\xfd\x8f\xec\x09\xdf\xb3\xe7\x36\xfc\x51\x32\x61\
xf4\x92\x86\xc8\xe2\x3e\xba\x7a\x71\x56\x73\x4c\x88\xb2\x2c\x1b\x1a\x1a\x68\x6e\x65\xd1\x00\x26\x5b\x04\x4d\x04\x13\x85\xdf\xc3\x6c\x61\x35\x08\x30\xd0\x38\x3b\xc9\x17\x66\x31\xaf\x18\x58\x3c\xd5\xcb\x40\xf9\xf9\xfd\xf0\xa1\x12\x1a\x2d\x38\xac\x73\xf2\xc6\x37\x74\x16\x17\x7d\x94\xf2\xfd\xad\x38\xc4\x24\x90\x07\xf4\x3e\x94\x58\xa5\x48\x13\x7a\x52\x1d\x79\xe5\xc4\x87\x78\x27\x80\x0a\xb8\xa7\x17\x6d\xf0\x18\xb3\x8a\xad\x77\x6b\xee\xe6\xc1\x55\x31\xe0\x39\x16\x8d\x20\xcc\x7a\x49\x3d\x0c\x3a\x51\xab\x6d\xf4\x37\xdd\x63\xf2\x66\x70\x6a\x6e\x51\xf7\xee\x61\x24\xd0\x4f\x38\x90\x62\x9d\x88\x92\xde\x59\x4b\x68\x4c\x69\x78\xa7\x5e\x29\xca\x69\xa8\x60\x91\xbd\xaa\xc6\x55\x89\x67\xbd\x41\x56\xa1\x07\xd3\xac\xa3\xfa\x76\x7e\x69\x35\x40\x58\xb3\xc3\x2b\xc7\xe7\x82\xa3\x47\xaf\xbe\x66\xca\x66\x0d\x22\x97\x65\x94\x07\x51\xa7\xd3\xfb\xff\xd9\x7b\x13\x30\x49\x8e\xf2\x4c\xf8\xc9\xea\x9e\x51\x4f\x8c\x90\x21\xb1\xd7\x5e\xec\xdf\x1b\x1b\x82\x9d\x69\xa8\xae\x9e\xd1\xe8\x1c\x21\x44\x4f\xf7\x8c\xd4\x62\x8e\xd6\x74\x8f\x64\x4e\x75\x74\x65\x54\x57\x6a\xb2\x32\x4b\x99\x59\xdd\x53\x92\xd9\x05\x84\x00\x23\x96\xc3\x18\x30\xc3\x61\xc0\x18\x30\x37\x36\xb7\xd7\x60\x8c\x6d\xbc\x60\xf0\x62\x2f\x68\x59\xb3\x3e\x38\x6c\x6c\x78\xd6\xde\xc5\x07\xe8\xc7\xde\xff\x89\xef\x8b\x88\x8c\xc8\xca\xea\x73\x46\xb2\xff\x6d\x9e\x07\x4d\x57\x1e\x71\x65\xc4\x17\x5f\x7c\xc7\xfb\xe6\x03\x89\xe6\x3b\x66\xfd\xad\x9b\xf5\x3f\x64\x9b\xf5\xdf\xb1\x6d\xb3\xfe\xf2\x43\x65\xb9\xff\x5a\x8d\x8c\x25\x2b\x22\x6d\x0b\x1e\xf8\x5f\xaa\x91\xf1\xe1\xf9\x75\x6a\xc3\x3e\xa5\x9e\x66\x6f\xaf\xe9\x3f\xcb\xf6\xea\x82\x91\x57\x3f\x50\x66\x28\x33\x81\x5f\x48\x08\x0c\x06\x4a\x44\x41\xb7\x77\xae\x46\xf5\x9e\x44\x36\x66\xc1\xc6\x88\xac\x30\x5e\x9e\xd0\xf4\xbd\x93\xdd\x24\x98\xd0\x6d\x9a\x24\x2e\x3c\x7e\x18\xd3\x25\x91\xcb\xd9\xce\x53\x98\xdc\x2b\x07\x1b\x07\xaf\xd5\x2b\x06\x14\xa2\x76\x12\x27\x29\xee\xe5\x99\x9a\xcf\x20\x08\x90\x9f\x50\x6f\xd7\x66\x54\x5a\x82\xcb\xf1\x6f\x90\x57\xd7\x08\x29\x9a\xe3\xbf\xb0\x46\x9e\xb0\xee\x30\xcf\x9b\xe7\xd9\x03\x5e\xf1\x43\x65\x1c\x5b\x09\x03\x80\x92\x52\x90\x23\x23\x5f\x62\x86\x20\xe5\x3c\x47\x61\xad\x87\xbb\xe0\x27\x74\x94\x22\x10\xd4\x3a\x80\x4d\x16\x60\xc7\xf8\xa9\x60\x96\x30\x87\x43\xa4\x55\xa9\x3c\x8c\x86\x51\xbd\xa2\x3c\x5b\x8c\x27\x88\xd6\x69\x31\xbb\xc1\xd9\x54\x56\xd0\xd8\xb8\xcf\x00\xfd\x04\x76\x25\x25\xf7\x40\x91\xa3\x25\xc7\x4f\x67\x67\xfd\xde\x68\x35\x74\xc6\x40\x94\xb9\xff\x96\x51\x36\x3b\x78\x79\x78\x58\x44\x11\xaa\xbb\xd9\x40\x88\x8f\x8e\x90\x57\x54\x07\x42\xdc\xb3\xf5\x38\x88\x27\x1f\xdf\x5e\xb4\x43\x83\x3c\xb5\x08\x77\x38\xc9\xa6\x14\xe8\x0a\x35\x31\x0e\x26\xfe\x01\x0d\x1e\xeb\xc5\x37\x38\xfb\xca\x31\x13\xe0\xf0\x44\x36\xb9\xb0\xf5\xb0\x86\xe3\xc6\xd3\x76\x84\x5d\x35\x5f\x1d\xc8\x50\xd7\x1c\x19\x55\xf1\x0c\x4e\x69\x07\x95\xcf\x62\x9c\xfd\x84\x86\x0a\x71\xe2\xaf\x83\xc1\x06\xfc\xd7\x5a\x65\x1c\xc3\x4c\xb2\x1a\xaf\xf2\x34\x98\x9a\x9b\x9d\x4b\x93\x3b\x31\xd1\xdc\x7f\x7b\x8d\xbd\xd6\xb3\xb2\x29\x02\xf5\x14\xd8\xb4\xa4\x64\x47\xcc\x51\xf5\x02\x90\x7f\x61\xf8\x02\x5e\x91\xea\x25\x38\xc3\x6c\x20\x5b\xcd\xa9\x19\x06\x98\x69\x8f\x5c\x1e\xa6\x68\xb0\xe7\xda\xc4\xa3\x06\xa9\x32\xb7\x78\x06\x3a\x03\x94\xc3\xcf\x26\xf7\x68\x5c\xe7\x94\x8d\x57\xe0\x3a\x5b\x3d\x44\x0f\x1d\xe0\x78\x6d\x3c\xc8\xa9\x62\xa4\xb0\x9c\x63\x61\x24\xc8\x8b\x47\x2b\xf9\x68\xb5\xd1\xf6\xd6\x5e\x92\x6b\x54\xfc\x3f\x19\x61\xc7\x2a\xae\x3b\xe7\x05\xcd\xf2\x43\xdb\x72\xb4\x2d\x97\x88\x09\xf3\xed\x65\xa5\x01\x78\xd1\x08\xf9\x53\x8f\x8c\xca\x17\xfc\x07\x3c\xf6\x42\xef\x66\xf9\xaa\x0e\x99\xc1\x88\xd6\xca\x62\x0d\x67\x18\x82\x44\x15\x16\x89\x4d\xba\x5b\x91\xa2\x7a\x52\xbf
[binary/compressed patch data omitted — raw byte content not recoverable as text]
37\xeb\xd9\x2c\x7a\x7a\xb7\x0c\xc2\x14\xc6\x51\x85\xf5\x62\xb2\xef\xd1\xd2\x2d\xcd\x9a\xb7\x15\xa6\x54\x67\x3e\xfd\xcd\x2e\x72\x5f\x8d\xec\xee\x88\x20\xec\x75\xfc\x07\x3d\xf6\x1d\xef\x76\xa0\x37\x55\xb8\x64\x1a\x55\x1d\x1f\x30\x2e\x6d\xde\x3c\x8b\x27\x62\xd3\x24\xc5\x26\x5b\x50\x79\x32\xa6\x5d\x6f\x82\xc7\x99\x76\x7a\xe8\xd8\xfa\x7d\x99\x79\x16\x8b\xb6\x60\x6f\x4d\xb2\x3e\xa2\x30\xed\x57\x0f\x82\x9b\xf3\x84\xe8\x40\x65\xdb\x24\x7f\x84\x0a\x82\x30\xb5\x3f\xde\x37\x47\xc8\x9e\x2c\xbc\x5b\x1c\x0f\x3b\x61\xee\x3f\x30\xb2\x09\x58\xc9\x86\x56\x98\x1a\xb7\xf6\x38\x68\xef\xec\x35\x23\x0b\x89\x14\x6c\xbc\xa3\xcd\x10\x51\xd2\xe4\x91\x19\xd0\xd4\x16\x59\x30\x94\x7a\x96\x58\x20\x33\x82\xca\x06\xd1\x48\xb6\x08\x73\xd1\xb3\x04\xa3\x39\x9b\x86\x58\xa4\x03\x23\x62\x46\x71\x01\xf6\xfa\x73\x61\xa7\xd7\xa1\x3d\x80\xdd\x4b\x62\xf7\x99\xa2\x22\x63\xfe\x84\x15\x12\xc6\xf0\x12\xc6\x49\x6a\x6d\x19\xf6\x0b\x3d\x28\x96\xb5\x1e\x33\x8b\xb4\xb7\x10\xb3\x4d\x54\x2d\xd0\x5a\x54\xbd\xa3\xc8\xe2\xbb\xc4\x00\x15\x98\xce\xa5\xa9\x12\x87\x91\x3b\x57\x34\xc1\xae\xe9\x78\x2f\x46\x09\x3a\xc8\x6f\x56\xfd\xe1\x7b\x99\x48\x27\x96\x7b\x61\x30\xf8\xc9\xc9\x6b\x2e\xa9\x4c\x75\x9f\x4b\x82\x02\x59\xf1\xbb\xbb\xd9\x8c\x7d\xe1\xa2\x80\x28\xbe\x74\x37\xf9\x88\x67\x20\x07\xdf\xed\xb1\x37\x7a\xf3\xdb\x07\x1d\xdc\x7a\x72\x43\x37\xc1\xff\x4c\x44\x61\x4b\x34\xfb\xcd\x48\x5c\x2e\x7f\x15\x76\x27\x7b\xb5\xbc\xd2\x53\x80\x86\x2f\xf6\xd8\xb3\x17\xd6\x47\x34\x7c\x68\x5a\x15\x93\x47\xc8\xf3\xd3\x5c\x9a\x2c\xe1\xd1\xff\x99\x5b\xc7\x3a\x64\xc7\x0d\xb0\xe1\x2a\x90\xfe\x2e\xa9\x03\x59\xd1\x2b\xf2\x8a\x6a\x88\xc5\x7b\xb6\x5e\xeb\x93\x8f\x6f\x17\x4e\xf1\xa7\x0a\x60\xce\x13\xec\xc9\x37\x5f\x40\x2c\xce\x67\x19\x2c\xce\x05\x76\xd3\x99\x0b\x8f\xbf\x79\x6f\xf5\xca\xd4\xbc\x99\x73\x49\x9a\xfb\x7f\xb2\x9b\x5d\x65\x5f\x50\x07\xec\x5e\x37\x32\xec\x99\x26\x5e\x5e\x6b\x90\x72\x8b\x04\x64\xac\x24\xcd\x9d\x15\xf8\xb3\xbb\xc9\xaf\x8f\x90\xbd\xbc\xdb\x9d\x4b\x93\x3c\x69\x26\x91\xff\x9e\x11\xf6\x0b\x23\x00\x2f\xd5\x2d\x92\x1e\xba\xea\x6e\x21\xac\xa1\x48\x1b\x2c\x5c\x05\x80\x16\x0a\xd2\x53\x06\x14\xa4\x7e\x9c\xf3\x73\x0d\x7a\x26\x9e\xe8\xa6\xa2\x15\x9e\x13\x01\x86\xa6\x82\x76\x2d\xf5\x00\xe3\xb0\x98\x9d\x3a\x39\x55\x94\xa4\x51\x9d\xf0\xe1\xfd\x1c\xc2\x98\xe9\xe9\x63\xd3\x13\x57\x1f\x3a\x74\x15\x48\x61\x25\x09\x57\x57\x57\x1b\x21\x8f\x79\x23\x49\x97\x27\x79\x96\x85\xcb\x10\x45\x9a\x4d\xaa\x22\x26\xa0\x88\xf1\x06\x3d\x99\xc4\x13\xa6\x7c\xdd\x3b\x83\x4a\x27\x37\xea\x52\x13\xb3\x5e\xb3\x2d\xb5\xb5\x4e\xbf\x99\x74\xba\x3c\xee\x37\x9a\x49\x67\xb2\xd3\x9f\x68\x02\xfe\xec\x84\x2e\xc3\x46\x73\x59\x02\x5e\x15\x18\x1d\x9d\xc3\xb8\xdc\xe3\x69\x60\x99\x1a\x15\x32\x5b\x31\xfe\x0e\x84\xbf\x52\xf0\xe5\xc4\x85\x57\xd4\x96\xe1\xcc\x99\x77\xeb\xd0\xbc\x5f\xf4\xd8\xab\x4a\xa1\x79\xe6\x3b\x61\xa3\x20\xf6\x1f\x1d\xa8\xb2\xf2\x7d\xf2\xc1\x7d\xfa\x34\xab\x4c\x33\x4e\x66\xa0\x6a\xde\x1c\x94\x51\xa0\xf2\xcf\x9c\x9c\xbf\xe3\xf8\xd4\x91\xa3\xc7\x1b\x54\xe3\xb1\x1b\x27\xb7\x5c\x93\x5d\x35\x2d\xf5\x96\x65\x37\xf7\x06\x02\xb3\xd0\xbf\x4a\x1b\xb4\x60\x2f\x84\x37\x2c\x0b\x96\x05\x0f\xe9\xda\xa9\x96\xc8\x98\x1e\x69\xff\x36\x36\x2b\xdf\x9d\x9d\x1b\x3a\x3d\x75\x93\xcf\xcc\xcc\xd5\xe9\xc2\xf4\x1c\x04\x93\xcd\x4f\x2f\xcc\x15\x11\x12\x61\x26\x6f\x38\x4d\x7c\x45\x8d\x3c\xae\x62\x15\xa2\xf2\xea\x00\xc2\xfc\x85\xc7\x9e\x35\x78\xd9\x1c\x76\x9a\x49\x2c\x4f\xab\x21\xba\x28\x78\xae\xb6\x73\x38\xa6\xea\x4c\x5b\xc3\xad\xa2\xdd\xcb\xbc\xd9\x14\x70\x34\x90\x22\xce\xd5\x55\xfb\x64\x95\x8c\x69\xbd\xc9\x3f\xbb\x59\x28\x98\xc3\xe6\x94\x58\x44\xae\xb4\xe5\xfc\x87\x74\xd1\x81\xc6\xea\x4c\x91\x8e\xc8\x1b\xe4\x0d\x97\x92\x89\x75\
x38\xfb\x8e\x45\xc9\x2a\x22\xc1\x82\xfd\xfa\xdb\x7b\xd9\x4d\xee\x25\x4b\x32\xe9\x88\x8c\xe2\x81\x7d\x59\xc9\xbc\x1d\x41\x44\x98\x3c\x3a\x37\xce\x7b\x8f\xe9\x0e\x25\xbc\x73\x46\xe8\xef\x09\x79\x5b\x8d\x3c\x3a\x08\x33\x29\xd9\x7b\x61\xd6\x16\xe9\x09\x91\xb7\x93\xc0\x7f\x69\x8d\xcc\x6c\x8a\x83\x50\xb6\x6d\x66\xb0\x20\xf6\xdb\xde\x62\x45\xf9\x8b\x15\x69\x2a\x90\xe7\x0b\xe1\x39\x51\xb2\x4a\x9d\x97\x60\xa6\x16\x28\xa7\x30\xde\x6a\x61\x86\x99\x82\xc0\x6c\xd0\xc5\x38\x8c\x16\x9d\xf4\x22\xa5\x17\xba\x65\xc1\x31\x24\x43\x29\x81\x2a\x69\x2f\xc3\x53\x1e\x8f\x56\x79\x3f\xd3\x1a\xae\x7d\xaa\x68\x90\x97\x8d\x10\x5f\x87\x91\xce\xa5\xa2\x29\x02\x00\x47\xfc\x9e\xc9\x73\xfa\x5a\x6d\x71\xf0\xfe\xa2\x1d\x38\xa3\xd3\xbb\x3b\x89\x8a\x4b\x2c\xbe\xa7\xd3\x29\xae\x0e\x8c\xa9\xe6\x61\x90\xeb\xb6\xd9\x4e\x32\x11\x5b\xaf\x80\xd0\x54\x45\xc9\x62\x8d\xc5\x23\xee\x75\x44\x1a\x36\xc1\x50\x10\x25\xe0\x2f\xdc\x8f\xea\xf2\xaa\xa0\x39\x3f\xab\x51\x08\xa2\x64\x59\x3d\xa5\x92\x95\xc7\xe9\x89\x81\x0e\x34\x28\x3d\xca\x9b\xed\x8a\x3b\xa5\x0c\xa9\x54\x23\x58\xd0\xa7\x1f\xac\x1f\x3c\x70\xe0\xc0\x81\x67\xca\x7d\x23\xd7\x09\xe9\x2a\x79\xb1\x78\x5d\xc5\x89\x9a\x33\x02\x1c\xf5\xf5\x61\x1b\x72\x26\x12\x2a\xcb\x91\xdb\x48\x21\xc9\x2d\xe1\xf6\xa9\x1a\x59\x63\xa2\xfb\x6f\xa9\x91\xb9\x0b\xc4\xa2\x59\xf0\x96\x7c\xde\x5b\x1c\x5e\xe5\x62\x01\x40\xa9\x39\x4c\xf8\x1a\xdc\x93\x66\x13\xc1\x80\x7c\x93\x48\x54\xbc\x6d\x47\xe4\x67\x49\xb4\x22\x07\xc9\x9d\x37\x03\x99\x6f\x68\x65\x4f\xcf\xa2\xb1\x24\x8c\x11\xe7\x23\x8c\x01\xeb\x01\xb5\x3f\x2b\x94\x9b\xfc\x6d\x4d\xa7\x46\xfc\x55\x8d\x7d\xc7\x5b\x84\xbf\x17\x9d\xe4\x08\x39\x71\xd2\x22\x6a\x33\x8a\xec\xa5\x07\x4b\x55\xaf\x3f\xd8\x35\xad\xb6\x15\x61\x5b\x56\xf8\x11\x58\xb2\xd4\xee\x67\x92\x90\xe5\x36\xd8\x11\x26\xf1\x1a\x12\x34\xec\x70\x68\xb3\x12\x30\x2e\x2c\xb4\xec\x1f\x59\x14\xaa\xcc\xad\xb4\xb0\xd5\xc4\x49\xd1\x64\x27\x95\xb6\x68\x9d\x63\xed\xbb\x99\x1c\xdb\xa4\xb8\xc3\x98\x39\x48\x25\xb9\x3d\xcc\xdb\xf3\x18\x60\x98\x3d\xe8\x5d\x4e\xfe\x95\x63\x63\x8e\xc2\x2c\x1f\x30\x32\xbf\xb2\x9a\x3f\xbd\x48\x95\xc3\x33\xde\x77\x46\xd8\x4d\xa5\x6b\xb2\xef\x36\xdd\x0d\x6a\xe1\xe5\xb8\x1f\x9d\xa2\x6f\x5e\x2e\xa1\xa9\x8d\x90\xbf\x73\x23\x04\xfe\xdc\x63\xc7\x37\x1f\x21\x60\x72\x97\xd6\x8c\x0f\xd8\x18\x84\xb7\x69\xea\xc5\x8d\x0e\x78\xad\x47\x76\x75\xdb\x3c\x13\xfe\xcb\x3c\xf6\x5c\x6f\x4e\xfe\x59\xe6\xc4\x35\x27\x47\x0a\x4f\x6a\x15\xab\xc0\x79\xdc\xe0\xe9\x34\xe7\xd9\xd9\x6c\x92\x07\x9d\x30\x0e\xe5\x0a\x9f\x50\x2b\x7d\xd2\x94\x94\x4d\xda\x9a\xd4\x97\xf7\x12\x5a\x99\xc5\x8e\xc6\xa2\x5b\x7b\x49\xce\xfd\x0f\xec\x65\xc7\x9c\x2b\x52\x56\x66\x94\x2f\x2f\xa7\x02\x94\xe0\xbb\xe0\x62\x2a\x64\xa1\x0a\xc3\x4f\xc4\x60\x4b\x0f\xe0\x20\x60\x2a\x77\xa6\xc4\xf3\xc8\x0e\x98\xf5\x0e\xf9\xf9\x0e\x3c\xfc\x05\x85\x87\xff\xb4\xa7\x00\xc7\x3e\xee\x91\x2b\x36\x80\x56\x61\xad\x6a\x00\x1a\xbb\xd7\x53\x07\x81\x62\xe5\x69\xfe\x72\x58\xe5\x8d\x87\x84\x99\xfc\x2f\x0a\xa3\xe3\x57\x3d\x72\xe5\x26\xfb\x81\x50\x09\xbf\xe8\x19\xd4\x86\xa2\x2f\xbc\x99\xf7\x78\x54\x48\x27\x94\x5c\x90\x32\x95\x17\x1b\x0a\x58\xa5\x1f\x92\x9e\x3e\xe8\x9d\x59\x1f\x8d\xfe\x0a\xff\x40\x15\x1a\xbd\xd3\xe9\x32\x16\xfd\x3b\x88\x83\x09\xad\x6c\xfa\x05\x73\xcf\xfc\xec\x3c\x5e\x9a\xe6\x5d\xde\x94\xca\x62\x98\xe5\xfe\xff\xde\xc3\x8e\x56\xdf\xaa\xa4\x31\x1b\x78\xd2\xc4\x40\x54\x32\x8a\xbd\x65\x6c\x47\xe0\x6f\x57\xe0\x7f\xc9\xd3\x74\x55\x9f\xf3\xd8\x81\xd9\x4a\x36\xed\xe1\x9f\xc5\xd6\x91\x6e\x24\x37\x0c\xf7\x1f\x6d\x60\xc2\x48\x0d\xe8\x31\x83\x7a\x67\x87\x77\xa5\xa2\x94\x49\xdd\x73\x02\x76\x7e\xf2\xa0\x47\x87\x6a\xa8\xbb\xfd\xd1\x0e\xef\xee\xf0\x9c\x6c\x67\x23\xfb\xac\xbd\x91\xfd\xfa\x36\xe9
\xbb\xff\x7d\x35\x7d\xf7\x43\xc7\xde\x9d\xac\x2f\x10\x8f\xfb\xb7\x18\x81\xa8\x27\x6a\x99\x07\xad\x52\x8a\x55\xf2\xa0\xfd\xe1\x1e\x72\xcd\xc6\xa2\xe7\x10\x5d\xfe\x18\xa0\x5e\x1e\x8d\xf3\xb4\xef\xbf\x66\x0f\xcb\x06\x2f\xe3\xd4\x5d\x4d\xd2\xb3\xf2\x1c\x37\x11\x06\x75\xca\x29\xdc\x9f\x17\xb9\xf1\x49\x62\x40\x49\x29\x50\xdd\x9a\xcc\xca\x92\x84\x20\x9b\xc2\xc1\xc8\x71\xd3\x28\xc6\xc8\xdb\x3c\x32\x9a\x87\x1d\xe1\xbf\x61\x1b\xcc\x59\x4b\x3a\x1a\x50\x96\x94\xe5\xbc\x03\x41\x5c\x3a\x26\x30\x33\x70\x9f\xab\xf2\xd8\x9b\xa9\x1c\x05\x25\x67\x0b\x33\x96\xa1\x1e\x3d\xd5\xb5\xf2\x20\xf6\x4d\x75\xbb\x51\x7f\x1f\x79\xb5\x2b\xff\x5f\x58\x63\xdf\xf7\xd6\x96\xff\x96\xd8\x77\x47\xc6\xf8\x35\x4a\x63\x03\xa6\x2b\x3c\xad\x82\x93\x1f\x86\x79\x52\x95\xc6\x30\xde\x02\xe3\x7d\x80\x51\xa1\x3b\x01\x00\x21\xd4\x6a\x05\x14\x0b\xbd\x0b\x33\x1a\x8b\xa6\xc8\x32\x8e\x49\x91\x79\x8a\x61\x05\x4e\xeb\xb8\xd5\x0e\x9d\x36\x1c\xe6\x96\x25\x85\xf7\xf2\x44\x9e\x9d\xd1\xf2\x65\x12\x79\x1c\x93\xf6\x9b\x3c\x42\x70\x7c\x81\x7e\xf7\x15\x1e\xbb\xd7\x3b\x66\x7e\x6b\x11\x1f\x84\x59\x33\x0d\x81\x4a\xcd\x0a\x63\x41\xfe\x01\x29\xb5\xd4\x17\x52\xbd\x97\x13\xcd\xe4\x4d\x2d\x80\xb5\x22\x34\x4a\x4e\xd4\x47\x9b\x08\xba\x03\xb2\x2c\x34\x59\xc1\x87\x29\xc3\x9a\x6f\x3b\xe8\xe4\x0c\xbd\xc1\x23\x63\x2d\x75\xc3\x7f\xf9\xd6\xc4\x8c\x29\xf8\x56\xfd\x97\xc5\x08\x86\xe9\xa6\xb7\xcc\x9f\x3a\x69\x86\x57\xf7\xa4\x00\xcf\x30\x8e\x90\xa2\x91\xe0\xd0\x6d\x90\xdb\xc9\x25\x18\x5a\x92\xfa\xc7\xd9\x8d\xb8\x2a\x53\x65\xb8\x19\x24\x5c\xd1\xab\x13\xc3\x51\xd4\x86\x62\x26\xb9\xf3\x6d\x7e\xc5\x23\x7b\x0c\x9c\x86\xff\x56\x8f\xbd\xda\x73\x26\xb7\xed\x53\x2e\x60\x37\xd0\x8c\x15\x09\x1e\xa0\xaa\x11\x66\xb4\x42\x52\x2c\x09\x59\xb5\x8a\x6f\xc7\xb9\x0b\x9f\x05\x8d\x68\x2a\x32\xd1\x38\x4d\x0a\x64\x59\xb5\xa0\xe0\x1b\xef\x3b\x03\xbb\xd1\x3e\xa7\xd1\xef\xbb\x94\x3c\xbe\x22\x27\xe2\x74\x12\x45\x61\xbc\x8c\x6f\x14\x79\xf6\xfe\x73\x2e\x65\xd7\x80\xe6\x0f\x86\x71\x30\x3c\x39\xca\xbf\xcd\x66\x91\x62\x19\x6a\x13\x74\x85\xd1\x47\xf7\x92\x6f\xee\x22\x63\x1d\x7e\x6e\xbe\x97\x2e\x0b\xff\xbf\xed\xda\x2e\xdd\xc1\xab\x77\xd9\x01\x23\x55\x28\x76\xca\x2f\x53\x80\xe7\xf2\xa5\x64\x45\x38\xcd\x77\x5f\x6b\xd0\xdb\x40\x9f\xd0\x0e\x1d\xa0\x90\x48\xa2\x5e\x6e\x82\xb4\xf7\x8b\x73\x87\xe9\x55\x08\x5b\xa0\xc9\x64\x20\x52\xa5\x65\x8a\x84\xfa\xe1\xb9\x83\x07\x1e\x37\xae\x4c\x91\xb2\x44\xb5\xea\x0f\x48\x21\x78\xc2\xe1\xbd\x90\x93\xe5\x40\x83\x4e\x95\x2a\x83\xf7\xa2\x66\x2f\x2a\x10\x97\xad\x2a\x97\xfa\x34\x4d\x7a\xe8\xee\xeb\x75\xdd\x9c\xdb\x2b\xae\x7a\x5c\x83\x1e\xc5\x6c\xc8\xc3\x5a\x54\xa3\x73\x53\x99\xb3\x0f\x1d\x78\x5c\x5d\xa7\xf1\x50\x95\x65\x01\x69\x13\x7a\xcc\xb8\x1c\xb0\x5e\x97\x86\x9d\x8e\x08\x42\x0e\x50\x2b\x26\x0e\xdc\xfd\xd2\x18\xd3\x98\xd5\xd1\xd5\x6a\x36\xa8\x1c\xa2\x88\x8a\x11\x4e\x14\x0a\xaa\xac\x11\x06\x29\x48\x30\x4f\xeb\x5c\x53\x88\x80\x1e\x3c\x74\xe0\x71\xe5\x71\x6c\xd0\x53\x40\x22\x11\xa9\x61\x85\x2c\xb1\x25\x21\x62\x7a\x36\x8c\x22\x11\xd4\xd7\x6d\x7e\xab\x97\xe6\x6d\x91\xd6\xa9\x88\x33\x04\x36\xc6\xf6\x95\xda\xa6\xe9\xac\x80\xcd\x1b\x44\x64\x1f\x23\x19\x02\xfd\x92\xd6\xed\x40\x70\x28\xfa\x92\xca\x26\x93\xcf\xef\x1a\x60\x53\xf9\xf8\xb6\x67\xfb\xf7\x46\x37\x38\xdb\x6d\x86\x94\x81\xc6\x5f\x94\x09\xbe\xf5\x69\x1b\x40\xd4\xcf\xb0\x05\x02\x82\x42\x2d\x8d\x2d\x4e\x6e\x39\x71\x86\xce\x8e\x40\xe5\xf1\x5f\x33\xf8\x11\x37\x31\xeb\xd5\x14\x35\x93\x1a\x23\x22\x78\xd0\xaf\x6f\xa0\x76\x33\x3b\x31\x06\x03\x63\x05\x54\x9c\x2e\xc5\xa8\xe2\xd2\x0c\x1f\x98\xc8\x15\x0b\x0d\xdb\x51\xf0\xb5\x20\xa3\x0a\x68\x6f\x43\xe7\x33\x3a\x3f\x2a\x86\xa2\x41\x7e\x7f\xaf\x93\xf8\xb4\x04\xf8\x29\xe6\xfc\x97\x26\xf1\x2d\xc9\x12\x3
8\x8e\xdf\xb6\x97\x3d\xd5\xfa\x5d\xe1\x35\xbe\x33\x59\xb2\x40\xc5\xc1\x45\x12\x25\xc9\x59\xd4\xbb\x00\x3b\x5f\x65\x1f\xa2\x0f\xb4\xa9\xf0\xc2\xd3\x5e\xdc\x38\xef\x8d\x69\x41\x7e\xde\xdb\x7b\x67\xb2\xa4\x29\x0a\x5c\xaa\x4f\x42\x3e\xe2\x91\x7f\xd5\xe2\x61\x24\x82\x5b\x92\xa5\xec\xe6\x50\x1e\x04\xfa\x18\x19\x69\xe7\xde\xb9\xd0\xa7\xf8\x02\x95\xeb\x33\x6b\x8b\x40\x36\x14\xe6\x5a\x2a\x72\x1e\xc6\x76\x68\x08\x9c\x08\x31\xc8\xd7\xf2\xe5\x9a\xa8\x43\x71\x4e\x7e\xa9\x30\xa7\x77\x8b\x34\x41\x71\x67\x7b\x16\xdd\x99\x7c\xd0\xf5\x22\xfe\x8c\x47\xec\x8e\xf9\x77\x93\xab\x87\x9e\xc3\xdd\xaf\x70\x4b\xf1\x16\x58\xeb\x9e\x3c\xef\x00\x5b\xca\x61\x77\x89\x0e\x54\xde\x1c\x0c\xb7\xfa\x20\x52\xf0\x51\xf5\xf9\x1a\xe4\x99\xc4\x0c\xb7\x7f\x2b\x9b\xb1\xc1\xc8\xa5\xa2\x25\x9f\x53\x4a\x58\x9d\x66\x16\x0c\x9b\x88\x1b\xab\xe1\xd9\xb0\x2b\x57\x0f\xc4\xf2\xc8\x5f\x93\xf2\x71\x47\x0b\xf9\x8c\x47\x7e\x14\xd6\x4f\x18\x2f\xcf\x08\x1e\x44\x61\x2c\x74\xde\xd7\xbb\x4c\xf6\xf4\xab\x3c\x13\xa5\x12\xa8\x87\x64\xe5\x0a\xd9\x12\x49\x6a\x55\x21\xa6\x9f\x0a\xd5\x2c\xcc\x32\x91\x59\x5b\x3f\xf2\x14\x02\xda\x50\x5f\x05\x79\x35\x28\x3d\x11\x42\xc0\x06\x7c\x6d\x33\x2f\x33\x8b\x0f\xa2\x17\xe7\xe8\xbe\x54\x13\x24\x89\xcb\x74\x6e\x9f\xf4\xc8\x63\xb2\x1e\x84\x7e\xb4\x7a\xd1\xc0\x94\x7b\x93\x99\x72\x2f\x2e\x4d\xb9\xe2\xa5\x87\x7a\xda\x1d\x1a\xc8\x20\xbb\x24\xeb\x65\x5d\x11\x07\xfe\xcf\x79\xec\x3e\x0f\x63\xc3\x22\xbe\x4c\x73\x01\x90\x26\x18\x08\x67\xf2\xb5\x13\xaa\x1e\xa7\x59\x6f\x09\x13\xa3\x73\x6b\xf8\xc0\x7b\x6e\x30\x54\x38\xa4\x44\xe4\x09\xe5\x11\x42\x3e\xc3\x17\x13\x16\xb9\x40\xd6\xa0\x4e\xf3\x5a\x3c\xca\x54\x62\xdd\x52\x92\x44\x82\xc7\xe4\xf7\x6a\xe4\x51\xcd\x24\xc6\x23\x4a\xb3\xaf\x60\x43\x3e\x5c\x63\xef\xa8\x15\x33\x5d\x85\x6f\xe4\x72\x6e\x53\xf3\xb4\xdd\x32\x3c\x95\xc9\xf9\x8d\xd0\x6d\x0e\xda\xd1\x04\x65\x80\x1a\xc2\x8a\xb0\xed\xc3\x98\x35\x92\xe9\x65\x91\x69\xac\xaf\xa2\xf4\xa8\x7f\xbd\x7c\xf3\x58\x92\x2e\x85\x01\x3b\x2c\xa7\xd8\x52\x18\x64\x76\xfd\x69\x4f\x8e\x49\x76\x36\xec\x76\xe5\x44\x8d\xc5\x39\xb8\x26\x67\x6a\x37\x15\x2b\x61\xd2\x03\xd5\x83\xb6\x79\x16\xef\xcb\x8b\xc9\xd0\x17\x39\x94\x7d\x1a\x69\xda\xd8\x61\xb9\x83\x34\x45\x64\x1f\xd5\xb4\xca\x22\x67\xbe\xfc\xea\x8a\xd2\x2d\x43\x19\x9a\xb7\x29\x87\xfd\x23\x89\x85\xbd\xfa\x5e\x37\x4a\xfe\x9f\xaa\x68\xc5\x73\xa2\x39\x85\xec\xa3\xdf\x1f\x61\x07\x8b\x9f\x4e\x64\x22\x83\xb6\xc7\x45\x78\x34\x93\x82\xda\x04\x44\x6a\x49\xfc\x91\x11\xf2\xc1\x11\x72\x49\x33\xe9\x74\x78\x1c\xf8\xef\x18\x61\xf7\x8f\x4c\xe3\x8f\x22\xcd\x1d\x7f\xc2\xba\xce\x13\xf5\xa1\xe4\x12\x07\x10\x27\x3d\xed\xa0\x92\xba\x39\x9c\x81\xe2\xe0\x64\x1a\xd8\x65\xc9\xb2\xd3\x24\xc9\xe9\xfe\x7d\x93\xfb\xc6\x8b\xe0\x38\x55\xcc\xbe\xcc\x22\x2b\x55\xd1\x2c\x45\xa3\xb2\xb0\xd3\x05\x0a\x37\xd1\xdc\x17\xe8\x7c\x0f\x39\x89\xb1\xcb\xd0\x2a\x4e\xb3\xb6\x88\xa2\x3a\xcd\xe0\xd8\x6f\xd2\x3f\xe0\x2a\xc0\x7d\xa6\x3d\xe5\xeb\xdc\xbf\xef\xa7\xf7\x01\x7b\xc0\x38\x5d\x4d\xe4\xa7\x95\xcd\x6f\xd0\x05\xcc\x27\x30\x05\xf5\x93\x9e\x49\x50\xd5\xeb\x38\xea\x23\x5a\x30\x78\xd4\x13\xc5\xeb\x20\x1f\x97\x9a\x4f\x68\x7b\xd6\x0f\xc0\x68\x2a\x79\xce\x33\xa0\xb2\x9c\x6c\x0b\x1e\xe5\xed\xbe\x12\x04\xf1\x04\x48\x05\x08\x46\x57\x77\xd6\x82\x13\xfa\xf8\x5e\x72\x64\x03\x90\x83\x83\x30\xd0\x6e\xd0\xcb\x77\x09\xfb\x19\x6f\x1d\x84\xc4\x62\x5e\xa9\xaf\x64\x83\x23\xb6\x14\x67\x42\x19\x51\x0d\x06\x83\x03\x64\x19\x85\x58\x2c\x83\x87\xa7\x6c\xbc\x06\x24\xb2\xcd\xe3\x65\xc5\xda\xe3\xcc\xcd\xff\x3a\x46\xbe\x68\xdb\x46\x7f\x6b\xdb\x4e\xbe\xe7\x95\x9d\x7c\xc6\x40\x7a\xfd\x43\x65\x21\x6d\x90\xe7\xd4\xc8\x98\x1a\xa4\xcc\xff\xae\xc7\x66\x0d\x3c\x9b\x93\x40\xba\x6a\xe3\x60\xe6\x48\x12\x
82\xc0\x66\xc3\x90\xb8\x9d\xd9\x72\x13\x39\x4a\xa6\x37\x0d\x50\x39\x38\x5b\x36\x14\x53\xa1\xfc\x04\x1b\x8e\xa9\xd8\xf1\x23\xed\x04\x0e\x3c\x5c\xfe\x96\x07\xbd\xe7\x78\xeb\xfb\x28\x9e\xe9\x3f\xdd\xf8\x28\xaa\x17\x4b\xc9\x63\xb1\xb6\x08\x2d\xfb\x77\xff\x69\x0f\x79\x4c\xc5\xd6\x7e\x24\x84\x13\xb7\xff\x27\x7b\xd8\x9b\x3d\xf5\x83\xe6\xa1\x26\xf3\x31\x90\x74\x2a\x15\xe3\x7a\x9b\x9f\xb3\xae\x08\x9f\xc2\x8c\x2e\xc9\x93\x3b\x52\x40\x41\xe8\x31\x80\x33\x1a\x92\x22\xa9\x65\x76\x53\x81\x68\x1a\x61\x4c\x0f\x36\xae\xa9\xd3\x2e\xe2\x03\xe8\x0c\xba\x25\xac\x3b\xa3\x16\x90\xb1\x39\xb3\x2a\xb0\xec\xc6\x79\x6f\x77\xce\xd3\x65\xe1\xa6\x3c\x7c\xf1\x12\xf2\x6c\xa2\x6e\xf8\xd9\x06\xa8\x98\x50\x3e\x17\x11\x93\x37\x02\xa8\x3e\xbc\xef\x20\x8e\xda\xd8\xdc\xb2\x7d\x86\x97\xc7\x15\xe9\x8d\x1d\xe9\xb2\x23\x5d\x76\xc2\x92\x2e\xac\x57\xf7\xd4\xfa\x12\xb3\xee\x3f\xbe\x2a\xcc\x45\x49\xb1\xb2\x00\x7c\xcb\x2e\xf2\x14\x35\x16\xb2\xd8\x09\x1d\x8d\x98\xa4\xc5\x70\xf0\x6e\x58\x89\x9f\x3d\x35\x37\xab\x12\x67\x8a\x7c\xca\x3f\x18\x65\x27\x2b\xae\x97\x54\xd7\x02\xe1\x2e\xa6\xc5\xd3\x60\x7a\x03\x3a\xcc\xb0\xd9\x8b\x78\x8a\xa7\xf8\x35\x33\x2b\xbf\x3e\x42\xae\x51\xe9\x89\x93\x8c\xad\x9f\x9d\xe8\xa4\xbf\xec\xa4\xf4\x5d\xc0\x94\xbe\xa7\x99\x50\xb3\x39\x36\x7d\x01\xb2\x5b\xed\xb2\x3f\x54\x73\xc2\x6b\x81\xf8\x50\x4f\xc2\x53\x2b\x22\x6d\x0b\x1e\xf8\x2f\xaf\xb1\xd3\xfa\x07\xc5\x63\x65\x2f\x15\x65\x8c\xc5\x62\x13\xd5\x8f\x96\x79\x22\x8c\x5b\x03\xf2\x7a\x9d\xd9\xf6\x1e\x8f\xbc\xd3\x23\x63\xdd\x24\x38\x16\x9e\x13\x81\xff\x06\x8f\x9d\x9c\x53\x3f\xca\x15\xb5\xd4\xc5\xcd\x55\xf7\xce\xb5\xe3\x89\x36\x90\x8f\x6e\x35\x98\x3c\x7f\x8c\xfc\xb8\x35\x6e\xe9\x12\x6f\x2a\xe7\xa5\x80\x38\xb5\x6f\x5c\xc2\xfe\xad\xfe\x51\x15\x99\x26\xef\x65\x95\x31\x68\xbf\xb1\x7b\x67\x77\xdf\xee\xee\xfe\x54\x1d\x82\x36\xc7\x7e\xcc\x44\xa0\x15\xe7\x4e\x18\x7c\xfb\x24\x39\x4e\xf6\x91\xc7\x0d\xd5\xe2\xec\x8f\xbb\xa3\x38\x6c\x43\x71\x68\x59\x7a\xc3\xd3\xb6\x17\x04\xf6\xe3\x6b\xe8\x0c\x0f\x7a\xcb\xeb\xef\xe5\x33\xfe\x11\xb3\x97\xc3\x07\xe6\xbd\xbc\x9d\xa4\x9a\xd9\xa1\x74\xf4\xd1\x4b\xb9\xbc\xc7\x7f\xe0\x27\xc8\xc6\x1a\x7e\x3b\x60\x41\xc9\x31\xf2\xff\xf1\xc7\xd9\xd5\xf0\x97\x2d\xd7\x4c\x9e\x35\x52\x9b\xc0\xc1\x66\x15\x92\x7d\x0a\x49\x67\x21\x21\x60\xa7\x1d\xb9\xf1\x40\x8d\x7c\xb2\xa6\x7f\xfa\x1f\x5c\x83\xcd\xd9\x6d\x66\xda\x8b\xe5\xb6\xda\x38\xcd\x57\x8f\x6a\x74\x5f\xf6\x77\x9e\x26\x4b\xc8\x0e\x13\xfa\x78\x3a\xdb\xa2\x5a\x03\x98\x0a\x02\x24\x13\x3b\xa1\x40\x8c\x0f\x1b\xe7\xa0\x43\x34\xa7\x8f\x2a\xee\xdb\x33\xc0\xb6\xa0\xde\xa9\x78\xde\xf1\x77\x2e\x21\x62\x3e\x30\x34\x00\x54\x80\x5b\xd6\xd1\x34\x4d\xd2\xc3\xf4\xf1\xc5\x96\x28\xc5\x4f\xa7\x23\xe2\x40\x04\xd7\x2b\x6a\x6c\x98\x8a\xb0\xa2\x3a\xfc\x2c\xe0\x43\x67\x82\x50\x4a\x03\xd1\x15\x78\xf8\x54\xc8\x14\xe2\x5c\xde\x20\x8f\x56\x1a\xcf\x5e\xe7\x34\xfd\xb9\x1f\x5b\x7f\x3e\xbd\xfd\xc7\xfc\xb7\xfe\x58\x95\x76\x58\x7c\xf9\xd2\xe4\x99\x58\xef\x01\x3a\x70\x3a\xd7\xd3\x72\xf0\xdc\x5e\x9e\xb0\xc3\x0a\xc5\x88\xc2\xcd\x9d\xfb\xb7\xdc\x03\xac\x6c\xb0\x1b\x55\x95\x15\x4d\xea\x86\x05\xca\xf4\x26\x9a\xb2\x9d\xd7\xcb\xa3\x52\xd2\xca\x2f\xd6\x78\x54\x57\x33\x31\x64\x12\x74\xbb\x59\x55\x13\x26\xd6\x7b\xa0\xb2\xa7\xeb\x3f\x75\x45\xa9\xea\xa2\x9e\x5e\xde\x96\xb2\xbe\x79\x71\x07\xa7\xb2\x16\xa7\x11\x43\xa5\xf5\x1a\x6d\x58\xbb\xc2\x8d\x94\x68\x37\x41\x23\x99\x6d\x66\x7e\x6e\xfc\xad\x2b\x36\xf4\xb1\xae\x28\x7f\x2c\xab\x02\x53\x2b\x38\xe9\x37\xd1\xca\x8d\x3d\x5f\x5a\x36\x4d\x91\xe6\x98\x7c\x2f\xb6\xb0\x70\xb7\xf2\x76\xb9\x01\x36\x12\xee\xe6\x1b\xb0\x85\xb7\x4b\x0d\x08\xc2\x0c\x48\xc2\xfa\x9b\xaf\x7d\xd3\xaf\x96\xaa\x06\xbd\x61\x0b\xc3\xbe\xb9\xf7\xca\x95\x1a\
x49\xbb\xd9\x37\xed\xf4\x61\xa9\x28\x21\xe6\xfb\x06\x5b\xc1\xa3\x6e\x7b\x83\x52\xcc\x5a\xe0\x6b\x56\x69\x1a\xe6\xf0\x89\x6d\xb2\x39\xba\x08\x75\x4a\xda\x46\xc7\x54\x49\x16\x03\xf0\xa6\x3f\xec\xe6\xdf\x2d\x7d\x22\xb0\x05\x6c\xbe\xda\x4d\xbc\x65\xba\x3b\xfc\xa3\xad\x53\x9c\xa9\x16\xbf\xd8\x26\xda\xb9\xc1\x17\x4a\x63\xb2\xfe\x49\x61\xfd\x9a\xb7\x5e\x46\x79\x7a\x6c\xbd\xa4\x52\xb7\x94\xe3\x64\x73\xf3\x6c\xb0\x61\x74\x53\xe5\x6d\xa3\xf6\x72\xf3\xab\x33\x6c\xd6\xff\x14\x9b\x7c\xb1\x3c\xfe\x9b\x7c\x5d\x65\xf3\xfc\xe9\x28\xb9\xbc\x22\x7e\xae\x1c\x3a\xe7\x7f\x74\x94\xdd\x5a\xba\x56\xb2\xf5\x42\xda\x13\x84\x11\x69\x4b\x0e\x84\x25\x43\x2c\x9d\x0e\xac\x03\xe3\x01\x37\xe4\xe8\xce\x91\xf1\xcb\x23\xe4\xfb\xb6\x2d\xff\x3b\xdb\xb6\xe5\xbf\x73\x0d\x5b\xbe\x3e\xe8\x41\x68\x99\xd3\x3c\xb0\x72\xe8\x16\x3e\x74\x99\xc8\x0f\xe8\x4c\xe4\x2f\x78\x64\x7c\xdd\xf8\xc6\x86\x0a\x26\x65\x6f\xf2\xe6\xcb\x20\xfa\xc3\xf2\x10\x54\x77\x2f\x7a\x97\xca\xd9\xc9\x5f\x18\xa9\x84\x9f\x42\x7f\xed\x09\xde\x3d\x1a\xaf\x28\x04\xd5\xb7\x8e\xb0\xdf\xf4\x06\xaf\x2b\x24\xda\x8c\x72\x6a\x6e\x02\xf5\x6a\xd2\x85\x68\x6a\x65\x8e\x32\x1c\xbb\x74\x85\xa7\x21\x5f\x8a\x04\x72\xd8\x35\x08\x59\x50\xc1\x4e\x60\xdd\x50\x43\xa1\x3c\x9e\xa6\xc4\x7d\x19\x9d\xe1\x06\xdf\x0c\x62\x1d\x8d\x49\x04\x5e\x38\x2b\xfa\x13\x68\x51\x03\x74\x6e\xca\xb3\xea\x4a\x5d\x5b\xf2\x7f\xf1\xc8\x4d\x64\x4c\x23\x03\xfb\xd7\xb3\x09\xfc\x60\x7d\x87\x2b\xbb\xe8\x97\x46\xeb\x51\x58\x63\x4e\xbc\xdf\xf3\x35\x34\xda\x4f\xb3\xe4\xa4\x85\x5e\xab\xf0\x68\xf2\x4d\x83\x32\xae\xcd\x00\x0c\x38\x14\x93\x97\xc3\x3f\xb6\xe5\xec\x9d\x8f\x24\xff\xba\xe2\x83\xce\x8b\x66\x2a\x72\xff\x25\x8f\x64\xcf\xf3\xf0\x6f\x95\xac\x94\xe1\x0f\xbd\xec\x38\x68\xd7\x3c\x8c\x31\x11\x09\x39\xdd\x21\x56\x7b\xa9\x9f\x0b\xf3\x7d\x54\x10\xa2\x0a\x55\xb3\x3e\x8d\x1e\xa1\x48\x64\x10\xe3\x1f\x43\x54\x3c\xd4\x31\x1f\xde\x2d\xb0\x14\xf7\x23\xbc\xfd\x32\x72\x42\x19\x53\x8e\xb2\x6b\xcf\x28\xb4\xa7\x16\x6f\x86\x51\x08\x66\x9f\x6e\x9a\x2c\xa7\xbc\x03\x69\x67\xc8\xe9\x06\xc6\x98\x96\xdd\x76\xc7\x55\xb1\x63\x15\xdf\xa6\x55\xfc\x0b\x35\x32\x8a\x3e\x5b\x39\x74\xf0\x79\x1d\xd6\x0c\x7b\xe0\x11\xe0\xea\xac\xe8\xe3\xb7\x07\xe2\x52\x34\x9e\xc3\x26\xa8\xe0\xb4\x68\xb3\xcd\x53\xde\xcc\x45\x9a\xd5\xe9\xbe\x89\x7d\x75\xba\xef\x8e\x7d\x34\x49\xe9\xbe\xc6\x3e\x85\xbd\x2b\xd2\x90\x47\xd0\x75\x0d\x80\x5d\xaa\x4a\xe1\x1c\xf2\x4c\x5c\x7d\x25\xd5\x60\xd9\xd8\xe2\xfa\xa0\x21\x9d\xa7\x4b\x61\x9e\xf2\xb4\x4f\xf7\xab\x9c\xc0\x3e\xc4\x15\xe2\x0b\xe3\x58\x22\x8a\x8c\xb6\x48\x81\xe5\xd2\xca\xca\xd3\x43\x9d\x27\x49\x94\x35\x42\x91\xb7\x20\x32\xbc\x9d\x77\xa2\xc9\xb4\xd5\xbc\xf2\xea\x2b\xaf\xbd\x3c\x43\x27\xcd\xc4\x95\xef\xf4\x1e\x4d\x1e\xe5\x8f\xca\xb9\x5d\x8d\x9d\x4f\xbe\xe9\x91\x3d\x61\xa7\xd3\x03\xc2\x52\xff\x01\x8f\xfd\xae\x37\xab\x7f\x02\xf9\x93\xca\x02\xc9\xc1\x0b\x07\xe9\x12\x1a\x81\x0d\xda\x29\xf5\x07\x07\x52\x3e\x15\x76\x56\xa6\x26\xb0\xd8\x8f\x69\x90\x6e\x34\x5f\x99\x4a\x6e\x5c\xf1\x5b\xe6\x6e\xa5\x26\x3f\x77\x80\x7a\xce\xca\x2d\x32\x41\xd9\xb8\x48\xe3\x30\x72\x83\x9e\x77\x7c\x1e\x3b\xc1\x12\x8e\xe6\xf4\xd9\x1a\x21\xd8\x3f\x29\x45\xfc\x5f\xab\xb1\x37\xd4\x8a\xdf\x3a\x56\x1d\x83\xfe\xfb\x10\x6a\x9e\xc4\x13\x4b\x61\x2c\xd7\xad\xb3\xf4\x63\x3d\x87\xa4\x70\xd0\x09\xcd\xdd\x34\x59\x09\x03\x85\x6c\x4f\x57\xd3\x30\x17\x13\x8a\xe9\xb0\xdb\xcb\x0d\x56\x6c\x8a\x12\x3a\x0e\x11\x92\x6f\x2a\x8a\xa4\xbc\xc2\x88\x51\x8b\x6d\x13\xe2\x31\xe5\x22\x53\x42\x3b\x28\xb6\xb5\x24\xc6\xd2\xeb\xe0\x43\x96\x7f\x82\xbb\x38\xee\x53\x71\x0e\x53\x1b\x8c\xbc\x5e\x30\x80\xfe\xd6\xb6\x08\xc9\xd7\x2b\x22\xa5\x49\x2f\x97\x2d\x03\xc5\x3b\x15\x1c\xdc\x0a\x66\x06\x4f\xcd\xcd\xae\xcd\xc0
\x71\x72\x7d\xef\xc2\x13\xfc\xf1\x2a\xdf\x02\x8a\x8c\xb2\x53\xea\x5d\x63\xa4\x2a\x08\x6d\x3a\x8c\x03\x91\xce\x29\x46\xda\x38\x77\x90\xf4\xff\xe1\x12\xf6\x6d\xcf\x86\xd2\xa7\x4d\x78\x5c\x23\x8b\x9a\x75\x1c\xc6\xf4\x54\x57\xc4\x59\xce\x9b\x67\x1b\x74\x8a\x4e\x3b\x8f\xc1\x66\x01\x83\xa7\x9d\x37\x00\xdd\x0e\x22\x21\xa1\xbc\x08\x7f\xc7\x11\xb5\xdf\x02\x5c\xf6\x25\x61\x68\xee\x38\xd4\xba\x0c\x78\x6e\xb8\x3f\xc9\x21\x8a\x44\xde\x70\xeb\xdc\x1a\x80\xff\x79\x6f\x0c\x5f\x9f\x9d\x71\xd4\x96\xbf\xde\x45\xee\xad\x91\xdd\x2d\x4c\x72\xff\x9e\xc7\xfe\xca\x3b\x66\xc2\xf4\x31\xf2\x25\x4f\xb0\x53\x36\x9c\x6d\xab\xf4\x8c\xc3\x1d\x9c\x03\x03\x95\xdc\x3d\x31\x8e\x39\x5e\xa6\x3a\xec\x5f\x25\x0f\x66\x87\x29\x13\xe7\xf2\x2b\x59\x9d\xb2\x73\xad\x4c\xfe\x13\xe7\xad\x8c\x35\xe8\x6c\xc7\x84\xe1\x83\x74\x4c\x0d\x79\x34\xbe\x20\x77\x99\x5e\x6c\xa5\xd7\x54\x88\x00\x15\x3d\xa9\x8d\x7c\x93\x9d\x7e\x76\x57\x34\x81\xdf\x77\xa2\x1b\x4c\x9e\x3e\x3a\x35\x73\xe2\x68\xa3\x13\xd8\x72\xeb\x53\x1e\x19\x93\x53\xf9\x54\x1c\xf5\xfd\x0f\x7a\xec\x97\x4c\x06\xd4\xe1\xc1\x1c\x19\xba\x5f\x3e\x3a\x09\x8b\x69\xbc\x01\xdc\x89\xf2\x3d\x5a\xe0\x0f\x22\x6f\x09\x6c\x0b\xfa\xa6\x26\x23\x0f\x63\x8a\x53\xf1\x84\x1c\xd5\x6a\xdd\x68\x13\x5d\xb0\xf6\xad\xd7\x7a\x64\x0f\x8a\x9b\xd3\xa2\xe5\xdf\xef\x6d\x20\x52\x73\x5e\x3f\xae\x22\x35\x6f\x2f\x7a\x0d\xdb\x10\xc6\x14\x69\x21\xa6\xf6\x33\x35\xab\x65\x67\x0c\xf7\x86\x85\x70\x9a\xc4\xb1\x0a\x6a\x95\x2b\x67\x1e\x56\x0e\x79\x81\x47\xcc\x0c\xf4\xef\x66\x1d\xb5\x14\xc2\xc0\xbc\xa7\x10\x01\x70\xfe\xe8\xdb\xb1\x5a\x97\x17\xee\x43\xff\xd2\x08\xb9\x71\xcb\x20\xa0\x0a\x93\xf1\xcf\x6a\xec\xb6\xf5\x1e\xda\x00\x25\x2b\x53\x1b\xfa\x84\x86\x13\x65\xee\x99\xe2\x41\x8f\xfc\x83\x0b\xd1\xf8\x2d\x8f\x5d\xbb\x58\xfc\x5e\x2c\xa3\x16\x9a\xc2\x2b\x8b\x2e\x02\x40\x16\xc8\xe9\x0b\x06\x8c\xea\x60\x35\xae\x8b\x3f\xa4\xb0\x1a\xd7\xc7\x1f\x7a\xa5\x67\xc3\x25\xac\xa1\x2d\xcc\xf1\xbc\xd9\xf6\x05\xfb\xa9\x39\xe4\xa4\xb6\x36\xd4\x3c\x01\x14\x5d\x14\xc4\x72\x06\x5b\x4c\x3a\x5a\xb6\xc9\xe1\xb3\x70\xd7\xe7\xa6\x16\xa6\x6f\x36\x58\xa5\x4b\x49\xe0\xb0\x2c\x91\xb7\xed\x26\x4f\xa8\xb6\x34\xa0\x9c\x07\xa2\xc3\x05\x91\x02\x22\x88\x08\xfc\xbf\xdf\xc5\x9e\x34\xec\xa6\x82\xa1\x2f\x7e\x5b\x53\xa3\xd8\x38\xce\x7b\x63\xe2\x5c\x98\x4f\x27\x81\x6b\xcc\x7a\xfd\x2e\x72\x9a\xec\x35\xcf\xcd\xce\xf8\xd3\xec\xea\x69\x2b\xdd\x6a\x76\x46\xef\x2d\x0a\xb3\x63\x5f\x90\x34\xcf\x8a\xf4\xf0\xe4\xe4\x13\xcd\x6b\x77\x84\xc1\x93\xf6\xd9\xcb\xe3\x76\x62\xaa\xf3\x9f\xa2\x33\x28\xaf\xb6\x53\x9f\xcc\x46\x8f\x31\x7c\xaa\xf9\x96\x65\xc8\x14\xee\x24\x3b\xe6\x84\xe8\x04\xbb\xa9\xdc\x6f\x6d\x3d\x34\xf2\x09\x80\x8a\xc3\x35\xe5\xbd\x53\xa3\xdb\x26\x11\x90\x53\x45\x14\xe4\x0c\xbb\xe6\x84\x0a\x7b\x4c\xc5\x32\x4f\x03\xad\xb8\x6f\xa0\x1f\xc5\xf8\xcc\x99\xe0\xc7\x63\xec\xba\xfd\x4b\x69\x28\x5a\xe3\x26\xd0\x71\x13\x23\xe3\x94\x98\x85\xcb\x31\x8f\xfc\x63\x7a\xbc\x0f\xce\xc3\x85\xad\x0e\xf5\x73\xe5\x3e\x80\xc9\x9e\x53\xb9\x9f\x6f\x7d\xa8\xaf\x73\x87\xda\x24\x4d\x16\x99\xe4\xe5\x76\xe8\x24\x53\xf2\xc5\x5a\x25\x8e\xe9\x7c\x33\xe9\x1a\x78\x75\xff\xdd\x35\x16\x4f\x01\x0b\x93\xb0\x79\xf5\x1c\xb9\x39\x75\x72\xa6\x38\x45\xe3\x23\x96\x6c\x2d\x34\x0e\x28\x25\x98\x30\x8a\x5b\x35\x4f\x5f\x45\x10\xe6\x20\x15\xe0\xab\x3c\xf6\xe4\x29\x13\x3d\x37\xd0\x3c\x8b\x73\x0e\x40\x03\x92\xae\x28\x83\x45\xb9\x89\x5b\xc7\xc8\x0c\x39\xb2\xfe\x56\x0c\x1d\xd0\x18\x82\x55\x5c\x7f\xaf\xa8\x91\xfd\x95\x7b\x38\xb4\x5b\x63\xe9\xa3\x70\xf6\xbf\xe1\xb1\x9b\x2b\xef\x0c\x6c\x4c\xb6\x30\x47\x7e\x36\x7c\x8d\x72\xf5\x9e\x3b\x6a\xf7\x7b\xe4\x45\x1e\x19\xc3\xb3\xe9\xec\x9c\xff\xd3\x6b\x70\xa4\x19\xf9\xa8\x1e\xc6\x16\xb0\x59\xfd\xb2\x6b\x9
0\x19\x6c\x09\xbe\x47\x67\xe7\xc0\x60\x12\x0c\xb6\x8c\x7c\xdb\x55\xfe\x0b\xb7\xb2\xde\xb8\x34\xf1\xc7\x74\xb1\x8b\x7e\x64\x8c\x4d\x0f\x5e\x1e\xb6\x61\x3b\x2c\x3d\x3c\xb6\x59\x16\xec\xf0\xf1\x4b\xc8\x1f\xd6\xc8\x2e\xc8\xb4\xf6\xff\x73\x8d\x7d\xbc\x86\x49\xd7\x36\x09\xb6\x46\xc9\x32\x67\x7c\xd8\xab\x44\x97\x2b\x2d\x37\x15\x4d\x21\xb7\xab\x3c\x95\xfd\x6b\xd6\x29\x6f\x36\x13\x25\xa7\x12\xe0\x40\x80\xd3\x97\x52\xbc\xc3\xcc\x41\x4a\x2a\xda\x45\xa7\x80\x1d\x09\xcd\x14\x45\xfd\x3c\xa6\x3d\x8c\x88\x56\xf8\xcc\x74\x36\x46\x34\x95\x26\xcf\x14\xf5\x42\xaf\x63\x19\x04\xc1\xce\xd7\x95\x7a\x1f\xb4\xda\x79\x59\x9e\x51\xa0\x83\xc8\xff\x0f\x04\x1f\x79\xb8\x14\x46\x61\xae\xb3\xfc\xb3\x3a\x55\x79\xe7\x58\x1c\x1e\x1d\xa5\x26\x9f\xa7\x3d\xc1\x30\x2c\x5c\x8b\xb4\x78\xd9\xb4\x3f\x73\x8d\x31\x5f\xa9\x91\x4b\x80\x8a\x24\x5e\xf6\x3f\x5f\x63\x9f\xa8\xa9\x1f\x00\x82\x1d\x60\xc8\x4c\x84\xa3\x27\x2b\x13\xe7\x20\x3d\x15\xf1\xe6\x73\x0d\x4d\x82\x02\x1f\xcc\xb9\xda\x36\x6f\xd5\x6c\xf6\xdd\xa2\x09\x0a\x1b\xc5\x7c\x7a\x4d\x62\x26\x6c\x73\x93\x62\x5b\x53\x15\x17\x86\x1b\xc4\x39\xb3\x6a\x40\x5b\x55\x18\xd5\x07\x07\x39\x10\xad\x82\xb8\x0e\x4b\xb2\xc2\xda\x2d\xb2\x18\x65\xcf\xd2\xd4\x26\x06\xe9\x5f\x4f\xe4\xf9\x28\x6c\x1a\xe5\x22\x8c\x97\x8b\x84\x09\x9b\x1e\xc5\x1d\xdb\xf7\xd6\xc8\x5e\xab\x99\xfe\x1b\x6b\xec\x65\x35\x7b\x64\xd6\x9b\xbd\x4e\x1f\x37\x3a\xed\xa6\x37\x35\xd1\xe4\xf1\x53\xf0\xb8\x00\x5d\xb1\xeb\x8f\x93\xdc\x6d\xc3\x45\x1e\xb0\x6f\x8d\x90\x1b\x36\xb4\x7d\x02\xe3\xa7\x32\xd5\x1f\x4b\xd2\x19\x2d\x96\xfc\xb7\x8d\xb0\x55\xfb\xa6\x2b\x01\xcb\xd8\x78\x52\x51\x65\xe6\x97\xb2\x1d\x81\x82\x68\x60\xe4\xd0\x86\xd4\xe1\x98\x21\x0f\xc9\x0a\x72\xcc\xce\x0a\xd1\xd5\xf1\x2a\xb8\x2c\x1b\xe7\xbd\x4b\x97\xad\x9a\xcf\x7b\x97\xa8\x42\x1c\x41\x76\x5f\x8d\x84\xc4\x79\xd0\x7f\x2a\x3b\x6e\xff\x76\xd8\x35\xc0\xde\xa3\x90\x13\x2d\x78\x3b\x5b\xf5\x2c\xf7\xca\x56\x7e\x5e\xe3\x11\xdd\x0a\xff\x7e\x8f\xfd\xfb\x95\xca\x2a\xaa\x0a\x95\x87\x1c\x5d\x62\x81\xd7\x91\x27\x34\xe3\x0a\x6a\x0c\x37\x19\x95\xc3\x93\x26\x3d\xc0\xaf\x6f\xd1\xac\x1b\x85\xb9\xb1\xda\xda\xdf\xc2\x71\xbf\x7c\xf7\x52\xfb\x53\x37\x9c\x70\x2d\xb9\xa3\x15\xbf\xa5\xb2\x16\xc6\xcb\xa7\xf1\xcc\x50\x24\x30\x7d\xe0\x52\xf6\xac\xf5\x1f\x73\x20\x1e\xdc\xbd\x86\x0e\x7d\x5b\x9d\xc4\xd7\x4c\x68\xfa\x95\xbd\xe4\xb8\x51\x56\x8f\xb0\xab\x94\x92\x6a\xad\x4c\x0a\xea\x6b\x39\x4d\x47\x9f\x7d\x60\xf9\xd9\x23\xf2\xea\x02\x03\x5a\x7e\xaa\x21\xb9\x38\x75\x4c\x6c\x6d\x0d\x61\x9a\x9b\xea\xca\xd3\x99\x08\xea\x74\x46\xc4\xc0\xf8\x21\x67\xcd\x31\x04\x79\x29\x4e\xb6\x60\x1f\x57\xf6\x6d\x06\x65\x30\x9a\xa4\x94\xa9\x72\x98\xf3\xa9\x7e\x7e\x97\xf2\xbc\xbd\x6c\x17\x7b\xee\xae\x21\x64\x72\x4f\x01\x99\x62\xd5\xc0\x53\x41\x99\x6e\x0e\xab\x53\x86\x0d\x52\x34\xc4\x0c\x9b\xc4\x1a\x84\x4c\xc5\xd6\x73\xd6\x27\x92\xc7\x37\x08\x0e\x5f\x09\x39\xd4\x36\xc9\xe1\x29\x1e\xd9\x39\xb6\x75\x3b\xc9\xca\x1e\x5f\xe0\x7c\x57\xc5\x42\x95\xc5\x1e\x13\x66\x59\xcf\xd2\x6c\xc3\x65\x79\x10\x24\x64\xca\xb4\xf1\xc2\xb6\x22\x80\x42\xed\x36\xa8\x91\x1f\xde\x0e\x35\x3a\x6b\xb6\x43\x4d\x90\xb5\x5a\x81\x45\x6a\x8c\x9f\x3c\xc1\x1a\xf1\xd3\x15\x73\x5f\x56\x69\x0f\x14\x0e\x42\xf9\x5b\x76\x7a\x0a\xb4\x4a\x9c\x6b\x46\xbd\x2c\x5c\x11\x1b\x9f\x6d\x36\xc9\x4a\x07\xea\x49\x80\xb9\x45\x76\xa8\x41\xc8\x29\x8d\xcb\x59\x5a\x9f\x48\xd1\x93\xab\xf8\x7c\x45\x74\xec\x4c\xcd\x0f\xd7\x2a\x33\x03\x7f\xa1\xb6\xf5\x43\xd9\xd7\xbd\xc1\x02\x4d\xaa\xe2\x60\xc2\x60\x29\xd3\x6e\xed\xac\x41\xa9\xa7\x28\x1a\x5c\x30\xee\x23\x88\x8d\x95\x7d\xa8\xbb\xaa\x93\x22\xa4\x3e\xac\x7d\x07\x76\x7e\x5f\x66\x72\x14\x00\x0b\x44\x93\xd6\x28\x46\xde\xc0\x18\x54\xdb\xa1\xf1\x2e\x6b\x7d\x1b\x3c\x74\x
e4\x25\x1e\xb9\x4c\x36\x1d\x71\x31\x61\xd0\xee\xde\xfa\x98\xdd\xe8\x16\xe5\x0c\x97\x12\x15\x16\x87\xbe\x81\x07\x35\x3d\x22\xad\xc2\x8e\xf0\x74\x76\x52\xa7\x4f\x9a\xbd\x9b\xd3\x76\xaf\xc3\xd1\x19\xe2\x24\x58\x82\xd6\xe1\xa6\x56\xae\x29\x63\xef\xdb\x45\xc6\x87\x73\x4c\x4f\xe1\x0c\xbb\x39\xc9\xf2\x39\x9e\xb7\xfd\x07\x46\xd9\xcb\xbc\xd2\x45\x27\x30\x00\xcc\xf0\x9a\x2c\xad\x98\xee\x0e\x24\x99\x45\x96\xc7\x55\x98\x20\xec\x05\x08\x70\x08\x90\x39\xa0\x65\xa8\x83\x2f\x16\xdc\x95\x35\x21\xdf\x9f\xb2\xcf\x3b\xcc\x75\x7a\x0f\xfa\xf0\x08\x79\x4f\x8d\x10\xf9\xf4\x1c\x3c\x2c\x55\xcc\x97\xd7\x8a\xdf\xfa\x4b\x38\xe5\x69\x4d\xcf\x6e\x7d\x41\xc4\x07\x8d\x31\x28\x57\xda\x19\xb2\xf8\xf8\xc5\x06\x5d\x48\x79\x08\x61\x15\x59\xc4\xb3\xb6\xe6\x20\x4f\x21\xc7\x06\x27\xf4\x8a\xc1\x74\x18\xa8\x56\xa1\x36\x41\xa5\xf2\x7a\x83\x90\xc2\x5d\xb1\x38\xd9\x4a\x92\x45\xc5\x7e\x0b\x0b\x5d\x5d\xaa\xe3\xbf\x93\x8b\x20\x59\xf0\xef\x25\x9e\x2e\xba\x6f\x00\x1e\x57\xf1\x56\xb0\x28\xd7\xce\xe2\xa4\xc8\x9b\xf0\x94\x43\x68\x69\x3b\x22\x56\x59\xb8\xaa\x58\xb0\x0b\x1f\xb7\xa2\x29\x93\xa5\x59\x23\x54\x22\x39\xb2\x86\xd8\x30\x2d\x45\x91\x19\x4d\x70\x3d\x18\xc8\x44\x59\x97\xab\xe8\xbe\xfb\x87\x6c\x3b\xe7\x1a\xcb\x4b\x59\xc4\x7f\x70\x99\x9d\xaa\x2b\xcf\x44\x79\x2f\x8d\xd5\x61\x00\x7c\x97\x1c\x01\xcd\xb8\xfc\x76\xf1\xbe\x5c\x3f\x81\x59\x4b\x05\x1c\xbe\x9d\xe1\xf5\x08\xf2\xfb\xb6\x37\xf9\x37\xb7\x09\xa4\x6e\x41\x05\x39\x48\xea\x0f\xa5\xab\x9c\x7c\xc3\x33\x3a\xd9\x57\x3c\xf6\x39\x6f\x8a\xaa\xa6\x17\xc9\xd9\xa8\x10\x76\xf5\x36\xb3\xda\xee\xab\x68\x02\x1b\xe2\xd8\x60\x2e\xf3\x30\xea\xa5\x82\x19\x5e\xaf\x59\x15\xfa\xa3\xce\x61\x99\xc2\xff\xce\x35\xda\x74\x9c\xb8\xdc\x51\x1a\xad\x52\x9e\xdf\x4e\xa3\x26\xd8\x8c\x78\x8a\xca\x37\x8f\xe9\xcd\x0b\x0b\x73\x5a\x9c\x37\x01\x7a\xa4\x67\x2d\x3f\x79\xa8\x49\xc3\x40\x18\xa4\x27\x35\x8b\x7f\xa3\xd0\x15\x3f\xe4\xb1\xb7\x6b\xe6\x0f\x9d\xfb\xa6\x7b\xd2\xa0\xa7\x40\x59\x3c\x4c\xd9\x3c\xa2\xf2\xa1\x9a\xa7\x7b\xf5\x50\x47\x12\xee\xc4\x5f\x5d\xc0\xf8\xab\xdb\xc8\xa8\x9c\x30\xfe\x49\x6d\xd6\xbe\x6e\xbe\xb7\xbc\x2c\xb2\x5c\x04\x38\xab\x94\x08\x80\x59\x65\x10\xb5\xf1\x43\xd4\x11\x8c\x56\xc5\xf7\xb8\xb0\x89\xef\xae\x91\x4b\xd4\x96\xea\xbf\xa9\x46\x9e\xbc\x05\x99\x80\xd3\x71\x06\x0b\x61\x7f\xe4\x41\x26\xa8\x54\x69\x30\xc4\xb8\x94\x58\xaf\x4c\x33\x88\x5c\x09\x81\x62\xea\xc8\x24\x47\x1d\xe7\x01\xb0\xd5\xc8\x03\x86\x30\x25\x61\xe1\x8e\x49\x02\x16\x31\xfa\x57\x0d\x06\x18\xd4\x88\x23\x81\x2e\x21\xd9\xe7\xe5\x1e\x4f\x79\x9c\x0b\xe3\x51\x85\x23\x2f\x28\x6a\x7d\x3d\xc7\x6c\x4b\x97\xda\x7c\xb5\xa6\xae\x9a\x87\x28\xec\x3b\xd1\x4c\x5b\x8f\x66\x3a\x53\xa8\x7d\xb7\xb0\x1b\xa6\x50\xcb\x1b\x2a\xa9\xcb\x08\x15\xb6\xcc\x6e\x38\x19\xb6\xdb\x08\x81\xc1\x88\xe7\x52\x08\xcc\x1b\x47\x2a\xb1\xe5\x67\xb8\xe8\x24\xf1\xbc\x50\xaa\xef\xbc\x86\x6d\xfb\xeb\x1a\xfb\xa9\x21\xf7\xcc\xdc\xe8\x49\x51\x52\xb8\xf4\x0d\xfe\xbc\xc1\x60\x56\xcf\xa3\x0d\xd4\x94\xe6\x6e\xe1\x2f\xa8\x91\x9f\xf5\xc8\x23\x52\x1b\xe3\xde\x7f\xce\x1a\x94\x18\x43\x60\xf1\x75\xe9\xec\xe8\x69\x17\x0a\x1a\x5d\x06\x18\x86\x90\x35\xe8\x9c\x0a\xa6\xd6\x2a\x0f\x9c\x57\x6e\xa0\xcc\x29\x8d\x35\x48\xa2\x0c\x06\xcb\xec\x69\x0b\xca\x5c\x10\x40\x1d\xa0\x69\x69\xc8\x6e\x05\x2a\x52\x7a\x1b\x36\xa8\x53\x31\x66\x76\x33\x87\x16\xd9\x79\xd0\xd9\x0f\xbf\x3c\x4a\xfe\x5d\x55\x2f\x0d\xbc\x74\x61\x36\x7a\xd7\x28\xd0\x30\x96\xaf\x0f\xc5\xbd\x01\xe8\xd1\xb0\xc9\x91\x76\x23\xb7\x82\x9f\xd1\x1d\xb0\xa6\x91\xe8\x13\x23\xe4\xa9\xc5\x1c\x3f\xc9\xa6\xa6\x86\x9d\x64\x86\x42\xc5\xa0\x79\xad\x12\xc9\xe5\x98\xd1\x75\x9e\xc8\x26\x17\xda\x5b\x46\x84\x39\x6e\x94\x89\x23\xec\xaa\xf9\xad\xd8\x9d\xec\xd2\x0e\xa9\x8f\xff\x04\xf6\
x93\xfa\xe3\xdb\x43\x58\x0d\xf1\xf3\xaa\x6a\x88\x9f\xff\xb0\xf5\x33\x29\x40\x3b\x47\xdb\x85\xf9\xf9\xc4\x25\x8e\xff\xcb\xca\x86\x6d\xa8\x84\xda\xc6\x5c\x12\x64\x27\x44\x9e\x86\x4d\x15\xfc\xf6\xe2\x4b\xd8\x37\xbd\xf2\x55\xcb\x30\xa8\xf0\x7c\x01\x2f\x9d\x4a\x1d\x51\xaa\xca\x69\x68\xd2\x71\xc0\x5b\x23\xb7\x41\x00\xa7\x8b\xdd\x08\x14\x78\x49\x65\x3e\xec\x77\xc0\xec\xa0\x4f\x88\x58\x9b\x4d\x74\xd3\x04\x49\xbb\x27\xba\x22\x9d\x40\x60\xe9\x71\x15\x25\x87\xc1\x8c\xfa\x7c\xca\x57\x44\xca\x97\x41\x10\x2d\x63\x3a\x83\x0a\xb5\x53\x4c\x1d\x49\xc7\x38\xd0\xac\xa4\x0b\x28\xa4\x71\xde\x23\xd8\xf4\x93\xbc\x23\xce\x7b\x3e\xde\x9c\xc2\x12\x01\x91\xdf\x59\x0d\xff\x6b\x94\x3c\x85\x58\x6f\xf8\x37\xb0\x03\xc5\x2f\x7d\x56\x8d\xad\xbc\x08\x35\x30\x61\x4c\x61\x33\xd3\xc6\x74\x35\x69\xbe\x5a\x23\x63\xda\x71\xec\xff\xfe\xd6\x54\x95\xe3\x7c\x49\x44\x86\x90\xfc\xc5\x35\xe3\x88\x36\x90\x48\xb2\xaa\x09\x1d\x49\xae\x6d\xe3\xbc\x00\xd1\x2b\xf6\x19\x0a\xf1\x80\x85\x2f\x5b\xaf\x43\xb4\x63\xa9\xbe\xdc\xae\x0e\x9c\x1a\x86\xb7\xcb\x01\xaa\x9b\xc3\x69\x80\x07\x06\x77\xd7\x84\x7e\xe9\x91\xc7\xf7\x33\x6d\xe4\x91\x85\x77\xe4\x77\xd2\x24\xe5\xc5\x03\xcd\x04\x00\x99\xa1\x26\x65\x6e\x02\x16\xa0\xa2\x14\x18\x6f\x3d\x03\xf4\x36\xb4\xcc\xe1\xf3\xab\x62\x1a\xe4\x37\x3d\x52\xf1\x45\xfd\x77\x6d\xe6\x9c\x38\x08\x7b\xc4\xa2\xc1\x32\x8d\xc1\xc8\x9a\x5c\x7a\x0a\xa8\xf9\x59\x9a\x11\xbc\x99\x26\x19\xd8\x04\x69\x2a\x22\xb1\xc2\xe3\x5c\xf1\x37\x40\x94\xef\x5d\xaa\xb2\x71\xf2\xa5\x3d\xe4\xdf\x56\x78\xcc\x01\xc4\xfc\x34\x8f\x97\xc5\x6c\x2e\x3a\xfe\xfb\xf6\xb0\x79\xf7\x92\x39\x80\x70\xda\x09\xe3\xc9\x0e\x3f\x87\x2c\x89\x8a\x31\xbf\x40\x5c\xb7\x49\x9a\x34\x9f\x72\x12\x83\x3a\xa1\xf7\x06\x67\x11\xfc\xd6\x18\xf9\xff\xd8\x7b\x13\x38\x4b\xae\xf2\x3e\xf4\x57\xb7\x67\x3d\x92\x40\x2a\x01\xc6\x40\xe0\xa4\x24\x7b\xba\xe5\xbe\xb7\xd7\x19\x69\x1a\xc1\xb8\xd5\x33\x23\xb5\xa5\x99\x69\xa6\x67\xc4\x22\x09\xba\xfa\xd6\xb9\xb7\x8b\xae\x5b\x75\xa9\xaa\xdb\xdd\x17\xa4\x04\x43\x30\x5e\xb0\x1d\xc0\x0b\x9a\x80\xc1\xcf\x04\x9e\x05\x36\x0e\xd8\x60\x1c\x9b\xc5\x0e\xbf\xf8\xd9\x71\x9c\x18\xe3\xdd\x0e\x26\x24\x78\x8b\x1d\xef\x8e\xde\x73\xf2\x7e\xe7\xfb\xbe\xb3\xd5\xbd\xbd\xcc\x22\xe1\x97\x37\xfa\xfd\x60\xfa\x56\x9d\x3a\xfb\xf9\xce\xb7\xfe\xbf\x79\xa2\x90\xc7\x83\x71\x43\x21\x07\x92\x3d\x61\x43\xd5\x24\x58\x26\x45\xcf\x41\x52\xf3\xf9\x1f\xf0\x82\x97\xab\x5b\x52\xd7\x63\xf9\x51\x50\x4d\x38\xab\xab\x56\x9f\xe1\xa4\xc5\x56\xe3\x58\x50\xf2\x55\x9d\xb8\x94\x5c\xe2\xb5\x45\xb9\xfa\xbc\xc7\x9e\x41\x9d\x26\x43\x8b\xff\x19\x2f\x78\xdd\x49\xe7\x91\x4e\xfd\xb4\xd3\x88\x94\x82\x6f\x0f\x63\xca\x4d\xb5\x4f\xcd\xa8\xbe\xc3\x63\x23\x9d\x70\xcb\x7f\xb3\x17\xcc\x9e\xd1\xdb\xa4\x99\xa5\x92\x87\x03\x1f\xcf\x8c\x12\x79\xc8\x3d\x31\xd0\xd9\x6b\xdc\x9b\x5f\xaf\xb1\x67\x75\xc2\x2d\xdc\xcc\x38\xf4\xf3\x92\x43\xf6\x3f\x55\x0b\xfe\xd4\x3b\x33\xe4\x0d\x44\x96\x98\x0c\xf1\x8a\x04\x5b\x90\x68\xa0\x1c\x84\x58\x50\x93\x66\x1c\xf1\xcd\xe5\x76\x41\xd8\x6a\x79\x61\x64\xe5\x9a\x41\xe5\xde\x04\x8d\x08\x16\x89\x62\x74\x18\x84\xb1\xeb\xe5\x30\xe1\x5f\xf2\x1a\x7b\x5d\x0f\xfd\x0c\x50\xae\xe9\x75\x24\x63\xaf\x24\xec\x17\x57\xc4\x7d\xdc\x21\xf2\x4c\xae\xf6\xf2\xa2\xd4\x84\xd6\xed\xf7\x53\xb1\xce\x71\x8a\xeb\x1c\xa7\x5f\xe5\x75\xfe\x2f\x23\xec\xc8\x0e\xe9\x2b\x2f\x64\xeb\x22\x55\x87\xec\x93\x23\xc1\xdd\xf6\x03\xa3\x58\xb7\xfc\x8b\xf1\x52\x53\xf8\x8e\x4d\x48\x11\xc1\x4b\xf9\x55\xe3\x92\x77\x28\xec\x45\x10\x18\xe1\x90\xb2\xdf\xaf\xb1\x77\x7a\x4c\xbf\xf3\xbf\xc5\x0b\xfa\xf3\xf4\x43\x1d\xe3\x38\x25\x81\x5d\x95\xd2\x1e\x22\xb2\x6a\x79\xb3\x07\x76\xd7\x20\x24\x16\x54\xd0\x70\x3b\x29\x22\x40\xfb\x42\xd5\x01\xbd\x95\xf7\x2e\x37\x98
\x08\x36\x75\xfc\x79\x8f\xdd\x22\xb6\xba\x31\x39\x0c\x53\xfe\x8d\x8f\xea\xfc\x1b\xef\xf6\x4e\x55\xdf\x6a\xb2\x63\xc1\xb4\x83\x36\x1b\xd2\x89\xee\xad\xcb\x6b\x14\x59\x00\xc1\x06\xaa\xeb\xfa\x32\x0b\x06\xda\x0c\x86\xd7\xe5\x68\x63\xde\xc5\xd8\xcb\xcd\x3a\x1b\x84\x8c\xba\x19\xb9\x0d\xff\x69\x81\x15\x6d\x4c\x35\x16\x7a\x45\x99\x75\x94\xcf\xdb\x42\x96\xf4\x3a\xe9\x49\xbd\xfb\xfc\x9f\x3e\x1c\xdc\xbf\x73\x11\xcb\x51\x00\x20\x07\x7b\x1d\x94\x2e\x88\x07\x81\xc4\x01\xdd\x3c\x06\xbd\x86\xbc\xed\x52\xe0\x03\x49\x20\x3a\xf4\xda\x22\x4b\x97\xc2\x72\xcd\xd9\x34\xff\xf6\x10\x40\x33\xaa\x77\xfe\x7b\xbc\xe0\xbb\x3d\xf5\x8b\xc4\xe3\x58\x72\xb4\x98\x18\x0f\x4c\x08\xa3\x71\x43\x34\xc8\x82\x90\xe7\x68\xba\x86\x89\x1c\x23\xd7\xc9\xb8\xe0\x42\x4e\x34\x26\x0b\x68\xcb\xbd\x5d\x22\x0f\xdd\x84\xf1\x59\xf7\x6b\xc6\xbb\x79\x16\xf5\x28\x7c\xc0\x68\xd0\xc9\x22\x25\x87\xe8\xec\xa5\x7b\x28\x62\x77\x2e\xa8\xa7\xc4\xa5\x0e\x98\xa3\xe0\x85\x91\xba\x06\xea\xf8\xce\x1a\x3b\xa4\x3c\xc5\xfd\x37\xd6\x94\xc2\xee\x8f\x3c\xf5\x50\x65\xed\xc3\x45\x47\xde\xc3\x98\x92\x93\xb0\x8c\x37\x04\x8f\x3b\xdd\x2c\x2f\x43\x7d\x86\x74\x7f\x1d\x26\x1d\x04\x97\xa2\xc1\x1f\xc8\x36\x45\x4e\x69\x59\xd0\x14\x01\x51\x97\x91\x90\xe5\xd6\xe2\xb6\x64\xf5\x54\xf3\x0d\x8e\x0b\x5f\x28\x36\xa6\x2f\xf9\x42\xba\x2e\xe5\x26\x05\x32\x2e\x22\x0e\x49\xed\x79\xd1\x14\x69\x98\xc7\x59\x61\xd9\xd4\x91\xc7\x0d\x75\x95\xbc\x0d\x21\xf9\x39\x12\xf8\x49\x57\xc5\xf8\x11\x8f\x38\xa0\x0f\x78\xc1\xbb\x3c\x6d\xf5\xc4\x58\xa0\xf9\xa5\x45\x54\x2c\x18\x3a\x39\xb0\x40\x7c\xd9\x4a\xcd\xd3\x8e\xcb\xb5\xde\x6a\xa3\x99\x75\x26\xce\xcd\x2f\x4e\x50\x1d\x75\x27\xac\x7e\x62\x35\xc9\x56\x27\x3a\x61\x51\x8a\x5c\x79\xca\x14\x13\xd3\x8d\xc9\x46\x27\xba\x2d\x0a\xcb\xb0\x8e\xc8\x6e\xb2\x21\xa5\x5a\x74\xfd\xb6\x6f\xb0\xb4\x51\xfe\x7d\xc1\x8b\x6d\xe5\xd4\xb0\x3d\x31\xa0\xbc\x1a\xbe\xbd\x7e\xb2\xc6\x0e\xa0\xb9\xc0\xff\x91\x5a\xf0\xde\x9a\x49\xe4\x29\xef\x45\xa5\xcd\xdc\xf3\xbc\x48\x01\xef\x88\xdc\x8e\x47\xac\x94\xa0\xc8\x43\x6a\x11\xae\x9b\xc7\x9d\x30\xef\xdb\x39\x22\x69\x1f\x49\xc9\xb7\x80\xc8\xdc\x38\xd5\xfe\x3d\x2a\x70\x04\xa5\x41\x28\x17\x57\x20\x5a\xe1\x7e\x7b\xda\xd7\xe4\x83\x87\x1d\xad\x9c\x85\x50\xb3\x31\xd5\x38\x8b\xbf\x30\xa3\x0e\x60\xa8\xfe\xdd\xa1\xe0\xe8\xc0\x53\x17\xc9\xd3\x79\xbd\x73\x6e\xef\x4f\x1f\xbc\x6e\xc1\xb8\x5a\x0b\x46\xae\x70\x55\xe3\x20\x18\x82\xab\xaa\xa6\x6a\x48\x2e\xef\xbb\xd8\xb1\x1d\xb2\xe4\xef\xb0\x13\xae\xab\xeb\xaf\x42\x5d\xff\x8b\xb6\xb9\xf8\x53\x57\x69\x2e\xfe\xa7\x5f\x25\x6b\xb1\x81\x68\x5f\xdb\xdd\x4a\x70\xca\x5f\xd8\x03\x02\xd6\x00\x55\xa9\xda\x0f\xfe\x4b\xc3\xd1\x22\x56\xdc\x4e\x96\xb2\x68\x59\x34\x7b\xf2\xd6\xc4\x2a\x00\xaf\xe7\x5f\x35\x82\xe9\xa1\x6f\x1c\xc2\x42\x0e\x25\x22\x85\x88\xc8\xa8\x71\xc9\x3b\x58\x08\x08\x4f\xbd\xe4\x1d\xce\x7b\xe9\x7c\x71\xb1\x10\xf9\x25\xcf\x2f\x7a\xdd\x6e\x02\xc2\x73\x98\x80\x53\x66\x71\xc9\x3b\xd8\x2a\xe0\x4f\x87\xb0\x7d\xb2\xce\x5e\xc3\x58\x37\x8f\x37\xe2\x44\xb4\x45\xe4\xbf\x2c\x58\x30\xbf\x24\x21\x06\x9f\x5e\x51\x80\xcf\x03\xe8\x28\x9b\x70\xef\xa1\x68\x81\xde\x2a\x79\x0f\xc2\x6c\xcd\x77\xae\xfb\xc3\x7b\x3c\xa6\xba\xe9\xbf\xdd\x63\x2f\xdd\xf6\x24\x57\x66\x8a\x22\x6f\x95\x31\x05\x83\x29\x8b\xe0\x1c\xd5\x65\xe9\xec\xd0\x78\x62\x1c\x71\xa2\xb8\x59\x2a\xa0\x19\xf0\xed\x80\x3b\x1a\xf4\x75\x2e\xd3\x53\x88\xb2\xc1\xbe\x67\x84\xdd\x42\xee\x66\x0b\xcb\x8b\x27\xf3\x58\xae\xa6\xff\x3f\x6b\xc1\x47\x6b\xf3\xd5\xc7\x74\x55\x43\x71\x45\xb9\xe2\x14\x52\x7f\x2d\x2c\x2f\xf2\x88\x4a\x61\x1b\x14\xe1\x6b\x65\xc3\x22\xd7\x13\x48\x24\xbe\x2a\xc0\xef\x4b\xf2\xb9\x71\x4a\x73\x2b\x19\xf0\x06\x9f\x4f\xc9\xcf\xa0\xea\xfe\x8d\xc2\x77\xda\xb7\xda\xd2\x09\x53\x0
b\xd4\x57\xaa\xde\x88\xee\x9a\x90\x52\xb5\xf2\x52\x29\xec\x34\x7c\x72\x7e\xd1\x8a\x8a\xd7\x85\x24\x70\x10\x72\x9b\xa5\x08\x2a\xd1\xd2\xce\xc8\xc4\xfb\xa3\x5f\x13\x4e\xf8\xc2\xf2\xe2\x22\x34\x82\x91\xb7\x55\x0f\x6f\x43\xb7\xef\x66\x73\xec\xae\xbd\xae\x76\x75\xaa\xd9\xa7\x3d\x76\x73\xe8\xba\x63\x15\xfe\x13\x5e\xb0\x52\x7d\x38\x6c\x4d\xb4\xe3\x51\xd1\xe0\xa7\x30\x67\x7b\x65\x16\x93\xc4\x2a\xa4\xf6\x83\x9c\x45\x67\x08\x2f\x66\xc7\x77\xb0\xaf\xed\xec\x51\xc6\x3e\xe1\xb1\x17\x91\x60\x08\xaf\x96\xd4\x01\x39\x55\x34\xc3\x04\xf3\x8b\xbd\xd3\x0b\xfe\x99\xb7\x4b\x21\x65\x32\x74\x55\x65\x2a\x04\x5a\x2e\xba\xc2\x26\x92\x1c\x39\x98\x02\x60\x5b\x48\xc9\x08\xb5\xc6\xfa\x68\x92\x0a\x46\xde\x41\x52\x86\x48\x4b\xf5\x81\x7b\x64\xdf\xed\xb1\xc3\x72\x76\x96\xb2\xbc\x2c\xfc\xef\xf2\x82\xd3\xfa\x97\x4d\x13\x50\x1c\xc3\x69\xcc\x24\xaf\x12\xa6\x6d\x72\x14\xa3\x45\x52\xbb\x7d\xab\x9b\x55\xa7\xf6\x38\xbb\x93\x1d\xdd\xeb\xd4\xde\x47\xcd\x83\xc6\x96\x75\xd9\x0d\xb2\x51\xa2\xc4\x7e\x18\x2c\x5b\x3f\x2b\x44\xcb\xa2\x9b\x04\x2d\x01\x86\xd7\x02\xc4\xaa\xfb\xac\xcf\xc8\xf6\xa2\x8f\xa1\x33\x21\xbf\x54\x63\x37\x12\xd8\xf5\x42\x12\x16\x85\xff\xd3\xb5\x1d\xd2\x87\x55\x3a\x7f\xde\xfa\xb0\x4a\xcd\xfe\xca\xb3\xab\xbd\x6c\x9a\x66\x57\x4d\xcc\x33\x66\x2c\xd0\xae\x4b\xc6\x59\x02\x05\xbc\x71\x35\xca\x23\x90\xaf\x51\x7f\x0d\x46\x02\x5d\x58\x72\x8d\x92\x0f\x68\x02\x3b\x72\x0a\x2f\x1c\x50\xc6\x66\x4e\xbd\x88\x84\x4d\xca\x30\xb7\x3b\x0e\x5d\x20\xfb\x12\xb9\x47\x36\x58\xc4\x0e\xc2\x8e\x5a\x3c\xe9\xbf\x32\xb8\x9f\xfe\xbc\xdc\x95\x5b\x32\x51\xb9\xc3\x57\xed\x47\x3c\x66\xae\x45\xff\xbd\xde\xf6\x36\xa3\x21\x4b\x86\x5f\x55\xd7\xeb\x21\x5d\xdf\x95\xac\x15\x7d\x49\x46\xb9\x81\x9b\xe8\x5f\x7a\xec\x20\x91\x6b\xff\x71\x2f\x78\x4c\x39\x23\x0e\x21\x70\xe4\x7b\xd8\x4d\x7a\xed\x38\xdd\x8e\xc8\xa5\x99\xe5\xcf\x68\x08\x1c\xbf\x90\x91\xcb\xa3\xf1\x62\x2c\x20\x7f\x94\x2c\x25\x27\xf8\xc8\x1d\x47\x76\xca\xad\xf8\xd1\x1a\xbb\x55\xdd\x98\x61\x37\x84\x90\x98\x58\x14\xfe\xa5\x5a\xf0\xf7\xde\x90\x17\x2e\xa7\xdf\xb4\xdf\xd8\x49\xbf\x89\xab\x40\xaa\x11\x46\x5a\x84\xb5\x80\x3f\xdc\x5a\x53\x7b\x2b\xd2\x08\xd1\x9b\x9a\x9c\x5e\xe5\xb6\x40\x8c\xcb\x23\x05\xa0\xe4\xe6\x08\x06\xcf\x5f\x29\x07\x2b\xef\x67\xc9\x79\x43\xc7\x42\xd3\x2f\x39\x8f\xa8\xdf\x1e\x36\x16\x4c\x50\x0a\xd6\x89\xe8\x64\x9e\x75\xed\x97\x3b\x4d\xda\x27\x6a\xcc\xa7\xfa\x4e\x27\x62\xeb\x41\x5a\xe7\xf7\xd5\x82\xcf\x7b\x83\xcf\x87\xad\xb9\x7c\xad\x6f\x73\x5a\xf1\x2c\x87\x40\xb1\x21\xb7\x9b\x55\xda\x5d\x7d\x64\x04\x8c\xc5\x31\x2e\xb8\x80\xdc\x89\xf1\x86\x40\x3e\x40\xe7\x0a\x47\xad\x37\x29\x44\xed\x0a\x8d\x87\xbe\x76\x9e\xa4\x57\x01\x2e\x87\x33\x13\x2f\x61\x2f\x66\xc7\x2f\xf3\x16\x35\x73\xc1\xde\x31\xc2\x9e\xa3\xae\xc8\xc8\xdd\x71\x7f\x5b\x0b\x7e\xab\x36\xfc\x5d\xd5\xb4\x24\x79\xaf\xa1\xbb\x4f\xdb\xc6\xa3\x48\x0c\xee\x39\xde\x4b\xc9\x74\x61\xa8\x8c\xb6\xc2\x42\x88\x44\x94\x67\x5d\x8a\x72\xd5\x3b\xa8\xc1\x71\x8b\x51\xe0\xcd\xf6\x3b\x6c\x9b\xbe\xef\xb8\xc9\xdc\x53\x80\xdd\x06\xfb\x8b\xbc\x78\x63\x03\xfa\x42\x2b\x84\x3c\x1e\xe4\x66\x55\xa1\x28\x69\x33\xe9\x45\x66\xed\x86\x6d\x73\xd9\xe5\x9d\xb6\xf3\x77\x8e\xb0\x9b\x31\x4d\x70\x24\xd2\xe5\x7e\xd1\x2c\x93\xc2\xff\xbb\x5a\xf0\xdb\xb5\xea\x53\xf7\xf4\x5b\xdc\xb0\x2e\xc8\x0b\x2c\x39\x6e\x05\x33\x64\x3c\xcd\x52\x41\x58\x72\x22\x2d\x73\x50\x9b\x8a\x58\xf1\x39\x89\x64\x6e\xf0\x3b\x72\x22\x90\x5c\x2a\xa4\xdf\xe3\xc1\x1d\xa0\x6c\x47\xf6\xa4\x19\x16\x82\xac\xef\x96\x4e\x14\x0c\xc7\xe4\xac\x9e\xb5\x06\xbb\xd2\xe0\xcb\x98\x53\xe3\x0e\x88\x98\x44\xe3\x33\xbd\x83\x79\xd6\x5f\x38\xae\xed\xa2\xd1\x6e\xf0\xa0\x95\x65\x13\x77\x04\x3a\x8b\x72\x40\x7e\xec\xc1\xb8\xfa\xf3\xf5\x01\x64\xd4\x
6d\x98\xf2\x8d\x4a\xf9\x86\x29\xdf\x30\xe5\x77\x58\x0f\xba\x51\x17\x97\x16\xd4\x8d\xba\xb8\xb4\x70\xb9\x37\xaa\xfc\x64\xc7\x1b\xf5\x6f\x6a\xec\x39\xca\xc9\xfd\x7c\x96\x95\x06\xdf\xc8\xff\xed\x5a\xf0\xef\x6a\xc3\xdf\xf1\xaa\xdf\xbd\x8d\xeb\xa3\x4f\x9a\xce\x4b\x4d\xc1\x03\xb2\x2e\xa4\x47\x90\x0a\xb9\x15\x27\x42\x63\x1f\x21\x7b\xe3\x60\x25\xd8\x47\xd2\xd2\xaf\xd8\x35\xa6\x19\xfa\x1b\x0e\xaf\x15\x2a\x5c\x5a\x5e\x32\xd1\xc4\x69\x5f\x4d\x05\xf0\x53\x34\x00\x44\x2d\x72\x5b\x07\x85\xdb\x5e\xba\x4e\x99\xdd\x37\x63\x08\xa9\x58\xed\x95\xf8\x2b\x49\xd4\xd9\x44\xd1\x1e\x3d\x00\xac\x89\xff\xb7\x1e\x7b\xee\x76\x04\x41\x4a\x45\xdf\xe9\x6d\xf7\x16\xe3\x36\x6c\xea\x34\x40\xf9\x24\x05\xeb\x1a\x40\x5d\xe7\xc6\xe5\x17\xd6\x44\x21\x28\xba\x01\x5b\x50\x41\x2a\xf4\x95\xa4\x2e\x26\xe6\x0b\xc3\xbc\x76\xd8\xa8\x3f\x5e\x63\x0c\x98\x28\xd0\x46\xf8\xff\xb2\xc6\xe6\x2f\x8b\x2b\x83\xcf\xaa\x6c\xd9\x7f\xf2\xcc\xbb\x2b\x63\xcc\xf0\xd3\x6d\x38\xb3\x3d\x71\xd3\x56\x35\x92\xa1\x29\xc3\x75\x01\xc2\x3a\x7a\x4f\xd9\xae\xcb\x4e\xf4\x8f\xf5\x99\xc3\x31\x93\xe4\xa4\x38\xe6\xf7\x7b\xec\xb9\xe1\x76\x52\xe4\x5b\xbd\xa0\xbf\xdd\xcb\xbd\x28\x71\x90\x1b\xd4\x52\x22\x17\xfa\x6b\x15\xc5\x66\x5c\x06\x6c\x12\x2d\xcf\xb2\xbb\x4f\x9f\xf4\xd8\xb3\xe9\x3a\x59\xca\xb3\x26\xc0\x72\x5d\xe8\x77\x45\xe1\x7f\xd1\x0b\x7e\x4e\xc5\x57\xb9\xaf\x86\xf1\x3a\x8a\xbb\x70\x4b\x36\x76\xe4\x7a\xe0\xc0\xc9\x29\x25\x1f\x13\xe7\xdb\x0a\x07\x8c\xea\x78\x6b\x19\xdc\xc2\x6a\x25\x5a\x49\xd8\xae\xac\xc4\x0e\x5b\xfb\x4f\x47\xd8\xb3\xa8\xe3\x17\xd3\x22\x6c\x09\x75\x2f\x7e\x61\x24\xf8\xc9\x91\x61\x6f\xb6\xbd\x1b\xd5\xf8\x7b\x50\xfa\xab\x7f\x41\xaa\xfe\xec\x74\x3d\xba\x7d\xb5\xd5\x00\x0d\x40\x85\x4a\x04\x59\xcd\x33\x6b\xad\x43\x15\x7f\x35\x30\x58\x7b\x3a\xe4\x27\x1b\x59\x1c\x91\x55\x03\x12\x3a\x6d\x77\xed\xd2\xd5\xb6\xd7\x5b\xd7\x2a\x7e\xd9\x97\xae\x14\xd8\x48\xad\xea\x3f\x7e\x19\xaa\xcd\xd3\xcb\x43\xa9\xd8\xcb\x5b\x7b\xa6\x60\x9b\xf2\x49\xab\x20\xb0\x80\x98\x10\xe3\x28\x64\x41\xa9\x91\x17\x54\x6a\xaa\x5f\xf1\xd8\x10\x9d\xb0\xff\xaf\x3d\xb6\xb8\x67\x6d\xec\xc0\xe7\xd5\xde\xa7\x83\x2d\xec\x75\x20\xf6\x97\x38\x24\xdc\x3e\x3b\x0e\xea\x0b\x07\xd8\xcb\xae\x99\xb3\x44\xaa\x80\x13\xfe\xd7\xfe\xe0\xcc\x76\x2f\x2d\xb7\x71\x72\xeb\x55\x56\xba\x28\x6e\x21\xa6\xb7\x32\x05\x90\x8b\xcd\xc2\xf9\xc6\x25\xef\x90\x1a\xbe\xa3\x7a\xff\xbe\xfd\xec\x73\x23\x4c\xbf\xf3\x7f\x72\x24\x78\x62\x44\x4f\x94\x71\xc5\x90\x2d\x55\xfc\x1a\xb4\x85\x5f\xb6\x0d\xb0\xf1\xe5\xa6\x10\xa9\x6e\x1b\x00\x4d\xe1\x44\x19\x2c\xd3\x39\x5e\xe7\x2b\x67\xb3\x54\xac\xcc\x71\xc2\x57\x87\xaf\x73\x24\x9b\x18\x59\x8c\x17\xa3\xb6\x72\xc2\xd5\x6e\x82\x2e\xcb\xac\xd7\x5c\x83\x2b\x0d\x03\x0d\xe9\x32\x54\x3e\xcb\x4e\x17\x1b\xb2\x39\x4a\xf5\xbd\x32\x07\xaa\x64\x34\x98\xe1\xea\x4b\x36\x0d\x5d\xaf\x21\xc8\x07\xec\x97\x94\xd1\x5f\x3e\x8e\xb4\x10\xa6\xa1\x39\xe6\x8d\xb3\xac\x15\x72\xc7\x38\x02\xa9\x0a\x72\xd7\x55\x81\x22\x16\xe2\x8b\xa6\xf2\xc0\xcd\x82\xfd\x2e\xdf\x10\xe4\xcb\x7e\x5a\x0e\xa1\x20\x0a\xdf\x42\x37\x77\x08\xdd\x97\x65\xad\xe6\xad\xbe\x11\x4b\x50\x31\x95\x1d\xa4\x12\xfe\xa7\x3d\xf6\x8a\xed\xce\xd4\x65\xee\x51\x93\x29\x5d\x61\x73\x34\x55\x3f\x06\x37\x22\x4c\xa8\x33\x67\x6a\x42\x1b\xfc\xbc\x62\xdc\x80\x0f\x5f\x51\x9b\x6c\x45\x21\xfa\x94\x99\x59\xab\x06\xfb\x6f\xdf\xc8\xce\x5c\x69\x9f\x91\x56\x7c\xd3\xf2\xb9\xb3\xcb\x60\xcf\x5d\x92\xb2\xb1\xff\x2f\xbe\x31\x78\xa8\xf2\x0c\x2f\x3e\xf9\xb0\x8e\x4f\x79\x2b\x93\x7b\x36\x4e\xdb\xdc\x4d\x91\x70\x32\x0f\x5b\x25\x9f\xe5\xa3\x6b\x65\xd9\x9d\x9b\x98\x78\x6d\x91\xa5\x75\xb4\x16\x03\x00\xf7\x98\x1b\x25\xf3\x91\x13\xec\x57\x6b\xc6\x5d\xf6\xe7\x6b\xdb\x63\x33\x5e\xe9\xd0\x82\xbf\x54\x1a\x7b\x1c\
x85\xeb\xfc\x25\x77\x61\x2f\x55\x41\x64\x64\x38\x86\xa3\x52\xe8\xf8\x16\x82\x5c\x52\x96\x18\x62\x36\x7a\x80\x4c\x0b\x26\x16\x87\x00\x59\x1f\x39\xb6\x96\xc1\x62\x6a\x90\x2a\x54\x9e\x78\x05\x65\x89\x52\x89\x33\x7a\x85\x32\x75\x6f\x4c\xf1\x51\xc9\x4c\x89\x4d\x91\x8f\x6d\x5b\x1f\xc0\x10\xb3\x47\xd9\x0d\xd6\x3c\xfa\x9d\x27\xbc\xd7\xb0\x47\xd8\x43\xd7\x7a\x76\xad\x4d\x62\xbb\x40\x16\xec\x46\x45\x29\x4e\x66\xcd\xc2\x6f\xb2\xf0\x5a\xb6\x7c\xca\xd4\xdd\xeb\x28\x0f\x0d\xf6\xf1\x5b\xb4\xbf\xce\x87\x6e\x09\xde\x73\x8b\xe3\xaf\xa3\xdc\x74\x36\x66\x94\xf7\x0d\x92\x83\x86\x0a\x94\xa1\xc7\x48\xad\xe3\x36\xd8\xd6\xd0\x69\xc7\xec\x75\xbb\x08\xc5\xd3\x8b\x68\x8e\xb1\x3a\x5f\x2d\xb2\x14\xc7\x1e\x47\x73\x72\xab\x14\x99\x72\x16\xe1\x8b\x27\xc7\x39\x78\xc9\x85\x7c\x7a\xd6\x02\xd2\xe7\x6b\x62\x4b\x39\x37\xd4\x79\x2f\x8f\xe7\x64\x47\x2f\x9e\x5f\x04\xfb\x6c\x98\xd3\x75\x7a\x6f\x96\x84\x69\x9b\xa7\xa2\x9c\xe8\xe5\x49\x63\x49\xbe\x20\x9f\x44\x59\xb6\xce\x45\x27\x8c\x13\xf8\x16\xfe\x92\x42\x5d\x2e\x8a\x62\xdb\x5a\x64\x21\xac\x66\x9e\x4a\xd6\xc1\x2c\x24\xf9\x4e\xd9\x77\x18\x59\xd5\xfd\x05\xbd\xea\xf9\x22\xf8\xaf\x40\xf6\x89\xa2\x04\x4e\x75\x9c\x03\x8e\x8a\x8e\xc2\x3c\x7f\x7a\x81\x4f\x4d\xce\xcc\x8e\x73\xc2\xd7\xe7\x33\x8d\x29\xfe\xd0\xf9\xd3\x0b\xf2\xe9\x23\xf2\xaa\x89\xbb\x1b\xb3\xd0\xdf\xc5\xa5\x8d\x59\xbe\xb8\xb4\x5d\x4f\xb1\x93\x8b\x4b\xf8\xc9\x31\xf5\xc9\xb1\xbd\x7d\xd2\x8c\xa3\x5c\x0e\x67\x61\xf1\xe4\xf9\x9d\x8b\x43\x89\x3a\xef\x84\x4d\x59\xfe\xcc\xfc\xc2\x6e\x53\x88\x9f\xc9\x82\x75\xde\xeb\xc1\x8a\xa7\xfc\xe2\xc5\xc5\x93\x5a\xa1\x2b\x99\xd4\x5e\xb7\x2b\x72\xe0\xd7\x07\x82\x54\xdb\x62\x8b\x8f\x9e\x88\xc7\x5e\xfd\xd0\x64\xfd\x78\x58\x6f\x3d\xf2\x86\xbb\x1e\xab\x9f\xd0\x3f\x66\xf7\xf6\x63\x6a\xfa\xb1\xdb\xa9\x07\x33\xba\x0b\x33\xd7\xb0\x0f\x33\xfa\xd7\xcc\x5e\x3a\x31\xab\x3b\x31\x7b\x0d\x3b\x31\xeb\x76\xe2\xae\xe3\xe1\xea\x23\xc3\xfb\x65\x75\xe5\xa8\xee\xca\xd1\x6b\xd8\x95\xa3\x97\xdb\x95\xb8\x58\x4d\x71\xdb\x2e\xdf\x73\x76\x6a\x52\x4a\x75\xf0\xd7\x0c\x79\x7f\xaa\xe3\x9f\xc4\xeb\x82\x07\x93\x33\xd3\x53\x77\x1e\x9d\x9a\x9c\x9d\xc1\x50\xcb\xe3\x77\xde\x55\xd7\xcf\xa6\x02\xaa\x6f\x6a\xd2\xae\x71\xb7\x7a\xe8\x9b\x19\xf3\xcd\xf0\xb6\x07\xdb\x6a\xe6\x22\x8a\xcb\x66\x98\x03\x41\xc3\x5f\x5c\xfe\x54\xdf\x0f\x9d\xc1\x57\x8f\x9e\x98\x83\x15\x83\x39\x18\x3d\x31\x87\x7f\xcf\x3c\x36\x76\xe2\xd1\xa3\x0f\x4d\xd5\x8f\x3e\x42\x2f\x67\x1f\x7b\xf4\xd8\xe8\x89\xb9\xc9\xa9\xa9\x47\x61\x5a\xf1\xf9\x98\xfe\xf4\xd1\x99\x87\x66\xef\x54\x85\x67\x1e\x7b\x74\x46\x16\x7e\x68\xb2\x7e\xf4\x91\x47\x1f\x3a\x76\x97\x5b\x7a\xea\xb1\x47\x47\x4f\xcc\x4d\x4f\xcd\x4c\x3d\x3a\x75\xd7\xe4\xe4\xa3\x33\x47\x1f\x8e\x64\x93\x0f\x47\xf2\xdd\xd8\xed\xa4\xe9\x4b\x21\x83\x08\x8f\xe2\xb6\x1c\x89\xa1\xc3\x1d\x48\x33\x1f\xa7\xbc\xce\x8b\x42\xae\x17\xbf\xd8\x58\x6e\x70\x08\x85\x4f\x24\x19\x03\x51\x46\x0d\xdb\x5c\x07\xd6\xa8\xa1\xbd\x87\xea\xfc\x91\x13\x0f\x47\x6f\x98\xd6\x7f\xcd\xc2\x2e\x58\x13\x5b\xcd\x2c\xc9\x72\x58\x83\x35\xb1\x15\x46\xa2\x19\x77\xc2\x84\xc3\x53\x44\x01\xc0\x55\xb8\xed\x34\xfc\x37\x37\xbc\x91\xdb\x4e\x8c\xd2\x06\x9b\xaf\x9f\x96\x73\xfa\xa8\xfd\xf3\x98\x1c\x68\x9d\xe7\xed\x55\xd3\xd8\xf9\x7b\xef\x19\x68\x24\x6f\xaf\x52\x6b\x79\x7b\x75\x74\xfa\xe8\xd1\x71\xfa\xdf\x71\xb9\xea\xab\xfd\x52\xcc\x55\x93\xb4\x50\x42\x07\x08\xd8\xaf\x43\x58\xdc\x66\x06\xdb\x22\xed\x63\x44\x45\xd6\x32\x17\x99\xbc\x14\xe5\x1c\xaa\x60\x69\xb3\xc7\xa6\x27\x27\x8f\xd5\x27\xa7\xea\x93\xd3\x41\xe5\xd2\x68\xf5\x92\xa4\x0e\x1f\xc4\xa9\xbc\x41\x66\x66\x66\x8e\xcb\xaa\xc8\xe3\x1f\xaa\x53\xde\xff\x6e\x95\xd3\x3c\x2d\x82\xa1\x74\x1a\xd0\x94\x80\x50\x9f\xd4\x81\x03\x06\xaf\x33\x21\x84\xa2
2f\x97\x5e\x21\xf5\xd5\xd8\x4a\xa9\x90\xfc\x4b\x79\xed\x28\x96\x81\x36\xa2\xaa\x95\xe8\xa3\xc0\x95\xe5\x0e\xa8\xe4\xa2\xfe\xf3\x43\x8c\xc5\xdd\xd3\x61\x27\x96\x1b\xc5\xff\xd2\xa1\xe0\xbd\x87\x16\x97\xd4\x6f\xd7\xd1\x65\x71\x89\xb7\xd4\x0b\xac\x79\x71\x69\x63\x76\x5c\xfe\xff\xb1\x31\xc3\xbc\x54\xb2\x77\xeb\xbd\xd0\xb6\x43\x57\x03\xf9\xd1\xc9\x5e\x98\x2c\x97\x61\x73\x3d\x70\x4d\x3b\xbc\x02\xc5\x35\xc0\x1f\xb9\xdb\x42\x07\xe2\x13\xa1\xaa\x60\x60\x29\xf3\x20\x0d\xb3\x4f\x1b\x9b\x44\xbb\x1d\xd6\xd1\x30\x58\xae\xb7\x3a\x4c\x02\x9c\x8c\x70\x23\x8c\x13\x48\x0d\xa8\x54\x87\xd8\x03\x1a\xb3\xdb\x20\x61\xaf\xc5\xe5\xc0\x51\xb8\x4c\xf6\xaa\x32\x3b\x3a\x07\x2f\x4c\x06\x81\x96\xcd\xc9\x36\x2c\xb4\xb7\x30\x8a\x48\x3e\xd5\xf1\xdf\x8a\xd0\x85\x79\x5f\x2f\x6d\x1f\x29\x9c\x8d\x31\x8c\xa8\xa5\x66\x0f\x81\x69\x15\x21\xfa\xf4\x57\xaa\xb3\x2a\x9f\xfd\x30\x7e\x47\xee\x15\xc2\xad\x96\x6b\x1f\xb8\xe3\xc8\x2a\xbc\x83\x96\xc7\xe9\x92\xdf\x23\x1f\x81\x4f\xa0\xef\x1a\xec\x5b\x33\x46\x81\x05\x60\x72\x8d\x58\x02\xc6\x2a\xbc\x2e\xc1\xe1\x76\xc2\x2d\x70\x1e\x90\xbd\xdf\xcc\xc0\xe7\x13\x4e\x4d\x24\xaf\xfc\x42\x6e\x79\x7d\x94\x00\x10\x51\xc4\x94\x5b\x39\x12\xf9\x18\xf4\x4e\x14\xc2\x9c\x36\x50\x74\x19\x03\x85\xe2\xce\x0d\x4e\xbb\xb5\xf7\x16\x97\xa8\x3f\xe3\x92\xba\x59\x4e\x50\x80\xad\x61\x15\xb2\xb6\x68\x4c\x8b\xd4\x96\x37\x99\x85\x32\x3b\xf4\xc8\x5c\x75\x56\xb9\x2f\x1c\x64\x37\x96\x59\x37\x4b\xb2\x76\xff\x7e\x29\x55\xff\x9b\x83\xc1\x07\x0f\xda\x4f\x90\xee\x74\x75\x96\xa5\x3a\x4c\x8c\x26\x44\xaa\x28\x0a\xbc\xc4\xd4\x3a\x4a\x9f\x02\x3d\x69\x68\x0f\xd1\x85\x0d\xf7\x40\xa6\xab\x25\x1e\xa2\x90\x97\x82\xa1\xf8\xa8\x59\x01\x3b\x84\x66\xb6\x97\x15\x25\x73\xd5\x34\x00\x6c\x4c\x6e\x97\x00\x6b\x08\x0e\x01\x61\x31\xfc\x26\x7d\x09\xdd\x9f\x17\x9c\xce\x2b\x15\x26\xb2\xfb\x15\x31\x3e\x2c\x31\x5b\xf8\xd4\x31\x2a\x1c\xf6\xab\x8e\x6d\xba\xdf\x80\xda\xb8\x96\x15\x22\x75\x81\x49\x10\xe1\xc1\x9e\x31\x52\xa8\x69\xd2\x45\x00\x3c\xc5\x8e\xb4\x10\xba\x03\x09\xce\x70\x27\x2b\xd0\x7b\xf5\x31\xc5\xf7\x87\x25\x79\x2b\xe9\x06\xf5\xe6\x8c\x07\x34\x0c\x6b\x61\xe1\x54\x61\xd0\x50\x29\xfd\x25\x84\x9d\x19\x61\x84\x96\x51\x5d\x90\x42\xc3\x34\x23\xcf\x00\x0e\x93\x38\x43\xb0\x32\x65\x06\xfe\x93\x90\x65\x53\xf7\x47\xc1\x3c\x36\x41\xfd\x12\x26\x3a\x0a\x24\x6e\x91\xde\x14\x28\x11\x0a\x55\x85\x48\x0b\xa1\xc0\x4b\x20\xfb\xbc\x63\xf3\x91\xdb\xd1\x4c\x1a\x71\x3b\x66\xca\xb2\x5c\xe5\x18\x4c\x33\x33\x1f\x36\x50\x85\xe6\x72\x10\x45\xb5\x8a\x40\x09\xce\x12\x94\x46\x82\xee\x50\xe8\xdc\x5a\x86\xba\x8f\xd5\x3e\x85\xb9\x29\x87\xde\x14\xd6\xd3\x22\xc4\x7a\xaf\xd1\xf5\x5a\x6d\x21\x32\x18\x8b\x1a\x20\x11\x30\x0a\x3a\x19\xe1\x22\x86\x84\xba\xa3\x9c\xff\x77\x72\x44\xfd\xb6\x7d\xec\x1f\x29\x16\xcd\x26\xca\x8a\x5c\x17\xfe\x57\x46\x82\xdf\x18\xd9\xb1\x88\xb6\xde\xc6\x2d\x6e\x1e\xee\xc2\x0e\x62\x3e\x42\x47\x5f\x0c\xa4\xda\x95\x30\xb5\xa3\x57\x5c\xf0\xa0\xcc\x7b\x22\x80\x1c\x0e\xea\x50\x21\x93\x1b\x80\x55\x28\xd0\x98\x98\xc4\x51\x38\x6c\xa5\x0d\x8f\x09\x6e\xa8\xa6\xa7\x0d\xbe\xf3\xe8\x40\x71\x60\x69\x8f\xf7\xd0\x71\x67\x65\x9a\x89\x08\x95\xda\x6b\xcd\x58\x0e\x2c\xf5\x08\x24\x53\x86\xcb\x04\x34\xbe\xd7\x7c\x4b\x3d\x70\x8f\x1a\x0d\xe1\xd3\xea\xbd\xe5\x58\xbf\x7e\xb2\xc6\x6e\x14\xd6\x4d\xe9\x7f\xb0\x16\x3c\x5e\xb3\x9f\x28\xed\x98\x96\x3f\x35\xb5\xc7\xa6\xa3\xb8\x68\xca\xeb\xa8\xcf\x3b\x42\x8e\x2f\x2e\x3a\x85\xeb\xb3\x41\x1e\x45\x71\x38\x84\x4f\x47\xfe\x34\x04\xb0\xa1\x85\xb3\xf3\x67\x4e\x41\x5c\x58\x1e\x8d\x35\xf8\xd9\xcc\x28\x31\x4c\x10\xc9\x46\x96\x6c\x80\x60\x61\x4c\x98\x49\xb6\x29\x72\x50\xfd\x9c\x3f\xbd\x50\x9f\x9a\x9a\x9e\x31\x68\x70\xa3\xbb\x24\x2e\x96\xa5\xc7\x5c\x17\xa7\x15\xb9\x20\
x2b\xca\x72\x69\xb3\x11\x81\x23\x2a\xfe\xe7\x1a\x7b\x86\x6d\x5b\x5b\x5c\xf2\xff\x43\x2d\xf8\xd9\x5a\x55\xbd\xa2\x18\x93\x0b\xfd\xae\x98\x73\xb7\x8c\xf3\x03\xc6\xd8\x16\xa5\xc6\x78\xd7\xf0\x6a\x8b\x4b\x2e\x5e\xf5\xa0\xc9\x83\xb8\xf2\x48\x74\x81\x42\x4b\x19\x85\x7c\xbf\xe5\xf7\x60\x44\x4f\xfa\x68\x94\xc9\x7a\x51\x5d\x1b\x91\x74\x72\x70\xac\xbf\xaf\x38\x47\x77\x5c\xca\x8c\x51\x31\xdf\x58\x39\xb5\x87\x70\x69\xb1\xa5\xf9\x55\xd6\x2b\xd3\xb0\x3e\x9a\x06\x4c\x46\x58\x3b\xd4\xcc\xf2\x25\x8f\x3d\xbb\x10\x80\xbe\x32\xdf\x02\x43\x42\x1f\xf3\x97\xf9\x6f\xf4\xd8\xb1\x3d\x58\x17\x86\x7c\x1a\x9c\x1e\x5a\xa3\x1b\xac\xe2\x08\x27\xc4\xa7\x20\x82\x7e\x48\x5f\x35\xd8\x5f\xd4\xd8\x0d\xea\x5c\x2c\x2e\x15\xfe\x97\x6a\xc1\xe7\x6a\xd6\x83\x01\xb9\x8c\x94\x84\xe4\xf9\x8c\x1c\x11\x22\xeb\xb8\x82\x89\x42\x50\x2f\x32\xe0\x71\xba\xa5\xd6\x9b\x54\x4f\x90\x66\x42\x6d\x7c\x3a\xc4\xac\xac\xa4\x25\xc7\x92\x68\x53\x86\x98\x4d\xc9\xa3\x16\xb1\xa4\x1b\x68\x14\x2d\x10\xef\x08\x43\x54\xa8\xb9\x30\xcf\xe3\x0d\xc9\x75\x96\x0a\xa5\xc7\x80\x25\x2d\x2e\x35\x38\x9f\x57\x19\x92\xc8\x22\x8b\xc0\x96\x44\x2a\x5c\x29\xdf\x58\xaa\xc0\x98\x61\x41\xef\x59\xe6\x07\xf2\x2f\xd8\xe1\xfa\xfa\x9b\x1a\x7b\xb6\x32\xf9\xba\x6a\xb6\xdf\xa9\x05\xff\xbe\xb6\x38\xec\x95\xa5\x9f\xa9\xdc\x18\x3a\xca\x54\x2b\xa6\x74\x60\x82\xc1\x3f\x74\x53\xc6\x12\x6c\x15\x29\xde\xac\xe7\x69\xd2\xb7\xb5\x59\x84\x45\x37\xd0\x02\x98\xfe\x2c\x71\xc5\xa9\xdd\xa8\xeb\xe8\xf3\xe1\xea\x3e\xb7\xd5\x71\x5d\x4a\x32\x0b\x79\xd6\xed\xe2\xc1\x4b\xb7\xf9\x06\x2d\xe2\x61\xd4\x47\xfe\x4c\x99\x67\xb5\xb6\x48\x8f\xc1\x39\x8a\xff\xb2\xc6\x9e\x59\x39\x38\xfe\xdb\x6b\xc1\x9b\x6b\xcb\x8a\x8a\x04\x0b\xc0\x11\x2e\x2e\x91\x00\x09\x9a\x6f\x48\x3a\x84\x6c\x9e\x64\xa7\x42\x70\x2b\xae\x9c\x24\x7e\x0a\x2f\x30\xe2\x28\x17\x97\x88\x39\x1e\x2c\xa8\xc8\xbe\x6a\x49\xae\x86\x6c\xc6\xf5\xb3\xc7\x27\x5f\x25\x3d\xf0\x77\x1c\x62\xfb\x40\x96\x7a\xd3\xa1\xe0\xaf\x0e\xc2\xdd\xaf\x35\x59\xe4\x7b\x6a\x79\x15\x10\x10\x6c\x51\x8d\x67\xd0\x72\xb4\x4a\x33\xa5\xd2\x03\xc8\xc5\x73\x55\xe9\x7b\x54\xdd\x07\xba\x5c\xa0\xd9\x1f\x40\x9f\x22\xbd\xb1\xde\xa8\x96\x21\x0c\xf1\x25\xf4\x29\x06\xfa\x90\xd9\xbb\xd5\x95\x6c\xf4\x38\x0d\x1e\xbd\xf2\x37\xcb\x72\x3c\x7a\x88\x7d\x5b\xd1\x27\xae\xf6\x49\x8b\x43\x7c\x77\xaf\x69\x05\x92\x9b\x26\x28\x8e\x39\xcb\xf5\xb3\xe5\x04\xb4\x7d\x84\x94\x25\x99\x7c\xc7\xac\xa7\xac\x37\x8e\x81\x45\x85\xd1\x19\x86\xda\x0e\x78\x26\x63\x4a\x6f\x35\x89\x8b\x35\x05\x8a\x40\xf8\x6e\xa6\x4c\x1e\xd2\xed\x2a\x59\x1b\xab\xee\x86\x6c\x12\x97\x20\xe0\xab\xbd\x38\xc1\xdb\x58\xcf\xbc\x12\xd0\xf4\xe4\x23\x1c\x67\x8a\x51\x7e\x44\x61\xd1\x56\x46\xc7\x3f\x33\x72\xab\xd5\xc5\xc2\x55\x25\x00\xd5\x30\x6b\x6d\x37\xad\x55\xd4\x20\xa6\x91\x2f\x91\x6d\x21\x76\x79\xe6\xd1\xb8\x65\xf9\x80\x69\x4f\x3b\x84\x2d\x80\x4b\x7c\xec\x0a\x3b\xe8\x70\x52\xc8\x0f\x2a\x4b\x60\x61\xe9\x6e\x9c\xc0\x3f\xe1\xd8\x75\x96\x05\x28\xf0\x89\x6f\xa6\xcc\x7b\x04\xdc\xac\xd5\x48\x8e\x69\xd0\x28\x92\xae\x25\x2d\xa0\xcd\x11\xa7\xed\xba\x2e\xa7\xe8\x01\x28\xc1\x6c\x6a\xf0\xc7\x37\x31\x66\xb4\x39\xfe\xef\xdc\x14\x3c\x7e\xd3\x82\xd1\xee\x6c\xcf\x21\xec\xac\xa8\x95\x9b\x74\x07\x7b\xf4\x75\x83\xf4\x55\x1a\xa4\x41\x25\xf0\xbf\x87\x41\xfa\xba\x45\xfa\xab\x6e\x91\x1e\xec\x64\xe5\x02\x8c\x6d\xa0\x04\x88\x19\x05\x9f\x6d\x0d\xc7\x61\x6e\x35\x92\xfb\x76\x18\xf6\x38\xb1\x51\xa4\xb5\xb4\x93\x12\x1a\x42\xf4\xd0\xe4\x23\x78\x23\xe8\x8a\x41\x55\xa8\x49\xf9\x06\x61\x22\x5f\x34\x87\x6c\x27\x5b\x10\xb0\x31\xc0\xc3\x45\xe3\x95\x7e\x21\x72\x35\x9c\xab\x2c\x15\x4a\x8f\x87\x1b\x4a\xa9\x55\x75\xb3\xd5\x7b\xc3\x1d\xae\xd8\x53\x9b\x97\xa7\xdf\x5f\x5c\xda\x49\xb5\x2f\x49\xf4\x9e\xb4\xfa\x96
\x86\x9e\xba\x7c\xad\xf4\xf8\x5f\x2d\xf7\xbd\x2b\xb2\x19\x7c\xeb\x7e\xf6\x0c\x77\x14\xfe\x5f\xef\x0b\xbe\xb2\x8f\x6c\x95\x6a\x64\x15\x3f\x75\xb3\x1c\x75\x88\xd8\x35\x36\x3c\x30\x84\x21\x9d\xd0\x7e\x8d\xcb\x57\x61\xb0\x1c\x1a\x64\xa0\x62\xf2\x34\x94\xf3\xc0\x99\x27\xad\xe3\x32\xa0\x5e\x43\xcd\x0d\x63\xfa\x52\x88\xe1\xd6\xdb\x80\x8f\x6a\x8c\x6c\x6d\x7d\x1b\x1b\xe7\xc1\x12\x90\x44\xab\x7f\xa3\x72\x5f\xda\x16\xdb\x2c\xb5\xa6\x43\x5f\xc3\x42\x9f\xd4\x02\xc9\x7c\xb5\x72\x0e\x71\x96\xf2\x99\xfa\x92\x8a\xe3\xbd\x10\x9c\xc7\x59\xbc\xaa\x96\xc7\xad\xdb\x5c\x92\x47\x82\x18\xb7\x37\xb5\x4d\x51\x0a\xcd\xa2\x81\x5a\x4a\x59\x3a\x86\x78\x88\x5f\x8e\x9d\xaf\xd8\x89\xd0\xda\x8c\xd7\x3b\x47\xd8\x73\x6d\x3d\x16\x86\x5e\x9c\x97\x97\x76\xe1\xff\x4d\x2d\xf8\xcd\x9a\xed\xbd\x8e\x51\xe2\xd5\xe0\x87\x6e\x12\x96\xad\x2c\xef\x10\x7d\x21\xb5\xa6\x6c\xa0\x69\x74\x24\x03\x4e\xda\x46\xcd\xe5\xf2\xd6\x46\x6f\x8f\x35\x58\xec\x92\xee\x86\x96\x80\x8b\x6b\xad\x55\x0b\xae\x24\xc4\x07\x2e\xe5\x26\x5c\x8b\x75\x5a\xd8\x09\xbd\x33\xea\x6e\xf3\xf5\x56\x9c\x8b\xcd\x30\x49\x26\x76\xd0\xde\x7c\x78\x3f\x7b\x2e\xf1\xcf\x67\xb3\xf2\xbc\x08\xa3\xfe\xbc\x62\x79\xfd\xef\xde\x1f\xbc\x71\xff\x76\x6f\xad\x68\x57\x54\x24\xa5\x7d\x1e\xb6\xe5\x64\x91\xc3\x86\x08\x13\x52\xce\x0f\x71\x0c\x51\xf7\x33\xa9\x77\xa2\xb8\xc8\x45\x3b\xcc\x23\xa8\x86\x6a\x56\x8a\x3e\x50\x8d\x4c\xa4\x59\x59\xb7\x94\x24\xca\x98\xde\x93\xac\xac\x64\xeb\xd0\x26\x60\x71\x56\xfa\xc6\x43\x04\xc0\xe5\x32\x2c\x25\xaf\xb4\x2c\xca\x23\x05\xbf\x4f\xf1\x58\x16\xa3\xd0\xcd\xb3\x6e\x08\x37\xd9\xf2\xf9\x07\x41\x05\x8e\xca\x6f\xac\x21\x2e\x0b\xc4\xd3\xd6\x20\x09\xbd\xbc\x9b\x15\x70\x74\xba\x42\xae\xb6\xd2\xbc\x63\x07\x2d\x25\x9a\x49\x11\x46\x73\xd5\x16\x29\xc0\xe3\xd8\xe2\x7a\x1a\x55\xa4\x68\x03\xc0\xe2\xf8\xf5\x83\x5a\xa0\x9b\x2b\x57\x7b\x65\xae\xc3\x35\x48\xaa\x6a\x25\x0b\xbd\x21\x80\xd9\x0b\xa4\x68\x9b\xaa\x0d\x0b\x03\x2a\xd7\x44\xa7\x10\xc9\x86\xd0\x1a\xcb\x06\x9f\x6f\x0b\x34\xeb\x52\x00\x5b\xd1\xeb\x08\xb4\x79\x58\xe3\x52\xc3\x88\xac\x46\xed\xd3\x77\xca\xd6\xd1\x6d\x37\x38\x49\xae\x8b\xb0\x25\x50\x70\x82\xbc\xad\x6b\xb1\x49\x73\xee\xda\x47\xfe\x6b\xcd\x89\xf6\xea\xc2\xf5\x65\x10\xf3\xb3\xa2\x94\x6c\x29\x10\x15\xff\xa7\x6a\xc1\x3f\xf7\x9c\x47\xda\x4a\x06\x29\x30\x11\x8d\x10\xe2\xa9\x2c\xcf\x7d\x75\xb2\x89\x93\x91\x94\x27\xe4\x5d\x62\x00\xb2\x1c\x9d\xc7\x31\x25\xa4\xbc\xbf\x4a\x23\xb3\xac\x52\x12\x0e\x4a\x1f\x0d\xbe\x61\xa9\x92\x5f\x28\x8d\x49\xe3\x92\x37\xd2\x89\x53\xf9\xff\xe1\x96\x13\x34\xf4\x36\x8f\x2d\x30\xf9\xd4\xbf\x5b\xb9\xb6\x1d\xe9\x84\x5b\xda\xdc\x23\x09\x36\x01\x3d\xa2\xab\x58\x9c\x4a\x02\x10\x6f\x54\x42\xba\x4f\x31\xd9\x80\xff\x52\x55\xc9\x58\x27\xd6\x50\x95\xd8\xb1\x3d\x55\xf3\x47\x27\xd8\x7d\x57\x1c\x19\xfe\x4d\xcb\xe7\xce\x2e\x03\x88\xe0\x52\x9e\x75\x0b\xff\x3d\x27\x82\x87\x2a\xcf\x50\xd0\x96\x0f\xeb\xf8\xd4\xc2\xa2\x59\xa6\x04\xd1\xc8\xfd\x9f\xcc\xc3\x56\xc9\x67\xd1\x74\x34\x37\x31\xf1\xda\x22\x4b\xeb\x08\x51\x08\xa6\xa3\xb1\x4a\x52\x9b\x97\xb2\x0d\xf6\x2c\x93\xd9\x4f\xb6\x26\xf2\x32\x16\x85\xff\x6a\xf6\xf0\x76\x06\x8a\xab\x1b\xe0\xb9\xfc\x9e\x2c\x4b\x58\x97\xed\x0f\xd3\xfe\xb9\x96\xdf\xb6\x09\xee\xab\xd8\x2b\xd8\x83\x4f\x4d\xbb\x6c\x83\xb1\xae\x19\xdf\xda\x13\x4f\x65\x63\xb6\xfb\xfe\xef\x1d\xd8\x96\xf9\xfc\xb9\x03\xc1\x4f\x1c\x18\xfe\x8e\x03\x34\xa1\x52\x78\x61\xd0\x6f\x99\xf1\x56\x2f\x07\x56\x5f\x0b\xdb\x31\x68\x5c\xd1\x04\x4f\xb7\xae\xee\x19\x72\xff\xca\x16\xdc\x23\x8f\x0d\x4c\x99\xa0\x1c\x52\x41\x72\x9a\x51\xc0\x6a\x4a\x32\x98\x63\x6c\x6a\x8c\xaf\x20\x67\xbc\x32\xa7\x5d\x11\xe0\x70\x90\x89\x0f\x95\x8b\xc8\xca\x09\xc0\xfd\x44\x20\x2a\xf9\xb4\x19\x26\x61\xde\x60\x5c\xf
e\x37\x0f\x95\x50\xb3\x86\x62\x94\x31\x58\xb6\x73\xd1\x4d\x20\x95\xb7\xe1\x95\xb4\x51\x4e\x0f\x04\x2b\xb2\x1d\x2f\x32\xc4\x47\x83\x99\xca\x5a\xd8\xb7\x51\x54\xbe\x8e\x53\xf3\xe3\xbc\xd1\x68\x8c\x35\xd8\xf4\x18\x5f\x29\x44\xb9\x32\x87\xb5\x2c\x0b\xa2\xf6\xd8\x21\x93\x33\x42\x67\x80\xd1\xd8\x6e\x88\xab\xaf\x4d\x99\x96\x6c\xc9\x4f\x85\xcd\x35\xac\x6f\xc3\x81\xbe\x08\x75\xe3\x1a\x45\x14\x2b\x70\x56\xb9\x13\x76\x71\x91\xd5\x0c\x03\x5f\x4c\xc3\xc4\xb5\x1e\xfc\xc8\x6c\x0d\xf5\x55\x83\xcd\x8c\xf1\x95\x4e\xd8\x55\x63\x43\xb9\x8f\x16\x18\x86\xb8\x0e\x50\x87\x64\xa2\x43\x3f\xa2\x38\xe7\x02\xdd\x98\x0a\x95\xf1\x26\xcd\xd2\x7a\x9c\x46\x62\x8b\xaf\x8b\x3e\x56\xa5\xdc\x5b\x28\xd2\x11\x58\xca\x4e\x83\x9f\xcb\x29\xf5\x4b\x57\x47\x25\x02\x3e\x0b\x04\x19\xe1\x5d\xde\x09\xbb\xbc\x0c\xdb\xb4\x66\xc3\x76\x20\x29\x0d\x91\xdd\x51\x5d\x51\x7a\x0d\x82\x5e\x65\xb6\x3d\x01\xc7\x8b\xbc\x89\x9c\x9d\xc2\x61\x96\x9f\xcf\x6e\xc0\x80\x57\xc8\xfa\xe8\xdf\xe8\x5c\x82\xef\xa9\xb1\x83\x64\x23\xf2\xdf\x5e\xdb\x3e\x93\xea\x15\x9c\xf7\xe0\xb3\x5e\x64\xdc\x3d\xc2\x8a\x29\x4a\x76\xb6\x97\xaa\x8c\x5c\xb4\x15\x50\xa8\xd0\xc6\x12\x85\x8b\xc7\xe5\x95\xac\xc5\x3d\x13\x37\xee\x82\x78\x58\x1f\xb9\x92\xa1\xf5\x42\x5f\xb0\x80\x9f\xa1\x16\xe9\x62\xba\x9e\x66\x9b\xe9\x69\x14\x69\xf0\x92\xc5\x3c\x77\x2c\x62\xfb\x44\xda\xeb\xf8\x0f\xdb\x64\xf8\x1c\x3b\x73\x4d\x67\x8a\xbd\x90\x1d\x24\x75\x86\x7f\xab\x7f\x20\xca\x7a\xab\x09\xc6\x1f\xa7\x80\x49\xc5\x5e\xc4\x0e\x75\xe2\x14\xd7\x6f\x68\x1e\xe5\xaf\x67\x87\x94\x2c\xed\x3f\x6f\x07\x16\xfd\x31\x76\x4b\x37\x2c\xa5\x54\xb5\xf4\xd5\xa0\xf5\x9f\x1f\x61\x2f\x74\x0e\xad\xe8\xac\x8a\x28\x12\x91\x06\x12\xf0\x7f\x6c\x24\xf8\xc1\x91\x9d\xcb\x58\xf0\x12\xe4\xfa\xa7\xad\x9b\x61\xca\x55\x71\x9b\xb9\xa4\xe4\xe5\x84\xea\x4e\x31\xc9\x17\xfa\x5d\x71\x46\x6e\x2c\x49\xeb\x0d\xde\x3b\x9e\x52\x38\x6b\x8a\x6a\x29\xb8\xe3\xc5\x2a\x8c\xa7\xba\x6e\x8c\xdc\x08\xf0\x02\xd4\x01\xf5\xd9\x7a\x9c\x46\xe3\xdc\x00\x31\xe3\xe5\xa2\x72\x54\x48\x42\x04\x8a\x5d\xbc\x35\x6c\xf7\xaa\x86\x4b\xe1\xd4\x76\xad\xf7\x70\xbf\xd6\x49\x06\x77\xfb\xb4\x2a\x78\x99\xf7\x28\x4e\x42\x4b\x8d\xa8\x01\x84\xb7\xc4\xae\x67\x1a\x97\x14\x71\xa4\x8c\x9c\x3a\xda\x83\xa8\x8b\x6a\xb7\xc7\x75\x9f\xc7\x5c\x46\xfa\xad\x23\x95\x80\xcd\x38\x2d\xeb\x59\x5e\xc7\x3d\xe7\xff\x81\x94\xc7\xb7\x7f\xef\xc4\x57\x84\x65\x05\xb8\x89\x54\x77\x61\xaa\x80\x92\x48\x45\x82\xe0\x0d\xb6\xe7\x1f\x0e\x3a\x54\x48\x9b\xca\x2f\x4b\xcd\x0c\xd8\x02\x81\x71\x28\x78\x73\x2d\x4e\x22\xb4\x40\xf6\xcf\xb5\x80\x5c\x8b\xbc\x13\x97\x25\x4a\xdf\x86\x7b\x1c\x0a\x6f\x48\x07\x88\x98\x00\xa8\x02\xee\x97\x3a\xd4\x3f\xa7\x3a\x6a\x3f\xa2\xd3\x37\x3d\x26\xbb\xa3\x8b\x9b\x4f\x87\x7e\x3c\xf0\x39\x3c\x69\x34\x1a\x08\x10\x95\xe5\x90\x74\xdb\x59\x09\xce\x6e\x86\xb4\x58\x92\x07\x3f\x43\x04\xc5\xa5\xf7\xa9\xf1\x09\x3b\x99\x35\xaf\x25\x1f\x7b\xca\x54\xdb\xd3\x4e\xc0\xec\x16\x56\x8b\x23\xdf\xa1\x41\x9c\x1d\xee\x84\x5b\x0f\x40\x52\xf0\xe1\xd4\xec\x76\x76\x53\x27\xb6\x29\xd4\xd0\x52\xef\xdb\xc7\x9e\x3d\x94\x6d\xf0\xbf\x65\x5f\xf0\x77\x23\xc3\x39\x0a\x87\x6d\xa4\x23\x70\x0d\xf8\x46\x27\xba\x87\xaa\x75\x58\xc8\xe9\x6d\x58\xc8\x76\x1e\xa6\xbd\x24\xcc\x5d\x16\x05\x78\x12\x80\xae\x69\x82\x9d\x06\x7e\x8f\xae\x8b\x7e\x9d\xb4\x9a\x61\x9c\x17\xe8\xdb\x06\xa9\xfd\x89\x10\x20\x52\x24\xaa\xe4\x44\x5a\x62\x8d\x60\x67\x80\x52\x68\x5b\x1d\x2d\xd7\x44\x1f\x24\x65\x78\x08\x29\xf9\xd2\xb8\xdb\x4b\x94\xaa\xb5\x10\xdd\x10\x14\x0a\x61\xb3\xcc\xf2\xc2\xa0\x87\x61\x7d\x36\x0c\x02\x09\xd6\xbd\x5c\x23\xbd\xca\x9e\x22\x4f\x79\x2d\x99\x63\x18\xff\x1e\x79\x63\x7b\xab\x3d\x87\x1d\xbc\x1d\xc5\x3b\x77\x0b\xbe\x82\x1d\x24\x9f\x26\xff\x1a\xdf\xe4\xce\x09\x
44\xf8\xc2\xca\x09\xfc\xc7\x8c\x29\x0e\xfa\x5c\x6b\xf8\x75\xff\x1a\x36\x92\x66\xa5\xff\xd4\xc9\x7a\xb7\xb2\x7d\xb7\xe7\xa2\xe5\x4e\x8a\x64\x42\xa8\xc7\x43\x7b\xf5\x5c\x76\x28\xed\x25\xe0\xb0\x5e\x19\x52\x97\xed\xcf\x52\xf1\x74\x0a\xab\x1f\xda\xb7\x23\x46\xc0\xdb\xf6\x05\x7f\x36\xb2\xfd\xfb\x61\xc2\xa3\x16\x63\xb6\x13\x2a\xa4\x1c\x01\xc7\xc3\x75\xa7\x84\xfa\x30\x0c\x01\x15\x23\x28\x28\xa8\xfc\x0f\x61\x57\x05\xa8\x94\x61\x9b\x9f\xb9\xb8\x7c\x61\x1b\x81\x13\xee\x3e\x6d\xa7\x0b\x86\xf7\x22\xb0\x88\x90\xf2\x96\xee\x84\xdd\xa0\xc1\xe7\x93\x22\x1b\xb7\x6d\x57\xe6\x3a\xd7\x7a\x52\x0d\x42\x33\x20\x8e\x01\xe9\x8a\x54\x1c\x10\xe9\x9f\xe1\x92\x44\x99\x51\xf2\xd2\xa3\x69\xc6\x53\x51\x28\xa6\x5c\xeb\xd3\xc7\x60\x80\x60\x69\x21\x72\x6d\xfb\x23\x80\x95\x12\xef\xf0\x55\x61\xec\x3d\x59\xae\x64\x2b\x47\x2a\x18\x47\x0f\x24\x32\x6a\x66\x85\x53\x2b\x99\x9d\x01\x84\x52\x91\x1b\xa4\x2b\x92\x2f\xde\xc9\xad\xb0\xcb\xf6\xc3\xbd\xfb\xf4\xed\xcf\x35\x95\x38\xe2\x35\xec\x91\xa7\x4a\x4f\x34\x2f\x87\x01\xa2\x41\xb8\xb5\x83\x68\x20\xaf\xdb\x38\xdd\xe9\xba\xbd\x95\x7c\xda\x9c\x39\x93\x77\x70\xb8\xb5\xdb\x1d\xfc\x1c\x76\x90\xd8\x21\xf7\xeb\x67\xb1\xfd\x65\x5c\x26\x95\x3a\x5f\xc7\x9e\x69\x94\x69\xd8\xe5\xa7\x5a\x8f\xb6\xc9\x6e\xb0\x6a\x7f\x1a\x45\x9d\xb7\x78\xec\x46\x75\x13\x37\xe5\xfc\xf5\x9e\xf0\xda\x4c\xb0\xe6\x53\x35\xdc\x65\x98\x63\xd8\x14\x76\x3f\x9e\x27\x27\x40\x32\x35\xe0\x9a\xe2\xae\xc7\xc7\x6f\x61\x07\x30\x79\x93\xff\xa1\x5b\x82\xf7\xdc\x82\x7f\x93\x1c\x75\xae\x2b\x52\xc0\x50\x87\xec\xd0\xf2\xb9\x62\xba\x49\x5c\xa6\xc7\xc4\x76\xa0\xf1\x8a\x10\xf0\x34\xaf\x6c\x17\xd1\x42\xce\x1c\x63\x75\xbe\x5a\x64\x29\x76\x31\x8e\xe6\xa4\x88\x5f\x64\x9a\x21\x5b\x3c\x39\xce\xe3\x86\x68\xf0\x90\x4f\xcf\x9a\xec\xc6\x05\x5f\x13\x5b\xca\x2d\xa6\xce\x7b\x79\x3c\x27\x3b\x4a\xc9\xbd\xbb\x61\x4e\x08\x39\xf7\x66\x49\x98\xb6\x79\x2a\xca\x89\x5e\x9e\x34\x96\xe4\x0b\xca\x43\x2c\xcb\xd6\xb9\xe8\x84\x71\x32\x87\xb2\x42\x18\x27\xda\x95\x6a\xbb\x5a\x64\x21\xac\x86\x2c\x56\xbc\xae\xc3\x10\xe6\xb4\x5f\x4e\x25\x53\x0e\xc2\x34\x71\xf4\x59\x16\x25\x1a\x07\x52\xf0\x46\x19\x4c\x70\x3e\x35\x39\x33\x3b\xce\x0b\x4a\x14\x3a\xd3\x98\xe2\x0f\x9d\x3f\xbd\x20\x9f\x3e\xd2\xe0\x75\x1e\x77\x37\x66\xa1\xbf\x8b\x4b\x1b\xb3\xe0\x00\x34\xbc\xa7\xd8\xc9\xc5\x25\xfc\xe4\x98\xfa\xe4\xd8\xde\x3e\x69\xc6\x51\x2e\x87\xb3\xb0\x78\xf2\xfc\xce\xc5\xa1\x44\x9d\x77\xc2\xa6\x2c\x7f\x66\x7e\x61\xb7\x29\xc4\xcf\x64\xc1\x3a\xef\xf5\x60\xc5\x53\x7e\xf1\xe2\xe2\x49\x6d\x6b\xca\x36\x0b\xde\xeb\x76\x29\xda\xc3\x9a\x1e\x0c\xfc\x6d\x8b\x2d\x3e\x7a\x22\x1e\x7b\xf5\x43\x93\xf5\xe3\x61\xbd\xf5\xc8\x1b\xee\x7a\xac\x7e\x42\xff\x98\xdd\xdb\x8f\xa9\xe9\xc7\x6e\xa7\x1e\xcc\xe8\x2e\xcc\x5c\xc3\x3e\xcc\xe8\x5f\x33\x7b\xe9\xc4\xac\xee\xc4\xec\x35\xec\xc4\xac\xdb\x89\xbb\x8e\x87\xab\x8f\x0c\xef\x97\xd5\x95\xa3\xba\x2b\x47\xaf\x61\x57\x8e\x5e\x6e\x57\xe2\x62\x35\xc5\x6d\xbb\x7c\xcf\xd9\xa9\x49\xc9\x28\xc0\x5f\x33\x1c\xb9\x50\x75\xfc\x41\x58\x08\x26\x67\xa6\xa7\xee\x3c\x3a\x35\x39\x3b\x83\xd0\x71\xc7\xef\xbc\xab\xae\x9f\x4d\x05\x54\xdf\xd4\xa4\x5d\xe3\x6e\xf5\xd0\x37\x33\xe6\x9b\xe1\x6d\x0f\xb6\xd5\xcc\x45\x14\x97\xcd\x30\x07\x82\x86\xbf\x38\xa4\xf1\xa6\xef\x87\xce\xe0\xab\x47\x4f\xcc\xc1\x8a\xc1\x1c\x8c\x9e\x98\xc3\xbf\x67\x1e\x1b\x3b\xf1\xe8\xd1\x87\xa6\xea\x47\x1f\xa1\x97\xb3\x8f\x3d\x7a\x6c\xf4\xc4\xdc\xe4\xd4\xd4\xa3\x30\xad\xf8\x7c\x4c\x7f\xfa\xe8\xcc\x43\xb3\x77\xaa\xc2\x33\x8f\x3d\x3a\x23\x0b\x3f\x34\x59\x3f\xfa\xc8\xa3\x0f\x1d\xbb\xcb\x2d\x3d\xf5\xd8\xa3\xa3\x27\xe6\xa6\xa7\x66\xa6\x1e\x9d\xba\x6b\x72\xf2\xd1\x99\xa3\x0f\x47\xb2\xc9\x87\x23\xf9\x6e\xec\x76\x85\xda\xd7\x07\x1c\xb3\x28\x6e\xcb\x91\
x18\x3a\xdc\x01\x08\xbe\x38\xe5\x75\x5e\x14\x72\xbd\xf8\xc5\xc6\x72\x83\x17\x19\xc4\x82\x16\xa2\xd9\xcb\xe3\xb2\xaf\x86\x6d\xae\x03\x6b\xd4\xd0\xde\x43\x75\xfe\xc8\x89\x87\xa3\x37\x4c\xeb\xbf\x66\x61\x17\xac\x89\xad\x66\x96\x64\x39\xac\xc1\x9a\xd8\x0a\x23\xd1\x8c\x3b\xe0\x42\x9e\x64\x39\x6f\x66\x11\x29\xef\x83\xdb\x4e\xc3\x7f\x73\xc3\x1b\xb9\xed\xc4\x28\x6d\xb0\xf9\xfa\x69\x39\xa7\x8f\xda\x3f\x8f\xc9\x81\xd6\x79\xde\x5e\x35\x8d\x9d\xbf\xf7\x9e\x81\x46\xf2\xf6\x2a\xb5\x96\xb7\x57\x47\xa7\x8f\x1e\x1d\xa7\xff\x1d\x97\xab\xbe\xda\x2f\xc5\x1c\xc4\x34\x1c\x9b\xe5\x22\x95\x9f\x45\x7c\x35\x4e\xc3\xbc\xcf\x41\xc1\x57\xe7\xdd\xb0\x28\x36\x33\xd8\x16\x69\x1f\xf4\x6a\x10\x73\xa4\x2e\x32\x79\x29\xca\x39\x94\xff\xba\x7b\x6c\x7a\x72\xf2\x58\x7d\x72\xaa\x3e\x39\x1d\x54\x2e\x8d\x56\x2f\x49\xea\xf0\x41\x9c\xca\x1b\x64\x66\x66\xe6\xb8\xac\x8a\xbc\x7a\xa1\x3a\x85\xbe\xe0\x56\x39\xcd\xd3\x22\x18\x4a\xa7\x41\x45\x0a\x84\xfa\xa4\xfa\x14\xe6\xa1\xd3\x0d\x4b\xd0\x5c\xc0\xa6\x58\x96\x12\x83\xa9\x9c\x78\x03\x1c\x85\xac\x41\x8f\x04\x42\xb1\x2b\xc3\x99\x9a\xad\x4f\x4d\xd7\xa7\x8e\x5e\x98\x3a\x3e\x37\x33\x39\x37\x3d\xd9\x98\x9c\x9c\x7c\x55\x75\x70\xf2\xf3\x3a\x7c\x6e\x06\xe7\xc8\xf5\xef\x1a\x61\xb7\xef\x45\x35\xea\xff\x59\x2d\xf8\x62\x6d\x4f\x4a\xd4\xa2\xcc\xba\x28\xc0\x99\xdc\x95\x3c\x12\xcd\x0c\x40\x1b\x8a\x52\x74\x51\x93\xd2\xcd\x7b\xa9\x06\xed\x55\x4e\x0d\x2a\xf0\xa9\x12\xca\xa7\x79\x1e\x58\x05\x34\x33\xa3\x32\x25\x84\x6c\xf3\xda\x8b\x2a\x97\x27\xa6\x80\xe4\xf3\x84\x6f\xb3\x19\x97\xcd\x35\x51\x40\x64\x36\xc6\x07\xe5\x1d\x80\x7c\xc3\xd6\x8d\xe2\x25\x6e\x81\x54\x26\x22\x5b\x54\x22\xb4\x89\xaa\xd5\xda\x4d\xc1\xa4\x73\xf1\xd8\x1d\x03\xbd\x90\x96\xd7\xca\xbc\x07\x78\x2b\xda\x50\xd3\xe0\xa7\x01\xb2\x15\x3d\x61\x56\xe3\x28\x12\xa9\xab\x0c\xfe\xfe\x5b\xd9\x98\x9d\x49\xab\xdb\x05\x76\xf5\x7c\x96\x24\x71\xda\xbe\x08\x4a\x9a\x93\xa1\xe8\x64\xe9\xb2\x28\xfd\x3f\xf4\x83\x7b\x00\x2e\xb5\xcc\x94\x67\x0b\x29\x98\x0a\xf4\x0b\x24\xcf\x0d\x79\x5e\x22\xf8\x0a\xe4\xde\x1c\x6b\x23\x9d\x8f\x6b\xb4\xff\x3e\x9f\xfd\x3a\x03\xa1\x68\xb9\x97\xb7\x85\xff\x0b\x6c\xaf\x09\x60\x7b\x65\x9c\x34\xe2\xb4\x2c\xca\xbc\xb1\x98\x96\x8a\xa5\x0e\xbe\x95\xa1\xdd\x0e\xfd\x4d\x53\x9d\x26\x04\xa3\xfb\x14\xbe\xa9\xce\x49\x6f\xc2\xfa\xf5\x38\x31\xf9\x3c\x78\xe9\x4a\x82\x46\x40\x7e\x0a\x2f\xd2\x2d\x16\x61\xa8\x1e\xfd\xa3\x8b\xa9\x24\x02\xe4\x91\x18\xa6\x3c\x5c\x2d\xb2\x44\x4a\xf3\xd4\xa3\x51\xb1\x35\xc7\x8f\x8e\xa1\x82\xdc\x4d\xf5\xa9\xa6\x13\xf3\x61\xca\x72\x53\x93\x5f\x37\x66\x2d\xf9\xff\xcb\xde\xbb\xc0\x49\x92\x55\x75\xc2\xbf\xc8\xaa\xae\xae\xbe\xcd\x43\x42\x17\x56\x5d\x97\x6b\x0c\xd8\x5d\x43\x65\x56\x3f\x66\x7a\x66\x6a\x86\x81\xea\xaa\x9e\xe9\x62\xba\x7b\xca\xaa\x9a\x19\x61\x76\xa4\x23\x33\x6f\x66\xc5\x54\x64\x44\x4e\x44\x64\x55\x27\x8f\x95\x97\x08\x8a\xf2\x90\x87\x16\x1f\x30\x80\x22\xa2\x08\x0a\x82\xac\x0a\x32\x80\xa8\x88\x08\xac\x0f\x44\x45\x10\xd6\xe7\xee\xba\x8a\x7e\xee\xe0\xee\xef\xfb\xdd\x73\xce\x7d\x45\x66\xbd\x7b\x18\xf4\x6b\x14\xba\x32\x1e\x37\xee\xf3\xdc\x73\xcf\xe3\xff\xa7\x70\xfc\x63\x72\x3a\x9d\x0f\x2f\xdd\x91\x58\xb0\x2a\x39\x3f\x56\xe3\x33\xa5\x8f\xc1\x7b\x71\x83\xac\x86\xb8\x32\xcc\x27\xeb\x7d\x9e\xa5\xbd\xa4\x89\x63\x84\x59\x71\x64\x5b\x92\xd5\x39\x6e\x18\x3f\xb4\xa1\xff\x58\x8d\x9f\x41\x93\xdc\xb4\x82\x67\x45\xbb\x3e\xd9\x39\x4e\x1e\x7b\xe2\xa4\x46\x63\x38\x79\xec\x89\xca\x4c\x51\xa4\x45\x18\x0f\x0c\x0a\x42\x88\x9a\x74\x3f\xe2\xb3\x80\xa9\x85\xd3\x48\x76\xf5\x51\x38\x64\x10\xaa\x34\xf5\x11\xb2\xc0\xc8\x93\x55\xb3\x17\x8b\xe6\x84\x19\x31\xf4\x0d\x87\x3c\x11\xeb\xf0\xb6\xca\xe5\x25\xda\x1e\xf0\xa9\x00\xf0\x37\x04\x8a\x75\xc2\x6c\x15\x8d\x42\xc8
\x2a\x45\x47\x23\x1c\x4c\x8c\x9e\xc9\x65\x3f\xc5\x61\x2f\x41\x84\x48\x2a\x17\x82\x35\xa9\x81\xd0\x18\x02\x29\xb5\x26\x0c\x7d\xc2\x8c\xd1\x51\x08\xa7\xc3\x23\x47\xc1\x63\x11\xe6\x85\xec\x6f\xb8\xba\xa4\xa9\x57\xa8\x82\xee\x74\x4b\xc9\xf3\x0d\xe9\x49\xa6\xde\xba\xd2\x14\x9c\xad\x5a\x56\x17\x8d\xb4\x03\x09\x1a\xe6\xeb\x0a\x90\x36\x13\xa1\x3c\xc4\x51\x65\x8a\x2c\x4c\xf2\x88\x88\xf0\x08\xea\x19\xd2\x52\xc4\x1a\xc4\x47\x42\xec\x2a\xe4\x34\x86\x52\xb2\x4c\x0c\x69\xa0\x26\xbd\x68\xc6\x7d\xdd\xdd\x4e\x85\x15\xf2\xbb\x0a\x4d\x43\x5a\xa7\xac\x2d\x30\x34\x3d\xaf\xf1\x19\xcd\x4a\x05\x97\xa3\x0e\x65\x22\x00\x3f\x47\x9e\x47\xf5\x28\x06\xbe\x0c\x4d\xde\x66\x82\xc9\x30\x52\x4d\xab\x6b\x38\x71\x20\x5d\x0c\xa3\x2b\x2c\xd0\x2a\x39\x49\xd0\x62\xaa\xdc\x6b\x88\x40\x26\x8f\x44\x88\x15\xd6\x0a\xa3\x98\x58\xfa\xf2\xd4\xe0\xde\x47\x09\x1c\xea\xd7\xac\xe2\x35\xca\x46\x11\xae\xc2\x03\x90\x28\x0c\xd8\xde\xaa\x96\x7d\x0b\x2c\x0d\xba\x13\x7a\x59\x49\x8e\x28\xcf\x7a\x5d\xc4\x02\xde\x15\xef\x87\x9e\x18\x28\xa5\x41\x7c\xba\xae\x74\xf6\x81\x83\xec\x51\x1d\x47\x3e\xf8\x6f\x3b\xb8\x5f\xf9\xfa\xe5\xb1\xe1\xf2\xd5\x99\xa8\xb9\x91\x9f\x75\xe1\x4c\xbf\xa6\xca\x6d\x16\x97\x43\x54\x96\xe5\x49\xa9\x12\x0a\x6b\xc6\x8e\x7e\xa3\x35\x6d\xc9\xd5\xbd\x4b\xcb\x66\xba\x9e\x0c\xc8\x4b\xc8\x78\x55\xb2\xba\x24\xaa\x71\x94\xa4\x00\x1d\x94\xa9\xc7\xff\x95\xc8\x54\xe8\x59\xa9\x80\x75\xc9\x5c\xad\x45\x01\xa7\x60\x60\x5c\x6a\xa0\xa0\x0e\x17\xa3\xf0\x36\x6c\x98\xe5\xe6\xa4\xb9\x18\x18\x44\xcc\x0b\x4d\x78\x5d\xce\x1c\x79\xc6\x05\xe1\x5b\x7a\x0a\xd5\x23\x59\xbd\x38\x6c\x08\x92\xc2\xb2\xb5\x5a\x50\x83\x87\x4e\xcd\x43\x48\xf4\x81\x52\x01\x97\x57\x80\x20\x2f\x52\xf2\xba\xb9\x65\x4f\xf2\x62\xa5\x97\x97\xf2\xf2\xb5\xe4\xbe\x0e\xab\x9e\x66\x51\x3b\x4a\xb6\x9c\x8c\xf6\xf7\x39\xc5\xec\x22\x3d\xd9\xe0\xa2\x60\x5f\x3e\x30\x94\x73\xda\x61\x00\xf3\x3f\x74\x20\x78\xb2\x73\xc5\xe6\x2c\xd4\xb4\x58\x1a\xde\x48\x3e\xca\x6f\x15\x85\xe6\x0d\xae\x6d\x78\xa3\xdd\x34\x2b\x1c\x95\xec\xaf\x47\xd9\x2b\x3d\x76\x78\xa5\x28\xba\x67\x45\xd8\x14\x59\xee\x3f\xdf\x0b\x66\x31\x7c\x87\xaf\xe0\x25\x64\xed\x29\x94\x56\x4a\x05\xd6\xf0\x13\x64\x8c\xc8\x44\x17\x37\x01\x7a\xc7\x31\xfa\x4f\xb1\xea\x0e\xa8\x0c\x65\x71\x58\x09\x76\x1d\x1b\xed\x86\xc5\x8a\x3f\x15\x04\x40\x05\x82\x82\x16\x30\x82\xb1\x0e\xf0\x69\xb2\xc4\xda\xc7\x8f\x8f\x7b\x0c\x1a\xe9\x7f\xd0\xdb\xaf\xec\x7b\x2e\x64\x15\xa6\x99\x35\xc8\xb8\x35\x21\x4c\xa2\x5b\x1f\x4d\xfc\x56\xe3\x44\x56\xa7\x1c\x39\xaa\xd3\x20\x12\xf9\xb8\x7c\xf3\xd4\xb5\xd7\x9e\xbc\xb6\xc6\xa1\x78\xed\xee\x49\xf8\xfc\xcc\x85\x99\x67\x2e\xdd\x39\xfb\xcc\x0b\x33\xe7\xcf\xd4\xd8\x22\x1b\x03\xf5\x5f\xf8\x67\x83\x1b\xc1\x9c\x2c\x28\x20\x19\xd6\xa1\xca\xfa\xc3\xd4\xf0\x82\x28\x23\xdc\x64\x76\xd9\x49\x35\x37\xc6\x68\x54\x3e\xe5\xf7\x82\x95\xb3\xca\xe6\x49\xfa\x7d\x82\xee\xf6\x49\xe5\xf5\xd1\x29\xc6\x52\x72\xcc\x2f\xd4\xf8\xd3\xd3\x1e\x70\x99\x85\xf5\xb8\xcf\xd7\xc3\xa4\x50\xb3\x22\x90\x45\x05\xb2\x9d\xd6\x2c\xe2\x51\x92\x17\x22\x74\x7d\xbe\x5f\xf3\x86\x72\x35\x5c\x48\x9b\xca\x7a\xeb\xff\xa1\x17\x5c\x6f\xfd\x36\xe0\x20\x51\x82\xc7\x59\x65\xbf\x85\xd5\x9e\x36\xc5\x91\x5c\xa7\x5c\x6e\x78\xe0\x2c\xd9\xf0\x0e\xd2\x15\x67\xa6\x7f\x1f\x9b\x60\xea\x86\xff\x1f\x83\xc7\x2c\x53\x01\x16\x75\xb1\xa9\xe9\x22\xf9\x5d\x9e\x16\x3c\xf9\x82\xf5\x0c\x38\xe3\x26\x55\xec\x89\xa2\x43\x9e\xd4\xd9\x30\x08\x8c\xa0\xb0\x30\xe6\xdd\xce\x7f\xdb\xe3\xd9\xe3\xad\xd6\x8b\x35\xa0\x04\x57\x41\xed\x67\xe4\x4f\xff\x05\x8f\x0f\x3e\x38\x02\x7f\x62\x74\x5d\x86\x70\xaf\x98\x91\x0f\x6f\xf0\x3c\xed\x88\x75\x4c\xa6\x72\xc0\x4b\x20\x06\x0a\x43\xf5\x81\xc8\x80\x10\x6e\xe5\xe3\x20\xf3\x55\x46\xaf\x3e\x5b\x2a\x4a\x9
1\x35\x3b\x9e\x52\xa5\x0d\x66\xa2\x40\x42\x6c\x82\x6d\x93\x62\x39\x8b\xda\x6d\x39\xb4\x18\x23\x95\xe7\x61\x5b\x20\x66\x93\x00\x78\x20\xf2\xd3\xc2\x46\x80\xa5\x2a\x3d\xcd\xd0\xd0\xdb\xb0\x50\x05\x92\xd2\x41\x30\x8f\xd5\x3e\x22\x34\xc0\x6d\x65\x11\xf5\xd6\x4c\xb4\x62\x9a\xec\x21\x2a\x94\xb9\xac\x9e\x0d\xb1\x43\xb5\x9b\x54\xb9\xc3\x72\xe2\x44\x49\x0f\xb2\xd9\xe1\x69\x4c\x40\xc5\x5e\x57\x2e\xe5\xb0\xa0\x2f\xa8\x1a\xe7\xd6\x2e\x6a\x05\x44\xe8\xc9\xb7\x26\x26\x79\x5d\xe4\x45\x55\xb4\x5a\x90\x18\x9b\xf7\xba\x0a\x59\x2f\x06\x9b\x52\x6d\xc3\x3b\x04\x1f\x59\x8e\x88\xa8\x44\x4d\xc0\x5f\xff\x36\xf6\x69\x8f\x1d\xcc\x04\xa8\x1a\xfe\xc7\x76\xc2\x89\x88\xb1\x6f\x8b\x0a\xfb\x29\x78\x85\x47\xaf\xab\x58\x7f\x4d\xdd\x61\xd0\x2a\x55\xc0\x26\x45\x21\x69\xda\x22\xa2\xbe\xae\xf1\x33\xb5\x76\x0d\x95\x0e\x4c\xc6\x81\x11\xa0\x78\x1b\x3d\xc6\x56\xe2\x78\x86\x47\x0e\x4a\x22\x57\x15\x50\xd1\xae\x6f\xf3\xd8\x18\x16\xed\xbf\xde\x0b\x7e\xd8\x53\xf4\x8c\x39\x5f\x87\xad\x13\x7f\xae\x87\x39\x28\xcd\xc9\x14\xd1\xbd\x9a\x4f\x93\x9c\x29\xd7\x45\xc5\xf4\x91\xb0\x86\xa4\x20\xb9\x99\x0e\xf0\x4d\xe0\xcc\x55\x60\x80\x27\xae\xb7\xac\xa1\xce\xfa\xfb\xb0\xc7\xfe\xbd\x81\x74\xbb\x25\xca\x72\x18\xa3\xbc\x08\x3b\x5d\xff\x27\x3c\x76\xdd\x1e\xb8\xea\x65\x01\x41\xb4\x59\xa9\x9a\xdb\xd1\x00\xc9\x91\x92\x9f\x93\x76\x51\x0f\x1b\xab\xeb\x61\xd6\xd4\xa6\x3c\x3c\xf2\xc0\xf4\xa4\x39\x40\x4b\x09\xf0\xca\xd8\x07\x2a\x6c\x14\x28\xe7\xdf\x5d\x09\xde\x52\xb9\x2d\xc2\x54\x4c\x4d\x57\x85\xea\xa5\xf6\x76\x29\x35\x63\xf1\xcc\xd2\xb2\xcd\xe7\x65\x02\xa1\x4c\x46\x28\x66\x56\x82\x05\x39\x24\x26\x21\xca\xaf\x52\xc9\xd0\x3a\xe1\xbc\xb0\x60\xa2\x7b\x75\x79\x96\xd3\x3a\x06\xd0\xd3\xcc\x6a\x4d\x58\x07\xc5\xcf\x27\x7c\x36\xec\x88\x78\x36\xcc\x87\x23\xc4\x3c\x44\xf4\xfd\xf6\xf8\x3f\x87\x8d\xe1\x31\xd8\xcf\x02\x41\x07\x62\x98\xa5\x78\x86\x2c\x4f\x54\x35\xff\x56\x7a\x9d\x30\xd9\xff\xec\xfb\x80\xc7\xbe\x19\xa5\x79\x94\xb4\x67\x75\xa6\x98\xff\x66\x2f\xf8\x51\x6f\xc8\x8d\x61\x24\xef\x26\xc1\x8c\xf0\xe7\x28\x30\x12\xc6\x09\xa6\x09\x51\xf7\x5c\x74\x13\x0c\xe5\xaf\x58\x14\x17\xcb\x95\xa7\x51\xc2\xa8\x4c\x29\x2f\xa4\xfa\x8c\x72\xd0\xa9\xfb\x9b\x3c\x36\x96\x8b\x2c\x12\xb9\xff\x6a\x8f\x9d\xdc\x54\x6a\x0d\xdb\xd2\x96\xe0\xbd\xe0\x1e\x7c\x1f\xac\x0a\x10\x5a\x5b\x57\xac\x70\x38\xbd\xe9\x36\xb4\x04\xf7\x01\x2b\x59\x59\xd6\x2c\x8a\xe5\xa9\x2a\x2a\x8e\x98\xf0\xb4\x22\x4d\xf0\xed\x1a\xfb\x90\xc7\x1e\x67\x56\xd9\xb9\xd0\x5e\xda\x6f\xdd\xc7\xd2\x5e\xd9\xa4\xd0\xcb\xbf\xb2\xdf\xe6\xb1\x6f\x32\xa5\x11\x93\xd9\x2b\xbd\x1d\x50\xb9\x60\x37\xc3\x0b\x41\x58\x2e\xe2\xf2\x57\xf4\x75\x1e\x33\x1b\x9c\xff\x52\x8f\xdd\xb4\x87\xce\x3d\x1f\x35\xb2\x14\x7a\x78\x5e\x97\xa5\xaa\x6a\x13\xd5\xaa\x79\x0d\xcb\x12\xc1\x5a\xd3\x3a\x66\x6f\xa8\xf5\xa9\x01\x2d\xd8\xaf\x7a\x6c\x54\x2a\x3d\xfe\x7b\xbd\xe0\x6d\x9e\xfc\x0b\x85\xa3\xbb\x82\xb9\x15\x73\xa1\x41\x51\xe0\x4c\xac\xb3\xa3\x89\x77\x4d\xaa\x05\x10\xaa\x1b\xc6\x3c\x86\x20\x1d\xf5\xbc\x2a\xfb\xf8\xea\x69\x74\x0a\xc4\x51\x3d\x0b\x61\x06\x1b\xfd\xa1\x9b\x89\x6e\x98\x29\x82\x98\xa4\xa9\xe3\x4b\xc9\xee\x7a\xea\x9a\xd5\xd3\xce\x32\x7b\xc0\x63\x8f\xd1\x92\x60\x3e\xc9\x8b\x30\x69\x08\xff\x67\xbd\xe0\x8d\xde\xc0\x65\xcd\x9e\x32\x37\x44\x38\x44\xf4\x90\x2d\x0d\x62\x51\x54\x2f\xf5\x9f\xd5\xda\x8d\x10\xc0\x54\xff\x62\x57\x72\xee\xc5\x1e\xa9\xce\xff\x39\xb8\x4f\x45\xdd\x6a\x2c\x17\xd5\xc1\xb8\xc2\x8f\x5e\x00\xcf\xc9\x24\xbf\x2b\xcc\x92\x28\x69\x4f\x4c\xc2\xb7\x11\x86\xa4\xa1\xba\x31\x84\x80\x7d\x52\x5a\x11\x6e\x75\x53\xd5\xc0\xae\xc8\x07\x2b\x8c\x99\x08\x79\xff\x5d\x95\xe0\xcd\x95\x99\x85\x79\x15\xe8\x6f\xf3\x60\x12\x7a\xab\x68\x92\xc3\x45\xd7\xb3\x14\x37\x82\x6a\xaa\x52\x
4f\xd4\x7e\x49\x23\x0e\x9b\x50\x56\x40\x32\x71\x3b\x01\x1c\x11\x2c\x4d\x9f\xa4\xa4\xde\x94\x17\x06\x99\x8e\xe2\xea\x54\x30\x72\x26\x60\x4b\xee\x25\x56\x09\x38\x61\x1e\xf2\x1d\x53\x5b\x56\xed\x0e\x7c\x89\xc7\x1e\x6d\xc4\xc6\x6c\xda\x4b\x0a\xbf\xa7\xf2\x4d\x2f\x96\xee\x5c\x76\x39\xe3\xc4\xb2\x7d\xc6\x63\xe3\x2a\xc3\x41\x2a\xce\x3b\x3c\xd9\x3b\x22\xc7\xa4\x91\x04\x2f\xf4\x96\x8a\x50\x2a\xcb\x4a\x8b\x3d\x92\xeb\x04\x8a\x87\xbc\xaf\xd5\x87\xd8\xfb\x2a\xec\x90\x56\x7a\xfd\xb7\x57\xf6\x70\x1c\xf8\x07\xcf\x68\xcd\x0e\x8c\xa6\x52\xe7\x8d\x00\x95\x92\xb0\x0e\xcc\xf8\xf3\x09\x2e\xe2\x06\x20\x81\xe1\x8e\x9a\x50\x96\x0d\xd7\x82\xc6\x91\x27\x0a\x3d\x3c\x27\x89\x42\x94\xbe\x4b\xa2\x98\x1d\xf6\x94\x75\x5f\x59\xf4\xf4\xa2\x97\x7f\x90\xda\x52\x17\x68\x26\x8f\xe0\x94\x00\xa6\x14\x38\xad\xe2\x39\x35\x47\x80\x65\x53\x94\x5a\x78\x0f\x7a\xf7\xb0\xc7\x3b\x8e\xe3\x76\x96\xf6\xba\x55\x5a\xc5\xa0\xf8\xf9\xd3\xfe\xf5\x55\x0e\xd7\xa7\xe9\xcc\x47\x03\xc8\x38\x78\xf9\xa7\xb1\x5b\x18\x57\x8b\x7f\x9a\x93\xda\xc2\xd8\xbb\x2b\xec\xdb\xad\xce\x4f\xd2\x26\x76\xfe\x9a\xc8\x56\x44\xd8\xf4\x5f\x51\x09\x16\xd5\x0f\x2b\x0a\xb7\x84\xae\xa2\xf5\xed\x54\x3d\x1a\xe6\x10\x85\xa1\x41\x66\x95\x99\x36\xe4\xdd\xb4\xe9\x7a\x4a\xdf\xe9\xb1\x77\x78\x6c\xbc\x9b\x36\x6f\x89\x2e\x89\xa6\xff\x06\x2f\xb8\xb0\x40\x3f\x06\xe9\x46\xf1\xe2\xee\x3e\xf7\x0e\xef\x29\xec\xc9\xec\xc6\x1d\x2f\xa4\x9a\xe6\x47\xfd\xee\x5e\x08\x69\x01\x4e\x48\x65\x85\x7d\xe7\xe6\x09\xfb\xf3\x73\x98\xaa\xff\x05\x2f\x38\x45\x7f\x2b\xf4\x97\x1c\xcd\xe9\x53\x9d\xf0\x12\x89\x57\x95\x15\xa4\x53\xf7\xe7\xe7\xf2\x9d\xe7\xd3\x9f\xba\xe6\x32\xe4\xd3\x9f\xba\x66\xaf\xf9\xf4\x6f\x1d\x63\xcf\xb8\x4c\x4c\xeb\x73\x7a\x5c\x66\x15\x85\x85\xff\xd1\x03\xc1\xd3\xb7\x7d\xca\x48\x82\xa6\x28\xc2\x28\x36\x50\x16\x1a\x50\x50\x3f\xaa\x36\x3a\x39\x23\xb4\xf9\x6c\x0c\xd5\x21\xa7\x9f\x7f\x6b\x94\x3d\x83\xd1\x0d\x7f\x21\x98\x25\x8d\xc9\xf4\x90\xd6\x9f\x84\x29\x1e\x8e\x82\x72\x0f\x5f\x86\x04\xb0\x5b\xd0\xcf\x48\x31\xaa\x35\x37\xfc\x18\x95\x86\x28\xf8\x4f\xc3\x95\x06\xa7\xd4\x65\xd0\x10\x60\x08\x9a\x82\x9f\xc9\x8b\x90\x60\x23\x27\xc1\xa8\x9a\xcf\x00\x78\x2f\x65\x96\x69\xb6\xfc\x12\xe7\xd0\x4f\x7a\xcc\x8f\xa5\x4e\xaf\x9d\xa1\xa0\xd0\xfe\xc8\x3e\x4e\x0b\x4b\x83\xe5\x21\x30\x3e\xa8\xb4\x4e\x23\x2c\x1f\xac\x72\x3b\xa5\x89\xee\x48\x43\xac\x54\x63\x82\x1d\x24\x63\x9b\xff\x8c\xe0\x3c\xfd\x39\x54\xb1\xd5\xf7\x08\x57\x25\x69\xeb\x19\x80\xc7\x2d\xac\x8b\xfe\xb0\xd3\x1d\xf7\xea\xd3\xf1\xc5\x60\xc9\x9c\x8e\x43\x62\xe4\x05\xa3\x67\x75\x3d\xcd\x9a\x93\xe6\x24\xaf\xdc\xca\x2d\x63\x73\xc3\xd6\x1d\xc9\xb7\xfc\xd6\xf7\x07\x7b\x5f\x25\x28\x51\x36\x5b\x04\xc0\xf7\xfb\xb6\xef\x0c\x16\xb6\x7a\xc0\xf2\x9b\xac\xa4\xeb\x8a\xf0\x77\x3d\x24\x81\x1a\x65\x96\xad\x24\xe5\x61\xb7\x2b\xc2\x6c\xc3\x3b\x00\xbb\xc9\x86\x77\x00\xe8\x6d\x37\xbc\x03\x79\x23\xed\xba\x16\xbe\x77\x72\xf6\x91\x0a\x7b\x84\x45\x72\x9d\xfb\xbf\x50\x61\xad\xcb\x13\x57\x3e\xac\xe5\x4b\xd6\xa7\x82\x8f\x7b\xf6\x97\x55\x36\x0c\x9a\xe1\x9c\x3b\x94\x27\x06\x31\x5d\x65\xb6\xf7\x95\x10\x01\x5d\x2d\xd6\x4b\x73\x5a\x50\x10\x44\xb9\x5b\x9e\x4a\xf9\xa0\xad\x34\x07\x1a\x0f\x0b\x47\xbf\x2b\x32\xb5\x3b\xbb\x2f\x86\x99\xe0\x9d\x5e\x81\x78\x9a\x3a\x39\xab\x26\xf7\x3e\x86\xfa\x74\x8e\x76\x45\xd6\x7c\xe8\xfa\x70\x56\x7f\x28\x38\x69\x3e\xaa\x0f\x08\xd6\x25\xc2\x1d\x32\x02\x75\x76\x71\xae\xc6\x3e\xe0\x31\x9c\x13\xfe\xbb\x3c\x16\x3d\x74\xf5\x34\x93\x18\x84\x5c\x70\x1a\x79\x96\xd5\x30\x3b\x3a\x87\xec\x75\x08\x66\xc4\x67\xcc\x06\xe0\x0c\x76\x8d\x7d\xfa\x00\xfb\x77\x43\x53\xed\xfd\xf7\x1f\x08\xde\x7e\x60\x78\x16\x7e\x09\x11\xca\x01\x08\xd8\x2e\xea\x4e\xa5\x2e\xd0\x41\xcb\x39\x2d\x13\x2a\x03\x1c\
xfd\xbb\xb2\xcb\x73\xe5\xde\xca\x8b\x34\x0b\xdb\xa2\xe6\xa4\x3c\x63\x12\xb4\x49\xd6\x96\x4d\xc6\xe4\x07\x55\xcd\x3c\x6a\x0a\x37\x9b\x3b\x8c\xd7\xc3\xbe\x05\x01\x01\x53\x9d\xe2\x5f\xd4\x69\x4d\xb9\xe4\xd4\x0a\x19\x9c\xdd\xb6\x8f\x0d\xe2\xf0\xa2\x84\xb4\x47\x78\x58\x9d\x74\x72\xf7\xa8\x23\xdf\x53\x87\xa3\x69\x0b\x95\x8c\xf2\xae\x2c\x6c\x03\x2c\xaf\xc6\x17\xb0\x9a\xfa\xf0\xd9\x4b\x9c\x06\x52\x10\xc5\xf6\x91\x8d\xbc\x97\x2b\x53\xf0\xc5\x9d\x44\x5c\x4e\x43\xab\x2e\x9a\x8c\x36\x79\xd8\x15\x3b\x46\x2f\xb3\x3e\x10\x76\xa3\x29\x9c\x74\x1a\x22\x20\x2f\x5f\xa8\xda\x0b\xe6\x2a\x0a\xa3\x04\x79\xd1\xd3\x35\x94\x57\x4a\x51\xa1\x2d\xf0\x4b\xc0\xf6\xe6\x86\x39\xfe\x4e\x85\x31\xd3\x05\xfe\x87\x2a\x0f\xa5\xf0\xb8\x53\x7f\x28\xf8\x7d\xcf\xea\x78\x97\x63\x5e\x0f\x03\x4d\x27\xeb\x41\xc4\x68\xc5\xd0\x51\xa5\xe9\x94\x56\xe9\xa0\x38\x1e\x1c\x61\x22\x36\x97\xb3\x48\x25\xf3\xec\x5c\x24\xd3\xc4\xdf\x4c\x1a\x7f\xaa\xc2\x0e\x2a\x51\xfc\x91\x4a\xf0\x8b\x15\xf5\x62\x64\xa2\x72\xd5\x25\x6a\xc2\x26\xbb\x0b\x46\x88\x94\x2e\x52\xec\x2b\x41\xb2\x00\x82\xc8\x45\x79\x70\xce\xa7\x6e\x82\x2d\xf7\xe6\xa9\x9b\xa8\xf4\x9b\xa7\x6a\xb5\xda\x45\x42\xa3\x37\x6c\x4a\xb6\x89\x1c\x6d\x84\x51\x21\x3a\x6a\x35\x5c\x54\x5d\x70\x91\x52\x12\x5b\xfa\xd2\x45\xe8\x0a\xeb\x01\x59\x11\xc0\xf7\xb2\x48\xa4\x6e\x57\x1e\x36\xeb\xc5\xfc\xa2\x03\xfd\xea\xae\x6c\x79\xa4\xb5\x1f\x1c\xe2\x08\x7f\xc3\x38\x1b\x57\x4f\xf8\xaf\x18\x0f\x5e\xa9\xfa\x57\x6b\xd4\x0a\x9b\x5a\x8e\xa1\xd5\xbf\xf9\xb6\x1d\x3c\xac\xba\xe5\xda\x2e\xef\xba\xd3\x3a\x6e\x8f\x9b\x82\x09\x7d\x6f\x97\x7d\xaa\x8c\x70\xb8\x3b\x21\xa8\x36\x4e\x5e\x29\x31\x7b\x05\x85\x6c\x22\x62\x8f\x42\x75\xa6\x19\xa2\x7b\x42\x21\x21\xa1\xe0\x93\x9d\x64\x41\xf3\x51\x6c\xa4\x9e\xe2\xe8\x1f\x8b\x72\x1e\x48\xe9\x54\x8d\xa3\x55\x11\x18\xfc\x5f\x60\x39\x03\xf0\x67\x48\xaa\xb0\x9e\x29\x95\xa0\xa9\xbb\xe5\xc7\xa1\x7e\xa2\xc9\x63\x71\x29\x6a\xa4\xed\x2c\xec\xae\x28\x70\x90\xe0\xb6\x81\x12\x72\x3a\x47\x92\x7f\x3b\x58\x0b\x08\xf8\x54\x16\x85\x29\x12\x0a\x87\x4e\x85\xc2\x61\x9a\xf2\xbd\x52\x60\x60\x19\x13\xf4\x8a\x05\x66\x8d\xc7\x2f\x68\x5d\x00\x71\x84\x98\x6e\x23\x65\x15\xd2\x41\x68\x86\x56\xbb\xd0\x28\xb1\x0a\xad\x11\xb4\x01\x2c\x43\x04\xe2\xc4\x19\x51\xef\xf3\x5b\x67\xf8\xcd\x88\xf1\x73\x33\x85\x29\x1e\xc5\xe0\x83\x5b\x67\xf0\x78\xa0\xfa\x07\x9a\x95\xa4\x3c\xef\xb5\x5a\xd1\x25\x9e\xf7\x64\x27\xe5\xf8\x2a\x6c\xa0\xdd\x95\x90\x00\xcd\x31\xbe\xab\x8f\xbb\x23\xd4\xdc\x69\x24\xb5\xd1\xa9\x63\x8d\xcf\x18\xae\x13\xaa\xa3\x5a\x22\xaa\x7f\xa7\xf9\xda\xf1\x63\x93\x7c\xed\xc4\x24\x5f\x3b\x2e\xff\x0b\x02\x1b\x7e\x1d\x93\x7f\x9d\x9c\xe4\x6b\x27\x41\x86\xcb\x4b\x27\xa0\x46\xf8\x1c\xfc\x79\x62\x92\xb7\xd2\xf4\x38\xfe\xef\x31\x27\x7c\x29\x65\x1d\xb6\xfa\xf5\x50\xec\x68\x5d\xb0\x8f\x8d\xb2\x7f\x6f\x67\x09\x80\xc9\x78\x36\x8d\x7b\x9d\x24\xf7\xdf\x3e\x1a\xbc\x63\x64\xb3\xbb\x16\x64\x8b\x79\x84\x37\xe8\x26\x72\x43\xe1\x82\x59\x86\x33\x63\xda\x2b\xba\xbd\x62\xdb\xfd\x5d\xb3\x4f\x4d\x81\x2a\x01\xfb\x3a\xd9\x36\x11\xfe\xf8\xaa\x4c\x34\x44\x04\xfb\xb4\x96\xeb\xd5\x30\xaf\x02\xdd\xa6\xbb\x59\xef\xe0\x74\xa1\xea\xbb\x9b\x83\x85\x7a\x67\x93\x5d\x0c\xe9\x74\x79\xa1\xdf\x4f\xb3\x4d\x5f\x77\x28\x77\x09\x0a\x03\xef\x4b\x11\xd3\x8d\x43\x0d\x31\xa0\x22\x53\x07\x77\x35\xb5\x23\x3b\x13\x29\x61\x31\xbb\xf7\xa1\x3c\xc9\xc8\x3a\x9a\xe9\xc4\xbe\xe2\x31\x3c\xb6\xfa\x9f\xf7\x82\x4f\x7a\xf0\xa7\xbd\x6b\xe3\x85\x87\x64\xcf\xde\x76\xa7\xde\x6c\x0d\x00\x8e\x3f\xc2\xfa\x64\x1d\x7e\xf1\x26\xd8\x23\x6a\xdd\xb8\x97\x85\xf1\xcd\x35\x2a\xfe\xe2\x84\xb3\x9f\xbe\xd7\x63\x78\x1c\xf7\xdf\xe1\x05\xff\x8f\x07\x7f\x5a\xc7\x14\x9b\x95\x6b\xb3\x73\x6f\x94\x6b\x46\x16\xf0\x37\xcb\xaf
\x76\xc3\x86\xa8\x42\x61\x4d\x8a\x56\x17\x0e\x29\xc0\x45\xc2\xca\xa7\x3d\xef\x82\x7a\xa7\x79\xb1\x66\xd3\xe9\x39\x37\xec\x6a\xff\x9d\xc7\x8e\x0e\xb1\xf2\xcf\xaa\x58\x42\xc0\xb2\x5d\x44\xb3\xad\xff\x11\x2f\xb8\x7e\xe8\x1d\x8a\x14\xa3\x1f\x18\xea\x25\x35\x07\x2b\x26\xd1\xb1\x52\x3c\x9b\xf5\xd9\x21\xd8\x8b\x44\x73\xa6\xf0\xe3\xbd\xdb\xbb\x8e\x83\x85\x2b\x54\x50\xc0\x4e\x1c\x24\x78\x6c\xc1\x08\x74\x34\x13\xd5\x09\xfa\x1e\x7b\xe3\x81\xa1\x4d\x5e\xc8\x52\x59\x39\xd1\xbc\x53\x4e\x61\x41\xee\xef\xff\x35\x1a\x3c\x61\xd1\x18\xbb\x43\xde\x55\x8f\xf1\x35\x78\x8e\x68\xca\x9d\xe6\xfd\xca\x28\xfb\x9d\x11\x80\x16\x90\xfd\x7f\x3e\x6d\x0a\xff\x83\x23\xca\x6d\xf5\xf6\x11\x79\x81\xd7\xa3\xc2\xe8\xcb\xb9\x28\x10\x64\x2a\x27\x0d\x2b\xd1\x09\x0d\xad\x48\x0a\xaf\x7a\x5f\x05\x45\x1a\x22\xa4\x30\xe1\x69\xa3\xd0\xec\x9a\x75\x51\xac\x0b\x91\xf0\x63\xc7\x8e\x1d\x83\xc9\x70\xec\xba\xeb\xae\xc3\x18\x76\x95\x1f\x59\x7a\x10\x9e\xba\xf6\xf8\xf1\x1a\x7f\xfa\xcc\xf9\x73\xc4\x37\x46\xe8\xb2\x58\x32\x70\xf5\xda\x2f\xe7\x93\x80\xa4\x6a\xd2\x03\xdc\xbb\x14\xf1\x45\xcd\xab\xf1\x39\xe0\x89\x48\xc1\x2b\x2d\x77\x67\x5a\x56\xdd\xb0\x30\xe7\x73\x4c\x76\xb3\x50\xde\xc9\xc4\x41\x4e\xe2\x4e\xd4\x5e\x51\x41\xac\x52\x3c\xc7\x91\x02\x85\x44\x9d\x42\x91\x23\x61\x90\x34\x94\x45\x0a\x65\x2c\xa0\x2a\x04\x1d\xd4\xca\x6f\x95\xcb\x56\x6f\xff\x72\xd9\xc9\xc5\x41\x27\x58\x2c\x4c\x57\x5d\x56\xc2\x35\xab\x47\xec\xa0\xb2\xa7\x7d\x6f\xf0\x6d\x7a\xe3\xc7\x39\x40\x93\x42\x56\xc4\x96\xb5\xd7\xb2\x93\xec\xf8\xb6\x1e\x35\x9c\x6f\x0b\xba\x08\xf6\xcf\x8f\x60\xc1\x90\xe7\x16\x4f\xcf\x39\x53\xf3\xd3\x8f\x08\x5e\xe2\x39\x73\x73\x31\x6c\xa6\x39\x3f\x1d\xa7\x8d\x55\x3e\x27\x00\x6c\xba\x63\x32\x43\xe4\x4a\x50\x9a\x7d\x0b\x92\x31\x71\x8d\x76\xd3\x66\x8d\x2f\x9e\x9e\xa3\xc6\x68\xf0\x16\x9e\xae\x27\x22\xcb\x57\xa2\x2e\xd1\xca\x75\x14\xe7\xeb\xd2\x99\x73\x51\xd2\xbb\x04\x71\x78\x75\x11\xcb\xc1\xda\xf0\xc6\x3b\x69\x12\x15\x69\x96\x6f\x78\x07\xa2\x4e\xd8\x76\x97\xc4\x4f\x1c\x66\x3f\xe3\xb1\x83\xab\xa2\x0f\xa8\x6f\x6f\xf2\x82\x57\x7a\xb7\xe1\x0f\xb5\x1b\x74\x29\xca\x7a\x55\xf4\x79\x46\xf0\x10\xb2\x5e\x77\xe4\x22\x73\x84\xd9\x94\x28\x1a\x53\x0d\xd1\x5d\x99\xa2\xe2\x86\x3a\x4c\x49\x55\x53\x4e\xb7\x29\x6a\xdd\x54\x56\x6f\x4e\x2d\x9e\x99\x99\x3b\x7f\xa6\xd6\x69\x5e\xb5\x92\xae\x57\x8b\xb4\xda\xcb\x45\x35\x2a\x6c\xc1\xf8\x23\x1e\xd3\x2d\xf2\x9f\xef\x05\x62\x46\xee\xbf\xb1\xd0\xfc\x54\xb3\xa2\xbb\xc2\xd5\x13\x97\xaf\x06\x9b\xc2\xd4\x3c\x0f\xc2\xca\xd3\xd8\xbf\x14\xac\x2e\x83\x17\x48\x0e\xb6\xbc\x02\x3b\x85\xd3\x43\x59\x7d\x38\x7b\xc4\x3e\xfb\xe4\xbd\x1e\x1b\xcf\x44\xd8\xbc\x3d\x89\xfb\xfe\x4f\x79\xc1\x8f\x79\x8b\xf4\x8b\x83\x42\x8e\xf4\x29\x69\x46\x99\x10\xfa\xa6\x02\x4b\x8f\x12\x8e\x33\xf8\xbc\x9c\x95\x25\x2b\x16\x42\x6a\x5e\xb6\x5a\x5b\xd6\x98\xff\xed\xb1\x43\xb9\x68\x64\xa2\x58\x14\x2d\xff\x6f\x3c\x76\xed\xb6\x6b\x12\x98\xf8\xca\xae\xee\x9f\xf7\x96\x54\x29\xc0\x22\x61\x69\x12\x61\x4f\x1e\x18\x0a\x85\x24\x8d\x5f\x73\x27\x30\xa8\x9c\x48\x37\x01\x0e\xd2\x0c\x5c\x8f\x7a\x06\x5b\xa3\x97\x44\xf1\xe5\xea\x07\xf6\x02\x8f\x8d\xf6\x72\x91\xf9\xcf\x0a\x3a\x66\xd6\x80\xb7\x61\x60\xd6\x84\xcd\x4e\x94\x3c\x14\xf3\xe6\x3d\x15\x36\xd6\xca\x97\xfb\x5d\xe1\xbf\xbd\x12\x6c\x54\x6e\x91\x7b\x1a\xb2\x4d\xd9\xee\x35\x12\xa6\x20\xac\xfa\x69\x4f\x47\xf8\x83\x0c\xab\xf1\xe5\xa8\x3b\xcd\xcf\x58\x9c\x36\x4a\xcc\xdb\x45\xd9\xd8\x53\x2a\x35\x0f\xb0\x55\x28\xa0\x4a\xaa\x27\x2a\xea\x9c\x1a\x35\xcd\x03\x71\xa9\xb8\x26\x98\xe4\xc1\xa5\x56\x2e\xff\x49\x8a\x56\x1e\xd4\xf8\x7c\xa7\x1b\x47\x8d\xa8\x88\x29\x18\xd5\x98\x48\xf1\x05\xa0\xa4\x4e\x2c\x9b\xc2\x6e\xf9\x5a\xd0\xa4\xac\x7a\xf3\xaa\xac\xde\xb4\x7b\x2d\x63\x2
8\x47\xfd\x28\xf8\x4f\x66\xe8\xe0\x12\x8d\xdd\xe5\x1f\xa9\xbf\x7d\x1c\xbb\x6a\x93\xf4\x08\xa4\x4a\x5d\x42\xf7\xeb\x47\x1f\x17\xdc\x55\xbe\x58\x36\x38\x96\x1d\xb2\xad\xa8\x6d\x08\xcb\xea\x7d\xa0\x76\xaa\x2d\x75\x45\xa3\x46\xa5\xa0\xae\xef\x6c\x1b\x7f\xfc\x58\x76\xff\x38\x3b\x20\xb2\x2c\xcd\xfc\xd7\x8e\x07\x2f\x19\x3f\x23\xff\x74\xd2\x8e\xfa\x90\x13\x12\x8b\x0e\x64\xca\xa7\x49\x23\x8a\xd5\xd9\x68\xa0\x74\x15\x07\x35\xd3\x28\xa2\x35\x55\xab\x1a\x87\x42\x89\xfc\xb9\xd1\xe8\x65\x93\x48\x8c\x8a\x7d\x39\xc9\xc3\xa2\x10\x9d\xae\xf2\x04\x40\xc6\x26\x86\x21\x6f\x5a\x3e\xf2\x6e\xce\x68\x7a\x36\x60\x77\xd8\xa2\x20\xdc\xff\xfa\xc0\xb3\x5b\x0e\x9a\x18\xf8\x48\xb9\x1c\x78\xcb\x98\x74\x51\xf8\xea\x6f\x63\x1b\x27\xb9\x28\x1a\x83\x0d\xe5\xc0\xe3\xdc\x42\xba\x7d\xae\x19\xf6\xa5\xee\x94\xf7\x91\xf5\x51\xf7\x51\x98\xc5\x91\xc8\xb8\xc0\x22\x90\xbf\xb9\x99\xae\x27\xea\xf3\xa6\x39\xf2\x35\x7c\x6c\x02\xf7\x04\xcc\xb9\x00\x7d\x0b\x22\x6a\xb2\x34\x8e\x15\x76\xc1\xb9\x30\x2f\x6e\x4b\xd2\xf5\xe4\xd6\x34\x6d\xda\xa1\x68\x39\x64\x74\x84\x8d\x2c\xcd\x73\x80\xeb\x8d\x21\x93\x0c\x88\x9c\x6a\xfc\x5c\x58\x94\x2b\x23\x2b\x02\x36\x6c\x63\xdd\xc6\x6c\x0d\x5d\x31\xdd\x1d\x13\x9a\xd1\x65\xfb\x3a\x41\xb8\x12\x05\xcf\xc9\x6f\x36\x42\xe2\xc0\x83\xe3\x35\x1e\xf2\x35\x7c\x67\x91\xea\x9a\x43\xf0\x07\xcc\xd6\x7a\x9f\xb7\xa2\x4b\x6a\x52\x96\x97\x42\x94\x0c\x0e\x31\xe6\x3a\x49\xdd\xb4\x15\x25\x4d\xdb\x8e\x52\x4e\x42\x6a\x8a\x7a\xaf\xdd\x86\xe8\x36\xb9\xd1\x42\xe8\x17\x7d\x08\x3f\x6e\x5c\xf2\x70\x51\x75\x64\x9c\xaa\x79\xbf\xd3\x08\x55\x2c\x0e\x0e\x77\x37\xaa\xb8\x47\xf4\x00\x61\x0e\xb3\x3a\xe2\x02\x4b\x6e\x61\xca\x26\x8e\x7d\x00\xae\x18\x9e\x82\x93\x17\x21\x85\xe2\xd1\xa7\xf0\xdd\x42\x5c\x2a\xca\xe3\xaf\xcd\x30\x0e\x31\xf0\x18\x7b\x64\x6c\x0f\x99\xff\xe5\xb1\x1d\x28\xde\x96\x00\xc3\xd0\xe5\x9f\x1b\x73\x06\x9e\x22\xd1\x88\x98\x6c\x70\x12\xe9\x04\x30\xc5\x1b\x17\xc7\x1a\x91\x03\x3c\x88\x51\x01\x90\x2f\xbd\xa4\xc0\x9c\x25\xea\x42\x77\xf5\xf6\xf2\xa1\x0b\x16\x6d\x1e\xa5\x8b\x3a\xc1\x5e\xbe\xe0\xd6\x95\x1e\xa0\xa0\x65\xaa\x97\xc5\x3e\xab\x77\xcc\x72\x91\x11\x58\x85\x21\x45\x34\x69\x22\xdb\x59\xa3\x30\xb9\xe1\x14\xb0\x23\xf7\x40\x15\x5c\xa7\x10\x5a\x8f\x1f\xab\x76\xa2\xa4\x57\xc8\x33\x70\xb8\x2a\x0f\xb0\x51\xda\x44\x13\x33\x50\xb7\xab\xaa\xa0\x30\x44\x19\x28\x47\x78\x48\x0d\x4c\x96\x46\x6b\xb3\x4a\x92\xc8\xa6\x56\xe8\xa8\x2a\x84\x67\x8e\x52\xa4\xd2\xdb\xb4\x7f\x6a\xfc\x42\x5a\x90\xde\x10\xb5\x86\x08\x6e\x24\xc6\xc6\xcc\xe7\x24\x8a\xf9\x51\x39\x2e\x58\x73\x95\x8e\x88\xa6\xef\x52\xbf\x2b\xd6\x49\x8d\x46\x00\xd0\xb3\xa6\xa0\x49\x1d\x63\x68\xba\x42\xa1\x5a\x9a\xc6\x91\x53\x18\x69\x69\x9a\xbc\x0d\x35\x96\x22\xc0\x5a\x31\x9d\x70\x55\xe0\x13\x8a\x0f\x58\x27\x30\x50\x16\x62\x47\x14\x2b\x69\x13\x21\x3d\x70\xe0\x8d\x00\xb7\x16\x99\x35\xce\x89\xc8\xf3\x49\xc4\x26\x8d\x70\x5b\xa0\x0c\x3d\x29\xb5\xa0\x2f\x0d\x1f\x28\xc0\x17\x38\x31\xd0\xec\xc3\x15\x4c\xbc\x5a\x13\xfe\xfb\x2a\x7b\x59\x71\xcf\xab\xd0\xb0\xee\x78\xa9\xc9\xce\x82\x57\xe2\x3e\xfa\x94\x6b\x6a\x66\x90\x44\x57\x20\xa0\x84\x53\x63\x07\x9d\x95\x3c\x84\x03\x5b\x64\x29\x46\x6d\xd8\x02\x9b\x24\x4e\x33\xc4\xff\xd6\x32\x6f\x47\x4b\x9a\xb6\x1b\x8c\x2a\x25\x61\x50\x63\xff\x7b\x94\x8d\xab\xbd\xc0\xff\xdb\xd1\xbd\xf4\xe2\xfb\x46\x2d\x4d\x63\x37\x22\xab\xc8\xfa\x9a\x61\xe8\x2e\xb9\x5c\x87\xeb\x64\xd6\x0a\x9d\x34\x25\x98\xe2\xf1\x73\x96\xde\x42\x1f\x53\x1a\x0d\xe8\x28\x30\xf1\xa3\x7c\x75\x92\x87\x71\x2a\xc5\x03\x3a\xa1\x48\x2c\x58\x01\x63\x00\x7a\xd1\x2c\x09\x42\xf8\x22\xb8\x00\x72\x4d\x8b\x4b\xaf\x82\x26\x95\xa6\x39\xc2\x72\xd3\xa7\x4d\xe5\x26\x09\xcf\x42\xf5\x8b\x79\x31\x4a\xf4\x08\xd5\xcc\x58\x01\x00\xaf\x62\x
9a\x8d\x12\x5b\x97\x0d\x5b\x05\xcd\x28\x2a\x61\x05\xdc\x4b\x22\x71\xbb\xba\x48\xa1\xa1\xd4\xa5\xf6\xa6\x8b\x42\x06\xed\x90\x88\x08\x90\x11\xf1\x29\x2c\xee\x61\x73\x66\x40\x5b\x05\x58\x18\xd2\x74\x64\xcb\x2c\x55\x67\x60\xd0\xd5\x00\x10\x0f\x49\x84\x8a\xb7\x6e\x34\x7b\xcb\xe8\xd0\xa4\x7f\x62\x00\x23\x5d\xff\xff\x8c\x04\x37\x38\x57\xca\xd1\xbf\x6a\xbd\x18\x8d\x5f\x93\xf5\xb9\xda\xfc\x7b\x46\xd8\xab\x2b\x10\x56\xd5\x24\xc4\xd5\xef\xaf\x04\xff\x6e\x56\xbd\xae\x88\xda\xa4\x86\x61\x1b\x42\x9e\xca\x6e\xde\x53\x86\x8f\x0e\x47\x7d\xd0\xe3\x9b\xb2\x08\x8d\xf9\xa3\x9d\xb0\xcb\x1e\xf4\x8e\xb0\xff\xe0\x06\xa6\x84\x45\x63\xa5\x0a\xd4\x2c\xd5\x55\xd1\xf7\x0f\xfa\x07\xe4\x0b\xf0\xe4\xb7\x0f\x79\x32\x2f\xb2\xb0\x10\xed\xbe\x3f\xee\x8f\xc1\x5b\xf0\xe4\x56\x38\xd4\x87\xfc\x83\x88\xe1\xcf\xd8\x8f\x7b\xec\x11\x36\x65\xa1\xff\x92\xad\x92\xdc\x8c\x95\xc2\x22\x39\x84\xce\x0f\xbe\xdb\xbe\xe6\x06\xe4\x0f\x0e\x13\xee\x48\x16\x51\xe1\xa4\xdc\x1b\xd3\x44\x68\x8e\x9a\xa4\xa8\xb1\x0f\x8f\xb3\xaa\x8d\xd9\xd5\x2b\x56\xd2\x2c\x7a\x16\x66\x27\x29\x2f\x90\x72\xa2\x2c\xf6\x62\xe1\xbf\x7c\x3c\xf8\x59\xcf\xbe\x32\x10\x52\xd0\x50\x46\x5b\xc1\xf3\x9e\xa6\x9a\xb0\x78\x2a\xba\x22\x03\xe7\x0b\x64\x5e\x93\xe1\x15\x25\x01\x96\x91\x11\x9c\x4f\x94\x27\x47\x0a\x2e\xe7\x33\xf0\x78\xc9\xe5\x0e\xbb\x18\x36\x9c\x37\x7b\xc8\x62\x28\x08\x5c\x87\xf4\xf4\x3e\x1a\x94\x31\x2d\xb9\x10\xb5\x0d\xef\xc0\x9a\xc8\xea\x6e\xbc\xf1\x6f\x8e\xb1\x2f\x7b\xec\x90\x89\xa3\xfc\x7d\x2f\xf8\x80\x6e\x56\x89\xc0\xdb\x8e\x6c\x94\xab\x5c\x36\xda\xf0\x1a\xd7\x38\x0f\xae\x0e\x80\x3e\x0f\x5a\xa9\xe5\x8a\x21\xa1\xec\x46\x60\x9f\xce\x6b\x8c\x07\x57\x4f\xb5\xd2\x34\x28\x2f\x33\x2b\x6c\x91\x1f\x69\xa5\xe9\x11\xed\x9b\x34\x1f\xdf\xa2\xdc\x2d\x4c\x8b\x6f\xf0\x18\xb6\xdf\xff\x51\x2f\x58\xbf\x53\x64\x75\xb7\x71\xab\x16\x57\x8a\xaa\x01\x45\x84\xd4\x73\xb4\xaf\x4f\xf3\xb6\x28\x26\xe1\x8d\x49\xbe\x2e\x17\x04\xd1\x2a\x8b\x49\x12\xa6\x93\x84\x1a\x35\x89\x7c\xd0\xe5\x2e\xd9\xaa\x7e\x2f\xac\xb0\x43\xba\x21\xfe\x3f\x7a\xc1\x1f\x7a\x33\x0b\xf3\xf8\x73\x58\x92\xaa\xba\x49\xa0\x40\xf6\x22\xb0\x26\x93\xd4\x35\x35\x5b\x95\x76\x45\x0e\xba\x5f\x93\xbe\x4a\xcc\x35\xec\xb4\x61\x5b\x96\x58\xd8\x14\x20\x22\xe9\x75\x88\xa3\xd0\x19\x0f\xf9\xbe\x71\x74\xda\xbc\xe9\xeb\x40\x81\xbd\xe3\x5e\xf8\x80\xc7\x1e\xa9\x4a\x06\x77\x9e\xff\x53\x5e\xf0\x83\x7a\x42\xc2\x25\x42\x73\xd2\x29\xf9\xeb\x2b\x51\x61\x96\x1d\xc6\xba\x18\x30\xab\x81\x49\x3a\xa3\xf8\x51\xa4\xfe\x8a\x95\xc2\x2c\xdf\x35\x91\xf5\x8b\x15\xe5\xe9\xdb\x7d\xdd\xbf\x7c\x68\xa8\x4b\x63\x76\x69\xde\x71\x69\xfc\xea\xa1\x60\xd9\xf1\x68\xd0\x64\x03\x8e\x79\x95\xfe\xa6\xed\x84\x64\x19\x9c\x24\xf7\x04\x46\xae\x24\x5c\x31\x98\xf0\xd9\xa5\x79\xde\xcc\xa2\x35\x91\x6d\x78\x63\xf8\x87\xb3\xc0\x3f\x35\xce\xde\xe2\x31\xba\xe3\xbf\xce\x0b\x5e\xea\xcd\xc1\xdf\xc3\xe6\x94\x29\x4c\x81\xf1\x27\xcd\x58\x2d\x77\xac\x51\x8d\xcf\xa6\x09\x18\x14\x40\xb7\xe9\xa7\xbd\x0c\x6d\xaa\x56\x74\x3a\x68\xdd\x58\x70\x28\x57\x53\x3b\xca\x0b\x88\xe0\x29\x21\x69\xd8\x9d\xf7\x09\x4f\x9b\x4f\x7f\xcd\x0b\x7e\xc1\x2b\x9b\x4f\x8d\x89\xf4\xcc\xa5\xda\xa6\xc6\xcc\x16\x1c\x22\x1c\xc6\x63\x1a\x6c\x8d\x08\xd5\x0d\xf3\xdc\x50\xd3\x5a\xfa\x9c\xd5\x78\xf4\xb3\xc2\x34\xd6\xa7\x4b\xe5\xd5\x86\x43\x8d\x6d\x91\x85\xd0\xf5\xb8\xef\x26\x3d\x8c\xb0\x6f\x91\xda\xdc\x02\xb2\xbd\x6a\x93\xba\xff\xb2\x91\xbd\x1a\xe6\x3f\x53\xb9\x30\xa4\x40\x05\x5f\x42\x4f\x69\xc6\x5d\xb4\xcd\x53\xc8\x30\x09\x07\xb0\x0b\x0b\x48\x14\x58\x13\x8e\x8d\x45\xee\x45\x61\xae\x13\x28\xed\x69\x90\x72\xb5\x81\xe8\x5b\x56\x3d\x70\x62\xc3\xae\x23\xaf\xde\x91\x74\x9d\xeb\x8d\x30\x8e\x5d\xa6\x5f\xc8\xfa\xc5\x75\x8b\x9b\x95\xe2\xec\xc3\x51\x8a\x20\
x78\x84\x6a\x6f\x27\x1d\xab\x33\xf3\xd0\x86\xe5\x08\xc0\x51\xac\x48\xa9\x90\xa8\x87\x26\x61\xdb\xa0\x17\x74\x0f\x11\x89\x02\x4c\x82\x1a\xeb\x58\xbe\x9e\x30\x58\x5a\x32\x41\x3d\x80\x5e\x57\x05\x3d\x59\x1f\x0e\x1d\x50\x1c\xb5\x1c\x06\xdc\x3b\xfc\xa8\x7c\x75\x6a\x3d\x8b\x0a\x51\xa2\x89\x7a\xc0\x63\xdf\x84\xef\xcd\x28\x22\x8a\xdc\x7f\xa7\x17\xbc\xdc\xbb\xb3\x74\x15\x42\xb1\x45\x4e\xc3\x50\x25\x51\xdd\xb0\x11\x4e\xd1\x19\xac\x1b\x33\x38\x78\x66\xa9\xc2\x2a\xc5\x8b\x47\x72\xde\xb4\xe9\x89\x90\xe8\x57\x7b\x13\x28\x11\xf6\x1d\x25\xd9\x66\xe7\xa0\xbd\xf0\x90\x93\x83\xa6\xd3\xe3\x15\x21\x6d\xaf\x9e\x8b\xc2\xff\x93\xf1\xe0\x3d\x23\xee\x35\x9c\xab\x3a\x16\x26\xd4\xd4\xc7\x74\x4a\x6a\xa4\x1d\x85\x6e\x9a\xb6\x90\x44\x16\xb5\x21\x71\xa9\x1b\xc2\x61\x89\x6e\x19\x7a\x5c\x92\x61\xb3\xf2\xb0\x91\x47\x21\x00\x8f\x35\x7b\x0d\x78\xca\x50\x2b\x5f\xe2\x0b\x58\xd8\x2d\xb6\xc1\x1b\x90\x6f\xa6\x19\xe7\xcf\x06\xa2\x1d\xfd\xf8\x34\xbf\xfb\xd9\x41\xd4\x0d\xa6\x79\x70\xfc\x58\x4d\xfe\x7f\xed\x78\xf0\xdc\x49\xee\x5e\x3c\x51\x3b\x11\x3c\xf7\x9e\x49\x78\x15\x4a\x9f\x06\xb6\x9e\xbb\x9f\x1d\x48\xb9\x27\x9f\x0b\xa5\x60\x92\xad\x08\xa6\xf9\xf5\xa7\xae\xbb\x16\x8a\x50\x37\xeb\xd6\xcd\x93\xc7\x6e\x78\xee\x3d\x8c\xf3\xe7\x02\xc8\x2a\x9e\x9f\x71\xad\x96\x5a\x4b\x6e\xfc\xb5\x48\x00\x9d\x57\x8e\x84\x49\xe1\x34\xbf\x9b\xeb\x9a\x4e\xcb\x4f\x4d\x72\x5d\x49\xf8\xcd\xa9\xa2\x75\xf7\xd1\x93\xc7\x6e\xb0\x9f\x3c\x79\xec\x06\x7e\x8f\xb3\x79\xfc\xd8\x01\xf6\x37\x1e\x3b\xa4\x47\xca\xff\x13\x2f\xf8\x71\x6f\x7e\xc1\x1e\x3b\x10\x95\x69\xab\xa5\x0f\x8d\x31\xe1\x57\x6a\x0e\x60\x88\x14\xd3\x58\xa0\x86\x74\x3a\x17\x56\xdb\x4c\x86\x82\xc5\xb1\x9c\x87\x2d\x44\xe1\x82\x03\x9e\x52\xe0\x15\x11\x7b\x84\x5a\x63\xca\x7b\x45\x14\x47\xcf\x12\xce\xde\x7c\x0d\x3b\xb1\x83\x44\x5f\x35\x41\x69\xf0\xd9\x4f\x57\xd8\x63\x92\x01\xe2\xee\x1f\xad\x04\x9f\xda\x4d\xab\xeb\xbd\x42\x47\x7e\x18\xd3\x62\xa9\x0b\x6c\x93\x19\x31\x64\xc9\xe7\xfb\x42\xee\x2e\x09\xa4\xd9\x69\x33\xe3\x24\xde\xcf\x44\x03\x8b\x22\x6c\x9f\xb0\x0c\xb1\x39\xa9\x59\x65\x06\x1f\x8d\xe5\x74\xd7\x4f\x5e\x86\xbe\x7a\x0e\x3b\x00\xcd\xf5\xf3\xe0\xa4\x5c\x01\x14\xf9\x6a\x83\xb2\xa6\x89\xd3\x3b\x76\x17\x3a\x15\x38\xce\xa6\x58\x75\xc7\x15\x90\x1f\x63\x2f\x3b\xcc\x9e\x68\x3d\x93\xd5\xc3\x06\x28\x5b\xa8\x5a\x2c\xa6\xb1\x38\x1d\x81\x05\xcb\xff\x12\x0b\xde\xe4\x0d\x5e\x77\xb6\x04\x6e\xdd\x47\x2b\x3e\x8c\x1d\x1d\xb0\xa2\x02\xb9\xa9\xe5\xfa\x33\x5b\xad\xf3\x92\x52\x6d\xda\x71\x5a\x0f\x63\x13\xd6\x86\x9b\x5c\xd8\x84\x8c\x9d\xd4\xd9\x74\xd7\xa2\x90\x2f\xe1\xb9\xb0\xb6\xe1\x1d\xcc\xd2\x58\x2c\x8a\x96\x6b\x4b\x18\xbf\x02\xaf\xb0\x5f\x78\x85\x2b\x50\x4e\x7b\x87\x72\x5a\xb1\xe0\x20\xfe\xd3\x7e\xd1\x20\xbe\x7d\x0b\x30\x08\xa9\x19\xa9\x15\xe0\xbf\xcf\x63\x47\x37\x15\x06\x6a\xa1\x2f\xe2\xc3\xc1\x4b\x3d\xfa\x0b\x56\x27\xe8\x6b\xbb\x5c\xa2\x5a\xb7\xb4\x0a\xa2\xc1\x20\x2f\x27\x1d\x24\x66\xc8\x28\xa4\x40\x27\x31\xca\xdb\xb2\x72\xbf\xc8\x63\xe3\x64\xe9\xc9\xfd\x4b\xc1\x93\x69\x75\xe7\x7c\x25\x45\x88\x7b\x2d\x70\x68\x29\xa5\x74\x1f\x84\x64\xea\x1e\x54\x6d\xf9\x38\xc9\xae\xde\x41\x97\xd0\xe7\x1e\xf4\xee\xdb\x1e\xb7\xe1\x82\x7f\xae\x6a\x43\x32\x30\xae\x50\x1c\xa0\x34\xd7\xfe\x55\x42\x74\x18\x94\xa5\x8c\xdd\xff\x08\xf6\x24\xab\x4a\x2a\x91\x50\x59\xce\xb4\x7a\x1b\x36\x56\xa4\xea\xe9\xff\xdd\xe1\xe0\x75\x5e\xf9\x2a\x6f\x84\xdd\x02\x02\xd3\x8b\x15\xc2\x85\x46\x08\x50\x78\x82\x53\x6c\x3b\x45\xc0\x1a\xf3\x8f\x3e\x2a\x4f\xe9\x85\x67\x6e\x02\xaa\x04\x63\x03\x9f\x52\x5d\x8f\xfb\x74\x52\xd5\xd3\x01\x12\xf4\xe5\xfb\x8e\x24\xfe\xaf\x87\xae\x48\xe2\x2b\x92\xf8\xe1\x93\xc4\xbf\x6b\x23\xf3\x7c\x64\xdf\xc8\x3c\xcf\x2f\x23\xf3\x3c\x0c\xb8\x3c\x3f\xed\x31
8c\x15\xd9\x76\x78\xcf\xc8\xa3\xbe\x69\x85\x1a\x62\x74\x3a\x6d\x36\xc2\xfb\x18\x57\xac\xf9\x16\xa3\x2a\x1f\xd8\xdf\x98\x8a\xe4\x32\x0c\x2c\x65\xfc\x68\xa7\xf8\x65\x1a\xd3\x34\x2b\xd8\xe7\x0e\xdb\x26\xbe\x9d\x70\xcf\xfa\x6f\x3b\x1c\x2c\x0f\x89\x0d\x57\x81\xc9\x10\x8e\x99\x08\x1e\xe6\x5d\xb9\xe7\x28\xd3\x7f\x19\x21\x16\x7a\x45\x6a\xc9\x8b\x96\x86\xed\xc4\x8e\x6f\x78\x43\xe2\xb4\x37\x3c\x0a\x6a\xde\xf0\x94\x81\xc9\x65\xb9\x61\xec\x97\x2a\x43\x03\xbc\xdf\x5c\xd9\xbb\x69\xe7\x2b\xde\x60\x81\x3a\x4a\x61\xeb\xe0\x6f\xd4\xa5\xb6\x88\x00\x27\x83\x96\x5e\x38\x9a\xc2\xd4\x22\xc5\x37\xa5\x36\x56\xc2\xa4\x0d\x31\x96\x30\x29\xc3\x42\xb9\xaa\x20\xec\x8a\x66\x9c\x09\x1c\x1a\x24\x45\x25\x90\x65\x2c\x06\xb6\x8b\x46\x43\x74\x0b\x29\x89\x6b\xec\xf9\x96\xd9\x6e\x2d\x88\x74\xf8\x39\xe0\xb4\xee\x32\x34\x9d\x66\xb5\x8a\x26\xc7\x66\x92\xc1\x2e\x4c\x68\x8d\xe2\x66\xed\x58\xe2\x7e\xa3\x32\x94\x95\xfd\xe7\x2b\x8a\xa8\xeb\xf5\x95\x21\xac\xe8\x25\x8b\x7d\x4d\xeb\xd8\x16\x6b\xbb\x76\xba\x5b\x5c\xd0\x94\xdf\x5f\x87\x84\xa9\x5e\x57\x56\xf4\x96\x34\xd3\x09\xb5\x93\x72\x7d\x0e\x2d\x2c\xb2\xf9\xe0\x8e\x9f\x40\xe0\x77\xf8\x34\x41\xed\x18\x46\xf5\xbb\x2f\xdd\x53\x1b\x52\xe5\x28\xe7\x37\x4c\x96\xea\x23\x75\xb5\x1e\x2c\x9a\xa6\xf2\xb9\x41\x04\x80\x5c\x49\xb4\x9f\x0f\x59\x49\x26\x01\xd8\x85\x1f\xf8\xc3\x8a\x4e\x01\xf8\x64\x25\xf8\x50\x85\x1c\x94\x46\x62\xf1\x6e\x96\xb6\xb3\xb0\x03\x3c\xb5\x26\xd3\x2b\xb3\x47\xb5\x28\x79\x36\xb7\xc9\x1a\xe0\x0b\x59\xda\xec\x35\xa4\x6a\x99\xb6\x0c\xef\x95\xb5\x2e\xa4\xbe\x06\x13\x01\x8f\x1d\x5c\x94\x72\x7c\xe0\x0c\x20\xc2\x04\x68\xb2\xf4\x0e\x41\xf0\xfa\x44\xbe\xa8\xb9\x7f\x0c\x95\xa0\xfc\x44\x1e\x35\x91\xe9\x9d\xb7\x7b\x61\x16\x26\x85\x10\x4d\x39\xeb\x6b\x26\x28\xd0\xd6\x3e\xac\xfc\x6a\x9a\x88\x38\x4b\x71\x8d\x28\x84\x70\xe5\x40\xdb\x3c\x21\x62\x13\xa2\xf3\x9d\x27\x44\x2c\x53\x42\xc4\xb9\xe0\x29\x2a\x09\xdf\x4e\xb5\xb1\xea\x09\x73\x93\xb7\xd2\xb4\x46\x28\x6d\x80\x3a\x6b\xb4\x6d\xbb\xd4\xcf\xb9\x59\x9e\xc3\xec\x2a\x1a\x42\xc7\x44\x81\xbe\x75\x34\x78\xa9\x37\x78\x7d\xcb\x18\xd0\x41\x76\x5a\x87\x8d\x88\x8e\x1f\x2e\x19\xed\x4a\x54\xe4\x16\xf5\x2c\x14\x94\x94\x5c\x30\x13\xb5\x0d\xef\xd1\x4a\xfb\x6d\x62\xa5\x74\x3c\xe3\x86\x37\x86\xdf\x75\x36\x80\x2f\x57\xd8\x3d\xac\xfc\x8e\xff\x34\x76\x76\x57\xd1\x89\x5b\xd8\x93\xd8\x0b\x3d\x46\x1f\xf6\xfb\x9b\xc7\x47\x6f\x11\xf4\x38\xaf\xd7\x59\x70\xbd\x62\x33\x35\x49\x96\x56\xe4\x23\xdd\xac\x53\x64\x3b\x46\x12\xa3\x92\xfe\x70\x47\x5e\xfe\xf2\x81\xa1\xdc\x20\x67\xe8\xd4\x38\xd3\x6c\xca\x61\xf4\x5f\x7d\x20\xb8\xa9\x74\x0d\x77\x93\xa2\xd7\x8d\x89\x85\xc2\x9c\x6e\x28\x43\x6f\x7e\x81\x87\xf8\x6c\x6d\xc3\xab\x44\x5d\x67\x7c\x1f\x18\x65\xa7\xd8\xf8\x4a\x9a\x43\xea\xb9\x7f\x75\xf0\x1d\x72\x69\x9f\xa5\xdf\x5a\xb3\x50\xc7\x57\x7b\x3d\xdc\x5f\x61\x95\xa8\xeb\xff\x78\x25\xf8\x21\x00\x27\x9e\x5f\x18\x78\x1c\xc1\x82\x69\xd5\xc7\x69\xda\xad\x87\x8d\x55\x7e\xf4\xf8\x89\xeb\x6a\xc7\xe4\xff\x4d\x5d\x3f\x31\x09\x98\xde\xd5\x38\x95\xea\xe7\xd1\xe3\xa7\x6e\xa8\x9d\xb8\xf6\x1a\xb8\x77\xfc\xd4\xc4\xa4\x5c\xa0\xd6\x7d\xc8\x53\x6c\x48\x11\x79\xf4\xe8\x89\x13\xd7\x50\x21\x27\xae\x91\x4a\xe5\xc2\xda\x29\x3c\xae\xe5\x29\x6d\xc2\x52\xe9\xec\x61\x70\x38\x21\xbf\xeb\x1c\x22\xb9\x3a\xa4\xc6\x1a\x87\x45\x2b\x05\x97\xef\x4c\x9c\xa7\x93\xbc\x21\x32\xf0\x70\x59\xab\xad\x91\x76\xba\x69\x82\x59\xe4\x90\x1a\x2e\xef\x55\x81\xce\x6b\x52\xa7\x38\xc0\xd7\x81\x19\xd6\x11\x19\xeb\x6c\x3c\x49\x9b\x18\x82\xbe\x1a\x7c\xaf\x62\xc4\x9c\xe6\x17\x80\x4f\x23\xcd\x69\x4b\x70\xba\x0c\x64\x66\x23\x4c\x6c\xb2\xe2\xa6\xc0\xa8\x0d\x61\x65\x22\x60\x97\x00\xb8\x7b\xa2\xe9\x09\xb5\xb5\xf1\x10\x4e\xba\x45\xd1\xf2\x9f\xb9\x7b\x60\
xff\xe0\x49\xc6\xe0\x5b\xa4\x4a\xe2\x74\xb3\x74\x2d\x6a\x6a\x77\xa3\xaa\x31\x7b\xdf\x23\xd8\x63\x87\xb3\x23\xf8\xaf\x7f\x44\xb0\xbe\x90\x36\x87\x81\x2a\x9b\x98\x11\x9c\xb8\xb2\xc9\x59\x2f\x41\x11\x88\xd8\xf0\xcb\x36\xf4\x85\x15\x58\x66\x67\xf1\xc3\x32\x76\x79\x33\xe4\xcb\x25\xc8\xf2\x8f\xb1\x2b\x56\xbb\x2b\x00\x7d\x0f\x9f\xb3\xeb\x33\xb6\x4d\xfc\x63\xfb\x06\xe8\xfb\x86\x00\x4e\x66\xbf\xaf\x30\x4a\x3f\xe9\xed\x20\xa2\x71\x21\x6d\x02\x34\xe9\xfd\xde\x92\x0d\x30\xa5\x13\xfb\x45\x0e\xb1\x18\x65\xac\x52\x20\xae\xf8\xfa\xe2\x7a\xb2\xd7\x55\xb4\x3e\xfa\xb2\xca\x0e\xa2\xe2\x65\xd3\x10\x9f\xf4\x4f\xbc\xf3\x69\x0e\x6b\x1e\x8f\x32\xea\xa0\x52\x82\x2b\x85\x46\x81\x74\x83\x4c\x78\x4b\x37\xee\x75\x41\xde\x87\x85\xa8\xf1\x05\xcd\x7a\x4b\x21\x51\x2a\xc9\xd5\xc0\x8e\x7e\x9d\x7b\xe6\x41\xef\xb6\xed\xed\xc0\x47\xfd\xef\xaa\x0e\x87\x61\xb4\x22\xc2\x17\xd2\x26\x63\x7f\xe1\xb1\xab\xb7\xb1\xdf\xdf\x91\x8b\x8c\x88\x09\xfd\x5f\xf2\x82\x27\x5b\xbf\xf9\x4a\x1a\x37\x95\xa5\x04\x12\xa1\xdc\x24\x7a\x70\x96\x01\xa4\x52\x8e\x2f\x0c\x8d\x83\x6a\xb2\x3a\xb9\xe6\x9e\x11\x9c\xbf\x28\xff\xd0\x7e\x26\x59\x00\x26\x41\x42\xf8\x1f\xda\x92\x40\x27\x09\xae\x06\x96\x2c\x34\xe4\x86\x88\x11\x95\x21\x3e\x11\xd7\x51\x66\xf6\xfa\xff\x1a\x63\x77\xef\x15\x9b\x8b\x20\x29\xbf\x61\x01\xba\xfe\x7f\x8a\xfa\x73\x05\x97\xec\x0a\x2e\xd9\x15\x5c\xb2\x7f\x15\xb8\x64\xff\x32\xc2\xae\xdd\x1c\x5f\xfc\x1b\x3d\x1a\xe6\xeb\x1b\x3a\xf2\x6f\x30\xf6\xe6\x6d\xe3\xec\x89\xc3\x72\x19\x23\x29\x29\x1c\xbc\x93\x7f\x39\x18\xfc\x8d\xe7\x00\x9e\x34\xe0\xa1\x32\xc4\x89\x5c\x6f\xb7\x77\x85\x94\xc7\x8d\xd5\x1a\x9f\xe1\xb3\xce\x63\x56\x84\x34\x25\x98\x00\xbe\x09\xe5\x31\x59\xb9\x3a\x64\x49\xb5\xde\x52\xcc\x4a\x76\x64\x75\x26\xda\x40\x7e\x8e\xeb\x45\x8e\x6d\x2c\x8a\x9a\xfb\xcd\x7c\x6b\x0e\x37\x29\x03\x86\x92\xb8\x8d\xe3\xeb\xf3\x73\xce\xac\xfb\xea\x01\xf6\x62\x8f\xe9\x7b\xfe\xb3\x82\x0e\x55\x32\x6a\x6a\xa3\x84\x33\xa9\xd5\xed\x84\x7a\x6c\xa8\x66\xaa\xa8\x30\xd4\x88\x77\xfa\xf9\x7d\x71\x15\x5f\xa8\x76\x9b\x53\x8b\x67\x66\xe6\xce\x9f\xa9\x75\x1c\x7c\xa7\x17\x56\x34\x7a\xcc\x3f\x7b\xc1\x5f\x3f\xc4\xe8\x31\x58\xc1\xe9\xcb\x85\x21\x73\xd9\x7a\xe1\xc3\x9e\x85\x40\xf3\x5e\x2f\xf8\x49\xcf\x58\x93\x1e\x5a\x34\x9a\x7d\x36\xc1\x82\xb2\x79\xa3\x03\x65\xf3\x8a\x3d\x43\xd9\xdc\x65\x9a\x6e\x30\x81\x43\x85\x63\x43\xd6\x05\x2b\x0f\x5d\x13\x90\xd8\xfc\x24\x49\x42\x5e\x20\xb9\x8e\x97\x60\x1d\xb3\xe7\x57\xd8\x93\x76\x44\x21\x45\x40\xf0\x9f\xf6\x82\xeb\x9d\x2b\xda\xfa\xba\xb9\x6f\xc9\x30\x59\x0d\x50\x9c\x3c\x22\x4e\xc3\xe6\xe9\x30\x96\x3a\x5e\xe6\xf7\xb6\x60\xc8\x30\xdd\x63\x5e\xa0\x13\xe8\xcd\xf6\xb5\xe1\x44\xd7\xee\x59\x54\x7e\xb4\x5a\xa7\x17\x6a\xec\xbf\x1f\xb4\xdd\x1c\x5b\x98\x23\x14\x72\x32\x84\x0f\xbe\xff\x60\xb0\x68\x5f\x70\xb5\x7e\x75\x67\x12\x06\x0a\xd8\x1c\x2d\x45\x44\x29\x2d\xda\xa9\x1a\x16\x1c\xb4\xb7\xda\x86\x37\x06\xda\x54\x5e\x16\x4e\x57\x6c\x79\xfb\xb4\xe5\xf5\x18\xf5\xac\xbf\x1a\x7c\x07\xfe\x35\x7c\xc8\x1c\xcd\xee\x29\xec\xc9\x7b\x8a\xe6\x54\x85\x5d\x31\x21\xee\x27\x04\x6f\x79\x7b\xd3\xcb\x71\x7f\x6a\x58\xfa\xbd\xbd\x32\xcb\xe1\xe2\xaf\xad\xb0\x13\x3b\x1a\xc4\x85\x4c\x18\x27\xbc\xff\x05\x2f\x78\x86\x73\x45\x6b\xe9\xad\x5e\xdc\x8a\x20\xd1\x57\xe5\x26\x27\x26\x6c\x8c\x1f\xc5\x3e\x9d\xe4\x4d\x81\x69\xef\xc0\x62\x8e\xc9\xcd\x59\x16\x89\x26\x4f\x7b\x25\x20\xa5\x1e\x9b\x63\x8f\x56\xc3\xaf\x96\xfc\xf1\xe0\x09\x4b\xc3\x9c\x6c\x8b\xee\x73\xf6\x9c\xaf\xb2\x91\x5e\xd4\xf4\xbf\x2b\xf8\xd6\xa1\x6f\xde\x31\x3f\xe7\xe6\xa8\x1d\x70\xf2\xbe\x86\x89\x5c\x92\xe5\xfe\xff\x1d\x0d\x5e\xe1\x0d\xb9\x51\x8e\xa3\x30\x82\x37\x74\xc5\x2e\x8f\xe8\x05\x98\xa1\xd3\x3a\x7e\x49\x8a\x93\xa4\xa9\x43\xbb\x4c\x0e
\x9e\x8d\x4d\x83\x88\xad\x61\xe2\x16\xe2\xf6\xe1\x1f\x8f\xb0\x67\x5b\x7e\xbe\x34\xa8\x6b\x1f\x5f\x84\x31\x1b\xf2\x0b\x5b\xd4\x89\x9c\x34\xf2\x44\x3a\x77\x61\x89\x22\x3c\x8e\x16\xfd\x6e\xd4\x00\x1b\xc6\xcc\x5d\x4b\xee\xeb\xf9\x84\xdd\x99\xcf\x06\x5f\x61\x2f\x58\x99\x5f\xd8\xfd\x07\xe7\x17\x06\xbf\x77\xeb\xec\x19\x9e\x66\x66\xf3\xde\xea\xe3\x5f\xf0\x54\xa4\xdd\xef\x79\xc1\x7d\x0b\x10\xa6\xe6\x08\x3b\x39\x8f\x33\xe4\xf6\x50\x1d\x8c\xc1\x6c\xf3\x2d\xd0\x18\x26\xb9\x80\x43\x35\xe8\xd7\xb8\xb9\x68\x2c\xa5\xd2\x88\x00\x6c\x51\x98\x10\x4f\x5e\x94\xf0\xa8\xb0\x25\xe8\x14\xab\xb2\x27\xed\xc0\x9e\x9c\x15\x4b\x64\x70\xdd\x11\xc3\xf6\x6b\x46\xd9\x13\x36\xd3\x9e\x9c\x03\xce\xdf\x8e\x04\xe7\xe0\xaa\x3d\x35\x91\x99\x39\xee\x57\xff\x3f\xf6\xfe\x04\xcc\x92\xac\xac\x13\xc6\x9f\xb8\x99\x55\x95\x75\x7a\x81\x0e\x70\x70\x04\xf5\x4c\xb4\x50\x99\xcd\xbd\x37\x2b\xab\x37\xba\xba\x69\xc8\xae\xac\xee\x4e\xa8\x25\xc9\xcc\xea\x96\xcd\xce\xb8\x37\xce\xcd\x0c\x32\x6e\xc4\xed\x88\xb8\x99\x75\x7b\xc0\x7f\x0b\x0c\x28\x22\xca\x80\x08\xc5\xa2\xc8\x3a\x88\x8e\x8c\x20\xf4\x20\x8b\x28\x8e\x02\x36\xfc\x01\xc1\x91\x45\x45\x11\x17\x1c\x9d\x81\x71\x69\x1c\xbe\xef\x39\xef\xfb\x9e\x13\xe7\xc4\x8d\x9b\x99\xd5\x59\xbd\x38\x5f\xc2\xf3\x74\xe5\x8d\xe5\xc4\x59\xdf\xf3\x9e\x77\xf9\xfd\x7c\x48\x14\x04\x6b\x35\xe2\xb8\x42\xc4\x0c\xa4\xc8\x6a\xd6\xf9\xc9\x9b\x44\xee\xf3\x8e\xf0\xa5\xf8\x9a\xaa\x62\x81\xfe\xf1\x31\x76\x77\xa1\xb3\xff\x6f\xc7\xfb\xc6\x16\x3a\xfb\x7c\xce\xfd\x5e\x2f\x82\x3c\x62\xa9\x85\xea\xe8\xaa\x05\x4d\x08\xdd\x22\x42\x44\x4c\xe9\x7c\x40\x21\x22\x97\x0d\x84\x3b\xcd\x01\x27\x17\x57\x3f\x4f\x1a\x18\x2c\x40\x1f\xc6\xef\x6e\x81\x18\xf9\x42\x45\x84\x7d\x97\xd7\x95\xc5\x76\xfa\x11\xd2\x55\x2b\x8d\x41\x65\x7d\xc6\x3a\xdb\x19\x7a\x83\x3c\xbf\x22\x84\xa0\x1c\xdf\xa4\xcd\x4e\xad\xae\xe0\x93\x41\x98\xad\xd7\x0b\x02\xbb\x3a\x6f\x36\x9b\x53\x76\x25\x0e\xb2\xcb\x4d\x85\x16\xd1\x4d\x94\x32\x7b\x5c\xfe\x04\xf5\xed\x8f\x26\xbc\xc7\xeb\x5f\xf6\xda\x80\xcb\x5b\xe7\x7c\x7c\xe0\x00\xfb\xa4\xe9\xdf\xfa\xf0\x2e\x73\x3e\xfe\x7f\x0f\x51\x2e\xd7\x5e\x82\xc7\x85\x52\x2f\xd7\x55\x82\x47\xcb\xf3\x42\x05\x42\x55\xcc\xa9\xcc\x4e\x24\x32\x05\xe4\x0c\x9b\x66\x8d\xd1\xa4\xbe\x15\x13\x78\x4f\xa9\xdc\x8d\x52\xe9\x6f\xaf\x54\xde\xe8\xde\xd0\x18\x4e\xcd\xd0\x4e\x3d\x1a\x93\x52\x6e\x87\x96\x26\x8c\x7d\xe0\x20\x33\xc7\x90\x82\x35\x4c\x38\xce\x85\x34\x4c\x80\xdd\x3b\xf2\x89\x94\xfb\xc5\x07\xbd\x6b\x86\xae\x56\x45\x91\xf4\xe8\x21\xde\x96\x4f\x89\x11\x12\xea\xe3\x7b\xe9\xd7\xbb\x5e\xd3\x03\xb5\xa6\x7b\xde\x0f\x85\x55\xc0\x72\xd6\x78\xc9\x77\x8b\x55\xfd\x64\x76\xfd\x16\x61\x75\xdb\xcd\x88\xbd\x15\xbe\x8b\xc8\x93\x4f\x98\x3b\xf3\x87\x76\xb9\x33\xff\x68\xf5\xce\xfc\xe0\x51\x75\x77\xb7\x17\x57\x4f\x73\x6f\x55\x9c\x90\x43\x12\xa4\x2a\xc7\x4c\x0b\x32\x63\x1a\x92\x30\x63\xaf\x72\xd8\xf5\xb6\xe3\x68\x2d\x49\x89\xa4\x4f\xcf\xd5\x25\x11\x75\x28\x24\x61\xb1\x1f\x89\x6c\x51\x6c\x84\x62\x13\xc0\x8b\x96\x2d\x29\x74\x33\x9b\x63\x07\x35\x4c\x8a\x7b\xad\x77\x45\x81\x2e\x9d\x27\x5c\xc8\x79\xec\xe7\x02\xb2\x5b\xc0\xd9\x37\x22\x98\xe0\x2f\xab\x21\xdc\x8f\xf7\xd6\x44\x57\xa4\x25\x8d\xff\x83\x07\xbd\xa7\x2d\x5a\x7c\x20\x42\x3d\xa7\x54\x50\x95\xf8\x80\x20\x56\x94\xd6\x46\x28\xaa\xea\x24\x10\xa4\xe1\x86\x48\xed\x43\xe5\x9f\x4c\xb0\xaf\x4c\xb0\x47\x61\x29\xc7\x22\x3f\xec\x2e\x8b\x6e\x4f\x8a\x35\xf7\xe3\x13\xec\xc6\xed\x8f\x39\x1a\x42\xfe\xb6\xe1\x22\xbc\x17\x4e\xdc\x6e\xc0\x60\x83\xd1\x16\xa2\xff\x60\xdd\xfb\x71\xd0\xf0\xa3\x24\x16\x7c\xe1\xb6\x63\x80\xc5\x94\x26\x1b\x61\x16\x92\x36\xad\x90\xc1\x97\x31\xd6\xa6\xc0\xcd\x83\xb5\x5d\xd9\x51\x98\xe9\xd4\x12\x01\x92\x79\xe3\x97\x01\xe9\x6b\x33\x16\x3
a\x1a\x69\xe1\xb6\x63\x75\x1e\x36\x45\x53\xfd\xd2\x8f\xa2\x3d\xc3\xc0\xc4\xd5\x1c\x34\x10\xec\x03\x55\x31\x3d\x9c\xe6\xbb\x2b\x37\xc8\x4a\x82\x93\xb5\x71\xc3\x46\x01\x66\x7e\xe3\x8a\x3c\x0f\xa5\xf2\x01\xfb\xaa\x09\x2e\xab\x25\xd5\x0a\x45\x58\x11\x24\x4d\xb6\xc2\x41\xfa\xe2\x51\xb4\xc9\x17\x12\xd8\x3f\xc2\x00\x37\x2f\xf8\x36\xed\x31\xb9\xea\x26\x1d\x72\xdf\xf6\x73\x81\xf8\x71\xca\x46\x20\x65\x1b\xbc\x4e\x68\x39\xb2\xfe\x76\x08\x7a\x9e\x24\x3c\x4a\xe2\xd5\xa9\x26\x63\xb3\x31\xfa\xbf\xa4\x2c\xc6\x96\x2a\x02\x9c\x22\xa0\x86\x0a\x95\xfd\xab\x8f\x6c\xb2\x16\x50\xb3\x2b\xe2\x24\xbf\x42\x0f\xbe\xc1\x3b\x09\xc7\xb1\x8d\x24\xd4\xc9\x6b\x31\x6c\x91\x18\x27\x45\xbd\xd4\x1a\xf0\x6e\x98\xe5\xfe\xba\x68\x72\x00\x2f\x54\x5b\x42\x2f\x51\xe1\x0d\x31\x9e\xa5\x20\x5e\x40\x21\xa3\x15\xe5\xc8\x2a\xc3\x7e\xdf\x4d\x36\x08\xc7\x10\xd1\xf1\x00\x92\x5c\x45\xa1\xd2\x53\x5d\xe1\xa3\x09\x46\xd5\xb6\x68\x4b\x5d\x0f\xf5\x1a\xee\xfb\xb4\x35\x60\x7f\x48\xe5\x01\x26\x57\x6a\x06\xdb\xaa\x7a\x26\x31\x79\x68\xe4\x0f\xe8\xcc\xac\xc9\x4f\xa9\x98\x91\xdc\x48\x73\xa2\x6d\x27\x16\x6d\x91\x65\x7e\x3a\xc0\xec\x99\x30\x57\xf9\x41\xfd\x4c\x74\xfa\x11\x1e\xad\xbb\x7e\x8c\x50\x74\x60\xbb\xcb\xf2\xb4\xdf\xce\x31\xc3\xa0\x95\x26\xeb\x22\xe6\x84\x3a\xd6\x64\x6c\xd9\x4a\xd2\x4b\x55\xcc\x19\x28\x28\x71\x42\x49\x4f\x05\x2a\x34\xa4\x28\xb6\x06\x26\x18\x15\xb5\x46\xb6\x1f\x71\x22\xc3\x1c\x3a\xa2\x25\xe4\x87\x08\x24\x92\x31\x25\xe4\xea\x16\x9a\x77\x1c\x46\x4d\xf6\x87\x13\xec\x31\x23\x88\x23\xdd\xdf\x98\xf0\xd6\xe4\x1f\xa8\x0b\x68\xb9\x1a\xd4\x75\x3a\x23\x08\x76\xc0\x8d\xee\x10\x97\x13\x08\xe8\x22\xde\xb8\x65\xc1\x46\x01\x66\x96\xdc\xa8\x50\x02\xca\xb2\x6f\x0a\x81\x1a\xd3\x96\x7b\x5f\xd9\xbf\xa7\x4d\xee\x05\x13\x3f\x74\x2a\xdd\x9a\xa1\xd1\x3d\x67\xb7\xa1\xc4\x8f\xdd\x22\x92\x98\xe5\x6c\x1f\x28\x23\xee\xba\xd7\xc0\xa5\x83\x31\x97\x7e\x84\x12\xd3\x5c\x54\x3a\x26\x4c\xae\x9b\x9d\xdb\x3c\xd5\x92\x2e\x8a\xba\xcf\x69\x6f\xaf\xe5\x3d\xd5\xbd\x51\x7b\x3a\xa0\x08\x5b\x37\x2b\x9d\x46\x65\x95\xca\x8e\x8f\xdf\x1d\xab\xd4\xa2\x94\x03\xe1\x66\x29\xf8\x34\x72\xe2\xeb\xc7\xbc\x67\x55\xde\x31\xed\xa8\x05\x20\x67\x11\x98\x36\xd9\xee\xf5\xeb\x0a\x4e\x1f\x56\x63\xbe\x26\xc2\x94\x27\xfd\xbc\xd7\x07\x23\x78\xd7\xcf\xcf\x39\x9a\xa7\xc1\x12\x34\x6f\xab\xb1\x9f\x71\xd8\x81\x20\xdc\x08\xb3\x24\x75\x5f\x7c\x3e\xda\x7b\x05\xcf\xf1\x71\xdb\xef\x61\x55\x41\xa9\x25\x2a\xa0\x4c\xb7\xa0\x6e\xd1\x6f\x79\x33\x1e\xbb\xda\x60\x95\x98\xf2\x1e\xa7\xa4\xf7\xd1\x52\xb0\x9e\xec\x1f\x73\xd6\x3e\x93\x5d\xa2\x3b\x08\x32\x62\x6e\xf5\xae\xd7\x24\xea\x20\xbe\x8f\xda\x88\x7c\x14\xd8\x52\x27\xd4\x7f\x1f\xa2\x16\xb8\x88\x37\xf8\x86\x9f\x5a\x0b\xe2\x15\x8f\xb6\xfc\xf6\x9a\xcd\x22\xe9\x25\x51\xb2\x3a\x58\xea\xc9\xcd\xeb\x18\x6c\x77\x7e\x18\xe7\xee\x57\x1f\xe5\xfd\xf0\xa8\x9b\x46\xee\x96\xca\xcc\x83\x27\x30\x48\x10\xc2\x09\x12\xb9\x02\xba\x09\xc9\x24\x4c\xe4\xca\xa9\xb8\xe6\x39\xe7\x40\xd7\x3f\xbb\xb4\x2e\x36\xcf\x39\x17\xa9\xab\x4f\x17\x83\x73\xce\x65\x72\x03\x3e\x13\x67\x7e\x1e\x66\x9d\xd0\x6f\x45\xf6\x60\xff\x9e\xcb\xbe\xe3\xb0\x4b\x20\x36\x47\xcf\xbb\xbf\x72\xd8\x53\xef\xcf\x81\xcd\x2c\xc4\x7b\xab\x63\xfd\x96\x82\x56\xa9\xd4\x1d\x60\xfd\x33\x5b\x06\x8a\x62\x66\x84\x3c\x13\x74\x00\x40\x7e\x6a\xa0\x4c\xcc\x10\xed\xc7\x79\x39\x3f\x09\xd4\x52\x44\x58\x04\x30\x83\x80\x00\x26\xe4\x9c\xb7\xd9\xa6\x55\xdf\xf0\x20\xe9\xfa\x61\xdc\x64\x9f\xda\xcf\x54\xd7\xb9\x1f\xdd\xaf\x68\xd2\xdf\xb5\xff\x24\x5e\x2b\xe1\xbe\x06\x62\x35\x15\x30\xd5\x50\xb3\x27\x60\x0e\x54\x78\x62\xb1\x21\xa4\xaa\x12\x84\x19\x85\xc1\x06\x4d\x7e\xbb\xd4\x7f\x56\x86\x06\xe1\xc9\x73\xc9\xa9\x24\x5f\xa2\x14\xa2\x15\x40\x2c\x25\xfd\xba\xeb\x9f\x
0d\xbb\xfd\x2e\x00\xbd\x86\x39\xa0\x29\x87\x1d\xa5\xaa\xb5\x44\xbe\x29\xc8\x59\x51\xb4\xd8\x9e\x25\xe4\x23\x22\x0f\xa3\x6e\x31\x89\x01\xbe\x1a\x25\x2d\x3f\xe2\xdd\x30\x96\x9f\xc1\x5c\x69\xad\x4e\x03\x67\xd1\x95\x8d\xbb\xe4\x21\x87\x94\xb2\x3a\x57\x7d\x41\x3e\xb4\x3c\xe1\x33\x75\xe2\x9f\x09\xb2\xe2\xc8\x01\x61\x63\xd6\x4c\x52\xb3\xd8\xcf\xf8\xcc\xf4\xcc\xf4\xe1\xa3\xfc\xf9\x5c\x16\x3d\x43\xff\x1e\xa1\x7f\xaf\xe4\xcf\xe7\xcf\xe7\x9c\x2f\x70\x6e\xfd\xcb\xe1\xdf\x86\x3c\x22\x18\x75\x98\x91\xd5\x6c\x27\x5d\x6a\x30\xe8\x54\x8a\x0c\xbe\x48\xca\xca\x13\x2a\x1a\x74\x64\x80\x4d\x96\x75\x98\xb9\xde\x38\x72\xcb\x5e\x87\xe4\x2d\xa8\xd4\x24\x54\x69\x8a\x6f\x82\xf2\xd3\xf5\xd7\x71\x62\xcd\xb6\xf3\xbe\x1f\xc9\x8f\x4f\x1e\x69\x1c\x9e\xe2\x49\x6c\x3f\xbe\x11\x26\x52\x0d\x52\x35\x9c\x9c\x99\x6a\x0e\x55\xf9\x48\x45\x95\xad\xda\x42\x2d\xfc\x78\x00\x45\x8f\x9e\x35\x6a\xc2\xcc\xc6\x83\x4d\x7f\xa0\xa7\x8d\x5a\x58\x52\x2a\xf0\xb5\x70\x75\x0d\x40\xdc\x45\x5b\x04\x5a\xbf\xc7\x59\x10\x2a\x3d\x14\x0b\x1d\xf0\x10\xbc\x61\x87\x90\x4c\x43\x89\x41\xb9\xd5\xe8\x48\xd7\xc2\x25\x35\x03\x63\x7e\x58\x1d\xa1\x08\xa3\xc6\x4e\x57\x7f\x47\x8d\x99\xf2\xc7\xfd\xb9\x9a\xf7\x13\xb5\xe5\xe2\x82\x9a\xe4\xeb\x62\xa0\x81\x70\x61\xca\xc0\x31\x23\x50\xd5\x43\x87\x26\xad\x7f\x9a\x60\x61\x06\x6f\xc9\x3a\x60\x10\x60\x5b\x29\x9c\x43\xb9\xe3\xf9\x50\x34\xa3\x96\x94\xfc\xf6\xe2\x49\xa4\xa8\xb9\x61\x5d\x0c\xea\x58\xd0\x8d\xa8\x8d\x7b\xad\x7e\x7b\x5d\xe4\x1e\x4e\xf2\x3c\x45\xdc\xe5\x7e\xce\xc9\xd3\x1b\x0c\x0b\x9b\x3c\xc1\xc2\xf0\xcd\x51\x7d\x6a\x21\x11\x1e\x60\xc3\xb2\xd9\xbd\xe7\x80\xf7\x4b\x07\x6e\x2f\x5f\x1e\x4e\xdc\x0e\x84\x4f\x1d\xe3\xab\x43\x74\x98\xf3\x20\x11\x59\x7c\xa8\x18\x5d\x68\x3e\x2e\xc1\xb6\xde\x6b\xe4\xec\xb4\xa4\x0f\x9f\xa4\xcd\x76\x8a\xe7\x22\x8a\x14\xd5\x18\xde\x4c\x61\xac\x21\x55\x9c\x9e\x0e\xa1\x04\x7b\x26\x56\xbe\x68\xbe\x94\x17\x36\x11\x39\xcb\x23\x0a\xfe\xae\x33\x0e\xc7\xc6\xd5\x70\x43\x2e\x8d\x1d\x4d\x5d\x5c\x9e\x6b\x22\xea\xf1\x54\x04\x7d\x3c\xaf\x32\xce\xb3\x75\xb1\xd9\x64\xb3\x46\x4b\x11\x75\x46\xcf\x0a\xcf\xea\x53\x0f\x4d\x0a\xb1\xbd\x30\xc3\x0e\x8c\xb9\x82\x75\x51\x1e\x78\x02\xc0\x41\xa7\x74\x96\x85\xab\x10\xdf\x8a\xca\xa7\x9f\xa3\x01\x01\xaa\xa5\x84\x81\x47\x8b\xdf\x93\xe2\x22\x4b\xac\x19\xf8\xe0\x08\xdc\x2b\xa5\xb0\xdb\x5a\xe0\x2e\xc0\xff\x87\x05\xef\x7c\x87\x57\x4c\x41\x5d\x17\x6b\xf2\x9c\x8f\x2c\x3e\x02\x22\xf3\xca\x29\x43\x24\x5f\x39\x7d\x64\x7a\x66\x52\xd6\xf5\xc8\x94\xac\xb5\x25\x6c\x67\xb4\xb0\xd5\x6f\x52\x8d\x44\x66\x89\xdb\xf9\x98\x23\x15\xd8\x66\x92\x06\x8a\xeb\x01\x3b\x13\x6a\x94\xe5\x64\x2e\x08\xbb\x6a\x09\xa3\xb9\xa2\x98\xac\x9b\x89\x5c\x38\x20\xf4\xc3\x9c\x5f\xd1\x4d\x52\x71\x85\xf1\xf8\x4e\x56\xf4\x6b\x0e\xb2\xa9\x1d\x31\xc0\x81\xb3\xe9\x2f\x27\xbc\x43\xc6\xef\x2a\x37\x93\x8a\xda\xac\xf4\x2e\x7d\x78\xcf\xbb\xb4\x6b\x7b\x40\xa4\xbc\x4b\x6d\xef\xfb\x2b\x69\x8b\x8a\xb8\xd9\xe2\x64\x79\x2d\xbb\x7a\x8b\x20\xd9\xd1\xa3\xbe\x67\x7d\xd8\x85\xf5\xe1\x33\xa6\x43\xe9\xb7\x76\xe9\x50\x7a\x58\x24\x32\xdf\xe7\xac\x6e\x6f\x70\x98\x73\x6f\xda\xca\x0b\x6e\x4c\xb6\x92\xed\xc1\x90\x2c\x8c\xfd\xc7\x47\xb2\x1f\xbe\xbf\x79\xa0\x23\x53\x40\x95\xd4\x79\xdf\x23\xbc\x1b\xb6\x79\xc6\x02\x3f\x54\x59\x3d\x72\xf3\x3c\xb6\x38\x67\x50\xdc\x61\x2a\xdd\x39\xe7\x80\xc9\x8a\xae\xc4\xdd\xbd\x97\xb2\x77\x39\x4c\xdd\x72\xdf\xe4\x78\x3f\xed\x28\x37\x91\x99\x65\x1a\x66\xfa\x0b\x45\xb8\x22\xe8\xc7\x60\x89\x26\x02\x61\x00\xe4\x2a\xa7\xf3\x49\x7d\x45\xb1\x67\x2c\x83\x07\x44\x93\x6f\x9c\xf5\xdb\x39\xd1\xf5\xa9\xd2\x61\x17\xa6\xe7\x9f\x9c\xa7\x7d\x61\x33\xe8\xde\xeb\xb0\x8b\xb3\x7e\x4b\x97\xee\x7e\xc8\x61\xfe\xa8\x49\xbb\xab\x21\x59\x32\xbe\xe2\xfd\
xb0\xf9\x4d\xb2\x2a\x0c\xf8\x26\xa8\xfc\xe6\x1d\xab\xa3\x74\x32\x3c\xc6\x18\x96\xf3\x62\xa5\x32\xde\x64\xef\x19\x63\xdf\xeb\x07\x18\x77\xeb\x47\x0b\x29\xc8\xf8\x63\x49\xd4\xef\xc6\x99\xfb\xea\x31\xef\xf5\xb5\x51\x77\x0d\xe3\x46\xf1\x88\xdc\xea\xe0\x66\x2a\xf2\x7e\x4a\xa1\x8d\xcb\xa0\x69\xa0\x89\x48\xca\x34\xb1\x15\x75\x89\xb6\xa1\x4f\x83\x4f\x46\xf6\x9c\x5a\x6f\xc8\x0f\x73\xb9\x54\x23\x41\xb1\x6c\xe8\x86\x37\xfc\xac\x01\x40\x69\x68\x3a\x24\xb8\xb3\x26\x51\xe6\xaa\x3a\x41\x0a\x64\x41\x05\xea\x2b\x54\x1b\xbc\x2f\x0f\xf8\xbd\xc8\x1f\x28\x09\x2d\xa7\xa0\x86\xc8\x2b\x25\xcf\xe2\xd9\xcc\xda\x3f\x42\xb6\xca\xc4\x03\x32\x13\xb0\xbf\x8b\xe5\xc7\xbe\xec\x30\x16\xc8\x5d\x02\xe8\xf3\xdd\xdf\x77\xbc\x0f\x3b\xc5\xef\x51\xeb\x66\x44\x5b\x66\x17\xe6\x65\x7b\x8a\xf7\xe9\x98\x4a\x5a\x61\x0e\x00\x59\x06\xdd\x13\x29\x05\x46\xc1\x38\x1e\xf2\x60\xb7\xe9\xa7\x90\x24\xb3\x26\xfc\x00\x62\x70\x75\x54\x2b\xda\x31\x81\x2e\xad\x39\x9c\x63\x64\xaf\xb1\x5f\xad\x31\x57\x55\x27\x4c\xe2\xdb\xb1\x50\x04\x07\x1a\xbe\xae\x29\x85\x95\x29\x07\x8f\xb7\xaa\x2a\x7a\x1e\xe6\x09\xa6\x2e\x63\xae\x08\xa2\x09\x69\x95\x56\xe4\x28\x47\x56\x8a\x6e\x40\xc7\xa8\x14\x00\x56\x24\xa7\x2a\x77\x44\x27\x5b\x3d\x49\x04\x64\x72\x6f\x11\x71\x00\x73\x46\x8d\x42\x2c\x36\xa5\x46\x45\x69\xc6\xc6\x10\x89\x3b\xfb\x7e\xc4\x93\x94\xaf\x82\x4b\x2b\xe5\x59\xee\xb7\xc2\x28\xcc\x07\x00\x78\x97\x00\x2a\x5b\xa8\x31\x69\x68\x1f\xfd\x0d\x87\xf2\x3a\x7f\xcd\xf1\xde\xe6\x98\x9c\xa1\xaa\x68\x79\xad\xce\x21\xff\xfd\x6b\x77\xbf\x61\x63\xe6\x6b\x77\xbf\xb1\x0e\x7f\x01\xf4\x14\xfe\xd4\x5c\x5b\xdb\xa7\x44\x5b\x8d\xf6\xf3\x2d\x53\xa4\x57\x64\xc5\x57\xf0\xf5\xa2\x57\xcd\xea\x7f\xd5\x61\xfb\x51\xe5\x74\x3f\xe7\xb0\x3b\x1e\x90\x55\x74\x9b\x76\x57\x7b\x3d\x52\x96\x6d\x23\x20\x5d\xd4\x3e\xe2\xc2\xbf\x5d\xe7\xbd\xb4\x1f\x03\x5a\xa9\x0f\x74\xce\x30\x15\xc8\x17\xb8\x83\x35\xd6\x64\x6b\x8c\xb6\x41\xf7\x47\xbc\x05\xea\x48\x50\x12\x3b\x91\xbf\xca\x45\xec\xb7\xa2\x30\x5e\x9d\x0e\xc2\x0c\xff\xb2\x4b\x05\x75\x0f\xf9\x70\xd5\x84\x09\x7d\xd4\x20\x67\x17\xe6\x33\x6b\xf1\xfc\xba\xcd\x57\xed\xf7\xf3\x35\x34\xaa\x28\x0e\x8a\xe5\x64\x5d\xc4\x46\x58\xc9\x7f\x18\xf7\x8e\x97\xae\x61\xd5\xb0\x77\x7a\x26\xae\x4a\x0e\x5e\x5d\xbb\x4c\x25\x17\x6c\xb7\xe6\x7f\x1e\x63\xd7\xb2\x7d\xf0\xbc\xdb\xf4\xfe\x1d\x7c\x40\x4d\x49\xa2\xc5\x6a\x09\x3f\x05\x73\xc2\xba\xb0\x21\xf7\xbe\x55\x63\x07\xfd\x7e\x10\x02\x23\x91\xfb\xf5\x9a\xf7\xbb\xb5\x59\xf5\xd3\x8e\xfd\x94\xa5\x19\x09\xc8\x05\x8c\xa4\x96\x6e\x24\x7b\x0a\x42\x28\x7d\xca\xc6\xb6\x18\xe8\x6e\x7e\xd6\xe4\xea\x3b\x0d\x7f\x53\x4e\xf9\xa1\xf6\x26\x29\xf9\xa8\x37\x44\x8a\xd9\xaa\xf4\x41\x7c\x72\xd3\xcf\xec\x34\x0b\x0b\x2b\x96\x2a\xec\x17\x6d\x31\xa0\x74\xd5\x5e\x55\xdc\x95\x15\x40\x94\x2c\x41\xbe\x7f\x75\x0f\xab\xa0\x24\x12\x9d\xd1\xf4\x4d\xfa\x8c\xe1\x3a\x2f\x16\xcb\x16\xa9\xf5\xdf\xb9\xcc\x3a\xe6\x0e\xc3\xd3\xcd\x10\x3c\x1d\xcc\x9a\xdf\xbf\xcc\x7b\x7e\xf1\xb3\xc2\xcf\x01\x08\x84\x98\x69\x01\x20\x5c\xb4\xd5\x12\x6e\xde\x24\x48\xde\x15\x79\x2a\x58\x21\xcb\x8c\xa0\xb3\xbe\x36\x7a\x63\xb4\x80\x45\x9c\x2c\xfb\x53\xaa\x01\x53\x0a\x91\xd6\x9a\x73\xf7\x3c\x92\x7d\xcc\x51\x3f\xdd\xf7\x3b\xec\x29\xe7\x01\xbe\x37\x53\x01\xb8\xe8\xe5\xfa\x48\xd6\x81\xe9\x95\x8c\x00\x54\xa4\xc6\x5d\x18\x5c\x45\xf6\xb3\x35\x36\xde\x4b\x02\xc0\x0b\x3a\x1f\x10\x43\xc0\x0e\xca\xac\x06\x7c\xc2\x01\xd3\xd2\xd6\xf5\xdf\x19\xa3\xb7\x1d\x9e\x93\xfa\x71\x46\x8c\xa9\x0d\xe2\x15\x15\x81\xd1\xb6\x0b\xc9\xf6\x8d\x85\x34\xd9\xeb\xc6\x0c\x67\xe1\xcb\xc7\xd8\xec\x79\xf5\x4d\x15\x7b\xba\xf7\x3b\x35\x2d\x29\xcc\x3e\x2a\x13\xa7\x4f\x2a\x3a\x4b\x64\x08\xdf\x8e\x2f\x7d\xaa\x8a\x69\xfd\x02\x71\xa8\x3f\x3c\x79\xcf\xd9
\x5b\x6a\x84\x93\x7a\xae\xe6\xbd\xbc\x06\x29\x34\x24\xed\x15\x6a\x2a\x75\xa5\x26\x4a\x9f\xcf\x8d\xb5\x4d\xe2\xd1\xd3\x3e\x5c\x35\x5a\x5e\x9d\x7b\xc7\xcf\xa2\xd9\x49\xfe\x8d\x8b\x54\xfe\xb5\x00\xdf\x4f\x52\xee\x19\xcf\x42\xc7\x76\xfd\x5e\x4f\xc3\x23\x94\xc4\x09\x75\xb8\xb2\x84\x9d\x4a\x72\x71\xb4\xea\xb3\x5c\x35\xa1\xe8\x15\x79\x38\x54\x09\x45\x94\xb2\xd4\x58\xf5\x73\xc1\x6f\x5d\x98\xd5\x05\x9c\xa4\x8e\x02\x14\x46\xf9\x9a\x95\x78\xff\x0b\xe3\xec\xb2\x76\xf9\x5b\xee\xcb\xc6\xd9\xd3\xce\x6b\x2a\x0f\x55\xd7\x9a\xd3\x1f\x1c\x1b\x8e\x1d\x78\x50\x67\xb7\x71\xae\xd2\x34\xb2\x71\x31\xe7\xcb\xb0\xe3\xff\x5a\xe7\xbc\xc6\x8f\xf2\x63\xee\x47\xbd\x35\x9d\xc8\x86\xd8\xf3\x94\x7a\x85\x93\x40\x85\xf1\x55\x4d\x15\xf5\x96\x54\x05\x9b\xec\xdb\x35\x36\x21\x68\xc2\xbb\xdf\xa8\x9d\xa7\x90\x53\x4b\xc5\x9a\x10\x6f\xae\xa9\x02\xad\x69\xa0\x1c\xc6\x38\x09\xcc\x28\x4a\x3f\xcb\x92\x76\x68\xc6\x16\x5a\x91\x78\x6a\xf1\xcc\xe7\x0a\x36\xdf\xa8\x4a\xb1\xe7\x9b\xf0\x35\xe4\x4a\x00\x45\xb6\x00\x41\xe5\x69\x3f\xc6\x23\x5c\x3f\xcf\xc2\x00\xc1\x95\xc9\xbe\x6f\xee\x37\x3c\x12\xf1\x6a\xbe\x26\x6f\xdf\xd9\x17\x7d\x44\x06\x89\x92\x7e\x40\x50\xe7\x4a\x31\x0e\xdb\x02\x10\xd5\x9e\xb1\xb0\x44\x2c\x0b\x89\x1f\xe8\x8c\xd0\xd1\x1f\x9b\x6a\xb2\xef\x8c\xb1\xef\x1b\xcd\x9c\xec\x7e\x79\xcc\xbb\x86\xfe\xd6\xf6\x77\xbf\x80\xd0\x26\x0a\x6e\x74\xc1\x6a\xa1\x96\xfb\xeb\x22\xb6\xe9\x17\xc7\xd8\x2b\x1d\x36\x2e\xce\x8a\xb6\xfb\xe3\xce\x0e\x92\x37\x8f\x9f\x15\xed\x59\x28\xdc\x7b\xe6\xe9\x58\x14\x4e\x2c\x43\x9f\xec\x24\x72\x10\xa0\x0f\x0a\x5d\xa9\x00\x0d\x91\x45\x94\x30\x84\x55\x75\x13\xa8\x62\x93\xad\xb3\x03\x6b\x79\xde\xbb\x45\xe4\xee\x0a\x6b\x6e\x4f\x27\xbd\xbc\xbc\x70\x8b\xc8\xa9\x5e\xea\x67\xe9\x1b\xb2\x40\xcd\x10\x8d\xfc\xb6\x72\x3a\x34\x65\x07\x1c\xcc\xdb\xbd\xa5\xa4\xbd\x2e\x72\xf7\x45\xce\x0e\x00\x65\x97\x8f\x2d\xe0\xe3\xf4\xc9\xd3\xfa\x82\x69\x83\x8a\x35\x13\x7a\xbc\x91\x44\x1b\x28\x89\x96\x8f\x2d\x40\xa6\x6d\x13\xfe\x02\xaa\x60\x98\xe2\x03\xf9\xae\xca\xf1\x64\x3f\xfd\x7d\x95\x90\xc9\x4b\xa2\xdd\x87\x90\xff\x24\xce\xc5\xd9\xdc\xfd\xcb\x7f\xeb\x7d\xd9\x29\x5d\xa4\x20\xb6\x8c\xae\x4a\x99\x57\x50\xaa\x91\x67\x53\x29\x3f\x90\x9b\x1a\x0c\xc1\xf6\x2c\x25\x5d\x81\xdb\x93\x52\xfa\x91\xc2\x22\x8c\x91\xd7\xa5\xfc\x45\x60\x8f\x4f\x82\xd2\xe5\x26\x47\xbb\x0d\xbc\x82\xe7\xf5\x1c\x8f\x0d\xa4\x83\x85\xf1\x50\x49\xc0\x64\x5f\xb8\x65\xed\x23\xdc\x7d\x8f\x61\x1f\x1a\x63\x17\xa7\xfd\x78\x36\x3b\x95\xc4\x8b\x49\x92\xbb\xef\x1e\xf3\xde\x38\x36\x6f\x98\x3e\x0a\xd8\x7e\x92\xf4\x60\x5b\x4d\xfb\x31\xba\xdc\xe3\x24\x6e\xa4\x49\x92\x03\xa6\x21\xb2\x75\x80\x3d\x49\x1d\x50\x22\x41\xdd\x43\x87\x6d\xf4\x28\x87\x5d\x7f\x55\x48\x5d\x3f\xed\xc7\x48\xdf\x90\x70\x11\x67\xfd\x54\x05\x7b\xa3\x4b\x1c\x59\x41\xf0\x53\x67\xe6\xe7\xf8\x61\x3e\x29\xbf\x85\x91\x71\x1d\x3f\x8c\xd0\xf4\xeb\xa7\xe5\x3a\x16\x5e\x75\xa8\x52\x3f\x06\xba\x9e\x14\x6d\x51\x75\x79\x0a\x83\x1d\xb1\x1c\xe0\xde\x12\x6a\x16\x03\x18\x8f\x3f\xd0\x00\x4b\xf2\xfd\x30\xae\x1e\x94\xf9\x8e\xba\x7d\x1e\x83\x69\x0c\x9c\xbd\x1f\x57\x8d\x60\x66\x0f\xa1\x61\x0c\xf8\x76\x8d\x5d\x9a\x09\x40\x6a\x42\x98\x9b\xcc\xfd\xd3\x1a\x9b\xde\x76\xc1\x11\xba\x13\xbd\xe3\xfd\x32\x60\x72\x2b\xc8\xa7\xb6\xfa\x74\x52\x9a\xd4\x56\x27\xab\x9e\x2d\xcc\xad\xf6\x18\xa8\xa1\x85\x9e\x95\xdb\x48\x1b\xd3\x31\x52\x3f\x0e\x92\xee\xd0\xc7\x60\x37\x90\x1a\x84\xf1\x81\x87\xeb\x10\xb0\x6f\x38\xb2\xdb\xdb\x72\xa3\x5b\x48\x93\x4e\x18\x09\xf7\xf3\xce\x4e\xba\xdd\x7a\xc7\xfb\x05\x07\x99\x92\xe1\xa2\x56\x61\xf2\x04\xec\x88\x2d\x0a\xdb\xb7\x3b\xbc\xfc\xb0\x69\x43\x90\x0b\x0a\xda\xaf\x82\x36\x9e\x60\x0c\x47\x24\x36\x44\x54\x1e\x23\xad\x35\x91\x85\xb5\x48\x27\xc0\x1b\x4
d\xf6\x3b\x0e\xbb\xb8\xed\xf7\xd0\x42\x19\x8a\xcc\x7d\x9f\xb3\x45\xc6\xad\x06\x4b\x33\xde\xf0\x5e\x04\xad\x34\x0b\x01\x01\x19\x04\xd3\x41\x9a\xf4\x50\xe5\x56\x5b\x77\x01\xf3\x6d\x1b\x91\x4d\xc3\x2f\x51\x6f\x59\x05\xae\x02\xe1\x84\xd6\xc2\x86\x66\x61\x93\xbd\xac\xc6\x0e\xca\x73\x2c\xe0\x54\xb9\xff\xe2\x78\xff\xc3\xd1\x3f\x79\x20\xe2\x44\x81\x9d\xaa\xe3\x8c\xbc\x8b\xb8\x03\x6a\x44\x0c\x0a\x0e\x5d\x49\xd3\x72\x1c\x66\xaa\xce\x0b\xba\x64\x0c\x37\xec\x67\x54\xf8\xf0\xf2\xd0\xf1\xb2\x1d\x08\x0f\xf6\x03\xd8\xfd\x7b\x7e\xbe\x46\x6c\x1c\x7e\xb6\x2e\x02\xbc\xa0\x31\xce\x21\x94\x01\x4b\xd4\x9f\x5a\x96\xf5\x36\x55\x4d\x5a\xbf\xa4\xa0\x5a\x06\xb8\x57\xd5\x18\x03\xb1\x0f\x40\x2f\xee\x8b\x35\xbf\xcb\xff\x80\xb1\xba\x65\x7e\x4e\xbe\x2c\x25\x2f\xfa\xa0\xf3\x74\x80\x5e\xe8\x82\x66\x83\x9a\x41\xa6\x81\x26\x3f\x23\x9b\x58\x6a\x14\x21\x1e\x88\xfc\xe1\x2d\x4c\x55\x0c\xdc\xaf\xd6\xd8\x41\xe8\x95\x33\x99\x48\xdd\x5f\xd4\x9d\xf2\x72\x90\x8e\x67\xee\x47\xa7\x98\x53\x58\x6e\x90\x76\xed\x70\x17\xd4\x69\x91\x43\x60\x70\xff\x0a\xba\xec\xb7\x6b\xec\xd2\xcd\x30\x0e\x92\xcd\x4c\xed\x3f\xef\xa9\xed\x20\x99\xef\x76\x7c\xa7\xf4\x31\xb5\x1d\xe1\x2c\xa4\x67\x0a\x25\x9c\x30\xe7\x32\x4b\xcf\x8a\x22\x6b\x3d\x56\xed\x49\x4a\xca\xe9\xa0\x8a\xe1\xee\xd0\xfb\x3f\x78\xfd\x1e\xf4\xfd\xe4\x0f\x6a\xec\x7b\xe1\xa4\xb5\x90\x86\x1b\x61\x24\x56\xc5\x71\x79\xda\x42\xf6\xa5\x0f\xd4\xbc\xff\x54\x9b\x1d\x71\x97\x13\x04\x76\xa6\x99\x79\x7c\x35\xfd\xe0\x84\xba\xea\x87\x31\xef\x26\xb0\x4b\xd0\xcb\xa0\xd5\xc5\x3c\xcc\x33\xde\xf3\xe1\x84\xae\xe7\x2b\x88\x17\xa9\x5b\x68\x94\x97\xa2\x7c\x4a\xf7\x8b\x93\x3b\x62\xb1\x79\x87\x2c\x2d\x43\x31\xa3\xba\x0e\xe4\x72\x3c\x6a\x29\x8c\x6c\x00\xb9\x8b\xb8\x1f\x6d\xfa\x83\xac\xb0\xc0\x18\xda\x5c\x76\x94\xcf\x4c\x29\x45\x50\x97\x11\xf0\x23\x53\x90\x20\x76\x6c\x76\xe1\x8e\xa5\x67\x2e\xdd\x31\x3b\x77\x72\xfe\x94\xa5\x1e\xbd\xcd\x61\x4c\xb7\x3b\x70\x5f\xe3\x78\x2f\x75\x16\xfb\xb1\x6d\xb7\x28\x1e\xe0\x5d\xc0\x55\x59\x20\x73\x67\x56\xba\x6b\x70\x50\xc8\x6d\x57\x3e\x12\xe7\x21\xe4\xc7\x49\x89\xbc\xe1\x47\x84\xae\x04\x0a\x31\x75\x05\x72\x53\x6c\xe3\x10\xbd\x93\xfd\x1b\x05\xd7\x28\xb5\xf0\x02\x00\xc7\xbd\xdd\x9b\xbf\x5d\x53\x2e\x99\xda\x00\xb4\xdc\x37\x12\xed\xe0\xa3\x05\xca\x4d\x11\x07\x1c\x66\x55\x9f\x7c\xe7\x18\xbb\x72\xab\x2c\x08\x0a\x4c\x84\x5c\x00\x4a\xda\xe8\x8a\x38\x77\xff\xbe\xe6\xfd\xe8\x6c\x11\x9c\x9e\x15\x99\x2d\xfa\x19\x8a\x8e\x52\x77\x4c\x3c\x74\xca\x7f\x80\xd8\xc7\x6d\xec\x2b\x5d\x7f\x40\x59\x83\x9a\x57\xb1\x2f\x6f\x36\xcf\x39\x63\xeb\x62\x70\xce\xd9\x8f\xc7\x1e\xeb\x48\xf3\x59\x87\x3d\x85\xc9\xdb\xee\x93\xbc\x27\x2e\xaf\x51\x94\x25\xc4\x1f\xeb\xa3\x4c\x91\x96\x40\x50\x42\x79\x62\x6d\x8e\xaf\x70\x18\x15\xed\xbe\xc8\xf1\x56\x67\x63\xca\x8c\x4d\x3a\x56\xc4\x57\xd6\xe4\xf2\xc0\x8e\xeb\xbd\x6b\x03\x43\xe3\x57\x73\x12\xdd\x48\xb8\x48\x14\x8b\x1a\xea\xe9\x36\x3a\xb4\x65\xfc\xf4\x62\x29\xd2\xa0\xe4\xb8\x79\xdb\x38\xbb\x71\x47\x81\x52\x18\x4c\x46\x5c\x3f\x37\x0d\x8e\x81\x27\xfc\xd8\xfc\xdc\xa2\xfb\xb5\x31\xef\xa7\x9c\x91\xb7\x21\x28\x37\x33\x43\xcc\x86\xb2\x34\xc8\xcd\x46\xdc\x40\xba\x33\x07\xca\x24\xd1\xcf\x00\xa5\xad\x27\x30\x5d\x43\xc9\x81\xe2\x13\xc5\x1b\xd0\x4d\xcd\x73\x0e\x2b\xee\x9e\x73\x2e\xc9\xcc\xca\x59\x63\xfa\xf6\x1a\xbb\xdb\x61\xc6\xd3\x6e\xea\x09\x39\xb6\x50\x2e\xd8\xaf\x50\xcb\x52\x28\x91\x52\xee\xe9\xb1\x08\x53\x3e\xbf\x80\xc9\x2b\xab\x72\x6a\x29\x7a\xbc\x1d\x34\xc8\x9a\x15\xbf\xe0\x30\xbb\x8e\xee\x2b\x1c\xef\x6e\x47\x11\x2b\x29\x3f\x31\x3e\x52\xe7\x59\x3f\x84\x70\x15\xca\x7f\x56\xdd\x6a\x2e\x05\x30\xd4\xb4\x92\x0d\x6c\x88\x4d\xe5\x83\x9c\x36\xe8\xd2\x57\x7f\x1d\xed\x25\x69\x5e\x07\xe6\x24\xd9\x26\xf8\x
69\x55\xf1\xb5\x17\xb3\xc7\x99\xe6\xc3\x5e\x0f\xbc\xe4\x73\xbe\xe8\x26\xf1\x92\xc8\xdd\x7f\xbc\xc8\xbb\x56\xff\x2a\xc3\xcd\xd9\x76\x0d\x40\x9d\x0b\xe0\x59\x29\xd8\x6d\xc3\xc1\x7f\x67\x7b\x31\x91\x7b\xf4\x2e\x36\x0a\xd6\xe7\x14\xbd\xcb\x27\x9c\x2d\x2c\x8c\x43\x73\x12\x48\x5e\x7e\xce\x59\x1e\xc9\xe9\x02\xac\x27\x7a\x1a\x3e\xd8\xdc\x2e\xbf\x52\x70\xbb\xbc\xa5\xb6\x85\x21\x73\xb8\x5d\x88\xaf\x8b\x9a\x6c\x15\x94\x6e\xa9\x5d\x36\xbd\x4b\x4b\x58\x54\x94\xc0\x71\x0c\xa6\x14\xa9\x11\xc3\xfb\xf2\x4c\xfb\x30\xa4\x7c\xd9\x0b\x6d\xdf\x65\x68\xfb\x7d\xce\xd2\xf6\xd1\xc5\x87\xdd\xe6\x08\xce\x1c\x39\x0f\x75\x2c\xb1\x9e\x8c\x8c\x7d\x77\xc2\x82\x7c\x6c\xc1\x16\xac\xc9\x62\xd2\x24\x7e\x5a\xd2\x82\xec\x86\x3f\x98\xf0\xa6\x8c\xdf\x95\x54\x6c\x69\x12\xf3\xe7\x25\xad\x11\xf9\x0d\xef\xdb\xc3\xf7\xdb\x9b\xd2\x25\x04\x02\xca\xd6\xb8\xc3\xfb\x81\x4a\x2c\x30\x9a\x70\x76\xba\xc6\x11\x76\x78\x8b\x8d\xa4\x72\x06\xef\x69\x25\xbb\x01\xf7\x7b\xf6\xf6\x82\xe7\x49\xee\x35\x1a\x47\x01\x46\x40\xcb\x1a\x43\x66\x54\xc1\x66\xb1\xdf\xd8\x5f\x89\x8f\x7c\x4b\x98\x2f\x8a\x5e\x62\x61\x50\xbd\x72\xbf\xf7\x8f\x35\x8b\x56\xa3\x84\x3c\xd5\xd3\xdb\x9e\x8e\xf5\x03\x2f\x82\x7c\x18\xf4\xd6\xd5\x10\x86\x2e\xc9\xc2\x3c\x49\x07\x4d\x7e\x0b\xfd\xd6\xec\x17\x41\x02\xae\xa5\xad\x48\x30\x2a\xde\x52\x8f\x57\x50\x63\x30\x36\x77\x7c\x61\xf1\xf8\xb1\xd9\xe5\xe3\x73\x47\x39\xb5\xaa\x1c\x75\xbd\x6c\x02\x4f\x19\x0e\x42\x95\x0f\xaa\xaa\x5d\x27\xb3\xb3\x1f\xf3\xe3\xdd\x5e\x3e\x98\x0b\x53\xcc\x54\x85\x58\xb2\x30\x2f\x70\x10\x10\x9a\x26\x4a\x94\xfc\x81\xea\x62\x38\xc3\x6a\x98\x13\x23\x39\xd9\xb0\xd7\x44\xa9\x34\xb2\x84\x1d\x32\x5d\x0c\xe7\x1c\x56\x74\x9c\x25\xd6\xbf\x53\x63\x3f\x59\x63\x07\x35\xc0\xac\xfb\x7f\x1c\xef\xef\x1c\x64\xb2\x35\x60\x67\xe5\x41\x85\x60\x77\x65\x0f\x53\xc9\xf2\xc4\x82\x7e\x3a\x68\xea\xa1\x66\xf3\x10\x1a\x2e\x0f\x35\x0f\x41\x82\x62\x1f\x2d\x7a\x64\x42\xc3\xe1\x2e\x0a\x35\x01\xb6\xca\x63\xcb\x4f\xe7\x6b\x22\xdd\x0c\x33\xe4\x0b\x2f\x99\xfe\xa8\x28\x28\x40\xd5\x65\xb8\x14\x1d\xc7\xde\x6f\x99\x1f\xa5\xb9\x85\x26\x0b\x68\x98\x29\xd5\xa6\x98\xd1\x55\xee\x63\xbd\x4b\x17\x8b\xf2\xce\x2c\x9e\x30\x1f\x7d\x2a\x9b\x48\x05\x8e\xbb\x7b\x95\x77\xe8\x58\xd2\xed\x22\xb6\xd1\x5a\x01\xd5\xad\x0d\x86\xea\x49\xeb\x63\x7f\x73\x89\x15\xbb\x59\xc5\x7d\x77\x73\x94\x6c\x2e\x61\x88\xf5\x87\x2e\xf1\xfe\xc2\x29\x7e\x5b\x5b\x54\xb1\x31\xf9\xb8\x92\xe5\x9f\xb2\xbc\x0c\x83\xa5\x08\x48\x1b\x2e\x01\x5e\x95\x1f\x00\xcf\x20\xbc\x40\xae\x97\x30\x6e\x25\xfd\x38\xb0\x93\x06\x30\xaf\x26\xec\x86\x91\x9f\x16\x84\x62\xe8\xc4\x08\xb3\x22\x0e\x97\x10\xe3\x7a\x7e\x98\x16\x26\x95\xec\x28\x1a\x17\x0d\xc8\x33\xa3\xfe\xb2\x08\x9f\x7b\x50\xa5\x00\xb3\x80\xfa\x61\xb6\x26\x52\xcf\x3e\x97\xfe\xff\x19\xfb\x73\x53\xf5\xf8\xe2\xae\xcf\x56\x3f\xeb\xac\xa8\xd2\x34\x8e\x5a\xf6\x30\x38\x6d\xfd\x8b\x3a\x6d\x7d\x6b\x0b\xed\x6a\x9b\x69\x02\x47\xaf\x5f\x73\x56\x64\x39\x45\xe3\x76\xca\xb6\xe9\x1b\x03\xf4\xa0\x9f\x32\xfe\xc6\xd1\xc7\xb2\x3f\xd9\x62\x8c\xb7\xeb\x00\x3c\xa3\xbd\xc9\x59\xc1\xb2\x74\x27\x0c\x9f\xd7\x1e\xd2\xd6\xee\x29\xa0\x7b\xf0\x61\x0f\x9d\x5a\x78\xe7\xf6\x6a\xe1\x29\xf7\x84\x56\x0b\xcd\x25\x57\xa4\x25\x94\xb2\x5c\x8b\xc5\x54\xa9\x2c\xfe\xe1\x41\xcb\x37\xb1\x83\x60\x6f\x94\x05\xbf\x70\xd0\xfb\x4a\xad\xea\x4e\x89\xcf\x54\xad\x6f\x1c\x78\x58\xde\xe5\x90\xd9\x8a\x08\xd8\x3a\xf7\xb3\x6d\x63\x67\xeb\x17\x28\x0a\xfc\x61\x1a\x12\xab\x93\x7e\x1f\x45\x6d\x99\xc5\x78\x7f\x70\x66\x58\x7b\xf1\xab\xf7\xb3\x9b\x28\x65\xed\xa8\xd7\x30\x13\xd6\x2a\x19\x3b\xc3\x98\x43\x5f\x96\x15\x9f\x2f\xd6\xd8\xbf\xb5\x3f\x75\x26\x0f\x23\x02\x6e\x73\xef\xa9\x29\xe0\xa7\xb7\xd4\x46\x3e\x55\x96\xea\x7a\xd4\xc1\x00\x8f\x8f\
x0f\xd5\x87\xa6\x81\xdf\x4e\x93\x0c\x11\xec\x52\x11\x89\x0d\x3f\x06\xd0\x90\xac\x5e\xc8\x00\x05\x05\xd9\x13\x69\x5b\xca\x57\xb3\x30\x68\x92\x12\x7e\x43\xdf\x30\x70\x4a\x33\x8c\x97\x07\x3d\x55\xe5\x2c\xea\x28\xc1\x0e\x5f\xc1\xb9\x62\x76\xf6\x0a\xe4\x22\x91\x97\x1a\x8f\x40\x26\x5e\x96\x0a\xc6\x37\xb7\x73\xdb\x75\xff\xad\x1a\xab\x1a\x44\xf7\x4b\xb5\xdd\xe1\xc6\xbd\xbe\x56\x51\xea\x03\x33\x06\xe8\x01\xf5\x37\xa9\xb8\xc9\x30\xce\x72\xe1\x43\x00\xfa\xd6\x43\x32\x55\xd7\x9a\x2a\xed\x68\x34\xcd\xad\x1c\x06\x08\x05\x6a\xea\x71\x21\x07\x75\x8b\x02\x2f\x53\xb1\xea\xa7\x41\xa4\x9d\x3f\x3b\x1a\x81\xed\x93\xb0\x8e\xfc\x2b\x48\xc2\xda\x4d\x08\xfb\x91\xbd\x10\xf6\xfb\x1b\xc2\x7e\xff\x73\xdf\x8e\xfc\xeb\xcf\x7d\x3b\xb2\x97\xfb\xb6\x45\xdf\xec\xe5\xbe\xed\xe5\xbe\xfd\x5f\x91\xfb\x76\x64\x2f\xf7\xed\x61\x31\xe7\x1f\x98\xdc\xb7\xaf\x1d\x64\xbc\xc2\x0b\xa0\x89\x2d\xc0\x05\xf9\xa1\x83\xde\x13\xac\x2b\x36\x24\x80\xbe\x35\xc2\xff\xf8\xf6\x89\x3d\x83\xc9\x6e\x0d\x26\x9f\x77\x94\xcb\xee\x13\x8e\xf7\x72\xa7\x12\x62\xad\x60\x23\x21\x66\x2e\x25\x7e\x10\xec\xa0\xa2\xda\x15\x58\x40\x1a\xf3\x27\xd9\x90\xba\x91\xd8\x9c\x26\x5c\xac\x86\x54\xea\x1a\x54\xf2\xb4\x86\xeb\xcf\xa6\x4d\x17\x61\x93\xd5\xd9\x15\xdb\xc6\x1a\xeb\x8a\xee\xd9\x81\x76\x11\xb4\xf4\x69\xd3\xb0\xfe\xb1\x5d\xfa\xf4\x5f\xe8\x3c\x44\x4e\x7d\xa3\x71\xf7\x39\x67\xb6\xb7\x6c\x1d\x71\x0f\x57\x51\xe4\x5a\xe2\xa9\x0c\x15\xff\xb7\xcf\xae\xcc\x34\x25\xbe\x15\xf7\xde\x67\x7b\x1e\xfd\x5d\x09\xc3\x02\xb8\xb0\x10\x1e\xa9\xc3\x8d\x2d\xf9\xf6\x8a\x67\xb3\x0f\xd6\xd8\x45\x6b\x49\x96\xcf\x46\xa1\x9f\x89\xcc\xfd\xa5\x9a\xf7\x7a\xe7\xd6\xe2\x02\xc9\x6d\x8d\x84\xae\x96\xac\x7c\x07\x37\xbb\xf9\x85\xcc\x4e\x5d\x0c\xe3\xe7\x41\x94\x6a\xe1\x1a\xec\x81\x6b\x10\x5f\xe9\x84\x91\xb0\xbc\x6b\xc5\xfe\x00\x47\xcc\x82\xea\x25\x4e\xe2\x86\x7c\xe7\x14\x22\xdc\xa1\x6d\xe3\xfc\xd7\xac\x6e\xcc\x7d\xce\xe3\xd9\xe3\xac\x51\xea\x49\x9d\xa7\xd1\x15\xe9\xaa\x68\xac\x8b\x81\xbb\xcf\x1d\x0b\x7b\xec\x3e\xe7\x10\x7b\x6c\xc5\x73\x59\x9e\xfa\xb9\x58\x1d\xb8\x13\xee\x7e\x78\x87\xb1\x4f\xd7\xd8\x84\x22\x82\x73\x3f\xa6\x2d\x48\xef\x86\x2c\x0f\xcd\x10\x87\x6a\x39\xbf\xcd\x4f\xc3\xa4\x9f\x51\x64\x95\x79\x2e\xec\x67\x24\x0c\x50\xf7\x52\xf0\xe8\xb9\x59\x08\x69\x03\xc0\xe9\x03\xb9\x9b\x8a\x63\x8a\xcf\x06\xdd\x30\x83\xdd\xe8\x18\x1a\x4b\x23\x08\x74\x57\x5b\x2a\x00\x44\xf7\x52\x64\xcb\x83\xd4\x11\x12\x25\x8a\x77\xbf\xf8\x32\xe6\x20\xf9\xba\xbc\x76\x51\x9e\x72\x94\x67\x66\x45\xa1\x18\x8b\xeb\xea\x94\xaf\xd8\x8f\x08\xc7\x57\xe7\x31\xa0\x13\xd5\xb8\xaa\x5a\x66\x9b\x92\x7e\xd6\x61\x8f\x93\xaa\x1c\x78\x9a\x97\xf0\x90\x3b\xdb\x06\xc4\x77\xc0\x05\x72\xfb\xde\xea\xec\x16\xf7\x0d\xf3\x6c\x91\xc5\xa0\xa8\x72\x7d\x7c\x90\x00\x78\x0a\xad\x1a\x3e\xe8\xe7\xc4\xf0\xdb\x45\x78\x79\x3b\xc6\xfd\x25\x8e\xc1\x62\x7c\x97\xd7\xb5\x19\x0d\xd4\x1d\xcd\xb1\x94\x04\x08\xd1\x93\x97\xbd\xc8\xc5\x52\x80\xe7\xcd\x84\x07\xd0\x39\x71\x72\x34\x14\x0e\x1f\x4e\x1d\x53\x78\xbe\xb8\xc6\x1e\xd9\x4b\x85\xe8\xc2\x8a\x44\xf2\x0a\xf7\xef\x1c\xef\x4f\x9d\x85\xd2\x55\xb5\xcb\xd2\x2f\xb9\xa4\xd4\x8b\x0a\x26\x1e\xcc\x1d\x51\xb2\x09\xd9\x15\x34\x1c\x10\x8a\x2e\x37\x65\x01\xd1\xc7\x54\xea\x09\xf9\xd0\x82\x7e\xc6\x4c\x42\xa8\x7a\xc2\x48\xd7\xb2\xd9\x7c\xa4\x62\xde\xa0\x04\xc2\x55\x33\xde\xf0\x54\x12\x2f\xe8\xda\xe9\x62\xcc\xf3\x83\x8d\xfb\xe4\xb0\x47\x64\xa5\x94\xeb\x3f\x76\xb6\x00\x65\x35\x65\xa7\xfd\x9e\xf7\x8e\x11\x79\xda\xbd\x24\xc0\xaa\x16\x19\xdb\x25\x67\xb4\xdc\x44\x12\x33\x07\x44\xe5\x15\x35\xf9\x69\x92\x98\x47\xad\xbe\x92\xed\x1b\x34\x39\xe0\x1a\xc2\xc9\xc9\x94\xd8\x88\x46\x68\x20\xae\x83\x39\x10\xce\x03\xb8\x3c\xa5\x32\x35\x11\x27\x81\x00\x1a\x8b\xff\xe6
\x78\xf7\x38\xa7\xe8\x17\xca\x7f\x23\x87\xde\x80\xdd\x86\x38\x17\x85\x2f\x5f\xe4\x40\x11\xf3\x6f\x87\x00\xe4\xa5\xb8\x85\xea\xd5\xb5\x73\x1f\x61\x91\xb3\xb0\xdb\x8b\x06\xfa\x42\x56\x2a\x11\xc9\xb5\x92\x40\xea\xa0\x59\xd6\xef\xa2\x48\xc1\xcc\xeb\x0e\xaa\x24\xfa\x6c\xa5\xf3\x3a\x6c\x44\xba\x37\xd4\xd8\xc5\xb2\x04\xcd\x3e\xf1\x93\x35\xef\x6f\xa1\x69\x26\x75\x84\x91\x07\x82\x41\xfa\x0a\x9a\x13\x72\x7e\x4a\x4c\x5d\x1d\x80\xf5\x87\xac\x72\xd9\xca\xa5\x8a\x37\x31\xae\x1f\x9f\x38\x44\x44\x13\x59\xb9\x9c\x12\x42\x7f\xd1\xda\xf3\x56\x4b\xad\xa0\xf8\x69\x40\x10\x8f\xe5\xf9\xa9\x21\x4b\x9b\x7e\x57\x29\x53\xc3\xd8\xa7\xd9\xab\xc7\xd8\x65\xbd\xb2\x94\x75\xbf\x5b\xf3\xfe\xbe\x36\x6f\x85\xa9\xd8\xae\x29\x14\x35\xc5\xba\xf6\x48\xb6\xc8\xef\x35\xda\x69\x08\xf2\xce\x83\x79\xac\x6e\x91\x81\xd0\xbc\x9b\x0a\x9e\x6f\x26\xfa\xa0\xb8\x2e\x06\x00\xb0\x4d\x1d\xa9\xbe\x58\x08\xf7\x2c\x57\x9f\x0c\x85\x81\x53\x0e\x99\xf0\x29\x99\xaf\x2a\x1e\x1e\x34\xf9\x6c\x3c\x20\x8b\x32\x88\x46\x35\xba\x4a\x14\xb6\x06\xc8\xdc\x85\x27\x67\x6b\xd7\x51\x4a\xb4\x4d\xf6\xd6\x1c\x2d\x82\x8b\x6d\xb5\x60\xd1\xc3\x75\x97\xa4\xfc\x2e\x91\x26\x94\xa2\x96\x12\x03\x9d\xba\x6d\xcd\xda\x6f\xd7\xd8\xf7\x64\x6b\x7e\x2a\x28\xc9\x4b\x6b\x72\xee\x1f\xd5\xbc\x4f\xd6\x96\xe4\xad\xe2\x8c\xaf\x32\xe9\xf4\xf9\x43\xf3\x78\xf8\x51\x34\x94\xf7\x99\x21\x28\x7c\xb1\xdb\xe7\xa4\x23\xc9\x8d\xc2\x78\x4a\xdb\x0b\xe9\x68\x2e\xcf\x3d\x30\xa2\x72\x76\xc9\x33\xba\xce\x3f\x83\xbd\x1a\xbb\xd7\xfe\x4a\xae\x50\xe4\x81\x36\x4e\x31\x84\x74\xc2\x34\xd3\xd9\x7c\xda\x2c\x61\xc6\x99\x45\x91\x22\x4c\xc3\xb9\x2c\x02\xbe\x30\x3f\xc7\x67\x9a\x5c\x6a\x5c\xf2\x4f\x59\xd6\x52\x55\x07\xc9\x43\x3f\xbc\x9b\xe4\x6b\xb4\xfb\x55\x08\xcc\x11\x09\x6e\xff\x67\x9c\x7d\x3f\xa6\x13\xc1\x4a\xba\x25\xf5\xdb\x62\x41\xa4\x21\xc8\xf5\x24\x0e\x32\xf7\x4b\xe3\x2a\xdd\xf6\xe3\xe3\xaa\x54\x1e\xa8\x74\x94\x30\xe6\x68\x7a\xd5\xab\x84\xc7\x42\x04\x78\xbe\xa6\x72\x05\x5f\x95\xc5\x76\xfa\x11\xc4\xbd\x63\x18\x7d\x20\xe4\xfc\xcb\xd0\xde\x83\xac\x8b\x1a\xcf\x10\x53\xb0\xf4\x9c\x95\xb2\x34\x16\xab\x7e\x1e\x6e\x08\x4e\xaa\x4d\xb3\xb0\xf3\xd2\x14\xd3\xab\x35\xcb\x93\x1e\x0f\xbb\x5d\x11\x84\x7e\x2e\xa4\x12\x1c\xfa\x50\xb9\x75\xd9\xcb\x34\x94\x93\x71\xc2\x13\x88\x1a\x84\x63\x0b\xc8\xf8\xb5\x7e\xce\x83\x64\x33\x9e\x42\xb8\x0c\xc0\x81\x54\x3e\xb2\x38\xa4\x1c\x7d\x35\xb5\xa1\x49\xbc\x07\x5d\x65\x25\xa9\x72\x72\x76\x61\x0d\xad\xc7\x48\x85\xa8\xea\x3c\x24\xd6\x43\x4d\x4e\x4d\x32\xe5\x73\xa0\x69\x25\xfb\x16\xd1\x45\xe2\x9c\xfb\xdc\x18\x35\xd5\x28\x35\xdd\x10\xde\x41\x59\x0d\x8b\x02\xe5\xdb\x9d\x24\x6d\x87\xad\x68\xc0\xd7\xfc\xa8\xf0\xd1\x98\x7d\x23\xe5\x7b\x6e\x36\x3f\x4a\xe2\x55\x8c\x69\xa4\x0c\xeb\xb3\x3d\x3c\x92\xb4\xe5\x14\xea\xf7\xf0\x7b\x52\xd4\x0f\x92\xfe\x88\x24\xeb\x2b\x0f\xab\xa6\xda\x1a\xea\xef\x38\xec\x60\x10\x67\xc7\x40\x9c\xbb\x1f\xd8\x09\x86\xc1\x42\x12\xcc\x9d\x5a\xc2\x37\xbc\x97\x38\xb6\xee\x38\x77\x6a\x89\xf7\xfc\xd4\xef\x8a\x5c\xae\x48\x7d\x6c\xe3\x0b\xc5\xc5\xc2\xd4\x08\x32\x49\x33\x1f\xca\x53\x88\x76\x02\xac\x8a\x58\xa4\xa0\x52\xc9\x22\xed\x1c\x2c\xed\xa5\x9a\x3b\xb5\x84\xfa\x60\x93\x7d\xd4\xc1\xa3\x1f\x9d\xaf\xdc\x5f\x75\xbc\x37\xc3\xd1\xcf\xc0\x14\x37\x1c\xcf\x9a\x66\x0d\x2a\x77\x26\x13\x5a\xf3\x3d\x94\xa9\x37\x0a\xe9\x56\x4c\x48\x3c\x3a\x92\xec\x52\x12\x38\xcd\x4b\x87\x46\x98\x87\x6a\xf9\x18\x67\xc3\xad\x65\xc1\xab\x00\x3c\xc3\x3c\x04\xb8\x77\x3b\x5e\x36\xa7\x23\x65\xed\x13\x82\x3a\x24\xcb\xbb\xe8\xef\xf3\xe5\xc1\x10\x9a\x66\x3f\x89\xe7\x98\xa2\x9c\xa3\xd0\xe0\x6c\xe8\x19\xbd\x74\x2c\x74\xcc\x31\x76\x59\x26\xf2\x5b\x49\xc9\x9f\xcd\x6e\x7e\xc6\xdc\x29\xf7\xeb\x63\xde\x17\xc6\x08\xd4\x66\xab\xa3\x80\x1a\x3
8\x0c\x06\x28\x1e\x94\x85\xd4\x79\xea\x53\x02\x2f\xcd\xec\x48\xf8\x1d\xdc\x2f\x27\x8d\xb5\x8e\x7c\x18\x05\x2a\x0a\x4a\xfb\x3a\x0e\x48\x57\xf8\x71\x66\x9c\xfe\x04\x14\xad\xd6\xac\xae\x0f\xaa\xec\xb4\x31\xad\x4b\xb5\x26\xc2\x6f\x48\xfd\xc1\x7e\x02\xd9\x3c\x79\x3f\xcf\xe4\x75\xfc\xb8\x4a\xb9\xdf\xc1\xe7\x53\xb1\x1a\x66\x79\x3a\x28\x5c\xf9\x45\x25\x48\x1b\xd3\x8f\xac\x8b\x01\xbf\xf5\xe9\xc7\x9f\x79\xc7\x89\xd3\xc7\x66\x4f\xdc\x71\x72\xf6\xd8\xad\xf3\xa7\x8e\x3f\x67\xe9\x99\x4b\xcb\xc7\x4f\x3e\xe7\x18\xda\xca\xe9\x1c\xbc\x24\xf2\xe7\xd0\xb0\x66\xcf\x59\x6e\xf7\xc2\xde\x73\x8c\x35\x95\x27\xd0\x6e\x98\xa8\x48\xa0\xa3\x71\x82\xc0\x34\x8e\xfd\x0d\x55\x5e\xf3\x41\x09\x10\x9d\x0e\x98\x6c\xb7\x9e\x93\xbf\x51\x63\x86\xbd\xc5\xfd\xe5\x9a\xf7\x7a\xe7\x04\x59\x4d\x8c\x9d\xb7\x25\xa4\x90\x22\xf7\x8b\x3e\xd9\x1f\x2b\x1e\xa0\x3d\x92\xcc\xff\x18\xc7\xe1\x03\x49\x30\xa4\x48\x13\x41\xac\x0d\x52\x6f\x41\xc8\x5a\x5e\x05\x5f\x9e\x49\xab\x2c\x82\xe7\x6f\x50\xd1\x75\xbc\xcf\x39\xb4\x8d\x41\xe5\x80\xbb\x4f\x0e\xe3\xf9\x98\x54\xfe\x9f\x71\xf6\x28\x4d\x14\x5d\x74\x87\xfb\x8d\x71\xef\x53\x63\xaa\x1f\x0b\x26\x69\xa3\x47\xd3\x7e\xac\x51\x72\xa1\x33\x8f\x57\x3d\x45\x39\x71\xf4\xb0\x6f\xd0\x05\x93\xc6\x4f\xb0\x4d\x60\x29\x69\x40\x3f\xa0\xa4\x40\x27\x2e\x57\x3e\xa0\x40\xb4\xfa\xab\x72\xfc\xe8\x84\x0b\x86\xb1\xb6\xee\xde\x42\x62\xc3\xae\x66\x68\xae\x5a\xcd\x0a\xcd\xe7\xbb\x49\xa0\x23\x9a\x61\x68\x4c\xe6\x60\x59\x18\x32\xdc\xa4\x01\x92\x2a\xf9\x41\x60\x33\x6a\x17\x83\x8d\xc1\xfd\x66\xab\xea\x64\x60\x52\x92\x44\xbf\x65\x74\x8b\xc1\x0c\x50\x3e\xb2\x83\xcf\x86\x0e\xc2\x14\x81\x0d\x96\xba\xb5\x24\x4e\x52\xac\x71\x46\x16\x6b\x10\xe9\x68\x77\xc2\x2c\x81\xe1\x81\x54\x47\xfa\xf3\xe0\x53\x19\xe2\x19\x7f\x60\x67\xe0\x17\xc7\xd9\x23\x01\x4f\x65\xa1\x1f\x45\x4b\x52\xe9\xcb\x33\xf7\x63\xe3\xde\xdb\xc6\xe6\x4b\x57\x47\x19\x47\x35\x2d\x01\x12\x4a\xa8\x87\x0d\x5d\x3b\x36\xf9\xd7\x15\x20\x50\xaf\x1f\x45\xc8\x2c\xad\xcd\x7d\x50\x8f\xcc\x60\x77\x0e\x33\xae\xd8\xb6\xf9\x7c\x39\x47\x01\xb6\x28\xfc\x98\x06\x21\xf3\x33\x22\x86\x93\xea\xe6\x46\x18\xf4\xe5\xa9\xa0\x8f\x36\xc2\x6e\x2f\x82\xd3\xb8\x4f\xa0\x26\x28\x6c\xbb\x54\xa7\x61\x96\x2a\x38\xa2\xf8\x08\x4c\x1f\x24\xed\x75\x91\xd6\x71\x2a\xcc\xc1\x0f\x54\x70\xd0\xae\xa1\xea\x21\x15\x38\x9a\x28\xf7\xe7\xc4\x4c\x93\x66\x1a\xfb\xe1\x72\x22\xb3\x08\xe3\xd5\x06\x5c\x91\x0d\xa1\x2f\x35\x92\xb8\xe1\xcb\xf3\xb4\x39\xaf\xae\x63\xd7\xb2\xab\xb7\x9d\x57\x27\x92\xb6\x1f\xa1\xc3\x7c\x51\x8d\xdc\x03\x32\xb3\xfe\xd4\x61\x97\xa4\x02\xb2\x57\xc8\x70\xf7\x69\xc7\xfb\x4d\x67\x11\x2f\xf1\x5e\x61\xa8\xb3\xb1\x68\x40\xe5\x2d\x74\x6a\x6d\xa2\x9b\x85\x30\xb2\x3a\x3f\x1d\xdf\xec\x87\x51\x3f\x15\x75\xb4\xda\x59\x5b\x14\x3e\x74\xde\x9d\x2f\xd5\xb9\x28\xf1\x83\x6c\xba\x97\xe0\x7f\x1a\x51\xd8\x11\xed\x41\x3b\x12\xd3\x97\x53\x2b\x1a\x58\xe5\x12\x4b\x8f\x3b\xac\x27\xb9\xf7\x38\xde\x2f\x39\xc3\x3a\x56\x55\x20\x67\x49\x67\xa3\x05\xa2\xf1\x92\x94\x74\xdf\x59\x7b\x72\x3f\x5b\x2f\x6c\x2f\x02\x0c\x2e\xba\x63\x8d\xeb\x54\xe5\x06\x99\x88\xa7\xcd\x26\xfd\x8b\xc3\xbe\x47\x8a\xff\x0d\x31\x27\xfc\x20\x0a\x63\xa1\x4e\x9a\x5f\x73\xd4\x49\xf3\x53\xce\x8e\x4e\x9a\xb4\xfb\x60\x69\x0a\xda\x02\x28\xe5\x80\x01\x5e\x5e\xcc\x13\xa4\x8e\x5f\x96\x47\x14\x8a\xfa\x29\x12\xb2\x29\x7e\x10\xde\x8f\x06\x8a\x98\xb0\xeb\xa7\xeb\x60\x6f\xf3\xc3\x88\xa8\x24\xe0\x78\x64\x44\xb6\x99\xc8\x46\xf6\x61\x55\x6e\x49\x59\x68\x1d\x55\xad\x03\xcf\x2a\x9b\xf0\x3b\xb0\x80\x06\xee\xb3\xd9\xd4\xb6\xcb\x69\x96\x1e\xf6\x9a\x43\x02\x8a\x36\x20\x83\x7d\xb3\x60\xea\xcb\xd8\x22\x3b\x20\x15\xbf\xf9\x85\x63\xee\x2d\xde\x75\xa5\x13\x46\xd8\x6b\x9b\xa7\x8b\x9d\x5a\x0b\xa8\xcc\x85\xf9\xb9\x
8a\x32\x7b\x61\x70\x7f\xca\xfc\x87\x1a\x9c\x00\x69\x09\xff\x65\xcd\xfb\xa3\x9a\x3c\x7f\xc2\x41\xae\x58\xc2\x7a\xb1\x9a\x87\x4a\xef\x18\x5a\xd8\x6e\x0e\xd3\x2c\xf7\x60\x24\xc2\xc0\x64\xb0\x3c\x64\x3e\x70\x7b\x98\xaf\xdd\x8a\x27\xb4\x43\x75\xfb\x96\xfc\x4d\x05\x1f\x92\x0a\xe1\xa1\x53\x49\x2c\x0e\x35\xcb\xa7\x49\x4c\x38\x0b\xe1\xdc\x47\xe2\xb9\x74\x78\xd4\x16\x3a\xaa\xba\xc2\x98\xc1\x1b\xc5\x71\x91\x2f\x27\xa8\x17\xcb\x2f\xa8\x30\x11\x88\xf0\x94\x7a\x2c\x3e\x6d\x9c\x26\xeb\xf2\x64\x8d\x2f\xe4\x89\x26\x21\x32\xba\x48\x9c\xed\x45\x61\x3b\x94\x8a\x6d\x9e\x8c\x6e\xb6\x75\xb0\xfa\xfb\xfd\x6c\x22\xd9\x10\xe9\x9a\xf0\x03\xf7\xcf\xf6\x7b\xaf\xda\x7f\x9a\x7e\x95\x11\x48\xb4\xcd\x59\x3d\x3e\x14\xe4\xa9\x4c\x15\xa8\xfa\x23\xb4\x0a\xf6\xd6\x22\x22\xcf\x81\x65\xd1\xd2\x85\xb4\xb1\xad\x9f\x27\x45\xe2\xa8\x9f\x1b\x1e\x33\xb0\x2b\x90\x47\xc3\x2c\xa7\xd2\xa9\x46\x67\xe4\x1d\x3c\x69\xb9\xf3\x74\x93\xba\x2a\x35\xd2\x42\x75\x43\x5d\x53\x9b\xa6\x08\x58\x70\xfb\x4f\x40\xeb\x28\x7a\xa4\xa2\x18\x32\xf8\xe2\x88\xae\x99\x1d\x1b\xa5\xc2\x0f\x06\x68\xc4\x9b\xef\xd8\x5f\x42\x98\x29\x7d\x9e\x8d\x83\x62\x7e\xd1\x7e\x46\xda\x4c\x9d\xeb\xa1\x2c\xf9\xc4\xb4\x07\x51\x5b\x82\x2b\xa3\xd4\xcd\xcf\xd6\xd1\xd0\xb9\x19\x66\xc0\xe4\x48\x2d\xeb\xfa\x61\x4c\xf8\xad\xc8\xe9\x2a\x7c\x8a\xb8\xbf\x4b\xa4\xc9\x76\xf1\x02\x22\x5e\xf3\xe3\x36\x3a\x2f\xa6\xd7\x45\x2f\x83\x20\x01\xb0\xdd\x1f\x39\x3c\x73\xdd\xe1\x23\x47\xae\x81\xcd\x45\xf5\x4b\xb3\x1b\x6c\xa5\x47\x83\xa1\xc7\x08\x0b\xdb\x98\x69\xce\x5c\x53\x3f\x5f\xfd\x7a\x21\x09\x74\xb7\x29\xbd\xfa\x5d\xce\x53\xd8\x93\x77\x15\x77\x6f\xfa\x1e\xfe\xe7\x18\x7b\x64\x6a\x74\x2d\x6c\xe6\x5f\x1e\xf3\xee\x1d\x5b\x2c\x5d\xb5\x42\xe7\xac\x49\x40\xd6\xf9\xb0\xd8\xed\xa8\x53\x31\x1c\xa2\x4e\x53\xab\xc4\xd5\x36\xb4\xe1\x13\xe9\x89\x55\x74\x11\xa2\x67\x20\x21\x49\x91\x1e\xf0\x36\xce\x04\xb5\xf3\x9a\x06\xeb\xb4\x1f\xdb\x60\xbe\x86\xdf\xcb\x8b\xc4\xaa\xdf\x1e\x78\xf6\x77\x4c\x33\x95\xaa\x2f\x2a\xff\x52\x89\x96\x72\x0c\xbf\xa7\x02\xc8\xb1\x44\x5e\x0c\x01\x0e\x9d\x06\xef\x54\x36\x59\x85\x72\xb9\x86\x78\xe1\xbb\x98\x84\x54\x52\x03\xaa\xa1\x27\x1f\x18\xbc\x5a\x22\x37\x62\xee\xaa\x66\xde\x55\x96\x98\xbd\x93\x5d\x94\x27\x91\x40\x2d\x26\x73\x5b\xde\x0f\x8d\xda\xc8\x8d\xc7\xac\xf3\xdc\x34\x6b\xec\x00\xb0\x7c\x59\xbf\xcd\xbe\x3b\xc6\xfe\x6d\x3e\x82\x18\x3f\x73\xff\x64\xcc\xfb\x8c\x33\x8a\x37\x3f\x33\xa8\x99\xd6\x92\x4d\x33\x17\x19\x7c\xdc\x49\x7f\x75\x2d\x37\xe8\xf4\x29\x07\xa4\xc4\x02\x9f\x35\x35\x9d\x31\x89\x42\xed\x40\x55\x84\xea\x3e\xdf\xf4\x07\x34\xf6\x7e\x0b\x48\xc4\x0a\x8c\x58\x55\x19\x40\x27\xe4\x23\x9b\x02\x7b\xfc\xec\xa9\xb9\x92\xe1\xe5\xc9\xec\x7a\x76\xdd\x0e\xfa\xab\xba\xd4\xfb\x9c\x9b\xd8\xf7\x59\x27\x10\x79\x10\x6d\x74\xfd\x9e\x3c\xa8\x64\xee\x0f\xb9\x5e\x83\x1b\x8c\xdc\xac\xc1\x87\x58\xa7\xd9\x7d\x0e\x67\xff\x66\xb8\x10\x88\x55\xde\xef\x8e\x77\xfd\x1e\xbb\xcf\x69\x6e\x73\x26\xba\xd4\xbd\xd8\xfc\xce\x79\x1c\x8d\xde\x5d\x63\x07\x08\x1c\xc1\xfd\x85\x9a\xf7\x12\x6d\x32\x53\x88\x09\x88\x4f\x80\xc1\xa2\x14\xa1\x01\x2e\xc1\xed\x2c\x6a\xe7\x79\xec\x21\x36\xc7\x69\xfa\xac\x39\x48\x4f\x64\x53\xec\xd0\xb6\x83\x84\xf8\x13\xe7\x73\x7c\x3c\xb2\x75\x1f\x3d\xca\xbd\x0c\xde\xaa\xa7\x42\xb6\xf4\xe9\x62\x90\x31\xf6\x25\x87\xb9\xb8\x0f\xd0\x69\xe9\x44\x18\xaf\x67\xee\x6f\x3b\xde\xfb\x9c\xe3\x43\xd7\x2b\x82\x62\xcc\x6c\x15\xbf\x95\xf4\x73\x65\xdc\xce\x0c\x39\x6c\x47\x72\x91\xf1\x28\xde\x08\xd3\x24\xee\x62\xea\x55\x0a\x93\x27\xab\x17\xe1\xe3\x78\x54\x89\x73\xff\xac\x1c\x3d\x34\x0c\xf0\x48\xd6\x62\x54\x4c\xc4\x30\x71\xe6\x4f\x4c\xb0\x4b\x43\x13\x9d\x22\x73\xff\xe1\x80\xf7\xe7\xfb\xd5\x9c\x40\xab\x9c\x4e\xca\xdb\x76\x0e\xcc\xc7\
x61\x3e\x04\xb9\x79\x56\xb4\xfb\xa4\x87\xa0\x59\x0d\x7c\xc2\xf2\x3d\xab\x3c\x48\xb6\x91\xe7\x31\x02\x73\xf5\xe3\x01\x7c\xdf\xb0\xb8\xc9\x53\x97\xb1\xd3\xd8\x8c\xe3\x39\xa9\xcd\xc6\xd1\x0c\x2c\xca\x52\xde\x07\x10\x91\x94\x06\x54\x5f\x0a\x96\x28\xcc\x03\xa8\xba\x69\x43\x38\x10\x95\x5b\x5f\x86\x28\x39\x08\xc6\x2e\x81\xea\xcb\xad\x2a\x0e\xef\xec\x0b\xee\x77\xa5\x7a\x3e\x84\x6d\x5b\xea\x11\x79\x2e\xd5\x96\xef\x13\xea\x9c\xaf\x8c\x9e\x75\x40\x01\x0b\x63\x91\x65\xbc\x97\x26\x2d\x39\xe0\x27\xa4\xae\x6c\x5e\x48\x52\x3c\xb7\xf6\x7b\x74\x09\x6b\xaf\x76\x68\x03\xf8\x33\xa3\x10\xe8\x52\x63\x20\xd4\xc0\xc7\x18\x2e\xa9\x44\xd0\xf9\x3f\xe8\x43\x44\xad\x71\x66\x6c\x0d\x20\x3e\xae\x1c\x49\x40\x9a\xea\x34\x44\xdf\x17\x20\xf0\x45\xb8\xed\xa0\x27\xb4\x83\x3b\x36\xc2\xd5\xbb\x38\x57\xc1\xfc\xe0\xeb\x84\x42\x02\xd3\xe8\x77\x35\x0d\x63\xa9\xa7\xb3\x26\x3f\x01\x81\xfe\x50\x73\x03\x48\x38\x2c\x75\x6e\x88\xe9\x74\x98\x24\xd8\xf1\xb3\xb5\x30\x89\x87\x87\x60\x67\xf6\xfe\x8a\x90\xde\xdd\x19\x76\x64\x65\x1b\x86\xa5\xed\xe1\xe6\x0f\xf8\x56\x8d\x5d\x9a\xaa\xd9\x77\x8b\x94\x61\xee\x9f\xd4\xbc\xd7\x97\x22\x5f\x30\xa3\x53\xcd\xd1\x55\x14\x75\xa4\xb3\x09\x39\xa0\xbe\x72\x61\xca\x15\xaa\x9f\x6c\xf2\x59\xb5\x64\xf1\x10\x03\xc6\x7a\x59\x98\x1c\xd6\x92\xc4\xc0\x27\x66\x4f\xcd\xa9\xd5\x84\x3c\xb6\xa5\x44\x6e\x3c\x7d\xda\x35\xc1\x54\x0b\x4c\x1f\x47\xce\xce\x3c\xe1\xde\x72\xda\x17\xde\xfd\x53\xf8\xf0\x8c\x3d\x7d\xf8\xf0\xe1\x6b\xe1\xd0\x01\x55\x7b\xfc\x91\x9b\x1e\x7f\xe4\xa6\x66\xd7\xb2\x80\x5e\xcd\xae\x64\x33\x3b\x71\x50\x2f\x9a\x7d\xcc\xde\xe2\xb0\x4b\x74\x04\x18\xe8\xfc\xaf\x74\xbc\x17\x39\x95\x7a\x60\x11\x43\x13\x66\x30\x8c\x74\x66\xd1\xbd\xa2\x0b\xda\x2a\x22\xa7\xba\x10\x0d\xd4\xaf\x8b\x30\x15\xd5\x7b\x1d\x76\x30\xeb\xb7\x50\x83\x73\x3f\xea\x78\xff\xa5\xa2\x86\x10\xc4\xc1\x65\xaf\x63\x6d\xe4\xd9\x76\xc8\xe9\xea\xdd\xa0\x2e\xdd\xd8\xbc\x41\x17\x79\x63\xf3\x06\x08\x0f\x51\x26\xa2\x1b\x9b\xd9\x46\xbb\x79\x83\xca\xd9\xa4\x87\xbc\x6d\x9b\xa5\x25\xab\x4f\xef\xc0\xb7\x7d\xc8\x27\xb5\x1a\xf4\x8f\x07\x59\xc3\x18\x1e\x34\x95\x68\xb8\x91\x85\x24\x98\x0b\xb3\xb4\x0f\xbb\xe8\x4d\xfd\x60\x55\xe4\xee\xc7\x0f\x7a\x6b\x15\xd7\x95\x67\x02\x4f\x5e\x00\xc1\xdb\x51\xf8\xbb\x52\xdc\x05\xfa\x79\x4b\xb1\x6a\xfb\xea\xe8\x55\x46\xea\x93\xb2\xc2\x8a\x1d\xff\xfa\x81\xbd\x1c\x88\x5d\xe4\x40\x3c\xdd\x48\x81\x78\xca\x2e\xa1\x85\xd8\x4f\x29\x08\x9f\x17\x6f\x95\xcb\xbb\xfd\x6c\x02\x18\x9f\xd9\xa5\x9d\xe2\xf6\x90\x05\xa2\x5c\x4a\x93\xfd\x78\x81\xa9\xf3\xa3\x5b\x64\x9f\xee\xa0\x42\x08\xab\x73\xdd\xc9\x04\xf6\xf7\x36\x6e\x8c\x49\x8b\x98\x6d\x4d\x10\xd4\x11\x55\xd9\xcb\xdf\xda\x2d\x88\x68\xb0\x7d\x6a\xcb\xac\xfb\x94\x06\xa5\xb3\x54\x0c\x42\x15\x2e\x8b\x46\x19\xc5\x39\xc0\xd8\x97\xc6\xd9\x75\x66\x36\xa7\xb2\x50\x52\xe0\x87\x62\x24\xc6\x99\x42\x67\x1b\xed\xac\x73\xdf\x38\xee\x5d\x5f\xbe\x48\x41\xdc\x7e\xe1\x8d\x05\xd7\x0a\x3e\xd5\x44\x2b\x0f\x75\xdd\x39\xe7\xa0\x96\xf1\x04\x4b\x62\xc9\xba\x0f\x8f\xb1\x8f\x3a\x6c\xbc\x97\xa4\xb9\xfb\x7e\x47\x65\x79\xbc\xb9\x72\x3b\x4c\x35\x5c\xbf\xca\x3a\x00\x01\x2b\xb7\x17\x29\x01\x37\x45\x6b\x2d\x49\xd6\x2d\x0f\xdd\x55\x57\x5d\x09\xaa\x49\xcb\x6f\xaf\x6f\xfa\x69\x80\xe9\xdd\x79\x88\x54\xde\x4d\xbe\x22\x4b\x5d\x31\xf3\x15\x28\x4b\x06\xbe\x16\xf7\xbb\x2d\x91\xf2\xc9\x99\xc6\x35\x57\x5f\x7d\xe5\xd5\x75\x1e\xc6\x72\x83\x0a\x37\xc4\x94\xed\xc7\x79\x2a\x61\xa8\x3c\xc9\x7b\xe2\x8a\xfc\x63\xa5\xca\xf9\x46\x95\x6e\x72\xd2\xd7\xad\xb4\xdc\xd3\xac\xe8\x27\xf7\x26\xef\xea\x15\xfd\xcb\x2a\x8b\x52\xf9\x76\x50\x60\x87\x8d\xf7\xfc\x7c\xcd\xfd\x11\xef\x19\x2b\xf2\x8f\x95\xb2\x47\xfd\xcc\xe2\x09\xe0\xab\x21\xb3\x4b\x61\x1a\x46\xca\x33\x79\x18\x33\x62
86\x56\xcb\xbd\x8e\x6a\xf1\x94\xab\xd4\x2c\xd8\x3b\x74\x44\xee\x9b\x03\x76\xcf\x46\x01\x39\x5b\x97\x17\x10\xa9\xfb\x0c\xe0\xa9\x44\x3f\x95\x8b\x28\x15\x4b\x59\x8f\x30\x03\x84\x57\x47\x36\x15\xa9\xa6\x1c\xc7\x18\x85\x86\x5a\xb9\x20\xeb\x2c\x23\x15\xb8\x31\xd9\x4f\xdb\xb4\x46\x3f\x1e\xb0\x93\x97\xdb\xe6\x21\xed\xc5\x2c\x47\xe7\x6c\x92\x23\x25\x94\x10\x9a\xa5\x01\xc1\xa8\x96\xfb\x81\x20\xae\xfb\x58\x77\x26\x2e\x4c\x6a\x4d\xe4\x57\xbc\x14\xac\x6e\x2e\xd8\xe7\xc3\xa7\x5b\xc1\xbe\x51\xee\x91\x75\x46\x7c\xa8\xf5\xf5\xa9\x11\x8f\x1d\xda\xde\x7b\xe2\x40\x4c\xcd\xcf\x9c\xc9\x5a\x10\x56\x1d\x3e\x3c\x12\x7d\xbb\xf3\xf7\x86\xd3\x36\x98\xe3\x3b\x49\xf8\xd4\xfc\x0c\x6f\xe5\xf1\x2a\xe2\x36\x20\x61\xbc\x6c\x61\xe0\x5c\x9a\xb5\xe4\xc5\x60\x17\xfd\xea\xdf\x37\xd6\xd8\xbb\x6a\x4c\xff\x14\xfe\x54\x2d\x7a\x59\x60\x4a\x71\xfd\x5e\x6e\x4b\x9c\x1a\xa7\xe9\x5d\xc8\x24\x0c\xf0\x63\x5d\x21\x92\xf8\x25\x89\xd3\x28\x1b\x7f\x20\x00\xc7\x69\x9a\x49\xe6\x08\x3a\xe3\x9b\x59\x87\xd6\xd9\x65\xa4\xef\x5c\x67\x68\xb1\x79\x97\x82\x7d\xec\x1b\xbd\x05\xd0\x15\x65\x73\xa5\xde\x91\xf9\xb2\xac\x9f\x97\xfd\x70\x57\xb8\x43\x9d\x6d\x4c\xbd\xf9\x0d\x43\xde\xd4\xec\xb9\xe1\xee\x70\x27\x7c\xc5\xd8\x5b\xf7\xb0\x27\x6d\xd0\x00\xd7\x8f\x09\xce\xc6\x7f\xdd\x1d\x1d\xa9\x3e\x1c\x96\x9f\xca\xf3\xcc\xca\x75\x22\xc1\x3f\xb0\xed\x51\xbc\xea\xf3\xaf\xd4\x0e\xc5\xf3\xd1\x13\x0d\x73\xbc\x59\xa1\x59\x9b\xfb\x78\x80\xad\xdf\x94\x6c\xb4\x14\xb6\x4f\xdd\xab\x38\x75\x3f\xea\x5a\xf4\xbf\x7c\x95\xbe\xc2\xff\x63\xb8\xab\xf0\xd1\xb2\xe4\x2f\x05\xf1\xe6\x87\xd2\x89\x70\xba\x3e\x78\xac\xf0\xcb\xb8\x42\x01\xc7\x20\xfb\xcb\x3d\xec\xd0\x30\x3e\x6e\x7c\xf1\xec\x7c\xf5\x9e\x88\xc2\xb4\x1f\xde\x13\x1d\xf2\x12\x16\x98\x0f\x86\xc4\xc7\xeb\xd5\xe9\x87\xc1\xfe\xf5\x6e\xf6\x5b\x23\xec\x46\x7c\xe7\x8c\x36\x1f\xc2\xf7\x8f\x44\x0f\x8f\xdc\xe7\x3f\xac\xe0\x45\x21\x04\x45\x5b\xb7\x54\x07\xe5\xaa\x30\xcd\x68\x70\x75\xc8\x78\x81\x29\xb1\x4b\x6e\x6f\x82\x0c\x66\xb3\xd6\xbe\xc2\x79\xcb\xc5\x3e\x36\x38\x5e\xaa\x12\xa3\x62\x49\xe0\x6f\x82\x5a\x81\xd3\x4b\xb5\x85\x60\x3d\x4b\x92\x77\xe2\x3c\x87\x0b\xd7\x6a\x73\xf0\xfa\x2f\x5e\x5e\x01\xfe\xb6\xb4\x94\xcb\xb9\x89\x58\x2b\xb9\xed\xae\x2a\x47\xa4\x14\x9b\x5d\x66\x80\x4b\xcb\xe3\x96\xa4\x98\x1c\xcc\x49\xb0\x24\x57\xc4\x6a\x9c\xf5\x72\xfc\xba\xcc\x78\x44\x3f\x45\x3c\x6e\x1b\x66\x32\xb5\x4f\x35\xd0\x53\x1b\x67\x43\xc6\xe9\x8c\xfd\x11\x13\xd1\x63\xa4\x5c\x37\x97\x75\x38\xba\x07\xfb\x52\x21\x3f\xf4\xee\xfa\x3f\x10\xb0\x9d\xed\x62\xa1\xdf\x95\xe1\xbb\x83\xe8\x2d\xc1\x89\x38\x91\x9a\xf0\xb7\xdf\x85\x3e\x75\x90\x8c\xca\x66\x5e\x69\x57\xde\xf1\xf0\xd8\xaa\xe3\x2b\x99\x12\xbb\x5d\x20\x75\x4b\x97\x4d\x00\xd8\xf1\x0b\x0d\x1e\xc9\x0b\xe5\x91\x68\x9c\x47\x17\xda\x85\xfa\xbf\xb4\x6c\x2b\x0b\x74\xa6\xd3\x4d\xe2\x66\x5c\x26\x24\xee\x72\x03\xf6\xc6\x0f\xd4\x38\xf5\x52\xb3\x1c\xbc\x2e\x5c\x70\x52\x3b\x24\xd1\x77\x0c\xc4\x8c\xf0\xfd\xea\xe7\x89\xb5\x3c\x2e\xe5\x18\x86\xa1\xad\x9b\xfa\xc1\xfc\xa8\x19\xa5\xe3\x94\x66\x1b\x52\xa0\x17\x7e\xb8\xc6\x47\x03\xb6\xa7\x80\x14\x68\x73\xb2\x1d\xbe\x7f\x2b\x8c\x95\x68\x2c\x98\x78\xb1\xe8\x25\x94\xb6\x6e\x4e\xb6\xab\xbb\xc6\x24\xad\x53\x16\xb8\x5a\x8f\xd9\x92\xce\xe2\x53\x02\x3b\xb6\x9e\xdf\xc9\xd9\x19\x35\xb7\x10\x39\x24\x92\x42\x6f\x25\x1b\xd0\xdf\x1a\x37\x6b\x91\x60\xd4\x7a\x49\x90\xc3\x5b\xb6\x1a\xec\xfb\x02\xc6\xec\xe6\x0e\xbf\x33\xea\x3a\xcb\x9c\xce\xd3\x95\x5e\x47\xa4\x75\x35\x9c\x60\xfa\xbb\xf8\x39\xdb\x1c\x2c\xa4\xc1\x69\xe0\x70\x35\xc3\x0a\x07\x8c\x03\xd1\xd3\xd1\x2a\x15\x76\xb1\x7b\x93\xfa\xc7\xd7\xb3\xb1\x2d\x65\x55\x9d\xeb\x25\x32\x7c\xc7\xf5\xd1\x17\x03\xe7\xc1\x00\x6e\x14\xd2\x6d\xeb\x14\xfb\x25\x25\xc8\x29\
x10\xc9\xae\xbd\x1e\x20\x67\x60\xe9\x92\x16\x04\x2c\xda\x14\xe5\x88\xd4\xed\x98\x73\x43\x1d\xa1\xcd\xac\x03\xf8\x77\x7d\xd6\xaa\xee\x61\x9c\xb4\xb1\xbd\x08\xd9\x08\x25\x02\xbd\x1b\x85\x4a\xe5\x59\x8f\x78\x59\x55\x25\xba\xf0\x41\x8a\x75\x27\x1f\xbf\xd3\x35\x88\xd9\xf4\x25\xf3\x3b\xf7\xb2\x7b\xd8\xa8\x3a\xe2\xc2\xbb\xd7\x27\xac\x1b\x36\x86\x27\x17\x16\x66\xab\x65\xb3\xcf\x30\x36\xaa\x1a\x1d\x7e\x8a\x45\x1f\x63\x27\xb3\xc2\x44\x5d\xe3\xed\xcb\x73\x7b\x22\xc1\xc1\x6a\x65\x1d\x11\xa7\x66\x19\x08\x9d\xb4\x1b\xfa\x3c\xce\x85\x97\xab\x7c\xee\xc4\x14\x3f\x7c\xfb\x6d\x47\xcd\xad\x88\x5a\xd4\x4a\x50\x23\xfd\xe7\x6a\x8c\x10\x1b\xab\xe3\x44\xaa\x94\x88\x77\x45\x6e\x60\x62\xe7\xe6\x66\xdc\x52\xe3\xd4\x94\x7a\x8c\x1f\x6c\xf0\x99\xd9\xc2\x26\x3d\x42\xc3\xb1\xc1\xa7\x74\x7c\x91\xd2\x88\xab\x9d\x05\x43\x05\x96\xa5\xe8\x76\x93\x3e\x2f\xd5\xd9\x0b\x55\xcd\xcc\x6a\x33\x07\x8c\x38\x8d\x16\x13\xe0\x88\xd3\xe9\x79\xd9\x21\xbc\x6c\x5a\x3c\xb6\xc8\x5b\x12\x19\x77\x73\x1d\x73\x9d\x4b\xb5\xac\x60\x01\x91\x1b\x4d\x49\xc9\xc1\x16\xb2\x3d\xdc\x69\x24\x86\x16\x61\x97\x6d\x83\x11\x60\x6f\x64\xe4\xb1\xdb\x0e\xc0\xea\x52\x73\xae\xb4\x72\x55\xc4\xb1\x23\x47\x0e\x9b\x87\x45\x83\xdd\x9d\x59\x74\x9e\xe8\xd3\xd5\xce\x00\x77\xf3\xd0\x85\x0c\xeb\x4e\x13\xa7\x17\xa5\x95\xe9\x0e\x2b\xfc\xc0\x92\xd4\x51\xe9\x2b\xb4\x60\x1c\x71\x3d\xee\x7e\x81\xeb\x1f\x0e\x69\x9d\x5e\x9e\x72\xc8\x53\xbc\xb5\xdd\x8c\x03\x75\x30\x5c\x8e\x04\xf5\x8a\xba\xb9\x6c\xc6\x85\x8c\x08\x28\x0c\xda\xb7\xbb\x26\x75\xee\x7f\xc0\xfc\x11\x6b\x3d\x26\x41\x2b\x07\xd7\x2b\x11\xcc\x46\xed\x2c\x6b\x2c\x89\xbc\xd1\xcc\x3a\x11\xb0\xb6\x47\x6b\x71\xd2\x6a\x8a\xbc\x15\x8d\xaf\x57\x51\x37\x97\xed\xf8\x82\xa1\xb0\xd1\x19\x7c\xf4\x87\xc4\x1d\x40\x15\xdc\xdc\x50\x55\x40\xf1\xb8\x7a\xcc\x6b\xcd\x15\x91\x8b\xa6\x5a\x42\xfb\x6e\xde\x87\x8e\x1a\xd1\xed\x4a\x91\xab\xfd\x13\x97\x85\x4c\xda\x3a\xde\x17\x65\xcd\xf4\x99\x79\x2a\x1b\x4d\x33\x35\x6d\x04\x25\x33\x8d\x80\xdf\x1b\xc0\xe2\x49\xa1\x99\x48\x2d\x5d\x6d\x9d\xad\x02\xdb\x09\x23\xfd\xe4\x27\xf3\xe8\xe6\x08\x4e\x4d\x5a\x1f\xfa\xe4\x18\xb6\x46\xe0\x13\xc3\x4b\xe2\xef\xf0\x35\xd1\xc7\x3d\xda\xe6\x5a\xa4\xd0\xf4\x8d\x7b\x3e\x1f\xdd\x09\xb4\x6a\x7b\x96\x36\x03\xd6\x3a\x4c\xd4\x8a\x14\x2d\xdc\x66\x86\x1e\x5d\x15\xd9\xe0\x87\xbc\xe2\x6d\x0f\x9d\x18\xd5\x2b\xaa\xa6\xcc\x6c\x4d\xb0\x42\x7b\xed\x76\x7c\x41\xe9\x15\x9d\xcc\x44\x0f\xe2\x8c\xc0\x58\x8e\x69\x71\x61\x86\x58\xd5\xe0\x9d\x6f\x7f\x14\x78\x29\xff\x2a\x89\x88\x26\x51\x34\x4c\xcd\xcf\xa0\xdb\x23\xfc\x99\x20\x9a\xab\x3e\x1c\x9a\x34\x2a\x4e\x93\x38\x95\x8e\x43\x87\x58\x06\xac\x27\x0d\xd5\x2a\x50\x97\x29\xc0\xdb\x3b\x4c\xa6\xd9\xdd\x14\x3f\x7d\x2c\xaa\xbb\x67\x3f\x42\x1a\x80\x45\xdf\x3d\xf7\xad\xb3\xca\xed\xde\x4f\x5e\xe7\x81\x15\xfc\xbc\xbd\x9e\x65\xf2\x3f\xf6\x46\x5f\x0a\x26\x5b\xa2\xeb\x66\xea\x8d\x53\xa5\x57\x6b\x4d\xe2\xca\x92\xf1\x02\xce\x56\xdf\x8e\x82\xa6\xa1\x93\x86\x15\x94\x04\x50\xf3\xa2\x40\x60\xb2\xf1\x6a\x39\xe5\x98\xbd\x46\xba\x0e\x18\xe6\xd0\xc0\x6b\x96\x26\xf1\x7f\xee\x61\x5f\x18\xf1\xd3\x24\xfe\x81\x61\xb2\xff\xb5\x91\xed\x24\x87\x5f\x65\x49\x0e\xff\xaf\x51\xed\xab\xba\x38\x1a\xfd\xd1\xc8\x4c\xdb\x3f\x27\x21\xcb\x84\x9f\x38\x7a\xc8\xba\xf4\x63\xfd\x9b\xb2\xa5\x97\xa5\x5d\xf8\xd9\x73\x70\x34\x60\x27\x39\x76\x37\x24\x89\x83\x4e\xad\x01\x6d\x75\xea\xec\xf0\xf3\xb2\xaf\x03\x39\xc1\x1d\x40\x8f\x57\xcd\xf1\x5e\x39\xd0\x13\x25\x0f\x5a\xb8\x79\x36\xa8\xd9\x9e\xe7\xa0\x74\xe3\xd0\xf5\xd2\x81\xaf\x35\x5d\x12\x6e\x5a\xca\x1c\xa3\xda\xe4\x39\x02\xcc\x09\x4c\x46\x2f\xa4\xdb\xd6\x9a\x1a\x8c\xc2\xb8\xdb\xdf\x42\x96\xbd\x2e\xe5\x8f\x01\x2a\x22\x22\x2f\x42\x6a\x9c\x8e\xc8\xcf\x2b\xc5\x83\x76
\x59\x83\xcf\x82\x59\xa0\xaf\x48\x4c\x1a\x1d\xed\xf8\x04\x2a\x1d\x0b\x65\xe2\xfb\x1a\x8d\x7d\xb8\x0c\xb3\x1c\x93\xec\xe0\x9a\x52\xcf\x3d\x97\x77\x83\x8d\xb3\x9b\x37\x35\x0e\xef\x95\xfd\x85\x4c\x35\x81\xcd\x3a\xb9\xb7\xa7\xa3\xa3\x1b\xe4\xde\xce\x72\x00\xde\xc0\x30\x6e\x94\x87\xfb\xd5\x01\x63\x68\x52\x82\x79\xf7\xa2\x20\x5a\x73\xd9\x31\x0a\x2d\x78\x49\x01\xf5\x9d\x2c\x68\x84\x5e\x31\x63\xa2\xa6\x49\xc4\x4a\xdc\x73\xe2\x0b\x3b\xd9\xbc\x1d\x08\x6b\xa1\xd4\x45\x37\x26\x42\x19\xe3\xf7\xf3\x5e\x00\xe8\x5b\xaf\x28\xb3\xce\x1c\x79\xab\xe6\x7b\x4b\xc6\x39\x1c\xfe\xe0\xce\xe8\x81\xf5\x7f\xae\xa4\xed\x83\x8b\x2a\x20\xd6\x03\x86\xc2\xc2\x7d\x53\xc9\x26\xbf\xa0\x4a\xbe\xf6\x9f\xda\xc1\x3e\x1f\xb0\x1d\xf0\x69\xf8\xe9\x80\xc9\xf5\x66\xf9\x5a\x75\x6e\x5e\xd5\x14\xad\x62\x5b\x7d\x94\x50\x13\xbe\xb1\xde\x65\xf2\xf7\x43\x5d\x5c\xf0\xc5\x09\xf8\x68\xd1\xed\xa1\x4e\xd4\x59\xf6\xf2\x14\x60\x81\x8b\x0e\x5d\xe0\xc4\xea\xc1\x45\x3e\x8f\xb4\x8d\x78\x95\xc0\xde\x3b\x62\xee\x0b\x1f\x1e\x61\xed\x47\xbc\xb3\x78\x9b\xf8\xa1\x1a\xcd\xd2\xe5\xf6\x17\xbe\xf2\x3a\x4c\xd4\xff\x94\xbe\x0b\x54\x52\xc7\x61\x3e\xb4\x58\xc4\xb0\xf5\xad\xd3\x3d\x5e\x4e\x21\x4b\x0e\xa1\xe5\xe8\xb3\x45\x53\x5b\x29\xd2\x87\x84\x61\x1d\xa1\x4b\x98\x43\x9b\x57\x34\xb4\xc1\x43\x6a\x13\x69\xbf\x04\xbf\x40\x46\xd2\x40\xa4\x5b\x68\x01\xfb\xfd\x11\x36\x45\x53\xa4\x76\x6d\x5d\x2c\x2f\xe7\x72\x59\x94\x99\x3f\x35\xd5\x94\xea\x93\xb3\x33\xf3\xe8\xf9\xc0\xd9\x08\x5f\x39\x12\x4d\x56\x1f\x5a\xe2\xa3\x96\xcc\x63\xa4\x18\xac\x5e\x78\x8a\x14\x5c\x55\xb8\x1a\xbc\x4d\xf4\x2b\x35\xf6\xbe\x1a\x70\xaa\xb4\x70\x29\x85\x3f\x5d\x8b\x6e\x22\xa3\x58\xfb\x5d\x08\xf4\xa6\x0c\xb7\x6e\x4c\x95\x7b\x42\xf6\x19\xec\x3e\xb6\x30\x74\x45\x5e\x59\x77\xa7\x74\x73\x2e\x05\xfb\xd8\x13\x3c\x0f\xbf\x3a\xbc\xea\x1d\xd1\xad\x2b\xb9\x1b\xee\x09\x77\xd5\xc1\x15\xca\x2e\x05\x9c\x7d\xed\xe0\x9b\xea\xb7\x70\x67\x38\xda\x11\x5d\xb6\xa5\x2b\x4c\x2a\x6c\xcb\x57\x98\x6f\xbb\x8e\xdd\xbe\xe9\x6d\xfd\xa9\xac\x29\x92\x61\x90\xa4\x2f\xee\x8d\x3e\x17\xac\xf7\xeb\x10\x4c\x12\xe8\x5b\x86\x97\x05\xae\x30\x86\x03\x7e\x50\xef\xc6\xf8\x6b\xeb\xe7\xe3\x27\xc5\x2a\xc6\x68\xdb\x13\x06\x69\xaa\x5c\xb6\xce\xf3\x10\x98\xc5\x3b\xbd\xe6\x0a\x97\xa2\x88\x31\x6f\xf1\x72\x2e\xd2\x72\xf0\x3b\xb4\x9f\xc8\xd6\x41\x9a\xbf\xc2\x53\x8e\xa1\x13\x98\x2f\x79\x10\x83\xf4\xf7\xdb\x18\xa4\x6d\x0c\xd2\x57\x0b\x06\xe9\x9f\x34\x06\xe9\x4b\x01\x3b\x71\x0d\xf0\x3c\x5d\xd9\x8c\xde\x14\x5c\x05\x00\xa9\x8a\x43\xd4\xf8\x23\xd7\xa3\x62\x7f\xee\x43\xb6\x19\xba\x41\xd3\x85\x92\x93\x09\x6f\x36\x00\x50\xa2\x73\x4e\x91\x81\x2b\x5b\x8d\xff\x70\x50\xa6\xb5\xcd\x6f\x8d\x17\xc2\xb9\xad\x41\x99\xd6\x13\xde\x43\xb1\x4c\x6f\xdb\xe1\xe5\x70\x71\x73\x23\x9b\x23\x2f\x7c\xf1\x8e\xc8\x7b\xe0\x10\x38\x68\x38\xaa\x3d\x8f\x21\x7d\xda\xc5\x60\x54\xad\xef\x8b\x01\xcd\x82\x27\x62\xdf\x33\xca\x5e\x1e\xb0\x30\x11\x45\xb9\x90\x8b\xb4\x80\x32\x17\xe2\x8e\x0c\x57\xd9\xad\x57\xb0\xe0\xd5\xa7\xd1\x1d\xa7\x44\x51\x42\x8a\x50\xdc\xf9\xa6\xad\x25\x56\x81\x12\x02\x92\x3b\xe2\x14\x82\xee\x44\x49\x2d\x9f\xc1\x76\x11\x2d\x6d\x78\x3a\x7a\xea\xc9\x5e\x47\xa4\xdc\xdc\x6c\xd1\x2f\x7a\x52\xf1\x8a\x02\x88\x42\x68\xc9\x03\x61\x6a\x69\x7a\xe2\xf9\xf6\xee\x61\x3b\x73\x29\x8a\x2c\x0d\x9f\x1a\x1d\xde\xbf\x94\xc7\xb2\x3d\xc6\xf1\x89\x09\x2b\x35\x6d\xdd\x57\x6c\x58\xd6\x29\xb3\xa6\xef\x8e\x6e\xa1\x85\x48\x6a\x9d\x29\x62\x5c\xb3\x59\x2f\x00\x05\xdd\x09\x91\x14\x72\x9c\x9f\x43\xbe\x59\xaf\xb4\xa7\x30\x98\xa2\xf0\xd6\xe8\xe6\x05\x8a\x81\x7f\x0e\xa6\x6d\xd4\x25\x0d\x26\x89\xf3\x0a\xe8\xb2\xeb\x55\x6b\x67\xf3\x6c\x49\xc2\xec\x3d\x78\xe5\xb3\x77\xd3\x7a\xb3\xb7\x26\xe8\xbc\x56\xbb\xfa\x9f\x8e\x0e\xf5\x2
4\x1e\xef\xae\xc8\x8e\xcc\x45\x32\xa5\x39\x8e\xc2\x8f\x1f\x8d\x5e\xb8\x63\x32\xe5\x83\x3f\x69\x70\x96\xfe\xd3\xcd\x88\x8e\x29\x2d\x4b\xd9\xe9\x66\x39\x06\xaa\xc0\x22\xb1\x30\x38\xcd\x23\x03\x24\x8d\xd0\x4d\x64\x49\x6d\x96\xf1\x6a\x5c\xc6\xe0\x04\x6c\xae\xe0\xb5\xd4\x52\x6f\x79\x19\x7c\x4e\xa6\x0d\x2e\x07\x13\xf8\xdd\xd3\xcc\x61\xb1\xc9\x5d\x06\x34\x9b\xeb\xce\x78\x97\xfa\x9e\x67\x24\x97\xe0\x62\xd0\xb4\xce\xf0\xbb\xbc\x10\x83\x0f\x00\x9e\x20\xe9\x8d\x25\x50\xe4\x48\x71\x87\x1f\xa1\x43\x25\xe5\x72\xb0\x69\x98\x21\xac\xa0\xef\xcb\x8c\xcb\x0b\x4d\xa9\xe4\x1c\x9c\x9f\xd4\x58\x25\xbe\x10\xa7\x38\xae\x7d\x04\x7a\x0c\xe5\x6a\xdc\x84\x1a\x86\x76\xdb\xa1\xc2\xc6\xc1\x5e\xea\xf3\x56\x8c\x79\xc6\xf0\x1c\x76\x28\xf5\xf0\xb4\xe0\x0b\xaa\x6f\xfa\xa4\xc0\xaf\x56\x63\xe1\xf8\x26\x4c\x2f\x9c\x8a\x1c\xa3\xa9\x3a\x82\x74\x8d\x62\xdd\x1b\x50\x0f\xcf\x20\xf0\x4c\x15\x3f\x2c\x2d\x34\xe5\x45\x25\x53\x51\x8b\xfc\xc1\xe5\x55\x98\x57\xdb\x89\x58\x1e\xea\x55\xff\xd9\x83\xec\xd3\x01\x1b\x91\xe9\x6a\xf8\x7b\x41\x34\xa3\x83\x5e\x65\xba\x1a\xe7\x59\x0a\xde\xe2\x55\x91\xc7\xaa\xa2\x42\xfb\x76\xe3\x0a\x89\xd7\x30\xf5\xc5\x35\x7f\x9e\xc4\xc6\xd8\xbe\x4d\x7d\x4c\xc7\xd3\xd5\xfb\xc4\x23\x84\xa3\x0c\xe8\xe6\xe0\xc7\x82\xe8\x07\x03\xd7\xbb\x34\x6c\xcd\x39\x19\x71\xd5\xca\x9b\x3e\x33\xff\xe0\xa9\xc9\xbb\x8f\x9f\xa2\x99\x00\xc7\xa4\x5e\x01\x84\x19\x10\x9d\x4c\x99\x0d\x1e\xbb\xe4\x38\x57\xbd\x75\xd7\x9b\x9a\xf9\x21\x15\x16\x0d\x3f\xb0\x67\x07\xdc\xc6\x86\xdd\x68\x62\x76\xd8\xb5\x2c\x26\xfd\x1d\x5e\x8c\x1d\xf3\x43\xec\xc0\x06\xf4\x56\x7a\xcc\xcd\x4a\x51\x35\xb1\x84\xdd\xa0\xce\x19\x65\x4f\x14\x20\x48\xc3\x67\xb2\xff\xbc\x79\x6a\x10\xf5\x66\x74\x00\xfe\xef\x32\x1a\xcb\xbe\x8b\xdd\x58\xc8\x66\x2f\x8f\xcb\xbe\x6a\x86\xbc\x50\x86\xc9\x16\x70\x2a\xf3\xfe\x37\xd1\x6d\x95\x07\xda\x03\xbb\x79\x03\x7e\x6f\x94\xed\x29\xca\x56\x9c\x9e\x4d\x9b\x32\xfc\xc8\x68\xf4\x4b\xa3\xf7\x3b\x0a\xd2\x10\x92\x52\xb2\x90\x92\xac\xa0\x04\xe4\xea\x6b\xf0\x7f\xa4\x32\xb1\x39\x35\x4d\x6c\x68\xd6\x95\x86\x2c\x94\xae\xc1\x44\x59\x8a\xe6\x0a\xf9\x7a\xf0\xfb\xb8\x00\x8a\x56\xa7\xc4\xa2\xcc\xa5\xe8\xe8\xac\xf3\x70\x9b\xab\x8a\xe2\xa2\x99\x67\x45\xc1\x3b\xbd\xa4\x8c\xbb\xa6\x30\x5e\x48\xb4\x25\xd1\x15\xae\x7b\xe4\xa4\x09\x45\x06\x58\x53\x1b\x35\x0b\xd2\x2a\x9a\x45\xaf\x64\xf1\x38\x5c\x5d\x02\x8b\x88\x66\xc0\xd5\xd7\x87\x64\xbc\x60\x8d\x24\x0a\x54\x79\x46\x9e\xa5\xd4\xd2\x82\x9a\x9a\xb6\xcc\x15\x0c\x60\x1e\x6d\x81\x54\x54\x2b\x2e\x9a\x59\x9a\xca\x26\x50\xf5\x69\x4a\x7c\x1c\x68\xdd\x52\x18\x6a\x9d\x49\x1d\x4b\xa7\x47\x4e\x71\xee\x61\xea\x1f\x2a\x68\x8c\x25\x02\x48\x3c\xdb\xa8\x7d\xb8\xc7\x2d\xf1\xeb\x6a\xc0\x9a\x5a\xfc\x64\xbc\x61\x0b\xf0\x8c\x93\x78\xa5\x09\xa9\x04\x94\x10\x3e\x7e\xf6\x84\xb9\x0b\x32\x05\x7b\x2e\xed\x2f\xd5\xd8\xd7\xe0\x2d\xa0\xd9\x60\xe0\xdb\xfe\xdd\x5a\xf4\x1b\x35\x35\x47\xfa\x2e\xc0\xbd\xaf\xb4\xcd\x82\x16\x10\x53\xac\x26\x5f\x8c\x8b\xa1\xb2\x0a\x6b\x21\x4a\xca\x61\x2f\x18\xba\xe0\x5e\xaa\xe5\xb6\x03\xe2\xdb\x3f\x33\x3b\x35\xce\x67\x67\xa6\xc7\xb9\x2c\x9b\x63\x96\xe7\xd2\x8a\x76\x8d\xd8\x02\x6a\x63\xba\xab\x1e\x56\x11\xdc\x56\x43\x1d\x6b\x2b\xa2\x84\x31\x73\x2a\x02\x76\xcc\x15\x91\xd3\x86\x2c\x35\x4d\xad\x89\x43\x5e\x67\xcb\x81\x9c\xd5\x77\x99\x38\x9d\x43\x78\x81\x3e\x3d\xca\x46\x45\xbe\x5c\x84\x9f\x1c\x8d\xde\x3b\x3a\x99\x2f\xf7\x3a\xd2\x61\x37\x96\x69\x99\xf7\xc1\x0e\xc7\x71\x6a\x65\xcd\xf3\xaa\xc5\x1d\xb1\x2c\xf7\x15\x7c\xea\xf4\xb4\xe1\x09\x8e\xdb\x26\xfb\x00\x5e\xe3\x64\xab\x71\x0b\x19\xe9\xf1\x10\xb4\x77\x5b\x05\xbf\x69\xff\x7d\x93\x73\x0f\x9e\x99\x3c\x7d\x7c\x0c\x49\x37\x81\x41\x08\xf8\xff\xb5\xe2\x60\x7a\xa4\x34\x03\x7b\xa6\xd2\x1d\x92\x3e\x
59\x1d\x6a\x6a\x24\xfe\xd5\x17\x59\x0e\xf9\x34\x8e\x7c\x9c\x76\x7b\xa5\x76\x5d\x18\x84\x69\x6a\x88\xf3\x55\xf7\xdc\x66\x15\xfd\xb4\x14\x17\xf4\x25\xa0\x2c\x9a\xa2\x6b\xc1\x20\xad\xac\xa7\x2a\xbf\xe9\xa6\x71\x1e\xcb\x63\xfc\x26\xe7\xc3\x06\x3f\x4e\xef\x3a\xfd\x75\xb6\xc3\x92\xed\xed\x38\xcf\xe5\xb2\xc8\x5b\x09\xa5\xe7\x76\x8d\x4c\xd3\x41\x50\x5a\x0b\xb2\x30\x87\xba\x3b\xb6\x76\x83\x53\x8a\xe2\x7c\x31\x11\xa7\x4a\x79\xa9\x2b\xb9\x52\x17\xdd\x6e\x42\x61\x2a\x78\x74\xc8\x7a\x33\xeb\x74\x44\xda\xaa\x0b\x5a\x08\x75\x33\x0b\x13\x4f\x24\x1a\xc6\xba\x30\x6f\xc5\x69\x5d\xd4\x8b\x15\x99\x24\x1b\x90\x78\xfd\x8f\x51\xb6\x8b\x3e\x08\xff\x76\x34\xfa\x9d\xd1\xe3\x66\x4d\x71\xf8\x02\x69\xc4\x30\xb7\xb5\x85\xed\x0a\x0e\x05\x0f\x5d\x76\xc7\xcf\x2c\xcc\x3d\x30\x7b\x76\xe6\xcc\xc2\xf6\xea\xdb\x5e\x7d\x1b\xae\xbe\x98\xed\x80\x55\x13\x2e\x46\xf3\xd3\xce\x2a\x02\x09\x7b\xd9\x97\x9f\x56\x07\x9a\x80\x52\x3c\x8f\xdf\x4b\x6a\xec\x46\x78\x3a\xdb\x4b\x12\xa2\x7f\xfa\x72\x10\xfd\x65\x30\x03\x15\x76\x7b\x49\x42\x3e\xed\x06\x3f\x8b\xc6\xfd\x24\x84\xdd\x8e\xf3\x33\xc8\xeb\x30\xd3\x3e\x93\x95\xb3\xfa\xae\xdc\xc5\x4c\xe0\x8b\x6a\x95\x1f\x23\x1f\x72\x89\x47\xb3\x73\x7d\x9f\xe5\x5e\x01\x96\x13\xff\x2a\xa6\x6d\xfd\x9e\x3f\x51\x5b\x79\xf5\xc1\x91\xf8\xf0\x08\xfb\x5a\x83\xcd\xcb\xd2\xd3\xe8\x78\x99\x15\xe5\x4a\xf8\xb6\x91\xe8\x47\x1d\x04\xcc\x2c\xc0\x3d\x8c\x06\xa3\xc1\x19\x65\xe6\x3c\x71\xb7\xa2\x53\xaa\xf1\xe7\xe8\xdd\xb4\x96\xc7\x65\x29\x41\xf9\x01\x78\xbb\x8b\x57\x70\xcb\xb0\x50\xf7\x06\x3f\xad\xcb\xb0\xdf\x9a\x54\x34\x08\xaa\x02\x57\x0f\xa6\x46\x20\x8f\xd3\xb8\x71\x0e\x28\xb3\xb2\x28\x64\x0e\xcd\x69\x53\xd2\x6e\x9d\x17\x89\xdf\x4f\xed\x2a\xf3\x5e\xda\x74\x93\xa7\x00\xe7\x5d\xdc\xe6\xcb\x80\xec\xa1\xbb\xbe\x23\x07\x6e\x3f\xca\x97\xfa\xa5\x24\x8d\xa4\xcc\x4a\x91\x98\x3e\x52\x7a\x3c\xd2\x62\x2b\x5c\xfc\x26\x4b\x0f\x60\x55\xa1\xe1\x07\x0f\x9d\x5f\xf2\x57\xd0\x44\x4b\xae\x4e\x38\xc3\x57\x4f\xb2\xe5\xe1\xd6\xa7\x9d\xc7\xcf\x8e\xb0\xaf\x1f\x32\x8f\xb8\xb4\x3f\x32\x12\xbd\x6f\x64\x86\x1c\xa4\x98\x0f\xd0\x85\x64\x3a\x13\xe4\x90\x24\xea\x3c\x32\x0d\x7e\x22\x4e\xa4\x43\x1a\x5e\x05\x83\x0d\x5d\x3d\x90\x10\x9d\x8a\xa8\x68\x3d\xe4\x0c\xd4\x55\x66\x29\xa2\x97\x88\x62\x06\xd4\x60\x9a\xa0\x06\x3f\x21\x90\x66\x65\x21\x3b\x95\x2d\x17\x67\xd3\xe3\x00\x22\xf1\xda\x62\x73\x27\xf9\x49\x17\x92\x0c\xb2\x0b\x2a\x59\x4e\xd8\xc2\x61\x1d\x86\x35\x6c\xec\x01\x8d\x2a\xb2\xa5\xc8\x0b\xb1\x93\x1b\x11\x41\x2c\x38\xeb\x6e\xf9\x85\x3b\xa1\x87\x0e\x1c\xb9\x0d\x97\x87\xda\xe7\xb7\x1d\xe0\x89\xb2\x35\x09\xbc\x0a\x92\x5d\x09\x83\x8e\x80\xfc\x13\xde\xc4\xab\x81\xde\x6c\xa2\xff\xdf\x80\x5d\xb7\xea\x84\x44\x84\x7f\x1a\x44\x27\x67\xb3\x96\xc1\xc4\xe9\x90\x91\xcd\x77\xd4\xc6\xfe\x8c\x03\xac\xc1\xc6\x37\x35\x54\x9d\xe8\x8c\x4b\xc1\xf8\x26\x4e\x8d\xeb\x42\x06\x4d\x03\x8c\xcd\x65\x78\x36\x3e\x18\xb0\x3d\x16\x5c\xf2\xf3\xc1\x16\xc8\x15\x35\x98\x61\xce\xe1\x4a\x8f\x1e\x32\x20\x92\xad\x5b\xf0\xc3\x9d\x6a\x6a\xf1\x15\x5d\x62\x66\xd2\x45\x26\xca\xb4\xea\x6b\x4f\x9d\x8d\x31\x50\x36\x00\xfb\xef\x01\xdb\x01\xf6\x56\xf8\xe1\x20\x7a\x4f\x60\x6d\x70\xd7\x10\xb1\x1c\x0f\x58\x04\x17\x7c\xa9\xd7\x6e\x2b\x6b\x09\x10\x4e\x60\x30\xa6\xc3\xad\x08\x6b\x0b\x92\x62\x05\xf6\xd7\x80\xb5\x37\xf0\x39\x3a\xe8\xf0\xd0\x22\x34\x5d\xbc\x8e\xf9\xe7\x87\xdf\x3c\x3f\x60\x23\x65\xd9\x0f\x1f\x8a\x92\x2d\xf7\x66\x61\xe1\x01\x64\x22\x02\xa8\xf3\x38\x17\x49\x91\x59\x60\xe2\x3e\x68\xe2\x3e\x12\xe7\xca\x9e\xdf\xac\x0d\x1f\x08\x18\xa3\x04\xfa\xd3\x71\x1e\xbe\x33\x88\x7e\x22\x98\x72\x96\x3b\xfd\x46\x0e\xcf\x2c\xef\x37\x86\x04\xe7\x0c\x1d\xce\x7d\xe6\x72\xcb\x8b\x71\xd3\x18\x74\x03\x68\xd4\x29\x0f\x9c\xa4\x2e\xd6\
x5a\xec\xc0\xe9\xb2\xf1\x86\xfe\x72\xc0\xae\xc7\xbd\x3b\x2d\x21\xbe\x25\xfc\xb3\x20\x7a\x9a\xf7\xa4\x1a\x4f\x0b\xc9\x84\x20\x68\xa3\x89\x1b\xde\x49\x42\xe6\xfb\x2c\xdd\x0d\x7d\x90\x4d\xb0\xfa\x16\x37\x34\x56\x7c\x29\xa8\x6f\xb2\xa3\xaf\x0f\xf7\x62\x33\x2e\x77\x4b\x7f\x72\x84\xed\x92\xe9\xea\x89\x3c\xeb\x84\x1f\x1d\x89\x5e\x3a\xa2\x7d\xb2\x7a\x37\xb9\xe7\xc7\x70\x3f\xed\xa0\x83\x76\x41\x63\x89\x75\x84\x8a\x35\x50\x08\xd6\x60\x82\xe8\xa6\x1e\x9c\x99\x3e\x7e\x66\x61\xe6\xc4\xcc\xf1\xb9\x06\x9f\x4c\x12\x4d\x72\xec\xe3\x29\x73\x49\xa1\x75\xa8\x42\xc8\x55\xd5\x04\xed\xfe\xf7\xfd\x02\xe0\x92\x81\xdb\x87\xfb\xf1\x32\xe0\x3c\xde\x0f\x28\x9d\x3c\x4e\xad\x3b\x8b\x3a\x38\x6e\xc1\x9d\x43\x83\x8f\xe0\x58\xa3\x66\x43\x6b\x4a\x71\x1e\xd0\x99\x4d\xd9\x52\xa6\x02\xa5\x8f\xf3\x62\x7c\x44\xca\x8f\xa7\xab\xc6\xf4\xe8\xa1\xd6\x8e\xc8\xd2\xe1\x65\x6c\x7c\x04\x6c\xcd\xbd\x7a\x1c\xa7\x11\x01\xe5\xac\xc3\xf6\x24\x71\x5b\x36\xfb\xcd\x44\x86\x8b\x5b\x40\x5d\x9e\xd2\x6f\x47\x87\xcc\x3f\xb7\xee\xde\x3c\xcf\xae\x4f\xe2\x55\xf9\xe8\x38\x73\x9f\xc3\xae\x83\x59\xee\x75\x1f\xf9\xba\xde\xe0\x67\x84\xae\x44\x28\xcc\xf5\xd2\x49\x84\x42\xcf\xd3\xde\xd2\x34\xe3\x9f\x1d\x89\x16\xd7\xfd\xb5\x82\xbc\xc4\xdf\x30\x8a\x14\x5c\x99\x69\xdf\x20\xb1\x35\xcc\x9d\x88\x76\xdd\x0f\x1a\x17\x83\xd1\xbc\x97\xf8\x57\x2c\x7f\x53\x63\xff\x2b\x60\x3b\x73\x40\xc9\x85\x7f\x1f\x44\xef\x08\xf0\xdf\xd0\x67\xf5\xb9\xee\x33\x3d\xce\xda\x7c\x39\x6e\x15\xde\x6d\x20\x46\x11\xcf\x60\x50\xee\x1a\x1c\x1e\x00\xfb\x2e\x33\x0a\x12\x35\x1e\xe6\xe5\x98\xfc\xb1\x45\x0f\x22\xc7\xcc\x0f\x50\xba\xdd\x46\x90\x30\x06\xa1\xc1\x70\x31\x91\xb6\x30\x98\x26\xd6\x4e\xba\xd3\xbd\xa2\x84\xf1\xf2\x56\xfe\x61\x76\x90\x4d\xac\x3b\xb7\x95\xc9\x98\x99\x9e\x53\x95\xb2\x84\xc1\xb0\x84\xad\xe8\x7e\x0c\x69\xa9\x8e\xb3\xea\x29\xec\xc3\x56\xdc\x34\xf4\x86\x30\x2c\x60\xc0\xdb\x69\xd3\xd0\x7c\x77\x6c\x10\xcb\x6e\xcf\x8d\x1f\xbe\x81\x3d\xfd\x4a\x11\x9f\xd8\x6e\xcd\x7f\x09\x9e\x6a\x64\xc1\x0c\x3f\x78\x7d\x74\xff\x90\xe7\x16\x5f\x88\x1e\x12\x2f\xab\x62\x47\x49\x15\xc1\x17\x4e\xcd\x73\xf2\x75\x63\x96\x21\x9a\x82\x35\x2c\xce\xc7\xed\x7e\xea\x3a\xf6\xa1\x80\xed\x6e\x8a\xbb\x7b\x69\x2b\x91\xe1\x7b\x82\x70\x54\xe9\xcc\xd1\x9b\x02\xfd\x0c\xaf\x8d\x67\x8f\x9f\xe6\x32\x6d\x66\xca\xcc\x9b\x9a\xe4\x4b\xf8\x13\x1e\xc2\xd5\xc4\x3b\x9a\xdf\xde\xad\x76\x5f\x61\x32\xfe\x2a\xeb\x0f\xc8\x5e\x50\x57\xf2\x90\xff\x3a\xa0\x3a\x57\x27\x44\x9e\x65\x65\xa1\xe3\xe1\xcc\x70\x22\xd7\x4a\x51\x39\xbe\x5f\x5f\x63\xbb\x08\x21\x19\xbe\xa2\xc6\xbe\xfd\xda\x40\x72\x35\x27\x11\x94\x6b\xc3\x95\x1f\x0e\x34\x18\x13\xc6\xc6\xba\xae\x9c\x24\xa0\xea\x57\xf4\x3e\xc7\x85\x19\x7b\x7e\x1c\x53\xd3\xea\x17\xb2\x9c\xf7\xf2\xc4\x9c\x87\x36\xb2\x9b\x31\x8a\x23\xa4\x2f\xc9\xe3\x6d\x12\x8e\xf9\x89\xeb\x28\xb2\x4b\xed\x56\xd2\xf5\x94\x62\xbc\x48\xb5\x2c\x36\xd8\xdf\xef\x66\x23\xbd\x3c\x09\xbf\xb0\x3b\xfa\xa3\xdd\xaa\xc6\xe5\x78\x95\xc4\x8f\xbe\xce\x36\xc1\x5a\x58\xe1\x38\x87\xab\x21\x62\x94\x38\x37\x77\x8a\x03\xa6\x71\xff\x22\xe0\xf3\xe4\xb1\x89\x89\x95\xac\x28\x8f\xa9\x63\x79\xa2\x2b\xca\x95\xc5\xb1\x06\x3f\x7e\x41\xc0\x6d\x36\xe1\x30\x16\x7b\x79\xb2\xa8\xba\x68\x1a\x32\xb4\xa3\x10\x39\xaa\x0a\x5b\xf4\x53\xcf\xb6\x11\xea\x28\xcc\x60\xe9\xfe\xfb\x7d\xbf\xc3\x58\xa0\xb6\x1a\x1d\x7c\x57\x94\x52\x90\xa7\x10\xe3\x8f\xb5\xb6\xa8\x5d\x8f\x70\x9d\x2e\x29\x6f\x3f\xc4\x11\xaa\x5e\x67\x1d\x67\xb9\x15\x18\x08\x38\xce\x17\x11\x3e\xab\x9f\x2f\x6a\x4f\x26\x95\xc5\xe3\xb4\xae\x73\xf3\xa9\x82\x04\x89\x8d\x35\x6d\xc9\x0b\x9e\x88\xbe\x44\x18\x60\x9c\x25\x30\xe8\x63\x0d\xdd\x75\x25\x5d\x40\x0b\xc7\xe8\xa4\x99\x59\x2e\x5a\x2d\x8c\xae\x65\xb3\x98\xd4\x3c\x35\xd7\x17\xe8
\x74\x5d\x54\x53\x97\xe0\xe7\x6a\x94\x0f\x1e\xba\xb5\x71\xa0\x71\xa0\x71\x70\x11\xef\x92\xa9\x64\xb5\x72\xe2\xe2\x7c\xdf\x4d\x58\x0d\x0a\x08\x38\x53\x78\x13\xce\x05\x9d\x2d\xcb\xae\x55\x20\x3e\x4a\x12\x18\xb8\x82\x36\x3a\x64\x35\x4c\x9d\xad\xe8\x2a\xe1\xa9\xc4\xdd\x0f\x62\xa8\x29\x92\x84\xae\x44\xdc\xe5\x3f\xdf\x6b\xae\x68\x32\x27\x3c\x91\xd4\xb9\x82\x68\x93\x25\xcc\x4e\xa3\x56\x94\x12\xc3\xe3\x3c\x6e\xc8\xc6\x38\x2c\x06\x29\x0a\x78\xa5\xec\xe5\x29\xef\x75\x11\x4f\x9b\xca\x35\x93\xb8\x11\x57\x11\x2e\x4d\xb3\xc8\x22\xf0\xd2\x45\x98\xe4\x46\xad\x60\xfa\x61\x39\x26\xa9\x18\x69\x37\x5e\xd4\x60\x6c\x12\x23\x58\xe0\xc6\x12\x7d\x6d\x94\x8f\xa8\x6d\xe2\x6b\x34\x56\x26\xed\x6b\xb7\xb4\xc6\xd6\x42\x5e\xf3\x14\x53\xe0\x63\x90\xa9\x7a\x57\x2f\xcc\xae\x76\xc4\x00\x35\x7d\xca\x45\xbe\x14\x97\xb9\xc8\x4d\x31\x24\x36\xcc\xd6\x6b\xfb\x69\xd3\xf4\xb2\xb2\x59\x51\x55\x7b\x91\xb3\x80\x3e\x57\x55\x59\x2c\xf2\x92\x28\xe2\x26\x40\xe5\x30\x09\x49\xa4\x7e\x38\xa6\xaa\x5f\xcb\xf2\xd6\x53\xa3\x8a\x5e\xd7\xe0\x27\x72\xb1\x8c\xb7\x58\xfb\xa3\x27\x36\x1a\x8d\x68\x0c\xfa\xfe\xdc\x9e\xcc\xfb\x6e\x3a\x80\xfd\xd1\x53\xe8\x57\x5f\x81\x1a\xe7\x98\x73\xdb\x13\xc9\x97\xf6\xb0\x3b\x7d\xc0\xa1\x8e\xa8\x68\xac\x1e\x22\x56\x2b\xad\x69\x69\x8f\xc0\x69\x59\xe6\x71\x93\x82\x22\xdf\xbb\x27\xfa\xc4\xc8\x86\xaf\x54\x52\x8e\x94\x19\x05\xaf\x00\x69\x97\x45\x51\xc3\x17\x1c\x13\x8f\x97\x99\xc3\x86\x02\xd1\xf9\x16\x82\x11\xa7\x4e\x04\x78\xda\x42\x6f\x52\x31\xae\x41\x7f\x00\xe9\x14\xcd\x15\xc4\x17\x91\x14\xd2\x28\x40\xa8\x97\xa2\x31\x31\x74\x78\x6a\xf6\x1c\x24\xb1\x97\x9d\x2c\xef\x8f\x01\x2b\x8b\x1c\x60\x9e\x58\x95\xb9\x58\x86\x8d\xb3\x2c\x29\x6b\x39\x04\x97\x23\x80\xb4\x99\x75\xba\x22\xb7\x2e\x0c\x2c\xbf\xc1\x71\x23\x61\xbf\x70\x1f\x2d\xf5\x62\xf4\x18\x54\xfb\x07\xd9\xfb\x56\xa5\xa1\xb4\xa4\x29\x30\x0a\x26\xe8\x0e\x5d\x94\xfe\x59\xe1\xe6\x01\x84\x9c\x95\x79\x47\x24\x6a\x99\xd7\x01\xe8\x44\x15\xda\x4b\x9f\xa8\x9b\xb5\x8a\x88\xeb\xa0\x12\x0e\xac\x21\x4a\xfe\x47\xd8\xd4\x88\xb8\x51\x8c\x3f\x53\xa9\x4f\x26\xa3\xf2\x1e\xa3\x69\x7b\x4a\xc9\x07\x77\xb0\x9f\xa8\xb1\x10\x4b\x98\xc4\x11\x02\x2b\x2b\x7c\x79\xed\x32\x38\x92\x1a\x26\xd8\xe5\xe9\x3d\x01\x09\x6d\xa3\x8f\x07\x83\x85\x6a\xfd\x90\xe6\x0e\x4d\x41\x3a\x0c\x69\x7a\x6c\xa8\xa1\xbf\xa2\x1c\xe7\x72\x2e\x13\xb9\x2a\x52\xcc\x0d\x3a\x8e\x02\x38\x17\x6b\x54\xdc\x7e\x3a\x8b\x00\x26\x0a\x08\x33\x99\x37\x65\x5a\x7a\x65\xc3\xc2\x1b\x1b\xe7\x45\xdc\x89\x13\x91\xeb\x39\xa7\x21\xa6\x2a\x35\x46\xbc\xdf\x95\x0d\xb6\xc4\xec\x08\x86\xe7\xa2\x93\x9e\x21\xbc\xfe\x95\xbf\xc5\x7f\xd9\x84\x8e\xb4\x2a\x70\x10\xdc\x5d\xec\xc4\x41\xbb\x51\x92\xa9\x97\x53\x5a\xc7\xe8\xa4\x1c\x3a\x51\x85\x6f\xfe\x5d\xc0\xbe\xde\x1b\xf8\x73\x65\xac\xb3\xdf\x87\x9f\x08\x74\xa0\xef\x2f\x07\xeb\xbd\xf4\x88\xcc\x91\xcb\x1b\xbd\xf1\xa4\xe8\xa0\x82\x81\x3a\x1c\x78\x41\xe1\x47\xb8\x7e\x72\x37\xfb\x86\x75\xd0\xc5\xc0\x9e\xf7\x9e\xdd\x11\xa7\x7f\x0f\x23\xcd\x7b\x4e\xb6\xb4\x0e\x53\xde\x4f\xed\x62\xcf\xd2\xc1\xb3\x73\xd1\x13\xe2\x61\x44\x6f\xf7\xa8\xaf\x5d\x3b\x6a\x8c\xed\x63\xdf\xb6\xae\x1d\xe5\x36\x6e\x3b\x82\xe1\x2a\x22\x18\x3e\xe6\xf2\xb9\xfd\xca\x55\xf2\xb9\x7d\xd7\x63\x94\xfa\xc1\x50\xb3\x6f\x07\x03\x5d\x5d\x30\xd0\xa5\x60\x6e\xf3\xd8\x86\x89\xb0\x6e\x62\x1b\x60\x1f\x9a\x60\x06\x92\x0e\x55\xd6\xed\x37\x8c\xb2\x6f\xdd\x08\x5c\x09\x89\x13\xc2\x7f\x19\x89\x7e\x32\xf0\x9f\x51\xdc\x8a\xb0\xc9\x84\x4d\xe4\x82\xe3\xbb\x35\x07\x78\xd6\x06\xe0\x73\x47\x76\x96\xf4\x96\xf5\x0c\x37\x74\xe0\xa7\x5a\x26\x76\x2a\x57\xeb\xa5\xc3\xfc\x46\xe9\xeb\xfc\xe6\xdc\x2f\xe2\x72\x80\x07\xe2\x83\x35\x96\x31\xa6\x2f\x08\x65\x2b\x14\xec\xf6\xad\xe3\x4a\xa1\xdc\x05\xf3\x71\x7
4\xd3\xb4\x17\xac\x20\xb8\x2d\xd8\x76\x99\xb5\xd9\xae\x35\x6c\x4b\xf8\xac\x0d\x08\x59\x87\xd7\x46\xbd\x88\xbe\xb5\x5a\x15\x15\xe9\xd7\x43\x16\xec\x15\xd4\x33\x87\x5f\x0e\xd6\xa3\x8d\x62\x5b\xcf\xe7\x6b\x1e\x8f\x6b\x3b\xc9\xd6\x28\xc1\x84\x71\x68\xa0\x4f\x12\xe3\x64\xc2\xf7\xd6\xa2\xa7\xb8\x0f\x68\x9d\x60\xa0\x47\x25\x82\xb5\xad\x03\x1d\x61\xf1\xea\x5c\x1a\x43\xb1\xde\x9f\x09\xd8\xa7\x35\x12\xfa\x93\x41\xf4\xdf\x02\x57\x7b\x00\x43\x05\x23\x26\xc9\xb7\x06\x3c\x34\x80\xc8\x88\x6e\x8e\xd0\x8c\x2c\x9b\x2b\x70\x6c\xdb\x97\xe1\x08\xf1\xf2\xd3\xae\xf4\x96\x1a\xcd\xac\xe3\x00\x31\x26\x8c\x75\x3a\xb1\x94\x64\x4b\x13\x1d\xa1\x6c\xa7\x89\xee\xf9\xe5\x09\x65\x10\xa9\x3d\x4b\x10\x1a\x55\x2e\xfc\xa7\xb1\x9c\xe1\xc5\x9e\x32\xfc\xd7\x64\x92\xd4\xd1\x54\xc0\x06\x12\xe5\x89\xf6\x56\x7a\x8a\xcd\x6b\x76\xb2\xfd\x6e\x22\x5f\x24\x52\x02\x0b\xc7\xe7\x67\x5b\x38\x35\x1f\x7e\x6e\x47\x74\xda\xfe\x59\x49\x05\x0b\x81\x2f\x00\x3c\x04\x2f\x01\xd7\x68\xe5\x81\x4b\x09\xcb\x86\xe5\x6f\x9d\xef\xdd\xc1\x2e\xd6\xd8\x0e\x30\xd7\xc3\x57\xd6\xa2\x2f\x07\x27\x33\xcd\x64\x65\x59\x8f\xd1\x9a\xa7\xd8\x50\x63\xc4\x80\xcb\xd0\x75\xcb\x39\x96\x4a\x4c\x6e\x01\x3c\x84\x7a\x9a\x34\xc8\x28\x81\x13\x1a\x4e\x86\x45\x95\x49\x81\x94\x0c\xfe\x95\x7a\xe9\xd2\x00\x81\x2f\x46\x53\x0b\x6a\x15\x2a\xc9\x44\x6b\x49\x24\x22\x6d\xca\xdc\xc9\x88\xc2\xdb\xbd\x44\xf3\xf9\x43\x3b\xa8\xf3\xe3\xca\x30\x4f\x64\xbb\xac\x10\x22\xae\x0b\x6b\xfa\x7c\xcd\x63\x7e\xf8\x83\x5a\xf4\xf1\xda\xbc\xf9\x7b\x98\x5a\x4b\x64\x10\xda\xab\x69\x84\x07\x0c\x97\xe6\xec\xca\x52\xe4\x29\x3b\x72\xe4\x70\x83\x9f\xd0\x69\x0c\xa1\x61\xda\x8f\x00\x5e\x2c\x48\xfe\xa8\x3e\xcc\xb3\x1e\xf4\xdb\x90\x7d\xcd\x9f\x99\x81\x11\xc1\x6c\x34\x49\x96\x5a\x12\x31\xfd\x13\xfa\x16\x80\x2a\x24\xc5\xd1\x01\xe6\x96\xc2\x3a\x77\xa3\x93\xc0\x12\x47\x64\x4d\xe8\xff\xd2\x57\x91\x3e\xe1\xdb\x78\xa5\x64\x9a\x3e\x98\x06\x07\x98\xa1\xce\x54\x4f\xe7\x3d\xe9\x93\x41\x99\x8f\xa8\x3f\x7e\xba\xf3\xeb\xd8\xf1\x2d\xe7\x22\x3b\xdd\x2b\xd7\x4d\x03\x15\xfe\xed\xde\xe8\x2b\xc1\x46\x6f\x54\xb6\x91\x9f\x7d\x1f\xd4\x8d\x96\xcd\x47\x65\x7c\x5b\xc8\x6f\x03\x88\x71\x8c\x0e\x02\xc1\xa7\xf5\x08\x22\x89\x73\x48\x03\xf8\xb4\xd2\x63\x10\x4d\x10\xa7\x90\xae\xfa\xe8\x38\xef\x26\x22\x4d\xf5\x30\xc8\x4e\xb6\x2a\x12\xfd\xeb\xed\x0d\x7e\x4e\x19\xd3\x1b\x64\xc2\x9a\x58\x3d\xc8\x37\xec\x99\xf6\x5f\x7a\xdb\xfc\xf7\xb6\x83\xb4\xb7\x83\xb4\x1f\x43\x13\xe7\xb7\x5d\x13\xe7\xd7\xae\x3a\x09\xd5\x0b\xaa\x49\xa8\x8c\x9d\x73\xc7\xa3\x65\xe7\x34\xd8\x0b\x6b\x6c\x37\x49\x86\x22\xfc\xa7\x20\x9a\xa1\xed\x58\xc9\x18\xa0\x5f\x31\xf0\x34\x43\xb6\xe5\x80\x8f\xd2\x96\xa6\xfc\x55\xd5\xb8\xe7\xd1\x3d\xec\xe4\x46\x91\xe8\x97\x23\x23\x1f\x89\xb8\xb5\x4b\xc1\x0b\xb7\x90\xe6\x6f\x31\xfc\x8e\xcb\x4b\xf3\xb7\x91\x84\x1b\x1a\x96\xfd\xa5\xeb\xd9\x37\x0d\x49\xf0\x37\x27\x01\x45\x31\x2f\xcb\xf0\x63\xd7\x47\x0f\xd8\x3f\xb9\x4c\x8b\x5e\xae\x2f\x69\x5d\x12\xdb\xb4\xa7\x4c\x17\x35\x73\xdd\x0c\xb2\x4c\xa9\x2f\x28\x8f\x1f\x69\xcd\xea\x8b\xb4\x4f\xd4\x1c\x80\xde\xf2\x84\xed\xcb\xae\xdb\x16\xb6\xdb\xc2\xf6\xb1\x13\xb6\xff\xad\xe6\x08\xdb\xf7\xd5\xae\x56\xd8\xfe\x6b\x40\x7a\xe5\x29\xb1\x24\x93\x02\x39\x0b\x9c\x9d\x04\x81\x1d\x48\x2b\x01\x31\xc3\xea\x6f\xc3\x2c\xa1\xd1\x80\x1a\xcc\x41\xac\x90\xb3\x59\x6b\x7f\x31\x66\xa3\xaa\x9c\xe2\x90\x03\x52\x4d\xd3\xbf\x83\x3c\x83\xff\xa8\xf9\x40\xbe\x18\x6c\x80\x28\x19\x94\x36\x40\xfc\xf1\x8b\x48\xfc\xe1\xa1\x76\xaa\x29\x6b\xd0\xe1\x51\x00\xa0\x85\xa8\xee\x73\xfd\xdc\x16\xf7\x88\x77\x58\xb5\xab\x2e\xd2\x56\x1d\xe1\xdd\xec\x37\x6a\x86\x3a\xe1\xfd\x35\x76\xf0\x72\x7a\x8e\xbc\x1f\xdf\x53\xb3\xc4\x1f\xaa\x2f\x1d\x65\x07\xe4\xb2\x89\x3c\xcd\xd9\x12\xd8\x
da\x2d\x8d\x26\x1f\xd2\x61\x88\x63\x86\xa8\x4c\xf2\x21\x65\x3d\x38\x4e\x01\x21\xb2\xd4\x27\xab\x3b\x4e\x5b\xd9\x1a\x7c\x0e\x18\xda\x59\x0d\x7b\x37\x14\x23\x04\x96\x9e\x93\xa2\x55\xcf\xd2\xa4\xff\x68\x8f\xe4\xa5\x60\x61\xf3\xe3\xf1\x60\x38\x31\x3c\x9b\x9e\x1d\x90\xaa\x3f\xef\x53\x7b\x87\x5f\x14\x4c\xe5\x59\x7a\x4f\xb6\x14\xbe\x77\x6f\x74\x8c\xfe\x5d\xe5\x73\x1f\x34\x76\x34\xfa\xaa\x99\x67\x29\xb0\x91\x78\xc7\xd9\xeb\xd8\xf6\x71\xb6\x7d\x9c\x3d\x76\xc7\xd9\x57\x65\x02\x5b\x97\x68\x6a\x7d\x58\x77\x75\x5b\xc3\xb1\xf2\xbe\x60\x7e\xab\xa7\x88\x30\x5b\x7a\x9c\xbc\x77\x7a\x69\x11\x75\xca\x23\x3f\xa9\xd5\x93\xe5\x63\x96\x68\xea\x03\x1b\x9d\xa9\x03\x5d\xc7\x73\xe5\xe5\xc1\x94\x43\xa0\xd4\x2b\xfc\x5e\x3e\xfa\xd2\xfd\xaa\xee\x6a\xa8\x6b\x55\xd9\xfe\x03\x01\x3b\xba\x01\xe5\x96\x9b\xec\x70\xae\x97\x48\x87\x6b\x2c\x5c\xf0\x24\xf7\x09\x36\xcd\xf6\x98\x08\xf7\xf0\xd6\xe8\xe6\x33\x2e\x2b\xad\xe6\x19\xa3\x0c\x1f\xed\x2c\x5f\xc7\x5f\xfd\xfe\x91\xa1\x97\x47\x44\xcf\xad\xb1\xcb\x3f\x34\x12\xdd\xe9\x3f\x32\xe0\xe1\x04\x35\x47\xd4\x05\x21\x1e\xd5\x82\x53\x86\x83\x39\x3e\x52\x63\x4f\x67\x3b\x12\x35\x31\xe1\xc9\xe8\x8e\x53\xea\x1f\x4a\x64\x6a\x4a\x70\xf8\x85\xc8\xf7\xd1\x84\x83\x52\x8b\x81\x52\xbd\x8e\xcc\xb2\xd1\x3c\x4b\xa4\x2a\x71\x2e\xd3\x18\x55\xc3\x32\x9e\x69\xc2\xff\xcb\x2b\x11\x48\xa1\x4e\x46\x77\x00\x29\x94\x57\x22\x80\x58\xae\xa0\xc4\x5e\x21\x73\x55\xe2\xb9\x42\xd3\x2f\xe9\x12\xe1\x52\xe3\x72\x4b\x7c\xf3\xde\x2b\xc7\x1a\x57\xd8\x65\xa7\xcd\x66\x0d\x7f\x8b\x45\xaf\x09\xd6\xfb\xd5\xa7\xd7\xf7\xf9\x72\x2d\xaa\x47\x5e\xe8\x66\x4e\xee\x0a\xcb\x6f\xda\xe0\x7c\xa6\x24\x1a\x9b\xd3\xe7\xe6\x17\x88\x7b\x1c\xc2\x56\xe1\x56\x89\xdf\xd9\x30\xcc\x78\x77\x35\xe8\x0f\xd8\x63\x77\x0d\x25\xa9\x7c\x78\x37\x7b\xb7\x15\x3c\x6f\x7b\xa4\x98\x86\xed\x00\x90\xb8\x9a\x1c\x4a\xbd\x2b\x9a\x65\x4f\x27\xf0\x37\xa9\x00\xd6\x29\x68\x5b\xf7\xda\xd6\xbd\xbe\x4a\xc8\x35\xdf\xaa\x75\x9e\x1f\x0b\x58\xf3\x91\xde\x7f\x4a\x53\xba\xbd\x40\xfb\x5b\x5f\xf5\xe8\xd8\x69\x90\xa2\x6b\xc2\x24\xb8\x92\x4e\xd8\x1a\x92\x9d\x5d\x0a\xf2\xcd\x0f\xf6\xb3\xe1\x69\xc7\x6c\x73\x1b\x56\xf1\x66\xae\xbb\xb7\x2b\x07\xff\x67\x77\x7b\x79\x4d\x80\x7d\x68\x55\x8d\x2a\xc4\x69\xe1\x4a\x9a\xb2\x34\xcf\xbf\xb0\x3b\x9a\x1a\x7c\x3c\x60\xee\x91\xbe\x64\x29\x02\x71\xfb\xeb\xa5\xe9\x1b\x7b\x7f\xbe\x8b\x7d\xa2\xc6\x76\x40\x2c\x6c\xf8\x91\x5a\xf4\x4b\x35\x0c\x8b\x75\x05\x98\x21\xfb\x31\x94\xde\x90\x8e\xc6\x40\x4e\x35\x0f\x11\x5d\x76\x8e\x73\xd1\x6c\x66\x79\x8b\x30\xc7\x86\x7a\x87\x82\x36\x20\x93\x40\x2a\x96\xf5\xc6\x33\xed\xe2\x93\x3c\x8d\x75\x0a\x0d\x5b\xbf\x48\x79\x0f\xd9\x19\x29\xd9\xbf\xda\x28\x1d\xcc\x6c\x54\x48\x08\x60\x2d\x7a\x1d\x47\xa0\x81\x9c\xea\x42\x3a\xb8\x15\x48\xad\xe4\x7c\xcc\x45\x01\x41\xb6\xfd\x06\x3f\x91\xe5\x08\x9c\x2d\xe3\xa5\x38\x89\xcb\x3e\xb1\x4e\x16\x18\x86\xdb\x37\x61\x06\x9a\xde\x23\x2a\xf3\x9e\x8c\xfc\x2b\xd0\x74\xd9\xb4\xbf\x92\xee\xee\x53\x3a\xd2\x24\x5d\x0e\xff\xef\x5a\xf4\x2b\x35\xfa\x03\x98\x10\x5a\x88\x2f\x48\x70\xf4\x54\x65\xf2\x02\xdc\x32\x22\x27\x73\xa9\x79\xb2\x7c\x1e\x91\xb2\x92\x99\xc9\x9c\x27\xb6\x09\xe8\x46\xb1\x53\xef\x21\x6a\x35\xed\x16\xe5\x5a\xa3\x8a\xad\xc4\xc2\xd4\x37\x4e\x0d\x88\xde\x89\x93\xf1\xc1\x41\x6e\xe9\xc0\x0b\xdc\x50\xaa\x24\x53\x2b\xb5\x02\xaf\x97\x35\xd1\x0a\xf1\x02\x9a\xab\x68\xbd\x90\xe7\x93\xb8\x69\xd0\x38\x71\xba\x6c\x39\x54\x35\x4f\xe0\xb2\x9a\x74\x6f\x6c\x1f\xae\xb1\xbd\x4e\x33\xc3\x37\xd4\xa2\x57\xd4\xdc\x91\xd9\x6c\xf5\x7a\x7d\xdc\xea\xb2\x9b\xba\xac\x85\x56\x66\xbc\x23\x81\x6a\x9e\x5c\x9e\x6e\xfd\x4a\xf4\x7b\x6d\x78\x84\x07\xec\x15\x3e\x00\x65\xfd\x04\x81\x0b\xa7\xe6\xc3\xcf\x3c\xa2\x00\x94\x17\x6d\x03\x50\x36\x04\
xa0\xfc\x85\x0f\x40\x01\x7a\xb6\x2b\x04\xa0\xcc\xcf\x9f\x72\x01\x28\x5b\xc2\x9e\xa8\x6f\xfe\xff\x80\x3d\x79\xeb\x1e\xef\xe8\xb5\xcc\xb2\x18\x05\x1c\x67\x40\xdf\x68\xb2\x98\x5f\xda\x1d\x1d\x1d\x78\x3a\x0c\x91\xdd\xa5\x97\xb6\xf3\x98\x3f\x8a\x79\xcc\x6f\x1a\x0a\x6f\xf7\xe6\xcb\x4f\x64\x7e\x1b\x3b\xca\x8e\xac\x9f\xc8\x7c\xfd\xc5\xb0\x6d\x5c\x6c\xe7\x31\xa7\x3c\xe6\x2b\x9b\x1b\x0c\xc7\xc3\x29\x63\x30\x38\x8b\xaa\x62\x2d\x0c\xc8\x95\xaa\x99\xf0\x3d\x21\xfb\xe6\xa1\xac\xb3\x10\xc7\x3a\x9b\xe5\x65\xf8\xe7\x8f\x8f\x0e\x3b\x7f\xdb\x00\x70\x17\x96\xab\xf4\x41\x7c\x67\x5f\x01\x70\xc4\xc6\xc5\x60\x54\xfd\xbf\x27\x9b\x7e\xe4\xf1\xec\x39\x6c\x77\x37\xcf\xca\xac\x99\x25\xe1\x77\x44\x4f\x5f\xc0\x0c\xaf\xfa\x91\x8d\x57\x86\x32\xf8\x3c\x52\x61\x16\x3c\x5a\x98\x9a\x8d\xc6\x79\x74\x6e\x5a\xfd\x9f\x92\x1b\xd1\xfc\xd4\xc2\x6c\xe4\xd1\xc2\x2c\x4c\xcd\x7a\x82\xf8\x33\xa3\x8c\x61\xbc\x0e\x74\xe4\x13\xa3\x5b\xb5\x3a\x7b\x65\x9c\x34\x94\xd2\x5b\xe6\x8d\x99\xb4\x3c\x9b\xcf\x43\x81\xd1\xab\x46\xcf\x10\xa2\x23\xf7\x8e\x49\xa4\xeb\xcc\x00\xcf\x57\x98\x78\x71\x88\x6e\xc2\xfa\x9d\x5b\x44\xca\x97\xc3\xa9\x24\x1d\x2a\x4a\xba\x03\x92\x16\x1c\x54\x85\x1d\xbd\xe5\x96\xc3\xb7\x60\x9a\x71\x4b\x57\x92\xf2\x99\xc9\x33\x93\x0f\xce\xdf\x37\x05\xd4\x81\x1e\xd5\x8f\x96\x14\x90\x92\xc1\xf0\x8b\x65\xd9\x79\xd9\xe2\xbd\x2e\x06\x18\xa9\x56\xb7\xb0\xbd\x5a\x5b\xc1\x80\x26\x4c\xe7\xee\x70\xc5\xaa\x61\x1f\x64\x12\xf2\xe1\xf5\xde\x11\xb9\x4f\x7d\xb2\x4f\x47\x3e\xd3\x19\xb9\x5f\xa4\x64\x92\x94\x7d\xde\x11\xdd\x31\x4f\x1f\x55\x05\x43\x52\x25\x3c\x4c\x75\x5e\x67\x3c\xd8\x29\xe6\x74\x66\xf6\xc9\x67\xb2\x94\x0e\x0b\x6b\x73\x64\x9d\xb8\x2c\x91\x59\x5d\x99\x1f\x5e\x12\x0b\xb7\x25\x97\x9f\x2b\x8d\x1a\x51\xb7\x50\x6a\xfd\x6c\xe2\x89\xb8\x78\x80\xd1\x90\x9e\xb1\x5f\x1e\x61\x7b\x45\xb7\x3b\xab\x17\xf5\xcf\x8c\x44\x6f\x1c\x59\x58\x21\xdf\x34\xdd\xab\xac\xb7\xbe\x9d\xb1\xc0\x0c\xad\x85\x8d\x78\x77\x32\xe6\xa3\x97\x16\xe9\x25\x1b\xfc\x5c\x5a\x37\x79\x6e\x6d\x4e\x6f\x25\xea\xe1\x1e\x5c\x55\xa1\x16\x89\x2d\x49\x87\xaf\xe3\xcb\xfb\x05\x64\xe4\xe1\x73\x27\xa6\xea\x47\x0f\x1f\xbe\x05\x03\x34\xcb\xb2\x7b\x6c\x62\x62\x6d\x6d\xad\x11\x8b\x54\x34\xb2\x7c\x79\x42\x14\x45\xbc\x0c\x5c\x3b\x66\x54\x20\x00\xaf\x18\x6b\xf0\x33\x59\x5a\x37\xe5\xeb\xde\x15\x6e\xec\x7f\xa5\x89\x9a\x7e\xaf\xd3\x07\xc3\x38\xed\x03\x68\xbe\xd3\xaf\x63\xee\xad\xba\x2e\xc3\xe1\x7f\xe7\xca\x7c\xa0\xd1\xd1\x16\xe4\x72\x4f\xe4\x2d\xbb\x9b\x48\x2e\x4d\xda\xf1\xf7\x2c\x15\x62\xfa\x30\xb4\xf1\x3a\x0b\xa7\x2b\x24\x7e\xae\x46\x21\x02\x6f\xad\x45\xaf\xaf\x2d\x78\x0a\x30\xcd\x93\xcb\x77\x60\xf6\x2f\x66\xb3\x34\x34\x42\x0e\x6d\xfa\x24\x70\x46\xe6\x65\x61\x59\x87\x74\x8a\xae\xae\x6c\xe2\x37\x10\x10\x4b\x6c\xea\x04\xef\x07\xb2\x20\x65\x0c\xc7\x2d\x8c\xd1\x77\xcf\xe5\x82\x0c\x6b\x2a\x68\x1c\x1b\x57\xb1\x45\xf6\xa9\x92\xf6\xf9\x89\x7f\xb5\x69\x37\x0b\xeb\x4d\x93\x38\x2a\xcb\x21\xd3\x71\x36\xae\x70\x8f\x2d\x8d\x50\x96\xea\xfc\x9d\x3a\xc3\x97\x1d\xb4\x17\xec\x60\xbb\xd3\xac\x85\x07\xc4\x3f\x8e\xea\xf0\xc9\x4f\x8f\x2e\x98\xac\xd9\x29\x06\x2a\x03\x6f\x62\x96\x1a\x6a\x48\x5b\x20\x90\xed\x91\xd7\x1e\x89\x94\xe8\xd2\xe3\x0c\x15\xac\xf6\xf6\xa9\x4c\xb4\xee\x26\x93\xa8\xc1\xf9\xb9\xa2\x27\x92\x04\x2c\xc2\x78\x39\x1d\xc4\x66\x10\xd1\x2b\xc5\xd4\x3a\xd2\x2a\x4e\xeb\x20\x5a\x51\x8c\x28\x51\x16\xa7\xb0\x4c\xe3\x2a\xab\x97\x61\xe0\x44\xf4\xb5\x46\x52\xe2\x5b\x6d\x11\x27\x43\xd3\xfa\x0b\xbd\x50\x28\xa8\xda\x30\xbf\x69\x8e\xdb\x79\x4d\x21\xa1\x29\xce\xc0\xd2\xb1\xac\xdd\x5a\x1e\xba\x09\x36\x25\x25\x92\xc5\x1c\x5d\xba\x04\x1c\xc9\x56\x26\x51\x24\xa7\x98\x20\x62\x9c\x5e\xf5\x5b\xea\xc8\x17\xc3\xb8\x19\x77\x75\xe1\x26\xe7\x83\x2d
\x1c\x22\xae\x79\x92\xa5\xcb\x32\xd7\x45\x53\x30\x39\x60\xd2\x61\x5d\xaa\x79\x02\xcd\xd1\xcc\x54\x99\xf1\x29\x2d\xae\xc7\xae\xad\xc8\x55\xb5\xd5\xd5\x2a\x42\x6d\xc2\x09\x70\xbd\x97\x81\x86\x11\x4e\xe9\xe5\xd7\x30\xab\xcf\x12\xec\x38\x57\x43\x36\x21\xad\x5e\xd0\x4e\x61\x1f\xd9\xcd\x1a\x5b\x8a\xa3\xb1\x86\xdb\x0f\xed\x8e\x0e\x57\x1f\x0e\xb3\xdb\xdc\x77\xd6\xb3\xda\xfe\x70\xe7\xb6\xd5\x76\xb5\x56\xdb\x05\x6d\xb5\x65\xd1\x4d\x33\xc3\xac\xb6\xca\x44\xb8\x56\xdb\x9d\xec\x18\xbb\x6d\x5d\xab\x6d\x93\xd5\xb0\x6d\xb9\x5d\x85\xe5\xb6\xe4\x18\x6e\xf7\x5d\x9d\xdd\xf6\x75\xeb\xc4\x2b\x5f\x0a\x92\xcd\x0d\xaa\x99\xf0\x69\xc6\xa0\x72\xe6\xbb\x62\x50\x55\xf7\xfb\x50\x00\xf9\x0b\x47\xd9\x81\xf5\x49\xc2\x4e\xcc\x0f\xa5\x6b\xfb\xcd\x91\xe8\x81\xe1\x3f\x6d\xc2\xd5\xb6\x15\x9e\x36\x4f\xd6\xfc\x45\x8d\xfd\xb3\x25\x68\xfb\x62\x10\xbd\x75\x73\x82\xb6\x76\x61\xe2\x1f\xb7\xc4\xca\xa6\xdf\xe7\xff\xde\xa8\xd9\x56\x88\x9a\x6d\x31\x9a\xdf\x22\x35\xdb\x9a\x7a\x42\x33\x63\xa9\xeb\x4d\xa2\x6b\x37\x19\x8b\xa7\x23\xbd\x73\x0f\xdb\xe7\xb4\x27\x5f\x12\x4d\xd3\x1a\x3a\x2c\xe7\xb2\x44\xc2\x51\xf2\xe2\x3d\xd1\x9b\x82\xca\xc3\x61\x47\x89\xf3\x4a\x31\x2c\x24\xec\x56\xf5\x8f\xb6\x58\x45\xc8\x16\x54\x39\x2c\x5d\xdd\xc4\xea\x41\xaf\x28\x94\xe9\x94\xe2\x4a\x9f\xfe\x4b\x94\x55\x4f\x17\x7e\xe8\xd0\xf0\xa3\xeb\x13\xdb\x47\xd7\x55\x1f\x5d\x89\x3e\xba\x9a\xd1\xb7\x98\xa3\xcb\x5e\x59\xb8\x73\xe5\xee\x80\x5b\xd8\xe1\x0d\x00\xd5\xeb\xad\xb8\xed\xe3\xea\x2a\x8e\xab\xb6\x73\x5c\x3d\xf3\xea\x8e\xab\x6f\xd8\x00\x3c\x7a\x29\x28\x36\x3f\xb2\x66\xc3\x33\xe6\xc8\x5a\x77\xaf\x5b\xe4\x80\x2f\x5e\x86\x9e\x5c\x5f\x1c\x61\xff\x79\x88\x3b\xf0\x44\x02\x59\x18\x90\x11\x98\xa8\xb1\x3e\x38\x12\x7d\x28\x98\x73\xa1\x59\xf4\x96\xce\xf8\xaf\xf9\xfc\xc9\x44\xd3\xbf\x8a\x65\x48\x94\x70\x36\xa5\xd3\xcb\x61\x7b\x50\x3d\x2f\xe8\x32\x4a\xfd\x46\x7f\x9f\x3b\x37\x33\x5d\xe1\x6f\xaa\xd4\x55\xf0\x56\x86\x16\x19\xa5\xce\xc9\xd6\x52\x99\x17\x2b\x71\x97\x42\x40\x80\x30\x38\xcb\x2d\x3e\x4f\x82\x4b\x65\x80\xfd\xe1\x8b\x01\x7b\x69\xc0\xf6\x3a\x0d\x09\xfb\x51\xe2\x66\x50\xa3\x9f\x78\x51\x82\xdb\x4a\xd8\x39\xe3\xf5\xbb\xc8\x75\x90\x7a\x6f\xaa\x03\x4c\x37\xd7\xf6\x42\x1b\xfa\x58\x46\xcb\x48\x72\x77\xad\x3d\x68\x5a\xa2\x86\x20\x9c\x8d\xa6\x60\x28\xfc\x96\x58\x87\x09\xb9\x13\x2c\x47\x1b\x02\x5a\x75\xdd\xf4\xbe\x5b\xc1\xfb\xae\x67\x4f\x1c\xc6\x54\xab\xd6\x45\x51\xca\xb4\xc4\x19\x0f\x5f\x7d\x7d\xf4\xce\xa0\xfa\x94\xef\x9f\xbd\x6f\x4c\x0b\x8e\x0c\x08\x90\x9c\x4c\xed\xd9\x6a\x4c\x72\x1e\xef\xe0\x44\xab\x13\xa7\x18\xa3\x97\xe5\x0d\x3e\x83\xc7\x5b\x2a\x92\x6c\x39\xa3\x84\xa0\xe0\x2b\xb8\x7c\xe3\x11\xeb\x9e\xe8\x9a\xe6\xd5\x69\x51\x78\x33\xfb\x8f\xec\xab\x13\xfc\xfd\xb2\x1a\x01\xa1\x9e\x5f\xdb\x02\x19\x7e\x75\x0e\x01\xda\xf4\x1b\x7e\x6c\x91\x18\x8c\x2c\x02\x62\x2a\xfa\x50\x6f\x6f\xb5\xc5\x2c\xcf\x38\xb1\x20\xf2\xd9\xcd\x26\xfe\x9a\xcd\xee\x13\x07\x1f\xb1\x57\xdb\x50\xa3\x97\xd6\xd6\x4f\x88\xba\xfe\x70\x20\xd2\xf2\xe3\x01\x05\x1c\xad\x03\x7f\x72\x2e\x3e\x26\x08\x94\x69\x68\xb8\xaa\xe3\x74\xc5\x81\x44\xd7\x6e\x54\xb6\xb5\xb3\x6d\xd4\xe7\x63\xa5\x2f\x5d\x0a\x9e\xb1\xb9\x1e\x73\x4b\x78\xd8\xe8\x31\x51\x64\x2f\x2f\x2b\xbb\xb3\x7a\x77\xf9\xc5\x9d\xc3\x43\x09\x64\xb3\x99\x75\xba\xb3\x79\xd6\x8e\x13\x19\xfe\xfa\xce\xa8\xed\x3f\x72\x24\x5d\x37\x6b\x4d\xb8\x49\x51\x0a\x7c\x51\x9d\x5f\xf0\x26\x21\x74\x0a\x87\x95\xca\xfc\x44\x1c\x7f\x96\x93\x9b\xf2\x6e\xfb\xc6\xd1\x28\x7b\x43\x8d\x70\xfd\xaf\xad\x45\x2f\xa9\xa1\x93\xdb\x49\x51\x1e\x37\x57\xa0\xbf\x90\xf2\xa0\x52\xbd\x71\x27\x63\x98\x03\x30\xfd\xc7\xd6\xe2\x17\xb9\x3c\xc6\xd8\x29\xcd\xc1\xcb\xeb\xaa\x47\x5e\x1f\x5b\x88\x9d\x81\x27\xa4\x96\x80\x43\xde\xea\x2
1\x48\xb4\x3e\x87\xa9\x2e\xf4\x5d\x6a\xdd\x8f\x00\x30\xe9\x09\x35\x9d\x96\x19\x83\x4a\x31\xe7\x52\x08\x08\x54\xf5\xd6\x95\x25\x39\xf8\x9e\xee\x89\xbb\xbd\xde\x51\x63\x8f\x33\x44\xc2\x7a\xda\x2e\xd6\xa2\x97\xd7\xaa\x4f\x5d\x0c\xdd\x15\xf6\x14\x5c\xc4\xf4\xa5\xbe\xbd\x51\xba\x97\x4d\xde\xe1\x7e\x5d\x66\x90\x38\xa4\x01\x1e\x08\xbc\xe8\x69\xc9\xa2\x29\x53\x40\xa0\x76\x45\xb9\x32\x0e\xba\x64\x09\x18\x55\x14\x6e\x6a\xad\x27\xb2\xc4\xdb\x54\x5d\x68\x75\x66\x35\xe7\x35\x95\x0c\x5a\x30\x21\x29\xe3\xb6\xb9\x09\x89\xcc\xdc\x46\x0d\x6f\x53\xe5\xec\x6b\xbc\x4d\xd5\x4b\xc1\x87\xf4\xac\xf0\x81\x3a\x24\x1a\xcd\x63\x80\x32\x65\xf9\x31\x28\x8b\x71\xbc\x04\x28\xea\x65\x56\x77\x7e\x97\x77\xf7\x8f\x31\xce\x39\xaf\x0e\x34\x25\xb2\x77\x9e\x30\xf6\xca\x3d\x9e\xb2\x08\xca\xda\x2a\x24\x20\x50\x6b\xc3\xba\xc5\xff\x6a\x77\x74\xb8\xfa\xd0\x37\x67\xdd\x5f\x49\x26\xae\xe3\x16\xff\xc5\x6d\x30\xd3\x55\x9f\x5e\xe7\xb5\x6f\x61\x29\x8a\x86\xf8\x16\xf4\x50\xd1\x34\x6c\x3d\x4f\xcc\xb0\x05\xb0\x7d\x54\x6e\x73\x77\x6a\x10\x93\xd8\xfc\xe0\xbf\x2b\xbc\xb3\xae\xc3\xd3\x2b\x12\xa3\x72\xd8\x73\xe3\x9a\x57\xab\x8e\x3c\x1b\xec\xf5\xe3\x9e\x48\x22\xb5\x18\x62\x39\xe6\x67\xa6\xf3\x78\x55\xe6\x10\xca\xf8\xe5\x27\x45\x87\xbc\x27\xc6\xed\x3b\x68\xe9\x98\xf7\x7c\xcf\xc0\xcf\x3e\x89\xfd\x61\x8d\x5d\xdf\xc6\xb4\x1c\x3a\x9d\x5e\x2d\x7a\x5f\x6d\x9a\x24\x10\xe5\x96\xeb\xa5\x2d\x99\x27\x7d\x58\xed\x68\x29\x15\x1a\xc4\x64\x6e\x50\xad\x7b\x42\x09\x0d\x4d\xe1\x6e\xe3\x6f\xe9\x43\x8f\x88\x9b\xdc\x29\xca\x6c\x70\xe0\xf1\xba\x03\xda\x41\x8d\x0d\xd3\xc0\x65\xc0\x0c\xb4\x10\xc2\x2d\x12\x22\x54\x2c\xaa\x10\x1c\xc8\xb2\x5f\x87\x28\x48\x22\x9c\x2f\xf0\x40\x5a\xc9\x10\x9b\xb3\xd4\xe7\x3a\x13\x01\xb8\xc7\x11\x51\x81\xa1\x66\xf3\x33\xa8\x9c\xf9\xf5\x7b\x70\x71\xc6\x2a\x90\x9f\x4e\xa7\x07\x04\xfb\x3e\x6a\x76\x2f\xbb\xa1\x9b\xb5\x66\xd2\x76\x76\x36\x85\x34\x72\xe1\x0f\xed\x8d\x5e\xb2\x17\xf3\x47\xdb\xb4\xde\xfe\x3b\x1e\x1c\x3f\x2e\x54\x7b\xf4\xe8\xb5\x60\x16\xed\x85\xbb\x33\x10\xc8\x9e\x6e\xd1\x6b\xfb\xe1\x16\xa3\x9b\xb5\xce\x88\x0e\x56\x71\x8e\x72\x44\x37\xc6\x78\xab\x97\x9b\xf1\x77\x99\x95\xb8\x6d\x19\xe5\xda\xae\x16\x4b\x0e\x75\xd4\x31\x44\x41\x88\x67\x28\x68\x48\x3a\x33\x50\x4d\x54\xfb\xa9\xe1\x1a\x09\x50\x54\xbb\x2c\x0a\xde\x15\x39\x32\x58\xe0\xab\x2d\xd9\x4d\xb2\xbe\xc9\x75\x8b\x03\x75\x2f\xea\x21\xd8\x0a\x20\xfe\xaf\xb6\x4f\x14\x1c\x27\xcf\xe4\x92\x4f\xf5\xa4\xe2\xd5\x7e\x6f\x29\x89\x8b\x15\x7c\x67\xff\x18\xa6\x54\x18\x68\x27\xa6\x21\xef\x66\x29\x52\xba\xaa\x35\xd7\x15\x39\xf0\xb3\x13\x8c\x3a\x26\x98\x41\x59\xc9\x18\x43\x63\x12\x0f\xb6\x04\x2b\x41\x48\x94\xfa\xd2\xfc\xa8\x5a\xa9\xb5\x62\xfd\x79\x7b\x60\x4d\x78\x99\xe0\x69\xb9\xa3\x18\x84\x3c\x11\xcb\x79\xb6\x36\xce\x97\x7a\x78\x52\x20\x50\xc9\x43\x81\x34\x78\xd4\x2c\xac\x3c\x21\xd9\xd8\xcd\x5a\x10\x24\x1a\x1d\x83\x44\x7e\xe0\x15\xdc\xe8\x3d\x08\x8e\x76\x5e\xc6\x60\xe9\xf5\xbe\xe8\xc5\xad\xe8\x18\x9d\x93\xfb\xd5\x83\x73\x33\xd3\x63\x43\xdf\x36\x69\xa2\xa2\x63\x3a\x70\x28\xf6\xe4\x46\x8c\x29\xca\x4c\x36\xa9\x38\x4d\xe2\x54\xff\x0a\xfa\xde\x46\xff\x73\xd3\x88\xd9\xfd\x8d\x5e\x5f\x17\x1e\x13\xc1\xb2\x8d\x18\xdb\xb8\x8d\x78\xfe\xa7\x72\xcd\x08\x85\x38\x75\xb1\x6d\x07\x1b\x07\x8f\x6a\x7f\x20\x48\x9d\xdc\xbd\xf9\xc3\x35\xa6\xed\x24\xed\xde\x85\x34\xa1\x00\xfc\xb7\x36\x62\x84\xa8\xd0\xe3\xb6\x62\x6c\xb8\x49\x25\x76\x1a\xbc\x8b\x67\x21\xc5\x81\x2e\xb6\x29\x52\x9e\x48\xb1\x2a\xcd\xde\x50\xea\x33\xe2\xc5\x44\xda\x9a\xc8\x72\x02\x28\x3a\x28\x9d\x06\x9f\x2c\x2a\x1d\xb8\x05\xe0\x38\xe9\xbe\x6a\xee\x76\xf5\xfa\xb8\x57\x19\xf4\xd0\x78\xa9\x53\xc9\x3b\xca\xcc\x00\x34\x0e\x6e\x62\x94\x11\x88\x96\x33\x69\x37\x34\xdd\x9b\xdd\x
e7\x4a\x96\x43\xbc\x82\xb1\x21\xa1\x20\x82\xc9\x79\xf9\x3b\x20\xc3\x8c\xe0\x94\x81\x19\x72\x90\xda\x84\x1a\xc6\x99\x8c\x47\xde\x46\xa2\xda\x89\xf9\xf9\xca\x08\x7b\xbc\x96\xab\x73\xb2\x8b\x62\x22\xfc\xfc\x48\xf4\x07\x23\x73\xd5\xc7\x95\x78\x65\x47\x70\x60\xd4\xe4\xe2\x80\xa4\x59\xa4\x13\xaf\x2b\xf3\x38\x53\xdf\x26\x49\x1f\x64\x8f\x0e\x09\x6c\x27\xc8\x95\xd9\xb7\x44\xd2\x44\x98\x49\x02\x4c\x5f\x3d\x68\x17\x9c\xd3\xa9\x96\x13\x91\x83\x72\x97\xb1\x33\x59\x29\x8f\xf1\xc9\x36\x0c\xb5\x4e\x3f\xdb\xee\xa9\x8d\x13\x97\xb1\x48\x06\x85\x21\x34\x67\x5c\x69\x97\x85\x52\x2c\xd3\xd2\xa6\x9b\x19\x7c\x97\x0c\x0b\x98\x7a\x54\x38\x8d\x79\x6d\x52\xe7\x9a\xcd\xdb\xe0\x67\xe4\x1a\x1d\x36\x84\xfc\x73\x8f\x91\x42\xca\x14\x77\xe6\x00\xed\xb2\x9e\x3c\x07\x3a\x49\x1b\xce\xdc\xb3\xd8\xcc\x1a\x26\x73\xe1\xd4\xfc\x8c\x06\x51\x36\x9b\xaa\xd6\x85\xec\xbc\xb4\xf1\x5e\x10\xdc\x06\xfb\xc1\x5f\x01\xdf\xbd\x93\xdd\x48\x5b\x7e\x4a\x74\x45\x33\x2e\xfb\xe1\x17\x77\x44\x7f\xbc\xa3\x7a\x5c\x57\x5e\x1a\x0c\x9f\x93\x43\xce\x6c\x5c\x19\x6a\x4b\x5a\x68\x3b\x40\x1b\xe8\xd2\x84\xae\xec\xf1\xd2\xa1\xa9\x8b\x36\x05\x0e\x9c\x8a\x38\x84\x98\xc2\x51\x8d\x9e\x41\xd5\xa9\xde\x57\x1a\x48\x86\x10\x61\x90\x6d\xb3\xcd\xb1\x45\xa9\x7e\x9a\x2b\xb2\x79\xbe\x1a\x51\x17\x77\x3a\xb2\x15\x8b\x52\x26\x7d\x77\x53\x23\xc6\x8e\xf6\x98\xb2\x35\xa0\xa9\x4d\x01\x4a\x83\xf6\x98\xab\x97\x94\x98\xd4\x57\x5a\xd0\x00\xc8\x7a\xb9\x14\xa3\xbf\x81\x4e\xf2\x5e\xa1\xb4\xcc\x32\x4e\xd6\xe9\xec\x8a\x52\x10\x70\x05\x2a\xe3\x35\x03\x15\x34\x86\x6d\xbc\x7e\x87\x1b\x8c\x4d\x26\x60\xd0\x96\xf1\xaa\x4c\xfa\xe3\x6e\xe1\xd4\x4b\x23\xa1\x0c\x72\x83\x42\xad\x52\x35\xe5\x59\x8e\x1b\x0a\x55\xc8\x52\x7f\xd4\x4e\xe2\x6e\x57\xb6\xa0\x27\x39\x0e\xca\xc0\xcc\xb9\x4a\x81\x6a\xfd\x92\x5a\xe5\xa6\x0b\x1b\x48\xa5\xc1\x25\x0f\xef\x6c\xb6\xe0\x2b\x23\x30\x64\xb1\x83\xfe\xd1\xda\x30\xe3\xec\x5f\xee\x64\xd7\x97\x6a\xb3\xcc\x91\x6d\x19\x7e\x62\x67\xf4\xf9\x1d\x0b\xee\xa3\xf5\x85\x5f\x2a\x65\x0b\x96\x78\xb1\xcf\x00\x62\x05\xee\x40\x0e\xa5\x16\x14\xab\x0b\xa2\xc0\xb1\x26\xe0\xa6\x21\x83\x0b\x4f\xa9\xe4\x94\xc8\xfb\xdc\xa7\x15\x6f\x0c\x51\xfd\x00\xe9\x4f\xc5\xa6\x97\xa1\xf5\xad\xa3\xf3\x91\x48\x53\x7a\x9e\x74\xb5\x3c\x49\xbc\x1a\x03\x7a\x9b\xaa\xe8\xd8\x50\x6d\xa6\xf0\xa4\x4f\x03\x1b\x19\x1d\xe3\xff\x85\x71\x1e\xdd\x29\x7a\xad\x58\xa6\x4d\x79\x17\x3d\xe1\x3c\x82\x37\xa2\x63\xfc\x4e\xf8\xc7\x5d\xe3\xf8\x54\x5e\xe8\xc6\xa8\x99\x2f\xc4\x1d\x59\x94\xa2\xd3\x55\xef\xd8\xc7\x40\xeb\x06\xcf\x55\x87\xe7\x4e\x4c\x1d\x3e\x7c\xf8\x76\xf8\xfa\x3b\xd5\x7f\x1a\x8d\x06\xfb\x4e\x73\x14\x50\xad\xea\x4d\x40\x31\xbb\x93\xea\xb8\x14\x5b\x71\x1b\xf2\xee\x21\x6e\x49\x94\x18\x3b\xae\x8e\x75\x68\x9b\x4d\x27\x8e\x6a\x5d\x83\x2f\xd8\x88\x76\xd4\x89\xf0\x3d\x01\x47\x0f\xb4\xb5\x3f\xce\x07\x8f\x50\xda\x4a\x26\x0c\x33\x8f\x97\x97\x65\x3e\x64\xb6\xdc\x23\xf3\x91\x3f\x0f\xac\xa7\xe8\x08\x3b\xc4\x0e\xac\x1f\xf4\x66\xed\x72\x77\x20\x2f\x05\x4f\x64\x5f\xeb\xb9\x08\x94\x8e\x5e\x07\x5f\xf9\x9e\x70\x97\x28\xb3\x4e\xdc\x64\xec\x0b\x3b\xd9\x7f\x5a\x1d\x54\xe5\x8a\xf0\xb7\x77\x46\xaf\xda\x39\xec\x17\xe3\x14\x04\xe8\x96\x76\xae\x9b\xb4\xe8\xc3\xed\x44\x6d\xa5\x57\xb6\x7e\xdb\x22\x48\xcd\x6c\xc6\x85\xa7\x78\xea\x4c\xd0\x26\xb3\x80\x92\x6d\x8e\x16\xad\x37\x97\xef\x6c\x00\x29\xa9\xf4\xb3\x0e\xe6\xff\xf1\xb5\x62\xa5\xb3\x61\x61\x3d\x91\xf0\xd9\xfb\x26\x66\xef\x9b\xe2\x1d\xa9\xd4\x9c\xb8\xe8\x60\x1b\x41\x15\x47\xbd\x4f\xb5\xc8\x6a\xbe\x74\xc4\x80\xf0\x68\xc9\x71\xd3\x73\x62\xd3\x24\x6f\x39\x28\x82\x31\x1c\xa8\x3a\x5c\x09\x9a\x88\xd2\xbd\xa2\xf8\x6b\x15\x34\xce\x79\xe2\xe6\xe6\x2d\x1d\xf6\x25\xfb\x03\x68\x34\x02\xb2\x1c\x35\xf8\xa4\x1e\xe1\x15\x61\
x68\x9b\xd6\x54\x53\xb4\x77\x76\x49\x36\x05\x41\xf1\xb5\x0d\xb0\x9c\xd1\x89\xbf\x2c\x4b\x2e\xd6\x11\x4b\x18\x33\x84\x9a\xb2\x56\x9e\x4e\x64\x6a\x44\x72\xdf\xce\xc4\x4c\x15\x66\xb0\x4d\x1c\x35\x8e\x4e\xe1\xe4\x73\x70\xd6\xa2\x12\x55\x94\xde\x41\xdf\xbf\x1a\xa3\xa6\x0e\xfe\x72\x7d\xc1\xda\x58\x29\x3b\x89\xed\xa6\xda\xac\xae\x86\xaf\x5b\x84\x6a\x5e\x81\x2a\x3f\xf2\x34\xc3\x73\xfc\x53\x67\xf6\x6b\x39\x31\xe9\xed\x9e\xda\x73\x55\x77\x8d\xda\xca\x1b\x2a\xea\xeb\xc5\x7f\x5f\x0a\xf8\xba\xfb\x6d\x67\x38\x5a\xc8\x92\xb1\x8f\xec\x60\x37\x88\xb2\x14\xcd\x15\x0d\xc0\x0c\xdf\xb3\x23\x7a\xcb\x0e\xff\xd9\xe5\x78\x5d\x52\x8e\xdf\x3a\xa1\x14\xfb\xdd\x19\xd7\xb3\x62\x4f\xc8\x29\x13\xf9\x5e\x75\x40\x74\x64\xb9\x92\xb5\xc6\xc6\x69\x35\x92\xba\xe7\x6c\x1b\xaa\xaa\x25\xe1\xff\x9c\x10\x7a\xed\xdc\x57\xcb\x06\x10\xae\xf8\xa2\x36\x98\xd3\x52\xe6\x6d\x61\x82\x2b\x40\xb7\x2b\x1c\xad\x1c\x5f\xef\x38\x5c\x71\x00\x98\x14\x71\x59\x38\xaa\x98\x63\x7e\xc3\xfb\xc0\xa0\x07\x2e\xbc\x6e\x9e\x35\xa5\xd4\xb4\x25\xfa\x48\xb7\xc7\xaa\x4e\x00\x5a\xa7\x0f\x73\xde\xcc\x80\xe6\x04\x2f\x09\x69\x43\x56\xc6\x58\x35\x01\xed\x89\x8d\x9b\x6a\x64\xfb\xc0\x44\x00\x9f\x87\x9a\x80\xd2\x46\xdd\x1b\xff\xe7\x1c\x72\x6a\xfb\xce\x3c\xe7\x00\x30\x56\xe9\x60\xbc\x8d\xe3\x13\x1b\x5a\xaf\xf6\xb5\x14\xe7\x41\x37\x24\xab\xdc\x04\xdd\xac\xfb\x3e\x1a\x81\x5b\xb5\x52\x7f\xf3\x7a\x16\x0d\x3d\x85\x08\x11\x39\x3f\xa3\xe4\x4a\xf8\x96\xeb\xa3\x8f\xee\x98\x3e\x3e\x3b\x77\x7c\x6a\x72\xe1\xf8\x34\xaf\xe3\x7e\x43\xf8\x32\xf9\xa0\x01\x84\x89\xef\x63\x88\x94\x01\xde\x2e\xf5\xb5\x3a\x3b\xb1\x7a\x70\x82\xde\xc1\x24\x31\x48\x5f\x62\x33\xa7\xa2\x3f\xb6\x2a\x9d\x1a\xa6\x60\xcc\x7a\x33\x28\xb8\xd4\x9a\xb5\xba\x57\xa1\xb3\x97\xa2\xa7\x40\xa3\xa6\xdc\x17\x08\x0c\xa7\xd3\xa1\x3a\x80\x70\x5d\x15\xdd\xae\xb4\xe2\x5c\x36\xcb\xa4\x0f\x2e\x8d\x24\x53\x56\x0a\x2c\x25\x9b\x38\x54\x15\x5e\xc7\x72\xeb\x9a\x64\x3d\xe7\xea\xcc\x68\x8a\xdc\xda\x9e\xe3\xee\xcd\x27\x4e\x97\xe8\x95\x99\xea\x06\x9a\xef\x26\x83\xff\x90\x56\x68\xec\x8c\xa3\x5f\x3a\x2e\x4e\x5d\x68\x37\xe9\x2d\x43\x76\x4c\x87\xe9\xdd\x0e\x1d\x71\x41\x03\x92\x3c\x25\x72\x68\x3d\x34\xb4\xb0\xa9\x36\x25\xf7\xe3\xa2\xd0\x71\xb2\x1d\x29\xd2\x82\x72\x87\xaa\xd7\x94\x7a\x04\x99\x45\xa1\x39\xd3\x34\xa2\x56\x51\x72\x6e\x87\x21\x57\x90\x16\x40\xaa\x89\x7a\xb1\xc4\x6a\x30\xd7\xb8\x4c\xb3\xde\xf2\x8a\xe1\xdd\xd1\x2e\x22\x33\x1d\xe6\x96\xcb\xef\x87\x48\xf9\xd9\x35\xc8\x36\x6a\x72\x3a\xe3\x59\x0a\xce\x00\xc3\xce\x97\xa3\xd3\x15\x64\x4a\x6a\x47\x73\x28\x65\xdd\x9f\x6d\x43\xb4\xb7\x41\x40\x8f\xe1\xcd\xe6\xf3\x9c\x8b\xcd\xee\xd5\xe2\x2e\x27\xcc\x5d\x66\xea\x86\xc6\x57\x34\x01\xd8\x12\xea\x85\x06\x6b\x11\x42\xf2\xdb\x37\xe2\xe5\x18\x7e\x38\x00\x3e\xf2\xa6\x62\xa3\x1b\x43\x7a\xf3\x52\xb0\xb8\xf9\x7d\xe7\x93\xc3\x3b\x2c\x69\x83\x67\x06\x5b\x94\x36\x16\x37\x14\x9d\xfd\x7b\x23\xec\xf0\x86\xe9\x8c\x0f\x35\x8e\x93\x22\x41\x29\x8a\x11\x0b\xf9\xc6\x91\x28\x1d\xf6\x43\xc5\x3b\xa1\xe1\x8d\x26\xaa\x5f\xf0\xe5\x24\x5b\x12\x89\x4e\x64\x0a\xe9\x96\x07\xb8\x98\xfa\xee\xb0\x1b\x21\xb4\x13\xbf\xb9\x18\xec\xa2\x62\x3d\x81\xf4\xfd\x35\xf6\xc2\x80\xd1\x3b\x61\x7f\xfd\x25\x31\xb4\x93\xd8\x87\x19\x83\x73\x8e\x6e\xa3\x06\x1a\xe8\xb3\x97\xab\x95\x7e\x5c\xea\xd3\xc9\x90\xb6\x78\x21\x13\xd9\x2c\xb3\x9c\xbd\x28\x60\xba\x81\x61\x9f\xdd\x75\x05\xcd\x80\x0c\xbe\x04\x1c\xbd\xd5\x21\xc8\x43\x8e\x8c\xc1\x71\xd5\xa7\x1d\x66\xfc\xc0\xb6\xb1\xe7\x8f\x0c\xa5\xe1\x38\xae\xf6\xd6\xbc\xcc\x63\x59\x84\x7f\x52\x8b\x9e\xeb\xfc\xad\xab\x18\xc2\xc2\x11\x63\xc8\x96\x54\x2f\x17\x98\xc4\x5c\xc9\x13\x90\x48\xa2\xe4\x6b\xa2\x98\x88\x0b\xbe\x22\xba\x5d\x69\x3c\xc8\x71\xda\xcb\x7a\x45\xd2\xb7\x59\xe8
[Hex-escaped binary blob omitted: this span of the patch body consists solely of `\x..` byte-escape sequences (apparently embedded binary data from a vendored/generated file) and contains no human-readable content.]
33\x24\x2d\x4d\x78\x18\xc3\x11\xb3\x80\x2d\x57\x56\x3a\x49\x16\x76\x70\xfe\x63\x45\x16\x11\x95\x53\xe7\x7a\xba\x89\x34\xf2\x45\xca\x37\xc3\xa8\xd0\xad\x0c\x95\x4e\xd7\x55\xbd\xeb\xb5\xfc\xc2\xa8\xc3\xbb\x67\x80\x37\x6b\x99\x28\x16\xb3\xf4\x11\x74\x47\xfa\xbf\x3a\x1a\xbc\xb3\x32\x53\x0f\x5b\x98\x15\x83\xa9\x9b\x51\x02\x9c\x0e\x2d\x2c\x26\x35\x42\xc8\x25\xad\x32\xa6\x62\xa3\x40\x78\xbb\x60\x0c\x58\xf5\x58\xce\xe7\xc2\x22\xa4\x6f\x52\x32\xdf\x90\xce\x02\x6f\x69\xb9\x62\x1e\xe6\x98\xd8\x6c\x51\x5f\xc8\xbd\x55\xd9\x6f\xac\x1a\x89\xe0\x5a\x96\x26\xa6\x4c\x0b\xa1\xb0\x20\x7c\x13\x0d\xad\x87\xdc\x14\xf4\x55\x0a\xa3\x84\x04\x32\x61\xfc\xd8\x80\x01\xcd\xb4\x2e\x5c\xe1\xff\xf7\xc3\xec\xf1\x7d\xea\x04\xbe\xb5\x2f\xf8\xc6\x50\x19\x07\x07\x36\xa2\x4b\xa2\x33\x89\x47\xdb\x56\x18\x65\x3d\x9a\xdd\xcd\xce\x48\x59\xb2\xa6\x87\x54\x97\x40\xe7\x5b\xe9\xe1\xe0\x83\x81\xef\xc5\xb0\x91\xc4\x02\x97\xba\x24\x3a\xb0\xf3\xd1\x90\xa8\xcb\xc8\xf3\xc0\xbb\xe7\x3e\xa0\x46\xd7\xb1\x6f\x07\xbc\xd9\xe4\x4f\x83\x15\x09\x77\xd7\x76\xd2\xf5\x34\x29\xd2\x34\xba\xf0\xc2\x10\x03\x30\x9c\x1c\x6c\x58\xc8\x84\xd3\x45\x65\x55\x17\x61\x2f\x4c\xd8\xdf\x9b\x8b\xa2\xdd\xc2\xfa\x31\xd6\xae\x9d\x00\xa3\x3c\x02\x45\x52\x30\x08\x5a\x04\xc3\xb8\x84\xbe\x07\x70\x05\x45\xb4\x21\xb4\x3d\x47\xbe\x53\x45\x28\xca\x97\x8c\x55\xab\x63\x84\x90\x02\xf1\x93\x59\x81\x22\x4a\x5e\x77\x36\xab\x2a\x9b\x60\x47\xb6\x35\x79\x9c\x11\x9d\x95\x54\x36\x81\xbd\x55\x09\xc9\x9f\x0d\x52\x9b\xb0\x94\x86\xbc\xb8\xe2\x6c\x79\xa9\x86\x6f\x44\x62\x73\x8a\xd0\x8f\x26\x65\x3b\x15\x79\xec\x14\xcc\xfd\xa9\x1b\xe1\x3f\x5b\x22\x9e\x67\xa3\xaa\x6f\xfc\xd9\xe0\xe6\x65\x82\xcf\x53\x89\xd4\xa6\xcf\x21\x6b\xb8\xc8\x61\xb8\x54\xef\x91\xfe\xe3\x24\x51\x3f\x79\x90\xbd\xbc\x07\xe5\xee\x52\x1a\x8b\x53\x48\x92\xeb\xbf\xe7\x60\xf0\x1f\x2a\xd6\x05\x33\xcd\x15\x9f\x6e\x09\xad\x53\x0d\x48\x54\x20\x45\x4d\x0d\x02\xf3\x14\x47\x6f\xc8\x65\x5d\x6a\x86\x20\xf5\xb7\xa1\x6f\xcd\xe4\x19\x01\xa1\xd3\xec\x62\x44\xd0\xa5\x0b\x22\xe3\x56\x1d\x83\xad\x1c\xad\x76\x23\x0a\x39\x85\x71\x23\x9d\x90\xa9\xdc\x2e\xb6\xda\xa1\x89\x6b\xdd\x2e\xf0\x2c\x28\xe5\x52\x95\x73\xeb\x8b\x73\x94\x6b\x68\x9f\xb6\x1a\x0b\xf9\xfa\xe1\x86\xe0\xa2\xd1\x90\xba\x71\x44\x48\x69\xa6\x99\x5b\xde\x7e\xd9\x41\x4b\xa2\xe1\xc8\x9d\xff\x36\xba\x67\x3a\xdd\x43\x5b\x7f\xe1\x4c\x91\xcf\x5f\xce\xe8\x33\x1e\x53\x2b\xc0\xff\x9a\xc7\x0e\xf7\x15\xb9\xb6\xe4\x59\x12\x8d\xe0\x63\x1e\xfd\x35\x50\x7c\xe8\x48\xd1\xab\x91\x20\x28\xc1\xad\xd7\x68\xcb\x11\x38\xc5\xea\x5d\xbc\xd6\x20\x48\x11\x54\xcd\x90\x3a\xcb\xad\x61\x54\x91\x23\xfb\x97\x83\x57\x6a\xe9\x83\x3c\xce\x96\xb4\x74\xe8\xc9\x09\x5d\x57\x36\xd1\xa2\x21\xb7\xf7\xa8\x09\x76\x64\x07\x1d\xa6\x73\x56\x1e\xd9\xde\x18\x7b\xaf\x7f\x5a\x1b\x63\xfb\xb2\x3f\x6b\x1b\xad\x25\x01\xcb\x76\xda\x3f\xf6\xd8\xf2\xd5\xda\x64\x30\xac\xea\x55\xcb\xe7\x17\x96\x41\x00\x2d\x66\x69\x2b\x3f\x9f\x9d\x4a\xd3\xd8\x7f\x7d\x90\xf6\xbc\x61\x67\xab\x94\x0a\xe0\x88\xd3\x7e\xa6\x74\x23\x1b\xa1\xa3\xc8\x2c\x0e\x28\x55\x4e\x87\x5d\xb3\xff\x7c\xbd\x6d\xb2\xe9\xc2\x8c\xab\x2e\x8b\xb8\x41\x7d\x0c\xd8\x55\x4b\x42\x6e\xdb\xfe\xfb\xae\x0f\xde\x3e\xdc\xfb\x1e\x17\x49\xbb\x09\x9e\x3c\xb2\x23\x93\x15\xa7\xa6\x22\x13\xcd\xbc\x05\x84\x5e\x39\xc1\x5b\x22\x93\x9b\x93\x21\xff\xb7\x26\xea\xca\xba\x05\xe5\xa7\xfc\x72\xaa\x36\x32\x7a\x00\xce\x70\x2b\x16\x05\x60\x80\x0a\xdc\xa8\x15\x56\x2e\x0c\xc8\x58\x5e\x02\xaf\x93\xea\xf0\x04\xc1\x6c\x76\xd4\x89\x45\x5c\x6e\x89\x2c\x42\x1d\xb6\x8e\xa6\x5b\x0c\x8a\x0d\xe3\x36\x65\xe8\xf6\xf9\x68\x73\xa6\x6a\x53\x9c\xf3\x85\x79\xe8\xff\x7c\x3d\xdd\x9c\x5a\x07\x43\x14\xb6\x19\x4c\x32\x45\xca\x5f\xdf\x8e\x6a\x97\xe2\x0e\
x8f\x05\x72\xa4\x4b\x2d\x34\x07\xc8\x54\x38\x3e\xea\x4c\xa4\x08\x2c\xdf\xcd\x28\xc7\x19\x24\xb7\x7e\x7a\xd9\xc2\xf9\x15\x7e\xca\xbc\x50\x73\x7b\x62\xca\x11\xbc\x1d\xa0\x13\x4b\x5f\x5e\x17\x35\x22\x17\x0e\xe9\x8c\x96\x85\x51\x2e\x20\xb2\xbb\x01\x95\xd5\x45\xab\x5d\x74\x26\x88\xb1\x2b\x8e\x1a\x42\x9e\x2b\xa7\x32\xb1\x91\xd6\x08\xa8\x13\x15\xf3\x2c\x13\xb5\x22\x21\x1c\xd9\x9a\xc8\x64\xfb\xa8\x73\x66\x6a\x35\x91\x53\xef\x60\x71\x40\xd5\xb1\x2f\x6b\x23\x2e\x55\xc4\x37\x43\x40\x0b\xaf\x03\x08\x68\xbf\x46\x2b\x18\x41\x1d\xc1\x5c\xdd\xf2\x80\x67\xc2\xcd\xe3\x1b\x65\x67\x2c\x31\x7f\xd7\x2e\xc5\x3c\x7b\x87\x22\xb3\x78\xd4\x63\x73\x83\xc0\xdf\x76\xb2\x7c\x80\xe4\xe2\x36\x00\xde\x44\x29\xd9\x3b\x05\x4d\x45\x99\xa3\x2d\x9b\x26\xa1\xa8\x57\xd9\xc7\x0c\x23\xc5\x87\xbd\x01\x29\x7f\xdd\xcd\xe9\x6e\x0a\xb2\x54\xdc\x6f\x48\xe9\x1b\x51\x1c\xe3\x59\xd6\x35\xa9\x22\xc2\x91\xe3\x21\x72\x57\x76\xd8\xb5\x9e\xab\x7b\x7a\xde\x9e\x9e\xf7\x02\xa6\x4b\xb7\xb6\xd7\x09\xce\xf9\x67\xb4\x4e\x30\x50\x1d\xe8\xbd\x92\xcb\x9a\xc1\x9f\x8d\x30\xde\x33\xc2\x90\xa8\x3d\x17\xd3\xac\xf0\x7f\x6f\x24\xb8\xd3\xb9\xe2\xa2\xb7\xd8\xe8\x21\x78\xf4\x2a\x13\x5e\x57\xb7\xbc\x43\x35\xbb\x02\x47\xf4\xfd\xcf\x61\xf6\x76\x8f\xb9\x05\xfc\x4d\x65\x35\x5c\xb5\x93\x06\xb2\x02\x91\xf1\x5b\x69\x2e\xd4\x7e\xd9\x4a\xeb\x63\xb9\x15\xd1\x45\x81\x3c\xea\x18\x1d\x52\x0a\x26\x3c\x8c\x09\x13\x13\xfc\x28\xbf\x83\x5f\xe6\x77\xf0\x93\x37\xdf\x7c\xfc\xa4\x6b\xab\x9b\x61\x23\xeb\x69\x5e\xcc\x2f\xfa\xb7\x04\x47\x1e\x00\x0e\xff\x34\x2f\x64\xfd\x45\xca\x57\x23\x4a\x55\xd1\x7b\x17\xb5\xc9\x31\x84\x7e\xd7\x63\xa3\xf2\x21\xf8\x90\xa7\x35\x6b\xfe\x67\xbd\xed\x3e\x45\x3e\xd4\x6d\x29\xba\xb2\x8f\x91\x8f\xdf\x97\xe6\xc5\x02\x8d\x8a\x6d\xfd\xb1\x6b\x43\x6f\x8e\x33\xac\x72\xde\xe7\x85\x19\xb6\x5c\x79\x7e\x12\x01\xd9\x36\x51\x5e\x32\x6b\x2a\x2b\xcb\x53\x5e\xf0\xa9\x2e\xe3\xae\xdd\xec\x84\xcf\xcf\x2c\xcc\x3c\xbc\x7c\xff\xec\xc3\x0b\x33\xe7\x4e\x93\x09\x0b\x60\x21\x48\x85\xa2\x91\x54\x24\x9d\x40\xe3\x66\x26\x14\x10\xf2\xf6\x03\x99\x59\xb0\x59\xff\xb1\x63\xe5\xb0\x91\xb1\x1a\x14\x7a\x05\x9e\xd8\x51\x89\xdc\x2e\x1f\xc7\x6b\xd8\x68\x2b\x4b\x8b\xb4\x96\xc6\xfe\xb9\xe0\xee\x45\xfa\x1b\x5d\xa3\xd8\x31\xf4\x25\x17\xe6\x16\x27\xf8\xca\xec\x22\x28\x45\xcb\xb3\x2b\x8b\xae\xf2\x1a\xac\xcc\x2e\x06\x4e\xd5\x6f\x19\x61\x63\x3d\x16\x98\x72\x71\x52\x9a\x7c\x53\x00\x86\xc6\x70\x30\xd3\xeb\x46\x89\x33\x82\x72\x5b\xb9\x83\x58\xac\xca\xba\x16\xd3\xb7\x0e\xb3\x1f\x79\x6c\x04\x39\x9a\xfd\xef\x7b\xc1\x87\xbc\xb3\xf0\x77\xa9\x4a\x85\xb4\x11\x02\xda\x25\x10\x8c\x97\x5e\x92\xeb\x7c\x9c\x2b\x35\x9d\x39\x04\x15\x53\x88\x6c\xaa\xd1\x96\xf3\x49\x33\xdb\xa6\x9e\xf0\xee\x62\xaf\xbc\x82\x40\x9b\xaa\x76\xf9\xbd\xba\x1d\x26\x45\x54\x74\xec\x64\xa4\xb7\x03\x77\x36\x4a\x7d\xff\x6f\x2a\xc1\xef\x54\x96\xd4\x1e\x50\xfa\x78\x4a\x3c\x1c\xf4\xf1\x2a\x53\x15\x29\x13\x54\x3d\x72\x9b\x6a\x46\x85\x42\x9d\x0c\xcd\xd2\x01\x24\xa2\xba\x35\x33\xa8\xdb\x01\x0d\x10\xf9\xea\xc5\x65\x0d\xf0\x69\x23\xb9\x17\xeb\x22\x03\x3a\x20\x08\xd0\x30\xe8\x57\xd0\x7d\x93\xca\xf3\x4a\x07\xa7\x7f\x24\x43\xf1\x25\xd7\xff\x11\xb6\x5a\x39\x2e\x02\x88\x93\x5c\xd6\x18\x08\xbf\x31\x1a\xdc\x55\xbe\x58\x06\x15\xd7\x34\xce\x16\xde\x81\x79\xa6\xba\xe5\x8d\x66\x04\xb8\xec\xac\x84\x3f\xda\xcf\x9e\xf6\x98\xbe\xa7\x50\x01\x8e\x1f\x0b\x3e\xed\x69\x84\x66\xb2\xd8\x37\x4b\x04\x68\x98\xbd\x69\x92\xed\x54\x25\x57\xdc\xff\x52\x20\xc7\x69\x58\xcf\xa7\x4c\x7c\x4e\x3e\x45\xd5\xc9\x3e\x36\x97\xa7\x6e\xdc\x5c\x0f\x8b\xc9\x28\x9f\x0c\x27\x7b\x16\x70\x24\xf1\x2f\x78\xec\x06\x0d\x69\xae\x3e\xc7\x6f\xa9\x4f\x7c\xd0\xcd\x2f\x37\xe0\xe7\xea\x4b\xf8\xe1\x4c\x84\x75\x4c\x38\x0c\x0b
\x1e\x8b\x50\xee\x10\x51\xb2\x24\xaf\x92\x27\x6f\xdc\x50\x22\xd0\x63\x10\x3d\xe0\xb4\xe3\xaf\x3d\x27\xf0\xe4\x7b\x5e\xb0\xb0\xe4\x0e\x1f\xa9\xb3\x16\xc3\x38\x64\xc7\x1a\x3e\x88\xd0\xae\x7d\x2c\x77\xc6\x5b\x38\x06\x97\x5b\xd9\x49\x76\xa2\xff\x59\xa2\x6b\x92\x39\x41\x24\xcf\x79\x86\xf4\x63\x1e\xfb\xb1\x46\x3b\x8e\x3b\x67\xc3\x55\x11\x8b\xba\x1e\x85\x5c\x8d\xc2\xeb\x7a\x64\xf9\x83\x30\x80\x4d\x8d\xc8\xc6\x1c\xe2\x37\xba\x66\xa5\xca\x2a\x9c\x48\xe3\xd0\xc0\xb7\x94\x07\xe2\xe7\x7a\x42\x61\x44\x0a\x09\x63\xe5\x7c\x37\x10\x45\x26\x1a\xb1\x36\x74\xd9\xe0\x18\x8d\x9e\x8b\x82\x72\x9a\xad\xa5\xe7\x34\xe0\x5f\xb0\x43\x30\xa5\x74\x37\xdc\xab\xba\xe1\x98\xdb\x0d\x38\xf1\xf4\x44\xdc\x7e\x8e\xfd\xeb\x83\x4e\x96\xd5\x00\x76\x02\xff\x9b\xd7\x05\x1f\xf0\xba\x08\x21\x6a\x61\xab\xd0\xa0\xf6\x11\xfa\xea\xa4\xb0\x85\x12\x1c\x12\x9b\xe1\x2f\xd7\xf9\xa6\xf0\xa1\xd3\x29\x7d\xde\x30\x37\x81\xe1\x9a\xb1\xae\x57\xd9\x01\x74\x49\x9a\x4c\x6a\x33\x51\xbd\xa7\x11\xe0\x4b\x07\xd8\x57\xcc\x51\xf9\xf3\xde\x55\x92\x32\xe0\x11\xf9\x6d\x56\x7e\x99\x6c\x6d\x5f\x62\x8c\x6e\x0a\x47\x01\x12\x9c\x93\x8d\x4a\xfb\xb8\xcb\x1d\xa4\x3d\xf4\x13\x1c\x48\xd2\x6d\xc5\x78\x52\x71\x67\xec\x9d\xa9\xf7\xce\xd4\x2f\xa0\xef\xe4\x2b\x76\x18\xf7\x7f\xda\x35\x53\xed\xa3\x65\xa6\xda\x17\x80\xa7\xf6\x71\x65\xda\xfb\x90\x37\x00\xf1\x6b\x1b\x6a\x9d\xe0\xe1\xe5\x32\x77\x0e\x06\x5f\x20\xf8\x06\xae\xde\x29\x5a\xe8\x24\xfb\x14\xf1\x47\x0f\x89\x71\x46\xef\x90\x0a\x40\xea\x59\xaf\xbe\xbd\x35\x63\xc6\xbf\xab\x0f\xfa\x6c\x29\xb7\x56\x59\x35\xca\x9f\xc2\xd8\x5f\x1d\xea\x87\x7f\x3d\xd3\x80\x8e\xe9\xac\x88\xac\xe9\x7f\xf1\x50\xf0\xe7\x95\x39\x12\x35\x3a\x54\x13\xf6\xe0\xc3\x52\x2a\x03\x61\x9e\x3c\x8a\x77\x6f\xc0\xcb\x8a\xc9\x5b\xc7\x33\x90\x18\x29\x79\x98\x0f\xe7\xe3\xe3\x56\xe8\x8b\xdc\xae\x8d\x95\xbd\x96\x4e\xc6\x29\xe6\x04\x1d\x0e\xa9\x61\xe3\xc8\xdb\x54\xb8\x37\x93\x22\x9a\x34\x25\xe4\xf1\x18\x68\xbd\x32\xa7\x0e\x0b\x09\x35\xcc\x79\xd6\x4e\x12\xf2\x20\x84\xc8\x05\x84\xd1\x29\x28\x19\x34\xf3\xe7\xaa\x88\x31\xbe\xe2\x92\xe8\xf0\x3b\x8a\xb4\x95\xc6\xe9\x5a\xe7\x8c\xe8\xdc\x69\xc1\xe0\x85\x68\x23\x4d\x3a\x58\x51\x9a\x90\xe7\x1d\x0f\xe1\x9a\xfd\xd7\x74\x5f\xa4\xdf\xbf\xe5\x5d\x67\x55\xea\xee\x6c\x8c\xfd\xa8\xc2\xec\xdb\xfe\x5f\x54\x82\xa7\x2b\x2b\xcf\x6d\x4f\x29\x3d\xa9\x97\x1e\xa5\x87\x51\xc5\x32\x98\xdd\x5b\x8d\x60\x7e\x4d\x7a\xda\xfa\xe8\x9d\xf4\xb3\xe1\xc2\xd3\x18\xc5\xa5\x7e\xae\xf2\xd3\x00\x0f\x62\x57\xac\x68\x5b\x35\x68\x86\x11\x81\xef\xf2\xd8\x21\xa7\x07\xfc\x37\xb1\xbb\xaf\x26\x9b\xc5\xae\x23\xb8\x6d\x86\xbe\xf3\xf5\x6d\x91\x75\x90\x3f\x4e\xaf\x2b\xbd\x8d\x4d\xe8\x50\x7d\xa9\xa4\x62\xaa\x14\xfb\x7f\x43\xec\x06\xdd\xe7\xba\x51\x7f\x3e\xf4\x1c\xb4\x6a\x6b\xa8\x47\xb3\xac\x19\x6b\x86\xda\xb0\x78\x14\x22\x6b\xda\x4e\x5d\xf0\xe0\xc1\x45\xb9\xcb\xc2\x75\x9d\x06\x08\xc0\x82\x6a\x7c\xac\xca\x6c\x38\x69\x2b\x8c\x50\x21\xdb\xa4\x52\xea\x50\xc4\x56\x94\x94\x1f\x26\x86\xb9\x04\xa1\xa5\x69\x96\x42\x30\x8c\xbc\x92\x66\x84\x06\x63\x3d\x41\x49\x44\x61\x92\xf3\x40\x09\x9b\xb1\xdc\x94\x08\x80\x1b\x0f\x1f\xd3\x35\x1e\x7e\xe3\x9b\xc6\xf5\x04\x0c\x63\xcb\xbb\x9e\x97\x01\x7f\xc3\xb8\xb5\x1e\x4e\xc6\x72\x6f\x42\x3f\x0a\x51\x88\xae\xa7\x09\x24\xa5\x40\xa8\xa8\x25\x62\x17\xca\xa3\xc9\x1b\x22\x94\x1b\x19\x18\x3a\x12\x79\xd0\xab\x57\xd9\xe7\x2b\x8c\x99\x77\xfa\x9f\xa9\x04\x1f\xac\xd8\x7d\xa8\x31\x8e\x43\x38\xed\x45\x35\x03\x50\xad\x03\x06\xac\xfc\x89\xe7\x6a\xec\xec\x71\xe9\x3d\x72\xf6\xe8\x76\x4d\xdc\xea\x36\xc3\xa4\x07\xb2\xeb\xc9\x81\x23\x38\x20\xab\xe5\xf3\x87\xd8\x6b\x77\xe7\xce\x77\xf3\x2c\x96\x0d\x92\xd5\x72\x2d\x8c\x85\xff\x9d\x83\xc1\xeb\x06\x17\xd1\x5a\xfb\x3a\xb1\x5e\x41\xc
2\x4a\x41\x2c\x61\xc2\x01\xc7\x92\x47\xba\x52\xaa\x54\x75\xcb\x7b\xb1\x1c\x6e\x75\x3a\x5c\x0c\x8b\xf5\x2d\xcf\xcf\x29\x1e\xd9\x5c\x74\x36\x91\xff\x72\x1d\xfb\xe6\x30\xbb\xc1\x91\x65\xc0\xb8\xfe\x27\xc3\xc1\x53\xc3\x5d\x97\x9d\x93\xc5\xab\x96\xcf\x2f\x10\x33\x76\x92\x47\x75\x02\xbb\xac\x21\x99\x8d\xa5\x25\x87\x05\x7a\x77\xf3\x16\xc4\x23\x17\x29\xc7\xef\xbd\x48\x18\x90\x6a\x35\x5d\xac\x72\xe0\x78\xd1\xf5\xe6\x4e\x10\x2b\x46\xf8\x6b\x3a\x4c\x48\xf5\xd1\xf6\x53\x6d\xcc\x87\x87\xa1\xa9\x98\xf2\x70\xb1\x8a\x6f\xb9\x28\x67\xd3\xc5\xaa\xec\xa1\x8b\xa6\xbc\x14\x61\x45\xca\xc1\xaa\x0f\x9b\x4b\x1f\xfe\x36\x3b\x43\x0c\x74\x77\x4b\x2c\x99\x6e\x30\x2e\x05\x3a\x46\x10\x60\x23\x92\x7f\xe3\xd1\xef\xb2\x11\x1e\x79\x91\xb5\x6b\xc5\xb8\x45\x79\x0a\x99\xd4\x00\xc9\x1f\x85\x31\x9c\x91\x50\xf0\xe6\xd6\x3e\xab\xaa\x4e\xb3\xe6\x4e\x8d\x65\x45\x98\x5f\xca\xa7\x42\x70\xbb\xdb\xda\x63\xd8\x8a\xa6\x70\xb8\x8c\xc5\xb2\x7c\x61\xd2\xda\x3d\x6e\x84\xce\x98\xb4\x27\x22\x86\x10\xa1\x50\x4a\x52\xda\xb3\xb1\xe7\x8d\x3a\x47\x73\x84\x6c\x8d\xce\xf4\xc0\x08\xa3\xae\x99\xa0\x70\xcb\xf0\x99\x8b\x53\xf0\xe2\x8b\xce\x12\x80\x00\x5a\x15\xd8\x4c\xe2\x88\xa4\x33\xf4\x90\xb3\x5f\x7f\xbb\xc2\xba\x16\x87\xff\xe5\x4a\xf0\x07\x95\xf2\xd5\xe7\x78\x86\xb7\x44\xad\xaa\x0c\x31\xd7\x6a\x7e\xe3\x94\x7e\x8e\x46\xa2\x6f\x57\x97\xa2\xbe\xa4\x76\x75\xef\xe9\x15\xa7\x97\xdf\x3c\xc4\x7a\xc8\x1b\xff\x7b\x95\xe0\x4f\x2b\xdd\xd7\xaf\x89\x2c\xb9\xd6\x7d\x8d\xb2\xe4\x39\xeb\xed\x72\xab\xaf\x62\xde\x1f\x75\x86\xe0\xc3\xfb\x7b\x62\x89\x9c\x4e\x36\xee\x0f\x33\xa2\xdf\xfb\xeb\x91\xe0\x0e\xfb\x42\x89\x38\xc4\xec\x30\x3a\xd4\x9e\x8c\x43\xf8\x90\xeb\x0e\xfb\x9d\x11\xf6\xab\x15\x36\x0a\x92\x6e\x49\x34\xfc\xf7\x54\x06\x18\x90\x55\x73\xf0\xf4\x7f\x8f\x7c\x46\xab\x99\x4f\x7b\xf8\x27\x66\x03\x58\x39\x05\xad\xb4\x3e\x6d\xf8\xc6\xb5\x6d\x40\x6e\xeb\x13\xee\x4f\xd8\xe5\x27\xf8\x45\x7d\x11\x8d\xbd\x0f\x8e\xdd\x71\xe6\xf4\x6b\xee\x1c\x7b\xe8\xa2\x7d\xcf\x30\x2b\x3b\x05\x60\xc5\x2a\x2e\x58\xfa\xe9\x22\x55\xd3\x0d\x1c\x39\xf4\x6b\xeb\x9f\xad\xb4\x5e\xfe\x95\x57\xd9\xe3\x15\xf6\x62\x35\x7e\xf7\xa8\x8e\x7a\x77\x85\x9d\xdc\xb6\xa3\x96\xec\xa7\x74\x57\x7d\xcd\xea\x2a\x3d\x2f\x0c\x86\x2c\x3a\x9c\xa6\x51\xbd\x34\xfe\x36\xf4\x58\x82\xee\xa4\xad\x4c\x87\xf1\x62\xb5\xd6\x6a\x4f\x50\x81\x6a\x53\x34\xd3\xac\xa3\x7f\x8a\xd6\xba\x68\x8a\x2c\x8c\x27\xc9\x7e\x30\xa1\x1f\xc7\xc7\xf4\x2f\x7c\xd0\x79\x41\xf7\xd3\xe3\xfd\x38\x2c\xaa\x6c\x83\x1d\xc4\x4c\x96\x33\xa2\x23\x7b\xa8\x31\x00\xa3\xce\xcd\xf7\x39\x23\x3a\xba\x73\x8e\x9a\xbe\x91\x27\x45\x04\xe0\x56\x69\x3f\x56\x74\x83\x9e\x30\x2c\x62\x2f\x42\x2f\xde\xb9\xb0\x45\xaf\xbe\x7f\x07\x0c\x71\xb3\xd6\x33\xfa\xed\x2f\xeb\xf1\x76\x5d\xb0\xca\x9e\x3a\xc8\x26\xfb\x51\x3f\xa3\x1a\x79\xdf\xca\xca\x22\x71\x40\x83\xf0\x7c\xec\x60\x90\x96\xae\x99\xf4\x33\xf9\x1a\x10\x32\x44\xdd\xa2\xc8\xa7\xf9\x7c\x52\x4b\x9b\x90\xfd\x9c\x95\x1d\x20\x50\x5e\x8e\x41\x23\xcd\x36\xc3\xcc\xc1\x63\xc1\xa7\xb7\xbc\xfd\xf4\xa7\x1b\x4e\x07\xb4\x97\xea\x96\xff\x2b\x83\x4c\xe9\x3d\x3e\xac\xc4\xbe\xff\x33\x8a\xdf\xde\xde\x05\xac\xf4\x21\x05\xd4\x6f\x8c\xa5\x29\x29\x4a\x70\x46\x21\x12\x6e\x95\xe3\x63\x7f\x4b\x55\x0a\x24\x24\x0f\x7e\x5f\x25\xf8\xc5\x8a\xa2\x0f\xc6\x63\x5a\x9d\x87\x6b\x52\xcd\x2a\x4c\x67\xa0\x70\x8b\x54\x97\x69\x1b\xfe\xac\x9e\xa4\x11\x86\x3e\x68\x24\xf0\xf5\x30\x0b\x6b\x80\xb5\x54\x8f\x72\x85\xe9\xac\xed\xbb\xc6\xc8\x18\xc6\x3c\x90\xaf\x08\x20\x1b\x1c\x67\xc3\x85\xa5\xb3\xc4\x9e\x1e\x11\x31\xf5\xd2\x3d\xb3\xfc\xf8\x6d\xb7\x9e\x2c\xe5\xfa\xac\x45\x89\x1a\xd7\xb1\xa9\x31\x62\xed\x71\x52\xb3\x80\x8b\x07\x1e\x81\x77\x97\xbf\x00\x7d\x24\xf4\xd9\xce\x2e\xf1\x99\xfd\x6c\x54\x
3e\x08\xc4\x38\x1f\xdf\x1f\x7c\x68\xff\x22\xfd\xe2\x75\x21\x8f\x7a\x7a\x44\xc0\x9e\xdf\xca\x44\xe1\x98\x34\xa1\x4b\xd5\xac\xc2\x66\xc3\xc3\x14\x1f\x92\x26\x5a\x1c\x19\x70\x73\xb4\xf6\x4f\xf3\x23\xfc\xf4\xe5\xb0\x56\x4c\xf3\x73\x16\x4d\x84\xa6\x75\x16\xf2\x5e\xdc\xa9\xf2\x23\x7c\x31\x13\x8d\xe8\xb2\x29\xb7\x1a\xe6\xa2\x8e\x56\x22\x5d\xbc\x05\x65\x78\xde\x8a\x23\x48\xff\x86\x8e\x3a\xa7\xa6\x7b\x94\x33\xce\xeb\xd0\x9a\x44\xad\x14\x41\xc4\x57\xab\x1d\xf3\x67\x98\x47\x39\x60\x04\xdb\x05\x60\x2e\x2a\xff\x35\xe3\xfa\xdc\x4c\x0e\x44\x25\x4b\xe4\x23\xfa\xf5\x90\x80\x35\x35\xc6\x73\xd1\x0a\xb3\x10\x0e\xb2\x33\x3a\x84\x53\x9e\x9e\x19\xa7\x20\x21\x44\x08\x90\x5f\xc0\x23\x02\xb1\xe5\x00\xe6\x2b\xf5\x2b\x6c\xc1\x24\x84\x2c\xd0\x17\xa6\x0d\xde\xa2\x3e\x65\x5c\x57\x29\x6b\xb0\xf3\x03\x23\x65\x2c\xcb\x0b\xfd\x1d\x6a\x17\xa5\x55\x80\xcc\x60\x38\x0d\xb8\xb1\xae\x59\x0f\x44\x89\x53\xff\x04\x25\xa6\xe1\x21\x06\x5b\x7f\x58\x54\xd7\xaa\x7c\xaa\x91\xa6\x53\xab\x61\xa6\x3e\x4a\xe4\xfa\xd2\xd4\x6a\xf8\x86\x52\x4e\x14\x3e\xa9\x0a\xac\x86\x6f\x18\xaf\xb2\x23\x40\xc9\x6a\x62\x31\x94\xfd\x7c\x1a\xd9\x28\x07\xcf\x3b\x20\x1e\x6e\xf1\x22\x65\x88\x42\x4f\x02\x66\x36\x0e\x81\xce\xdf\xa9\x18\x69\x84\x8b\x4c\x28\x4b\x72\x88\xe7\x2c\x18\x26\xa1\x27\xb0\xec\x91\x8c\x8a\xc9\xaf\x56\x69\x96\x31\xc4\x24\x2f\xd2\x50\x64\x38\x83\xb1\x4b\xc1\x47\x52\x65\xe5\xd7\xc9\x95\xa7\xa1\xd8\xe5\x61\x9e\x08\x3e\xd5\x92\xa5\xe7\x9c\x48\xa7\xde\x7d\xe1\x2c\xdc\xa7\x47\xd9\x78\x8f\x0c\x08\x2b\xdb\x83\xd2\x15\x00\xd2\xea\x89\xd1\xe0\x95\xbd\x6f\xf5\x44\xed\xed\x2a\x99\xf7\x44\xbd\xfa\xc6\xc8\x9e\xfb\x71\xb7\xee\xc7\xb6\x4a\xb9\x8d\x83\x57\xcc\x2b\xd8\x5e\x0b\x89\xb4\x7b\x28\x76\x8e\xe9\xd6\x7f\x52\xec\x79\x3d\x77\xe1\xf5\x6c\x58\x4e\xcf\x9f\xd9\x1d\x74\xd5\xa0\x7c\xb1\x67\xbd\xf6\xf6\x3e\xbe\x25\x7f\xf1\x0a\xb2\x98\x7a\x8b\x80\x72\xd8\xf2\xdf\x32\xf6\xb2\x1e\xb3\x08\x01\xf8\x96\xda\xb1\xf0\xff\x94\x05\xff\xc6\x33\xbf\x7b\xe4\x2d\x20\xb4\x89\x8e\x04\x0c\x15\xa1\x7d\xd6\xee\x9f\x22\xdb\x95\xf6\xb0\xb9\x8e\x4b\x5a\x3e\x64\xd9\xa1\xa5\xdc\x2d\x27\xac\xf6\x28\x56\xdd\xf2\x86\x37\x44\xb6\xea\xca\xac\x5f\x3f\xc0\x7e\xdb\x63\x87\xd4\x24\x46\xdc\xa2\xf7\x7b\xc1\x1b\x96\xec\x2b\xb4\xff\xaa\xfc\x62\xf9\xba\x42\xb8\x06\x73\x63\x26\x2f\xbf\x97\xdb\xee\x01\xe5\x49\x80\xd2\x16\x44\x7d\x94\x5b\xee\xa4\xbe\xc6\xe8\xb7\x79\xec\x80\x16\x1b\x7e\x27\x78\xc8\xe0\x61\x39\x62\x22\xb3\x60\xb2\xc0\x95\x55\x6e\x91\x7a\x6e\x26\x76\xf2\xc7\x42\xf8\xa9\xec\xc6\x03\x1a\xf2\xa4\xc7\xb0\x33\xfd\x27\xbc\xe0\x9d\xde\xfd\xf2\x4f\xb7\x09\x78\x49\x41\x52\xe2\x3e\x39\x73\xf6\x2c\xca\x0d\x7a\x85\x94\x33\x78\xee\x9c\x51\x88\x1a\x4b\x42\xbe\x81\x92\x46\xd4\x91\xd5\xf8\x0b\xe4\x97\x54\x39\x54\xde\xa3\xe9\xb0\x34\x07\x35\xfb\xff\x7a\xec\x40\xd8\x8a\x80\x6a\x2a\xf7\xbf\xeb\x05\x7f\xec\xcd\x2c\xce\xe3\xcf\x5e\x84\x09\xea\xa6\xb2\x2f\x91\x21\xb6\xb0\x20\x3f\xf2\x2a\xe7\xf3\x0d\x43\x81\x3a\xb3\x38\x8f\x0b\x10\xb7\x7a\x5b\x29\x4f\x3a\x0a\x0f\xc3\xc0\x9c\xa8\x33\x87\xa5\x19\xeb\x14\xb9\xba\x35\x8e\x51\x02\xcf\xeb\xda\x0d\x76\xe6\xf6\xb3\xe6\xf7\x87\xd8\x8b\x92\x34\x51\xbd\x7e\x61\xe9\x6c\xee\xff\xf6\x50\xf0\xd8\xd0\x82\x7b\xd1\x01\x56\x02\x94\xaa\x30\xc6\x73\x22\x0e\x23\xa6\xf0\xd0\xce\x8b\xc1\xe1\x60\x39\xc6\x29\x75\x24\xb7\xad\x65\xb8\xa8\xc1\xd0\xa0\x80\x2b\xda\x71\x3c\xc1\x1b\x11\x24\xa1\x15\xa2\xe5\x28\xca\xcb\x51\x52\xc3\xa8\x2d\xbd\x9d\x40\x8b\x30\x96\xcb\x4a\x62\x55\xd1\xee\xda\x7b\x86\xaf\x68\x81\x99\x6c\x35\x26\x07\x88\x91\x6b\xb9\x7d\x74\x84\x2d\x27\xec\x21\xf6\xaa\x1c\xd2\x36\x40\x17\x54\x3c\x40\x7a\xd2\x2e\xce\x5b\xe3\x70\x38\x6f\xd7\xe4\x39\x9b\x07\xad\xb4\x9e\x07\x52\xf6\x04\x68\x44\
xc8\x03\x72\x9c\xbb\xdf\x40\xe7\x30\xf3\x9c\xdc\x59\x82\xf1\x09\x0e\x1d\x04\xfb\x59\x5a\xac\x0f\x1a\xbe\x2f\xb8\xaa\x9d\x4b\x81\x07\xcc\x10\xe9\x25\x91\x50\x9c\xb4\xff\x2b\xa3\xc1\xb4\x7d\xc1\x3a\xf3\xf1\x42\x5e\xa7\xe8\x69\x97\xce\xd7\x62\xbd\xeb\x0a\x94\xdb\x53\xeb\xf6\xa2\xca\x5e\x48\xfd\xea\x39\x4d\xd5\x3c\x4d\xe1\x5c\x03\xe2\xed\xb7\x59\x61\xf2\xf8\xc5\xe6\x75\xe0\xe8\x80\x16\x6d\x57\x11\x54\xf0\xac\xb7\xb6\xbd\x5a\x37\xe7\x9f\x52\x10\xe2\x76\x0d\xfd\xb0\xc3\x4b\xef\x55\x94\x09\x5f\x1c\x75\x88\x12\xfb\xb6\xce\xa4\x7e\xfa\xef\x1f\x0d\xe6\xba\xae\xaa\x9d\x32\x13\x79\x3b\x36\x5c\x39\x20\x5c\x4a\x04\x9d\xca\x64\xe6\x72\x13\xee\x67\x9b\x44\xa3\x99\x0e\x38\xae\x74\xb7\xef\x42\x2e\xb2\xf9\xa4\x91\x06\xb7\xca\xbf\x54\x33\xd4\xd5\x9e\xe0\x5b\x9a\x66\x0c\x9a\x57\x65\xef\x1d\x66\x07\xc2\x76\x1d\xd2\xb8\x73\xff\xad\xc3\xc1\x5f\xee\x9b\x51\x3f\x71\xff\xa2\x5f\xdc\x82\x9b\xe3\xb5\xf5\x34\x17\x3a\xe7\xd5\x6a\x18\xf8\x42\x14\x9b\x4e\xda\x6c\x85\x45\x24\x37\x21\x78\xfd\x6a\x4a\x6d\xb0\x3a\x10\x43\x1d\xa0\x29\x52\x2b\x34\xef\x40\x0d\xb3\xe3\x5c\x49\x8c\xa1\x2d\x37\xa7\xf1\x52\x8d\x90\x28\x1c\x9a\x4f\xa0\x50\x0a\x78\x07\xe4\xb5\xd3\x9d\x2a\x9f\x51\x8b\xbf\xbb\x12\xd8\xe4\xe0\x43\x34\x3b\x21\xf8\x37\x4c\xbd\xb8\xdf\x92\x78\x86\xbc\xb8\x50\xd9\x96\x42\xfb\xcb\x7b\x74\x1f\x04\x53\xa9\x1c\x7d\x15\x0f\x86\x7e\x90\x72\xfd\x45\xca\x01\x16\x55\x18\x6d\xda\x6e\x25\x25\x1b\x47\xe6\xb3\x78\xb8\x19\x66\x82\xf0\x97\xec\xa2\xf8\x42\xb4\x9a\x91\xef\xd7\x79\xa3\x82\xe9\x02\x57\x9d\xbe\xa7\xc7\x15\x63\xd1\x82\x22\x6b\x8b\x60\xc2\x9a\xde\x9a\x64\xd2\xb6\x15\xeb\x3a\xa9\x63\xad\xf8\x4c\x2b\x11\x7d\xc0\x0e\xbf\xc6\x0e\x39\xef\xf6\xef\x0f\xee\x9b\x71\x1b\xd3\xc5\xe1\x4e\x0d\xda\x0c\xbb\x51\x0c\x43\xe2\x8a\x94\x2b\xac\xea\x40\x1a\xdd\xc7\x86\xc1\x47\xeb\xdf\x15\x1c\x3b\x0d\xce\xda\xbe\x15\x03\x71\x64\x32\x06\xdb\x41\x6d\x5d\xd4\x2e\xb9\xf4\xb2\x5f\xde\xc7\x6e\xd9\x61\x6e\xbb\x9d\xd5\x2f\x67\xab\xff\xde\x7d\xc1\x6f\x79\x03\x0a\xe0\xd6\x88\xa7\xc5\x96\x3d\xed\x49\xdd\xd4\x76\x78\x34\xb7\xc5\x1d\xa5\x3d\xeb\x53\x8d\xdd\x9a\x19\x03\xc4\x29\x17\x87\xa5\xf3\xf6\x2b\xd6\x34\x31\x20\x6e\xee\x6c\x85\x3d\xe1\xb1\x97\x58\xaa\xb4\x79\xc8\xff\x25\x8f\xdd\xb5\xf3\x5c\xfb\x85\x5e\x75\x04\xe7\x7a\x5e\xb6\xce\xcd\xf6\x99\x18\xd5\x38\x47\xdd\x74\xfb\x47\x36\xd6\xcf\xba\x5b\xfa\xcb\x5e\x7f\x02\xc5\xee\x96\xf6\x68\xe6\xf2\x76\x1d\x38\xa8\xc1\xfd\x1a\xfb\xb9\x83\x3d\x43\x8b\x71\xeb\x5e\x52\x3a\xbc\xff\x91\x83\xc1\x43\xa5\x6b\xe6\x5c\x26\x92\xb4\xbd\xb6\xee\x1a\x1e\x52\xc0\xd0\xe8\xa4\x6d\x1e\x81\x87\x04\xe0\xbf\x9a\x69\x3d\x6a\x74\x8c\x67\x29\x13\xca\xe2\xe2\xee\x54\x9f\xba\x8e\xfd\x85\xc7\x5e\xa4\xda\xac\xf4\xdf\xaf\x7a\xc1\xe7\x3c\x65\x86\xe5\xa5\xbb\xb6\x3f\x2a\xb2\x8e\x1f\xe8\x69\xaa\x8b\x09\x1e\x41\x28\xea\x35\x57\xa7\x6a\x69\x82\xde\xd4\x5a\x67\x92\x52\xd9\x26\xc3\xa4\x2e\xff\x06\xa8\xec\xa4\xd6\xb1\x57\xf4\xa3\x04\xf5\xfa\x86\xa0\xe9\x00\xbd\x5e\x4b\x10\xb7\x32\x8c\xeb\x2d\xce\x31\x63\x3c\xf8\x29\x29\x3f\x15\x62\x76\xb9\x45\xf6\x83\x5f\xdb\xc7\x0e\x80\x44\x07\xd7\xe8\x17\xf6\x05\xbf\xbb\x6f\xbe\x41\x63\x1b\x21\x98\x64\xc8\x5b\x91\xa8\x09\xe7\xe0\x21\xe7\x44\x21\xc2\x3a\x5d\x94\x9d\x97\x29\x10\x22\x3a\x68\x92\x86\x6e\x0e\x26\x60\x9c\x52\xb9\xe1\xaf\x5a\x3e\xbf\x30\x75\x6f\xaa\xe2\x18\x71\x4a\x43\xf6\x5c\x53\x24\xc5\x04\x57\xa7\x3e\x0a\xf5\x5f\x86\xbc\xba\x66\x98\x44\x0d\x29\xc0\x4c\x0e\xe8\x83\xc7\x1e\xaa\xf2\x7b\xd2\x8c\x8b\xcb\x61\xb3\x15\xe3\x24\x29\x34\x20\x92\x3b\x8b\xe0\x63\xf4\xb3\x06\x17\xa7\x95\xaa\xd3\xf1\x26\x34\x16\xe0\x38\x53\x6a\x6c\x5b\xf0\x38\xba\x24\xa6\x79\x00\x3b\xbb\x79\xf5\x1b\xe5\x58\xbc\x29\x00\x48\xd2\x4c\xf0\x40\xfe
\x0c\x94\x93\x8b\x4e\x5b\xb6\x4d\xc4\xbc\x18\x77\x8c\x2c\x5a\x5b\x03\x8a\x60\x38\x6b\xc8\x09\x08\xe7\xe0\xa8\xc1\x93\xd4\x2a\xac\xc0\x1b\x4d\x24\x78\xb9\x21\x0f\x1e\x7b\x28\xe0\x87\xdd\xef\x92\xfb\x93\xb8\xcc\x8f\x69\x03\x50\x2b\xad\x8f\x53\x00\x6d\xde\x49\x8a\xf0\xb2\xac\x93\xd4\x33\x30\x05\x14\x29\x9a\x26\x00\x6d\x76\x53\xc4\xb1\x4e\xe3\xdd\x0c\x3b\x68\x1b\xc3\xae\x04\x5e\x65\xe3\x7e\x4d\x1c\x01\x60\xe1\xbf\x82\x02\x0e\xf8\xaf\x70\x6c\xdb\xc9\xba\x78\x1e\x0e\x45\xff\x40\x40\x17\xdf\xe5\xb1\x03\xda\x36\xe3\xbf\xd9\x0b\xf2\x05\x03\x37\x76\xed\x5b\x04\x91\xbf\x53\x0e\x04\xc0\x01\x07\x63\x83\x8c\xd5\x18\x68\xb0\x94\xc6\x02\xdc\x5f\xff\x6b\x34\x78\xb7\xa7\x7e\xf5\xf2\x78\xa1\xe5\x68\xae\x8b\xc5\xed\x16\xf9\x47\x23\xdc\x48\x31\x9b\xb2\x9f\x81\x7d\x6a\xe3\x26\xae\xaa\x47\x9b\x04\x81\x86\x02\xb2\xad\xc8\x50\xb9\x80\x94\x4e\xac\xf8\xd8\xb1\xde\x3c\x32\x9f\xdf\x33\xbd\xec\xda\xf4\xf2\x3a\xe5\x51\xbb\x10\xfc\x44\x0f\x8f\x1a\x0c\xf5\xce\xe1\x4f\xbb\x26\xd4\x9e\x69\xe7\x1f\x83\xeb\x2c\xde\xde\xc6\x32\xef\xdf\x7b\x85\x00\x80\xfd\x38\x2b\xd9\xe3\x8c\xcd\xdb\x3a\x75\x9d\x00\xda\x32\xb1\x16\x41\x36\x3b\xea\xd6\x38\x89\xce\xb5\xe5\xb2\x4c\xd6\x1e\x10\xab\xeb\x69\x7a\x69\xd6\x86\x8d\x00\x69\xf5\xf5\x03\xc1\x99\xed\x0a\xb9\x73\x7a\x50\xe9\xde\x82\xe6\x2f\xf7\x08\xab\x76\x2d\x68\xde\xa7\x09\xab\x1e\xf3\x82\x7f\x7e\x76\x27\x63\x61\xcb\x9d\x07\xd8\x05\xb6\xdc\xff\x5c\x76\xd5\x73\x68\x4f\x40\xed\x11\x53\xd9\xc4\x54\xbf\xe8\x6d\x2f\x0b\xeb\xfe\xaa\x01\x3e\xeb\x39\xf3\x4a\xe2\x70\x3b\xf9\xd4\x53\x4c\x7e\xe2\x7a\x76\xfb\x8e\xa7\xf8\x52\x3b\x16\x0f\x44\xc5\xfa\x79\x4d\x20\xe1\xff\xef\x43\xc1\x5b\xbc\xee\xeb\x38\xbb\x8b\x76\x2b\x06\x1d\xd4\xba\x21\x25\x87\x49\x8b\xe2\xf3\x05\x9a\x26\x65\xff\x6a\xfe\xc1\xa6\x3c\x34\x19\x13\xa4\x22\x0b\xc6\xea\xc4\xe5\x56\x98\xe4\x3a\x8e\x0b\xce\x80\xae\xc5\xe0\xe9\x83\xec\xf7\x3d\x76\x9d\x11\xa4\xb9\xff\x09\x2f\x78\xb7\x67\x24\xa9\x36\x9f\x5b\x67\xdb\x92\x4b\x99\xaf\x0a\xa2\x40\xa8\xf2\xb1\x23\x63\x14\x1c\xa0\x0b\x83\xb9\x93\xae\xd3\x52\x24\x90\x7b\x91\xac\x61\xd8\x2a\xd8\x58\xe3\xa8\x26\xb4\x31\x2b\x4d\x44\x95\x2f\x29\x64\xa5\x01\xc6\xc8\x5f\xaf\x30\x66\x58\x3a\xfc\xf7\x54\x82\x1f\x7a\x6e\xe7\x16\xeb\x0e\x8f\x07\x58\xe6\xd4\xd8\x71\x39\xfc\xbc\x16\x66\x22\xa7\xa8\x8d\x49\x3e\xbb\x74\x7a\x66\xe5\xf4\x04\xbf\xb0\x38\x07\xff\xcf\x9d\x3e\x7b\x5a\xfe\x3f\x7b\x7e\x61\xe1\xf4\xec\x8a\x3c\x2d\x1e\x41\xeb\x50\x1c\x63\xe3\x01\xa6\xcd\x1d\xb7\x30\xe9\xf0\x46\x1b\xd2\x1c\xcd\xcb\x9c\x56\x28\x96\xa7\x7a\x9d\xa0\xa3\xae\x59\x17\xbd\x7f\x9f\x1d\x86\xf1\xf6\x7d\xc1\xf7\x87\xae\x2e\x10\x83\x31\xeb\xcc\x3f\xcd\xc7\x5a\x69\x3d\x1f\xa3\x08\x11\x64\x3a\x84\x4b\x53\x71\xba\x36\xa6\x03\x47\x04\x8f\xd3\x35\x27\xf3\x43\xf3\x22\xca\x2f\xc6\x62\x4e\x24\xc7\x84\xf6\x35\x5b\x4f\xe9\xca\x9d\x67\xec\x02\x76\xbd\x98\x70\xe2\x94\x2c\xe7\x1e\x62\xb9\xde\x2d\x50\x7e\x89\x28\x73\x9f\x60\x6c\xbe\x21\x0f\x47\xf5\x9a\x94\x8a\xe5\xc1\x22\x3f\x03\x38\x92\xda\x31\x65\xb8\x90\x83\xc0\x54\x4d\xb8\x79\xf2\xb8\x18\x87\x2d\x34\x1b\x00\x37\x05\xc0\x7b\x55\x19\x9b\x2b\xa3\xfe\x8a\xa4\x16\xa7\xc0\xb7\xa1\x4c\x3d\xce\x67\x37\xa3\xb5\xf5\x42\x51\x3c\xe8\xec\x9f\x9d\x4c\x8c\x8f\x0e\xb1\xe1\xbc\x96\xb6\x84\xff\xc1\xa1\xe0\xb1\x21\xf8\xd3\x4a\xb5\xc5\x94\xcd\xb4\x25\x8c\xc2\x04\xf1\x2c\xf7\x83\x2d\x09\x75\x17\x98\xc3\x01\x85\x28\x04\x13\x3c\xd0\x07\xea\x7a\x80\xba\x4f\x70\x24\x30\x05\xec\x70\x22\xb0\x7f\xd4\xf0\xc6\x24\xbc\xc7\x8e\x20\x81\xce\x53\xb4\x52\xfa\xd5\xe6\xb8\x2e\xc5\x91\x0d\xa1\xe3\x56\x54\x75\x1a\xd2\xf5\x56\x13\x9f\xb1\xed\x1b\x65\xf3\xad\xc7\x31\x59\x0a\xe3\x3c\xa8\x73\x32\x2b\x06\x08\x20\x83\xad\xa1\xd1\xb4\x58\x56\x3f\x0
2\x0a\x72\x98\x61\x6c\xb6\xe2\xa5\xa2\xf8\x59\x70\xe3\x1c\x71\x61\x02\x9f\x74\xa2\x80\x7e\xcb\x0b\xfe\x55\x77\x14\x90\x15\xc6\xb3\x13\xd1\x8c\x45\xaf\xad\xd4\xf9\xf6\x7e\x76\xca\xb6\x90\x8b\xac\x40\x48\x11\x61\xe5\x1a\x9b\x8b\xcb\xd1\x5a\x12\x25\x6b\xe4\x2a\xd6\x78\x5c\xfe\x87\xf7\x6f\x79\xfb\xa4\x46\xe0\xec\x5b\x6f\xdb\xcf\x7e\x96\x5d\x1f\x87\x79\x71\x01\xf4\xaf\x95\xa8\x29\xfc\x47\xd8\x2d\x57\xa1\xef\xc8\x47\x83\xa9\x22\x6a\x8a\xbc\x08\x9b\x2d\x9d\x30\x06\xb1\xe4\xa8\xdd\xa1\x86\x1f\xe5\x86\x83\x9f\x2d\xb3\xfd\x4d\x91\xe7\xe1\x9a\xf0\xef\x0b\x6e\x5f\x6f\x37\x01\xda\x3e\xac\x43\x50\x0f\xdd\xc1\xb5\xad\xc9\xbb\xba\xb0\x90\xc1\xf0\x6a\x77\xd9\x6d\x6c\x04\x11\xb3\xfd\xa9\x20\x58\xcd\x22\xd1\x50\x08\xda\xaa\x51\x7d\x1f\xfd\xaa\xc1\x86\xfa\x23\x2f\xf8\xbd\x12\xbe\x93\x6e\xf7\x84\xf2\x3d\xad\x64\xf2\x6c\x72\x4f\x18\xe7\x62\x82\x5f\x48\xc0\x03\x57\xe5\x33\xad\x56\x96\x02\x5e\xfe\x9c\x48\x28\x0a\xac\xce\xef\x21\xa2\x30\x43\x13\x68\x91\x33\x06\x50\x07\x46\x16\x51\x3d\x41\x19\xf8\x32\x6b\x8b\x00\x26\x5b\x3b\xc9\x85\x14\x5f\x1a\xd0\x03\x42\xd4\x11\x3e\x83\x8a\xb9\xe4\x06\x30\xf2\xfe\xc5\x60\x59\xd1\xc9\x39\x5f\x53\xe5\x67\xc0\x73\xe8\x10\x18\x02\x71\x1a\x0f\xd4\xa7\x48\x99\x84\x1f\xa3\xe4\x11\x7e\x8e\xfb\xa6\xcf\x54\x98\x2f\x07\x7c\x25\x93\x0a\x93\xac\x0a\x66\xd4\xaf\x55\xae\x7e\x4a\x7d\xdb\xeb\xae\x50\x2d\x57\xa0\xb6\x72\x3e\x05\xa7\x5b\xa1\x4b\xab\xf0\x2f\x39\x5c\x04\x67\x68\x58\x39\xed\xbe\x04\xb8\x85\x90\x27\x62\xd3\xaa\x0c\x7a\x4b\xae\x75\xa9\x5c\x00\xac\x7e\x82\x8c\x28\x72\x13\xd1\xc5\xc6\x72\x55\x35\x18\xa5\xc3\x64\x4d\x31\x25\x90\xe7\xda\x80\x54\xae\x47\xfa\x98\xab\x40\xf7\xe4\x37\x54\xd9\x57\x87\xd9\x89\x5e\xe9\x6c\x20\x85\x35\x42\x00\x65\x95\x59\xc0\xa5\xfe\x07\x87\x83\x5f\xf2\x66\x50\x2e\xd6\x8d\x4f\x30\x37\x80\x3a\xba\xac\x0a\xf4\xa3\x3b\x6e\x70\x23\x6e\x43\x13\xb2\x00\x88\x58\x4c\xa7\x44\x15\x8c\x14\x2e\xf5\x0c\x60\xf4\x38\xfb\x1a\xd2\xa3\x25\xfa\x20\xbe\xe5\x1d\x80\x1b\x72\xef\xd8\xf2\x46\xd5\xe3\x8e\xe8\x79\x66\x88\xfd\xbc\xc7\xf4\x3d\x7f\x33\x78\x64\xc9\xc9\x3b\x95\x15\x8c\xe5\x84\x08\x94\x26\xf9\x7a\xd4\x52\xf4\x56\xa0\x64\xa9\x43\x3f\x6e\xa3\xaa\x1e\xdc\xc5\xe6\x93\x09\xbe\x90\x16\xf2\xbf\xd3\xc0\x60\x33\xc1\xe7\x52\x91\x2f\xa4\x05\xfc\x74\xe6\xec\x39\x66\x1a\xeb\xdf\x1d\x1c\x5f\x29\x79\x3e\xf0\x13\xb5\x9f\xdc\xa0\x87\xd8\x1c\x15\xa6\xba\xb7\x57\xd8\x08\x36\xcd\xff\x5b\x2f\x78\xc6\x9b\x49\x28\xbb\x38\x6d\x38\x67\xef\x5c\xf3\x6d\xe8\xde\x8d\x72\x79\xfa\x4d\x33\xd5\x74\x9d\x6f\xab\xe8\x0c\xd5\xe6\x91\xa4\xc9\x24\xc4\x37\xf4\xac\x03\xbf\x58\xd6\x63\x7f\xf3\x80\xea\xa8\x2a\x70\xaf\x10\x6f\x22\x82\x03\x86\x16\xe9\x01\x58\x0e\xc2\x42\xac\x45\x35\x64\xc1\xe7\x80\xd3\x38\x68\xeb\xfa\xc1\x3e\xb6\xb0\x3b\x10\x8d\x65\x0c\x54\x34\x8e\xe0\xad\x7d\xc1\xed\xe5\x8b\x14\x78\x1e\x5a\xee\xb2\x22\xe5\x54\xaa\x1a\x8b\xb5\xb0\xd6\xa1\x93\xeb\x96\x67\xfc\x16\x5b\x1e\xb8\x52\x9c\x49\xf9\x1f\x87\xd8\x9d\xe4\x61\x39\x19\x8c\xdb\xcc\x63\xce\x8c\xa0\xaa\xf5\xfe\x5d\x9a\x4e\xc6\x35\x72\x77\x70\xdc\xa2\x53\x32\x35\x39\xae\x92\x81\xd5\xbd\x86\x52\x17\x5f\x1d\xcc\xe9\x94\x2d\x2b\x24\x5d\x67\xbe\x85\x85\x95\x0e\xb9\x89\xe7\x71\x1d\x2d\x0c\xab\xbc\x56\x94\xb2\xfe\xbe\xec\xb1\x7d\xad\x34\x2b\xfc\xcf\x6b\xa0\xd6\x4f\x79\x08\x08\xed\xbe\x44\x85\x8b\x62\xc6\xd2\xce\x5e\xc4\x2f\xca\xd2\x17\xad\x3d\xaa\x07\xb0\x36\x3f\x7c\xd3\xe4\xc9\x9b\x6f\x3e\x7e\xf3\x04\x6e\x38\x79\xb4\x21\xc6\xdd\x5d\xef\xc4\x89\xe3\xb0\x6b\xaf\x86\xb5\x4b\x9b\xf2\xb4\xa0\xa2\x8e\xa2\x38\x2a\x3a\x0e\x6c\xe5\xea\x0d\xec\x45\xec\xba\x53\x22\xcc\x44\x06\x41\x41\xfe\x3f\x79\xe4\x3c\x3b\xe7\x5e\xba\xd3\xbf\x83\x8d\x84\xad\xe8\x8c\xe8\xf8\x87\x1c\x5b\xef\x
4b\x47\xd6\x45\x58\x17\x59\xf0\x93\x58\x1e\x03\x8b\x4a\xa1\x6d\xff\x3f\x00\x00\xff\xff\xee\xd7\x1d\x22\xa3\xf0\x34\x00") - -func kubernetesapiV1212SwaggerPbBytes() ([]byte, error) { - return bindataRead( - _kubernetesapiV1212SwaggerPb, - "kubernetesapi/v1212/swagger.pb", - ) -} - -func kubernetesapiV1212SwaggerPb() (*asset, error) { - bytes, err := kubernetesapiV1212SwaggerPbBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "kubernetesapi/v1212/swagger.pb", size: 3469475, mode: os.FileMode(420), modTime: time.Unix(1649964528, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -// Asset loads and returns the asset for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func Asset(name string) ([]byte, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) - } - return a.bytes, nil - } - return nil, fmt.Errorf("Asset %s not found", name) -} - -// MustAsset is like Asset but panics when Asset would return an error. -// It simplifies safe initialization of global variables. -func MustAsset(name string) []byte { - a, err := Asset(name) - if err != nil { - panic("asset: Asset(" + name + "): " + err.Error()) - } - - return a -} - -// AssetInfo loads and returns the asset info for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func AssetInfo(name string) (os.FileInfo, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) - } - return a.info, nil - } - return nil, fmt.Errorf("AssetInfo %s not found", name) -} - -// AssetNames returns the names of the assets. -func AssetNames() []string { - names := make([]string, 0, len(_bindata)) - for name := range _bindata { - names = append(names, name) - } - return names -} - -// _bindata is a table, holding each asset generator, mapped to its name. -var _bindata = map[string]func() (*asset, error){ - "kubernetesapi/v1212/swagger.pb": kubernetesapiV1212SwaggerPb, -} - -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"} -// AssetDir("data/img") would return []string{"a.png", "b.png"} -// AssetDir("foo.txt") and AssetDir("notexist") would return an error -// AssetDir("") will return []string{"data"}. 
-func AssetDir(name string) ([]string, error) {
-	node := _bintree
-	if len(name) != 0 {
-		cannonicalName := strings.Replace(name, "\\", "/", -1)
-		pathList := strings.Split(cannonicalName, "/")
-		for _, p := range pathList {
-			node = node.Children[p]
-			if node == nil {
-				return nil, fmt.Errorf("Asset %s not found", name)
-			}
-		}
-	}
-	if node.Func != nil {
-		return nil, fmt.Errorf("Asset %s not found", name)
-	}
-	rv := make([]string, 0, len(node.Children))
-	for childName := range node.Children {
-		rv = append(rv, childName)
-	}
-	return rv, nil
-}
-
-type bintree struct {
-	Func     func() (*asset, error)
-	Children map[string]*bintree
-}
-
-var _bintree = &bintree{nil, map[string]*bintree{
-	"kubernetesapi": &bintree{nil, map[string]*bintree{
-		"v1212": &bintree{nil, map[string]*bintree{
-			"swagger.pb": &bintree{kubernetesapiV1212SwaggerPb, map[string]*bintree{}},
-		}},
-	}},
-}}
-
-// RestoreAsset restores an asset under the given directory
-func RestoreAsset(dir, name string) error {
-	data, err := Asset(name)
-	if err != nil {
-		return err
-	}
-	info, err := AssetInfo(name)
-	if err != nil {
-		return err
-	}
-	err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
-	if err != nil {
-		return err
-	}
-	err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
-	if err != nil {
-		return err
-	}
-	err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// RestoreAssets restores an asset under the given directory recursively
-func RestoreAssets(dir, name string) error {
-	children, err := AssetDir(name)
-	// File
-	if err != nil {
-		return RestoreAsset(dir, name)
-	}
-	// Dir
-	for _, child := range children {
-		err = RestoreAssets(dir, filepath.Join(name, child))
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func _filePath(dir, name string) string {
-	cannonicalName := strings.Replace(name, "\\", "/", -1)
-	return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
-}
diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1212/swagger.pb b/src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1212/swagger.pb
deleted file mode 100644
index 2a48837bf..000000000
--- a/src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi/v1212/swagger.pb
+++ /dev/null
@@ -1,44195 +0,0 @@
[binary payload of kubernetesapi/v1212/swagger.pb omitted: 44,195 lines of protobuf-serialized OpenAPI v2 definitions for Kubernetes v1.21.2, covering watch/list operations (EndpointSlice, FlowSchema, CSINode, CronJob, DaemonSet, the OpenID JWKS endpoint, and others) and their common query parameters such as allowWatchBookmarks, continue, fieldSelector, labelSelector, limit, resourceVersion, resourceVersionMatch, timeoutSeconds, and watch]
If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ˆ) -5/apis/policy/v1beta1/watch/podsecuritypolicies/{name}Î(‚ -policy_v1beta1¸watch changes to an object of kind PodSecurityPolicy. 
deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*#watchPolicyV1beta1PodSecurityPolicy2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj\ -x-kubernetes-group-version-kind97group: policy -kind: PodSecurityPolicy -version: v1beta1 -j -x-kubernetes-actionwatch -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
- -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J> -<:"8pathname of the PodSecurityPolicy"name*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ü+ -G/apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name}+§ -policy_v1beta1&read the specified PodDisruptionBudget*.readPolicyV1beta1NamespacedPodDisruptionBudget2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Ji -N -200G -E -OK? -= -;#/definitions/io.k8s.api.policy.v1beta1.PodDisruptionBudget - -401 - - UnauthorizedRhttpsj^ -x-kubernetes-group-version-kind;9version: v1beta1 -group: policy -kind: PodDisruptionBudget -j -x-kubernetes-actionget - -policy_v1beta1)replace the specified PodDisruptionBudget*1replacePolicyV1beta1NamespacedPodDisruptionBudget2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BQ -O -Mbodybody *= -;#/definitions/io.k8s.api.policy.v1beta1.PodDisruptionBudgetBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¾ -N -200G -E -OK? -= -;#/definitions/io.k8s.api.policy.v1beta1.PodDisruptionBudget -S -201L -J -Created? 
-= -;#/definitions/io.k8s.api.policy.v1beta1.PodDisruptionBudget - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionput -j^ -x-kubernetes-group-version-kind;9group: policy -kind: PodDisruptionBudget -version: v1beta1 -*² -policy_v1beta1delete a PodDisruptionBudget*0deletePolicyV1beta1NamespacedPodDisruptionBudget2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status -R -202K -I -Accepted= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj -x-kubernetes-action delete -j^ -x-kubernetes-group-version-kind;9group: policy -kind: PodDisruptionBudget -version: v1beta1 -B¦ -policy_v1beta12partially update the specified PodDisruptionBudget*/patchPolicyV1beta1NamespacedPodDisruptionBudget2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests."force2boolean Ji -N -200G -E -OK? -= -;#/definitions/io.k8s.api.policy.v1beta1.PodDisruptionBudget - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpatch -j^ -x-kubernetes-group-version-kind;9group: policy -kind: PodDisruptionBudget -version: v1beta1 -J@ -><":pathname of the PodDisruptionBudget"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Ç' --/apis/discovery.k8s.io/v1beta1/endpointslices•'‰ -discovery_v1beta1+list or watch objects of kind EndpointSlice*1listDiscoveryV1beta1EndpointSliceForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jj -O -200H -F -OK@ -> -<#/definitions/io.k8s.api.discovery.v1beta1.EndpointSliceList - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionlist -jb -x-kubernetes-group-version-kind?=kind: EndpointSlice -version: v1beta1 -group: discovery.k8s.io -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ¯' -)/apis/networking.k8s.io/v1beta1/ingresses'õ -networking_v1beta1%list or watch objects of kind Ingress*,listNetworkingV1beta1IngressForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Je - -401 - - Unauthorized -J -200C -A -OK; -9 -7#/definitions/io.k8s.api.networking.v1beta1.IngressListRhttpsj -x-kubernetes-actionlist -j] -x-kubernetes-group-version-kind:8group: networking.k8s.io -kind: Ingress -version: v1beta1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean Õ+ -B/apis/rbac.authorization.k8s.io/v1beta1/clusterrolebindings/{name}Ž+À -rbacAuthorization_v1beta1%read the specified ClusterRoleBinding*.readRbacAuthorizationV1beta1ClusterRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Jf -K -200D -B -OK< -: -8#/definitions/io.k8s.api.rbac.v1beta1.ClusterRoleBinding - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionget -jp -x-kubernetes-group-version-kindMKkind: ClusterRoleBinding -version: v1beta1 -group: rbac.authorization.k8s.io -£ -rbacAuthorization_v1beta1(replace the specified ClusterRoleBinding*1replaceRbacAuthorizationV1beta1ClusterRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BN -L -Jbodybody *: -8#/definitions/io.k8s.api.rbac.v1beta1.ClusterRoleBindingBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¸ -K -200D -B -OK< -: -8#/definitions/io.k8s.api.rbac.v1beta1.ClusterRoleBinding -P -201I -G -Created< -: -8#/definitions/io.k8s.api.rbac.v1beta1.ClusterRoleBinding - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionput -jp -x-kubernetes-group-version-kindMKkind: ClusterRoleBinding -version: v1beta1 -group: rbac.authorization.k8s.io -*Î -rbacAuthorization_v1beta1delete a ClusterRoleBinding*0deleteRbacAuthorizationV1beta1ClusterRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status -R -202K -I -Accepted= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj -x-kubernetes-action delete -jp -x-kubernetes-group-version-kindMKgroup: rbac.authorization.k8s.io -kind: ClusterRoleBinding -version: v1beta1 -B¿ -rbacAuthorization_v1beta11partially update the specified ClusterRoleBinding*/patchRbacAuthorizationV1beta1ClusterRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean Jf -K -200D -B -OK< -: -8#/definitions/io.k8s.api.rbac.v1beta1.ClusterRoleBinding - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpatch -jp -x-kubernetes-group-version-kindMKgroup: rbac.authorization.k8s.io -kind: ClusterRoleBinding -version: v1beta1 -J? -=;"9pathname of the ClusterRoleBinding"name*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ì -/apis/batch/v1beta1/ÓÐ - batch_v1beta1get available resources*getBatchV1beta1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp -U -200N -L -OKF -D -B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList - -401 - - UnauthorizedRhttps³) -7/apis/apps/v1/namespaces/{namespace}/replicasets/{name}÷(ç -apps_v1read the specified ReplicaSet*readAppsV1NamespacedReplicaSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JY -> -2007 -5 -OK/ -- -+#/definitions/io.k8s.api.apps.v1.ReplicaSet - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionget -jN -x-kubernetes-group-version-kind+)group: apps -kind: ReplicaSet -version: v1 -° -apps_v1 replace the specified ReplicaSet*!replaceAppsV1NamespacedReplicaSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BA -? -=bodybody *- -+#/definitions/io.k8s.api.apps.v1.ReplicaSetBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jž - -401 - - Unauthorized -> -2007 -5 -OK/ -- -+#/definitions/io.k8s.api.apps.v1.ReplicaSet -C -201< -: -Created/ -- -+#/definitions/io.k8s.api.apps.v1.ReplicaSetRhttpsjN -x-kubernetes-group-version-kind+)group: apps -kind: ReplicaSet -version: v1 -j -x-kubernetes-actionput -*‚ -apps_v1delete a ReplicaSet* deleteAppsV1NamespacedReplicaSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» - -401 - - Unauthorized -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status -R -202K -I -Accepted= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusRhttpsjN -x-kubernetes-group-version-kind+)group: apps -kind: ReplicaSet -version: v1 -j -x-kubernetes-action delete -Bæ -apps_v1)partially update the specified ReplicaSet*patchAppsV1NamespacedReplicaSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean JY -> -2007 -5 -OK/ -- -+#/definitions/io.k8s.api.apps.v1.ReplicaSet - -401 - - UnauthorizedRhttpsjN -x-kubernetes-group-version-kind+)group: apps -kind: ReplicaSet -version: v1 -j -x-kubernetes-actionpatch -J7 -53"1pathname of the ReplicaSet"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ¼* -V/apis/autoscaling/v2beta2/watch/namespaces/{namespace}/horizontalpodautoscalers/{name}á)­ -autoscaling_v2beta2¾watch changes to an object of kind HorizontalPodAutoscaler. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*8watchAutoscalingV2beta2NamespacedHorizontalPodAutoscaler2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionwatch -jg -x-kubernetes-group-version-kindDBversion: v2beta2 -group: autoscaling -kind: HorizontalPodAutoscaler -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." 
labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JD -B@">path#name of the HorizontalPodAutoscaler"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean õ( -5/apis/node.k8s.io/v1beta1/watch/runtimeclasses/{name}»(ô - node_v1beta1³watch changes to an object of kind RuntimeClass. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*watchNodeV1beta1RuntimeClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionwatch -j\ -x-kubernetes-group-version-kind97kind: RuntimeClass -version: v1beta1 -group: node.k8s.io -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. 
Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J9 -75"3pathname of the RuntimeClass"name*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. 
- -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean –) -=/apis/apiregistration.k8s.io/v1beta1/watch/apiservices/{name}Ô( -apiregistration_v1beta1±watch changes to an object of kind APIService. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*%watchApiregistrationV1beta1APIService2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsje -x-kubernetes-group-version-kindB@group: apiregistration.k8s.io -version: v1beta1 -kind: APIService -j -x-kubernetes-actionwatch -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J7 -53"1pathname of the APIService"name*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean –_ -@/apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgetsÑ^À& -policy_v1beta11list or watch objects of kind PodDisruptionBudget*.listPolicyV1beta1NamespacedPodDisruptionBudget2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean Jm - -401 - - Unauthorized -R -200K -I -OKC -A -?#/definitions/io.k8s.api.policy.v1beta1.PodDisruptionBudgetListRhttpsj^ -x-kubernetes-group-version-kind;9group: policy -kind: PodDisruptionBudget -version: v1beta1 -j -x-kubernetes-actionlist -"Ù -policy_v1beta1create a PodDisruptionBudget*0createPolicyV1beta1NamespacedPodDisruptionBudget2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BQ -O -Mbodybody *= -;#/definitions/io.k8s.api.policy.v1beta1.PodDisruptionBudgetBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J” - -401 - - Unauthorized -N -200G -E -OK? -= -;#/definitions/io.k8s.api.policy.v1beta1.PodDisruptionBudget -S -201L -J -Created? -= -;#/definitions/io.k8s.api.policy.v1beta1.PodDisruptionBudget -T -202M -K -Accepted? -= -;#/definitions/io.k8s.api.policy.v1beta1.PodDisruptionBudgetRhttpsj -x-kubernetes-actionpost -j^ -x-kubernetes-group-version-kind;9group: policy -kind: PodDisruptionBudget -version: v1beta1 -*ü, -policy_v1beta1(delete collection of PodDisruptionBudget*:deletePolicyV1beta1CollectionNamespacedPodDisruptionBudget2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. 
The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj* -x-kubernetes-actiondeletecollection -j^ -x-kubernetes-group-version-kind;9kind: PodDisruptionBudget -version: v1beta1 -group: policy -J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Û( -7/apis/apps/v1/watch/namespaces/{namespace}/statefulsetsŸ(± -apps_v1wwatch individual changes to a list of StatefulSet. deprecated: use the 'watch' parameter with a list operation instead.*$watchAppsV1NamespacedStatefulSetList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsjO -x-kubernetes-group-version-kind,*group: apps -kind: StatefulSet -version: v1 -j# -x-kubernetes-action  -watchlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean µ] -1/apis/apps/v1/namespaces/{namespace}/statefulsetsÿ\„& -apps_v1)list or watch objects of kind StatefulSet*listAppsV1NamespacedStatefulSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean J^ -C -200< -: -OK4 -2 -0#/definitions/io.k8s.api.apps.v1.StatefulSetList - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionlist -jO -x-kubernetes-group-version-kind,*group: apps -kind: StatefulSet -version: v1 -"ð -apps_v1create a StatefulSet*!createAppsV1NamespacedStatefulSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BB -@ ->bodybody *. -,#/definitions/io.k8s.api.apps.v1.StatefulSetBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jç -? -2008 -6 -OK0 -. -,#/definitions/io.k8s.api.apps.v1.StatefulSet -D -201= -; -Created0 -. -,#/definitions/io.k8s.api.apps.v1.StatefulSet -E -202> -< -Accepted0 -. -,#/definitions/io.k8s.api.apps.v1.StatefulSet - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpost -jO -x-kubernetes-group-version-kind,*version: v1 -group: apps -kind: StatefulSet -*Ï, -apps_v1 delete collection of StatefulSet*+deleteAppsV1CollectionNamespacedStatefulSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. 
zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj* -x-kubernetes-actiondeletecollection -jO -x-kubernetes-group-version-kind,*group: apps -kind: StatefulSet -version: v1 -J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ï -/apis/authentication.k8s.io/ÎË -authenticationget information of a group*getAuthenticationAPIGroup2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJi -N -200G -E -OK? -= -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup - -401 - - UnauthorizedRhttps–' -$/apis/networking.k8s.io/v1/ingressesí&á - networking_v1%list or watch objects of kind Ingress*'listNetworkingV1IngressForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*J` -E -200> -< -OK6 -4 -2#/definitions/io.k8s.api.networking.v1.IngressList - -401 - - UnauthorizedRhttpsjX -x-kubernetes-group-version-kind53group: networking.k8s.io -kind: Ingress -version: v1 -j -x-kubernetes-actionlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ð] -./apis/networking.k8s.io/v1beta1/ingressclasses½]±& -networking_v1beta1*list or watch objects of kind IngressClass*!listNetworkingV1beta1IngressClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean Jj -O -200H -F -OK@ -> -<#/definitions/io.k8s.api.networking.v1beta1.IngressClassList - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionlist -jb -x-kubernetes-group-version-kind?=group: networking.k8s.io -kind: IngressClass -version: v1beta1 -" -networking_v1beta1create an IngressClass*#createNetworkingV1beta1IngressClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BN -L -Jbodybody *: -8#/definitions/io.k8s.api.networking.v1beta1.IngressClassBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J‹ -Q -202J -H -Accepted< -: -8#/definitions/io.k8s.api.networking.v1beta1.IngressClass - -401 - - Unauthorized -K -200D -B -OK< -: -8#/definitions/io.k8s.api.networking.v1beta1.IngressClass -P -201I -G -Created< -: -8#/definitions/io.k8s.api.networking.v1beta1.IngressClassRhttpsj -x-kubernetes-actionpost -jb -x-kubernetes-group-version-kind?=group: networking.k8s.io -kind: IngressClass -version: v1beta1 -*ð, -networking_v1beta1!delete collection of IngressClass*-deleteNetworkingV1beta1CollectionIngressClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. 
If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsjb -x-kubernetes-group-version-kind?=group: networking.k8s.io -kind: IngressClass -version: v1beta1 -j* -x-kubernetes-actiondeletecollection -JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ‰ -;/api/v1/namespaces/{namespace}/resourcequotas/{name}/statusÉ -core_v1*read status of the specified ResourceQuota*'readCoreV1NamespacedResourceQuotaStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*J\ -A -200: -8 -OK2 -0 -.#/definitions/io.k8s.api.core.v1.ResourceQuota - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionget -jO -x-kubernetes-group-version-kind,*group: "" -kind: ResourceQuota -version: v1 -Ð -core_v1-replace status of the specified ResourceQuota**replaceCoreV1NamespacedResourceQuotaStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BD -B -@bodybody *0 -.#/definitions/io.k8s.api.core.v1.ResourceQuotaBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¤ -A -200: -8 -OK2 -0 -.#/definitions/io.k8s.api.core.v1.ResourceQuota -F -201? -= -Created2 -0 -.#/definitions/io.k8s.api.core.v1.ResourceQuota - -401 - - UnauthorizedRhttpsjO -x-kubernetes-group-version-kind,*group: "" -kind: ResourceQuota -version: v1 -j -x-kubernetes-actionput -B€ -core_v16partially update status of the specified ResourceQuota*(patchCoreV1NamespacedResourceQuotaStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests."force2boolean J\ -A -200: -8 -OK2 -0 -.#/definitions/io.k8s.api.core.v1.ResourceQuota - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpatch -jO -x-kubernetes-group-version-kind,*version: v1 -group: "" -kind: ResourceQuota -J: -86"4pathname of the ResourceQuota"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ‹( -1/apis/storage.k8s.io/v1beta1/watch/storageclassesÕ'É -storage_v1beta1xwatch individual changes to a list of StorageClass. deprecated: use the 'watch' parameter with a list operation instead.*#watchStorageV1beta1StorageClassList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj_ -x-kubernetes-group-version-kind<:group: storage.k8s.io -kind: StorageClass -version: v1beta1 -j# -x-kubernetes-action  -watchlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean †( -//apis/storage.k8s.io/v1/watch/volumeattachmentsÒ'Æ - -storage_v1|watch individual changes to a list of VolumeAttachment. deprecated: use the 'watch' parameter with a list operation instead.*"watchStorageV1VolumeAttachmentList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk - -401 - - Unauthorized -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEventRhttpsj^ -x-kubernetes-group-version-kind;9group: storage.k8s.io -kind: VolumeAttachment -version: v1 -j# -x-kubernetes-action  -watchlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ¡' -"/apis/extensions/v1beta1/ingressesú&î -extensions_v1beta1%list or watch objects of kind Ingress*,listExtensionsV1beta1IngressForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Je -J -200C -A -OK; -9 -7#/definitions/io.k8s.api.extensions.v1beta1.IngressList - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionlist -jV -x-kubernetes-group-version-kind31group: extensions -kind: Ingress -version: v1beta1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. 
This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ·- -M/apis/flowcontrol.apiserver.k8s.io/v1beta1/prioritylevelconfigurations/{name}å,ð -flowcontrolApiserver_v1beta1-read the specified PriorityLevelConfiguration*9readFlowcontrolApiserverV1beta1PriorityLevelConfiguration2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Ju -Z -200S -Q -OKK -I -G#/definitions/io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfiguration - -401 - - UnauthorizedRhttpsj{ -x-kubernetes-group-version-kindXVgroup: flowcontrol.apiserver.k8s.io -kind: PriorityLevelConfiguration -version: v1beta1 -j -x-kubernetes-actionget -ñ -flowcontrolApiserver_v1beta10replace the specified PriorityLevelConfiguration* -OK8 -6 -4#/definitions/io.k8s.api.storage.v1.StorageClassList - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionlist -jZ -x-kubernetes-group-version-kind75group: storage.k8s.io -kind: StorageClass -version: v1 -"‰ - -storage_v1create a StorageClass*createStorageV1StorageClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BF -D -Bbodybody *2 -0#/definitions/io.k8s.api.storage.v1.StorageClassBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jó -C -200< -: -OK4 -2 -0#/definitions/io.k8s.api.storage.v1.StorageClass -H -201A -? 
-Created4 -2 -0#/definitions/io.k8s.api.storage.v1.StorageClass -I -202B -@ -Accepted4 -2 -0#/definitions/io.k8s.api.storage.v1.StorageClass - -401 - - UnauthorizedRhttpsjZ -x-kubernetes-group-version-kind75group: storage.k8s.io -kind: StorageClass -version: v1 -j -x-kubernetes-actionpost -*Ø, - -storage_v1!delete collection of StorageClass*%deleteStorageV1CollectionStorageClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
- -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj* -x-kubernetes-actiondeletecollection -jZ -x-kubernetes-group-version-kind75group: storage.k8s.io -kind: StorageClass -version: v1 -JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ’( -./apis/discovery.k8s.io/v1/watch/endpointslicesß'Ó - discovery_v1ywatch individual changes to a list of EndpointSlice. deprecated: use the 'watch' parameter with a list operation instead.*1watchDiscoveryV1EndpointSliceListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj] -x-kubernetes-group-version-kind:8group: discovery.k8s.io -kind: EndpointSlice -version: v1 -j# -x-kubernetes-action  -watchlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. 
If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ï( -0/api/v1/watch/namespaces/{namespace}/limitrangesš(¬ -core_v1vwatch individual changes to a list of LimitRange. deprecated: use the 'watch' parameter with a list operation instead.*#watchCoreV1NamespacedLimitRangeList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -jL -x-kubernetes-group-version-kind)'group: "" -kind: LimitRange -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ž( -1/apis/apiregistration.k8s.io/v1/watch/apiservicesØ'Ì -apiregistration_v1vwatch individual changes to a list of APIService. deprecated: use the 'watch' parameter with a list operation instead.*$watchApiregistrationV1APIServiceList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj` -x-kubernetes-group-version-kind=;group: apiregistration.k8s.io -version: v1 -kind: APIService -j# -x-kubernetes-action  -watchlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean  -S/apis/authorization.k8s.io/v1beta1/namespaces/{namespace}/localsubjectaccessreviewsê "ú -authorization_v1beta1!create a LocalSubjectAccessReview* -<bodybody *, -*#/definitions/io.k8s.api.apps.v1.DaemonSetBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jœ -= -2006 -4 -OK. -, -*#/definitions/io.k8s.api.apps.v1.DaemonSet -B -201; -9 -Created. -, -*#/definitions/io.k8s.api.apps.v1.DaemonSet - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionput -jM -x-kubernetes-group-version-kind*(group: apps -kind: DaemonSet -version: v1 -Bò -apps_v12partially update status of the specified DaemonSet*$patchAppsV1NamespacedDaemonSetStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean JX -= -2006 -4 -OK. -, -*#/definitions/io.k8s.api.apps.v1.DaemonSet - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpatch -jM -x-kubernetes-group-version-kind*(group: apps -kind: DaemonSet -version: v1 -J6 -42"0pathname of the DaemonSet"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ¼) -O/apis/autoscaling/v2beta1/watch/namespaces/{namespace}/horizontalpodautoscalersè(ú -autoscaling_v2beta1ƒwatch individual changes to a list of HorizontalPodAutoscaler. deprecated: use the 'watch' parameter with a list operation instead.* -< -OK6 -4 -2#/definitions/io.k8s.api.batch.v1beta1.CronJobList - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionlist -jQ -x-kubernetes-group-version-kind.,version: v1beta1 -group: batch -kind: CronJob -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". 
Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. 
See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ³) -Q/apis/rbac.authorization.k8s.io/v1beta1/watch/namespaces/{namespace}/rolebindingsÝ(ï -rbacAuthorization_v1beta1wwatch individual changes to a list of RoleBinding. deprecated: use the 'watch' parameter with a list operation instead.*6watchRbacAuthorizationV1beta1NamespacedRoleBindingList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -ji -x-kubernetes-group-version-kindFDkind: RoleBinding -version: v1beta1 -group: rbac.authorization.k8s.io -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." 
labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean —) -J/apis/rbac.authorization.k8s.io/v1beta1/watch/namespaces/{namespace}/rolesÈ(Ú -rbacAuthorization_v1beta1pwatch individual changes to a list of Role. deprecated: use the 'watch' parameter with a list operation instead.*/watchRbacAuthorizationV1beta1NamespacedRoleList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsjb -x-kubernetes-group-version-kind?=group: rbac.authorization.k8s.io -kind: Role -version: v1beta1 -j# -x-kubernetes-action  -watchlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. 
If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ä' -/api/v1/watch/persistentvolumesÀ'´ -core_v1|watch individual changes to a list of PersistentVolume. deprecated: use the 'watch' parameter with a list operation instead.*watchCoreV1PersistentVolumeList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk - -401 - - Unauthorized -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEventRhttpsjR -x-kubernetes-group-version-kind/-version: v1 -group: "" -kind: PersistentVolume -j# -x-kubernetes-action  -watchlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean á* -F/apis/coordination.k8s.io/v1beta1/namespaces/{namespace}/leases/{name}–*Ž -coordination_v1beta1read the specified Lease*&readCoordinationV1beta1NamespacedLease2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Ja -F -200? -= -OK7 -5 -3#/definitions/io.k8s.api.coordination.v1beta1.Lease - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionget -j] -x-kubernetes-group-version-kind:8group: coordination.k8s.io -kind: Lease -version: v1beta1 -ç -coordination_v1beta1replace the specified Lease*)replaceCoordinationV1beta1NamespacedLease2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BI -G -Ebodybody *5 -3#/definitions/io.k8s.api.coordination.v1beta1.LeaseBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J® -F -200? 
[Non-text content omitted: serialized Kubernetes OpenAPI v2 (swagger) document carried in a vendored dependency file. The binary blob encodes endpoint, parameter, and response definitions for the coordination.k8s.io, policy, admissionregistration.k8s.io, autoscaling, core (v1), apps (v1), scheduling.k8s.io, storage.k8s.io, and apiregistration.k8s.io API groups and is not human-readable; it is unrelated to the referrers API change itself.]
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ’( -./apis/coordination.k8s.io/v1beta1/watch/leasesß'Ó -coordination_v1beta1qwatch individual changes to a list of Lease. deprecated: use the 'watch' parameter with a list operation instead.*1watchCoordinationV1beta1LeaseListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -j] -x-kubernetes-group-version-kind:8version: v1beta1 -group: coordination.k8s.io -kind: Lease -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ×^ -F/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/rolebindingsŒ^³& -rbacAuthorization_v1)list or watch objects of kind RoleBinding*,listRbacAuthorizationV1NamespacedRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean J^ -C -200< -: -OK4 -2 -0#/definitions/io.k8s.api.rbac.v1.RoleBindingList - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionlist -jd -x-kubernetes-group-version-kindA?version: v1 -group: rbac.authorization.k8s.io -kind: RoleBinding -"Ÿ -rbacAuthorization_v1create a RoleBinding*.createRbacAuthorizationV1NamespacedRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BB -@ ->bodybody *. -,#/definitions/io.k8s.api.rbac.v1.RoleBindingBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jç -? -2008 -6 -OK0 -. -,#/definitions/io.k8s.api.rbac.v1.RoleBinding -D -201= -; -Created0 -. -,#/definitions/io.k8s.api.rbac.v1.RoleBinding -E -202> -< -Accepted0 -. -,#/definitions/io.k8s.api.rbac.v1.RoleBinding - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpost -jd -x-kubernetes-group-version-kindA?group: rbac.authorization.k8s.io -kind: RoleBinding -version: v1 -*þ, -rbacAuthorization_v1 delete collection of RoleBinding*8deleteRbacAuthorizationV1CollectionNamespacedRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. 
If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsjd -x-kubernetes-group-version-kindA?group: rbac.authorization.k8s.io -kind: RoleBinding -version: v1 -j* -x-kubernetes-actiondeletecollection -J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string À( -)/apis/storage.k8s.io/v1/csidrivers/{name}’(ì - -storage_v1read the specified CSIDriver*readStorageV1CSIDriver2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*J[ - -401 - - Unauthorized -@ -2009 -7 -OK1 -/ --#/definitions/io.k8s.api.storage.v1.CSIDriverRhttpsj -x-kubernetes-actionget -jW -x-kubernetes-group-version-kind42group: storage.k8s.io -kind: CSIDriver -version: v1 -¹ - -storage_v1replace the specified CSIDriver*replaceStorageV1CSIDriver2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BC -A -?bodybody */ --#/definitions/io.k8s.api.storage.v1.CSIDriverBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¢ -@ -2009 -7 -OK1 -/ --#/definitions/io.k8s.api.storage.v1.CSIDriver -E -201> -< -Created1 -/ --#/definitions/io.k8s.api.storage.v1.CSIDriver - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionput -jW -x-kubernetes-group-version-kind42group: storage.k8s.io -kind: CSIDriver -version: v1 -*í - -storage_v1delete a CSIDriver*deleteStorageV1CSIDriver2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J£ -@ -2009 -7 -OK1 -/ --#/definitions/io.k8s.api.storage.v1.CSIDriver -F -202? -= -Accepted1 -/ --#/definitions/io.k8s.api.storage.v1.CSIDriver - -401 - - UnauthorizedRhttpsj -x-kubernetes-action delete -jW -x-kubernetes-group-version-kind42group: storage.k8s.io -kind: CSIDriver -version: v1 -Bë - -storage_v1(partially update the specified CSIDriver*patchStorageV1CSIDriver2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean J[ -@ -2009 -7 -OK1 -/ --#/definitions/io.k8s.api.storage.v1.CSIDriver - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpatch -jW -x-kubernetes-group-version-kind42group: storage.k8s.io -kind: CSIDriver -version: v1 -J6 -42"0pathname of the CSIDriver"name*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Í& - /api/v1/pods¼&° -core_v1!list or watch objects of kind Pod*listCoreV1PodForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*JV -; -2004 -2 -OK, -* -(#/definitions/io.k8s.api.core.v1.PodList - -401 - - UnauthorizedRhttpsjE -x-kubernetes-group-version-kind" group: "" -kind: Pod -version: v1 -j -x-kubernetes-actionlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean Ë) -=/apis/apps/v1/watch/namespaces/{namespace}/replicasets/{name}‰)â -apps_v1±watch changes to an object of kind ReplicaSet. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*watchAppsV1NamespacedReplicaSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionwatch -jN -x-kubernetes-group-version-kind+)group: apps -kind: ReplicaSet -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
- -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J7 -53"1pathname of the ReplicaSet"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ¼* -V/apis/autoscaling/v2beta1/watch/namespaces/{namespace}/horizontalpodautoscalers/{name}á)­ -autoscaling_v2beta1¾watch changes to an object of kind HorizontalPodAutoscaler. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*8watchAutoscalingV2beta1NamespacedHorizontalPodAutoscaler2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionwatch -jg -x-kubernetes-group-version-kindDBgroup: autoscaling -kind: HorizontalPodAutoscaler -version: v2beta1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JD -B@">path#name of the HorizontalPodAutoscaler"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean á -/apis/node.k8s.io/v1/ÇÄ -node_v1get available resources*getNodeV1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp -U -200N -L -OKF -D -B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList - -401 - - UnauthorizedRhttpsÁ' -//apis/rbac.authorization.k8s.io/v1/rolebindings' -rbacAuthorization_v1)list or watch objects of kind RoleBinding*2listRbacAuthorizationV1RoleBindingForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*J^ -C -200< -: -OK4 -2 -0#/definitions/io.k8s.api.rbac.v1.RoleBindingList - -401 - - UnauthorizedRhttpsjd -x-kubernetes-group-version-kindA?group: rbac.authorization.k8s.io -kind: RoleBinding -version: v1 -j -x-kubernetes-actionlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
[Binary protobuf-encoded Kubernetes OpenAPI v2 (swagger) specification data from a vendored dependency file; the raw bytes are not human-readable and are omitted here. The encoded content describes the standard Kubernetes list/watch query parameters (allowWatchBookmarks, continue, fieldSelector, labelSelector, limit, pretty, resourceVersion, resourceVersionMatch, timeoutSeconds, watch, dryRun, fieldManager, gracePeriodSeconds, orphanDependents, propagationPolicy) and the read/replace/patch/delete/list/watch operations for resources such as StorageClass, ValidatingWebhookConfiguration, NetworkPolicy, PersistentVolume, PersistentVolumeClaim, APIService, ClusterRoleBinding, CSIDriver, Event, CertificateSigningRequest, and Lease.]
- -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean J` -E -200> -< -OK6 -4 -2#/definitions/io.k8s.api.coordination.v1.LeaseList - -401 - - UnauthorizedRhttpsjX -x-kubernetes-group-version-kind53group: coordination.k8s.io -kind: Lease -version: v1 -j -x-kubernetes-actionlist -"… -coordination_v1create a Lease*#createCoordinationV1NamespacedLease2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BD -B -@bodybody *0 -.#/definitions/io.k8s.api.coordination.v1.LeaseBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jí -G -202@ -> -Accepted2 -0 -.#/definitions/io.k8s.api.coordination.v1.Lease - -401 - - Unauthorized -A -200: -8 -OK2 -0 -.#/definitions/io.k8s.api.coordination.v1.Lease -F -201? -= -Created2 -0 -.#/definitions/io.k8s.api.coordination.v1.LeaseRhttpsj -x-kubernetes-actionpost -jX -x-kubernetes-group-version-kind53group: coordination.k8s.io -kind: Lease -version: v1 -*Ü, -coordination_v1delete collection of Lease*-deleteCoordinationV1CollectionNamespacedLease2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. 
Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj* -x-kubernetes-actiondeletecollection -jX -x-kubernetes-group-version-kind53version: v1 -group: coordination.k8s.io -kind: Lease -J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Ó( -5/apis/apps/v1/watch/namespaces/{namespace}/daemonsets™(« -apps_v1uwatch individual changes to a list of DaemonSet. deprecated: use the 'watch' parameter with a list operation instead.*"watchAppsV1NamespacedDaemonSetList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -jM -x-kubernetes-group-version-kind*(group: apps -kind: DaemonSet -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean Ú& -/apis/batch/v1/jobsÂ&¶ -batch_v1!list or watch objects of kind Job*listBatchV1JobForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*JW -< -2005 -3 -OK- -+ -)#/definitions/io.k8s.api.batch.v1.JobList - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionlist -jH -x-kubernetes-group-version-kind%#version: v1 -group: batch -kind: Job -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
- -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ©0 -'/api/v1/namespaces/{namespace}/servicesý/ò% -core_v1%list or watch objects of kind Service*listCoreV1NamespacedService2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean JZ -? -2008 -6 -OK0 -. -,#/definitions/io.k8s.api.core.v1.ServiceList - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionlist -jI -x-kubernetes-group-version-kind&$group: "" -kind: Service -version: v1 -"Ò -core_v1create a Service*createCoreV1NamespacedService2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B> -< -:bodybody ** -(#/definitions/io.k8s.api.core.v1.ServiceBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string JÛ -; -2004 -2 -OK, -* -(#/definitions/io.k8s.api.core.v1.Service -@ -2019 -7 -Created, -* -(#/definitions/io.k8s.api.core.v1.Service -A -202: -8 -Accepted, -* -(#/definitions/io.k8s.api.core.v1.Service - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpost -jI -x-kubernetes-group-version-kind&$version: v1 -group: "" -kind: Service -J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Ý -/apis/batch/v1/ÉÆ -batch_v1get available resources*getBatchV1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp -U -200N -L -OKF -D -B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList - -401 - - UnauthorizedRhttpsã( -9/apis/batch/v1beta1/watch/namespaces/{namespace}/cronjobs¥(· - batch_v1beta1swatch individual changes to a list of CronJob. deprecated: use the 'watch' parameter with a list operation instead.*&watchBatchV1beta1NamespacedCronJobList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsjQ -x-kubernetes-group-version-kind.,kind: CronJob -version: v1beta1 -group: batch -j# -x-kubernetes-action  -watchlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." 
fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ( -./apis/rbac.authorization.k8s.io/v1/watch/rolesÝ'Ñ -rbacAuthorization_v1pwatch individual changes to a list of Role. deprecated: use the 'watch' parameter with a list operation instead.*0watchRbacAuthorizationV1RoleListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk - -401 - - Unauthorized -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEventRhttpsj# -x-kubernetes-action  -watchlist -j] -x-kubernetes-group-version-kind:8group: rbac.authorization.k8s.io -kind: Role -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. 
Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. 
- -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean š] -*/api/v1/namespaces/{namespace}/limitrangesë\þ% -core_v1(list or watch objects of kind LimitRange*listCoreV1NamespacedLimitRange2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean J] - -401 - - Unauthorized -B -200; -9 -OK3 -1 -/#/definitions/io.k8s.api.core.v1.LimitRangeListRhttpsj -x-kubernetes-actionlist -jL -x-kubernetes-group-version-kind)'group: "" -kind: LimitRange -version: v1 -"ç -core_v1create a LimitRange* createCoreV1NamespacedLimitRange2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BA -? -=bodybody *- -+#/definitions/io.k8s.api.core.v1.LimitRangeBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jä -> -2007 -5 -OK/ -- -+#/definitions/io.k8s.api.core.v1.LimitRange -C -201< -: -Created/ -- -+#/definitions/io.k8s.api.core.v1.LimitRange -D -202= -; -Accepted/ -- -+#/definitions/io.k8s.api.core.v1.LimitRange - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpost -jL -x-kubernetes-group-version-kind)'kind: LimitRange -version: v1 -group: "" -*Ê, -core_v1delete collection of LimitRange**deleteCoreV1CollectionNamespacedLimitRange2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. 
Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj* -x-kubernetes-actiondeletecollection -jL -x-kubernetes-group-version-kind)'group: "" -kind: LimitRange -version: v1 -J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ñ+ -R/apis/rbac.authorization.k8s.io/v1beta1/namespaces/{namespace}/rolebindings/{name}š+® -rbacAuthorization_v1beta1read the specified RoleBinding*1readRbacAuthorizationV1beta1NamespacedRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*J_ -D -200= -; -OK5 -3 -1#/definitions/io.k8s.api.rbac.v1beta1.RoleBinding - -401 - - UnauthorizedRhttpsji -x-kubernetes-group-version-kindFDgroup: rbac.authorization.k8s.io -kind: RoleBinding -version: v1beta1 -j -x-kubernetes-actionget -ƒ -rbacAuthorization_v1beta1!replace the specified RoleBinding*4replaceRbacAuthorizationV1beta1NamespacedRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BG -E -Cbodybody *3 -1#/definitions/io.k8s.api.rbac.v1beta1.RoleBindingBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." 
fieldManager2string Jª -D -200= -; -OK5 -3 -1#/definitions/io.k8s.api.rbac.v1beta1.RoleBinding -I -201B -@ -Created5 -3 -1#/definitions/io.k8s.api.rbac.v1beta1.RoleBinding - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionput -ji -x-kubernetes-group-version-kindFDgroup: rbac.authorization.k8s.io -kind: RoleBinding -version: v1beta1 -*à -rbacAuthorization_v1beta1delete a RoleBinding*3deleteRbacAuthorizationV1beta1NamespacedRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status -R -202K -I -Accepted= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj -x-kubernetes-action delete -ji -x-kubernetes-group-version-kindFDkind: RoleBinding -version: v1beta1 -group: rbac.authorization.k8s.io -B­ -rbacAuthorization_v1beta1*partially update the specified RoleBinding*2patchRbacAuthorizationV1beta1NamespacedRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. 
/apis/storage.k8s.io/v1beta1/csidrivers  (group storage.k8s.io, kind CSIDriver, version v1beta1)
  GET    listStorageV1beta1CSIDriver - list or watch objects of kind CSIDriver; query allowWatchBookmarks, continue, fieldSelector, labelSelector, limit, resourceVersion, resourceVersionMatch, timeoutSeconds, watch; 200 CSIDriverList, 401
  POST   createStorageV1beta1CSIDriver - create a CSIDriver; body CSIDriver; query dryRun, fieldManager; 200/201/202 CSIDriver, 401
  DELETE deleteStorageV1beta1CollectionCSIDriver - delete collection of CSIDriver; body DeleteOptions; query continue, dryRun, fieldSelector, gracePeriodSeconds, labelSelector, limit, orphanDependents, propagationPolicy, resourceVersion, resourceVersionMatch, timeoutSeconds; 200 Status, 401
  query pretty

/apis/networking.k8s.io/v1/watch/ingressclasses  (group networking.k8s.io, kind IngressClass, version v1)
  GET watchNetworkingV1IngressClassList - watch individual changes to a list of IngressClass; deprecated: use the 'watch' parameter with a list operation instead; query allowWatchBookmarks, continue, fieldSelector, labelSelector, limit, pretty, resourceVersion, resourceVersionMatch, timeoutSeconds, watch; 200 WatchEvent, 401
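The limit/continue description above is the chunked-list protocol. A sketch of walking the CSIDriver collection in pages with client-go; the page size and the pre-built clientset are assumptions for illustration:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listCSIDriversPaged pages through storage.k8s.io/v1beta1 CSIDrivers 100 at a
// time, following the continue token until the server reports no more results.
func listCSIDriversPaged(ctx context.Context, cs kubernetes.Interface) error {
	opts := metav1.ListOptions{Limit: 100}
	for {
		page, err := cs.StorageV1beta1().CSIDrivers().List(ctx, opts)
		if err != nil {
			// A 410 ResourceExpired here means the continue token lapsed;
			// per the description above, restart the list without it.
			return err
		}
		for i := range page.Items {
			fmt.Println(page.Items[i].Name)
		}
		if page.Continue == "" {
			return nil // empty continue token: the collection is exhausted
		}
		opts.Continue = page.Continue
	}
}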
/apis/coordination.k8s.io/v1beta1/watch/namespaces/{namespace}/leases/{name}  (group coordination.k8s.io, kind Lease, version v1beta1)
  GET watchCoordinationV1beta1NamespacedLease - watch changes to an object of kind Lease; deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter; query allowWatchBookmarks, continue, fieldSelector, labelSelector, limit, pretty, resourceVersion, resourceVersionMatch, timeoutSeconds, watch; path parameters: name (name of the Lease), namespace; 200 WatchEvent, 401
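The dedicated /watch/ path above is deprecated in favor of the watch parameter on the list endpoint. A client-go sketch of the recommended list-then-watch pattern with allowWatchBookmarks; the namespace and clientset are assumed, and this presumes a client-go release that still ships the coordination/v1beta1 typed client:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// watchLeases lists Leases once and then watches from the returned
// resourceVersion, opting in to server-sent BOOKMARK events.
func watchLeases(ctx context.Context, cs kubernetes.Interface, namespace string) error {
	initial, err := cs.CoordinationV1beta1().Leases(namespace).List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	w, err := cs.CoordinationV1beta1().Leases(namespace).Watch(ctx, metav1.ListOptions{
		ResourceVersion:     initial.ResourceVersion, // resume where the list left off
		AllowWatchBookmarks: true,                    // servers may still ignore this hint
	})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Println(ev.Type) // ADDED, MODIFIED, DELETED or BOOKMARK
	}
	return nil
}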
/apis/autoscaling/v2beta2/namespaces/{namespace}/horizontalpodautoscalers/{name}  (group autoscaling, kind HorizontalPodAutoscaler, version v2beta2)
  GET    readAutoscalingV2beta2NamespacedHorizontalPodAutoscaler - read the specified HorizontalPodAutoscaler; 200 HorizontalPodAutoscaler, 401
  PUT    replaceAutoscalingV2beta2NamespacedHorizontalPodAutoscaler - replace the specified HorizontalPodAutoscaler; body HorizontalPodAutoscaler; query dryRun, fieldManager; 200/201 HorizontalPodAutoscaler, 401
  DELETE deleteAutoscalingV2beta2NamespacedHorizontalPodAutoscaler - delete a HorizontalPodAutoscaler; body DeleteOptions; query dryRun, gracePeriodSeconds, orphanDependents, propagationPolicy; 200/202 Status, 401
  PATCH  patchAutoscalingV2beta2NamespacedHorizontalPodAutoscaler - partially update the specified HorizontalPodAutoscaler; body Patch (json-patch, merge-patch, strategic-merge-patch or apply-patch); query dryRun, fieldManager, force; 200 HorizontalPodAutoscaler, 401
  path parameters: name (name of the HorizontalPodAutoscaler), namespace; query pretty

/apis/networking.k8s.io/v1/watch/namespaces/{namespace}/ingresses  (group networking.k8s.io, kind Ingress, version v1)
  GET watchNetworkingV1NamespacedIngressList - watch individual changes to a list of Ingress; deprecated: use the 'watch' parameter with a list operation instead; query allowWatchBookmarks, continue, fieldSelector, labelSelector, limit, pretty, resourceVersion, resourceVersionMatch, timeoutSeconds, watch; path parameter namespace; 200 WatchEvent, 401
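The HorizontalPodAutoscaler patch operation above accepts application/apply-patch+yaml, for which fieldManager is required and force resolves ownership conflicts. A hedged client-go sketch of a server-side apply; the manifest, object name and manager string are illustrative only:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// applyHPA server-side-applies a minimal autoscaling/v2beta2 manifest. Force
// re-acquires fields currently owned by other field managers, as described above.
func applyHPA(ctx context.Context, cs kubernetes.Interface, namespace string) error {
	manifest := []byte(`{
	  "apiVersion": "autoscaling/v2beta2",
	  "kind": "HorizontalPodAutoscaler",
	  "metadata": {"name": "web"},
	  "spec": {
	    "scaleTargetRef": {"apiVersion": "apps/v1", "kind": "Deployment", "name": "web"},
	    "minReplicas": 2,
	    "maxReplicas": 5
	  }
	}`)
	force := true
	_, err := cs.AutoscalingV2beta2().HorizontalPodAutoscalers(namespace).Patch(
		ctx, "web", types.ApplyPatchType, manifest,
		metav1.PatchOptions{FieldManager: "example-manager", Force: &force})
	return err
}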
/apis/apiextensions.k8s.io/v1beta1/customresourcedefinitions/{name}  (group apiextensions.k8s.io, kind CustomResourceDefinition, version v1beta1)
  GET    readApiextensionsV1beta1CustomResourceDefinition - read the specified CustomResourceDefinition; 200 CustomResourceDefinition, 401
  PUT    replaceApiextensionsV1beta1CustomResourceDefinition - replace the specified CustomResourceDefinition; body CustomResourceDefinition; query dryRun, fieldManager; 200/201 CustomResourceDefinition, 401
  DELETE deleteApiextensionsV1beta1CustomResourceDefinition - delete a CustomResourceDefinition; body DeleteOptions; query dryRun, gracePeriodSeconds, orphanDependents, propagationPolicy; 200/202 Status, 401
  PATCH  patchApiextensionsV1beta1CustomResourceDefinition - partially update the specified CustomResourceDefinition; body Patch (json-patch, merge-patch, strategic-merge-patch or apply-patch); query dryRun, fieldManager, force; 200 CustomResourceDefinition, 401
  path parameter: name (name of the CustomResourceDefinition); query pretty

/api/v1/watch/endpoints  (group "", kind Endpoints, version v1)
  GET watchCoreV1EndpointsListForAllNamespaces - watch individual changes to a list of Endpoints; deprecated: use the 'watch' parameter with a list operation instead; query allowWatchBookmarks, continue, fieldSelector, labelSelector, limit, pretty, resourceVersion, resourceVersionMatch, timeoutSeconds, watch; 200 WatchEvent, 401
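dryRun=All, shown on the CustomResourceDefinition write operations above, runs validation and admission without persisting anything. A sketch against the apiextensions typed client (a separate clientset from the core one, and assuming a generation whose methods take a context); the label mutation is purely illustrative:

package example

import (
	"context"

	apiextclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// dryRunUpdateCRD fetches a CustomResourceDefinition, tweaks a label and submits
// the update with dryRun=All, so the server validates it but stores nothing.
func dryRunUpdateCRD(ctx context.Context, cs apiextclient.Interface, name string) error {
	crd, err := cs.ApiextensionsV1beta1().CustomResourceDefinitions().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	if crd.Labels == nil {
		crd.Labels = map[string]string{}
	}
	crd.Labels["example.com/audited"] = "true" // hypothetical label, for illustration
	_, err = cs.ApiextensionsV1beta1().CustomResourceDefinitions().Update(ctx, crd,
		metav1.UpdateOptions{DryRun: []string{metav1.DryRunAll}})
	return err
}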
/api/v1/watch/namespaces/{namespace}/events/{name}  (group "", kind Event, version v1)
  GET watchCoreV1NamespacedEvent - watch changes to an object of kind Event; deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter; query allowWatchBookmarks, continue, fieldSelector, labelSelector, limit, pretty, resourceVersion, resourceVersionMatch, timeoutSeconds, watch; path parameters: name (name of the Event), namespace; 200 WatchEvent, 401

/api/v1/watch/namespaces/{namespace}/persistentvolumeclaims  (group "", kind PersistentVolumeClaim, version v1)
  GET watchCoreV1NamespacedPersistentVolumeClaimList - watch individual changes to a list of PersistentVolumeClaim; deprecated: use the 'watch' parameter with a list operation instead; query allowWatchBookmarks, continue, fieldSelector, labelSelector, limit, pretty, resourceVersion, resourceVersionMatch, timeoutSeconds, watch; path parameter namespace; 200 WatchEvent, 401
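The per-object Event /watch/ path above is deprecated; as its description says, the replacement is a watch on the collection filtered to one item with a fieldSelector. A client-go sketch, with the clientset and names assumed:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
)

// watchSingleEvent watches exactly one named Event by restricting the watch
// with a metadata.name field selector instead of the deprecated per-object path.
func watchSingleEvent(ctx context.Context, cs kubernetes.Interface, namespace, name string) error {
	w, err := cs.CoreV1().Events(namespace).Watch(ctx, metav1.ListOptions{
		FieldSelector: fields.OneTermEqualSelector("metadata.name", name).String(),
	})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Println(ev.Type)
	}
	return nil
}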
/api/v1/watch/namespaces/{namespace}/replicationcontrollers  (group "", kind ReplicationController, version v1)
  GET watchCoreV1NamespacedReplicationControllerList - watch individual changes to a list of ReplicationController; deprecated: use the 'watch' parameter with a list operation instead; query allowWatchBookmarks, continue, fieldSelector, labelSelector, limit, pretty, resourceVersion, resourceVersionMatch, timeoutSeconds, watch; path parameter namespace; 200 WatchEvent, 401
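resourceVersion and resourceVersionMatch, described on every list operation above, control how stale a served list may be. A sketch that lets the server answer from state no older than a known resourceVersion; the clientset and version string are assumed inputs:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listRCsNotOlderThan lists ReplicationControllers while permitting the API
// server to serve the result from any state at or after the given resourceVersion.
func listRCsNotOlderThan(ctx context.Context, cs kubernetes.Interface, namespace, rv string) error {
	list, err := cs.CoreV1().ReplicationControllers(namespace).List(ctx, metav1.ListOptions{
		ResourceVersion:      rv,
		ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
	})
	if err != nil {
		return err
	}
	fmt.Printf("%d items at resourceVersion %s\n", len(list.Items), list.ResourceVersion)
	return nil
}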
Paths and operations (every operation produces application/json, application/yaml and application/vnd.kubernetes.protobuf, list/watch operations additionally the ;stream=watch variants, and every operation may return 401 Unauthorized):

  /api/v1/watch/namespaces/{namespace}/replicationcontrollers
    GET  watchCoreV1NamespacedReplicationControllerList (core_v1; x-kubernetes-action: watchlist; group "", kind ReplicationController, v1)
         watch individual changes to a list of ReplicationController; deprecated: use the 'watch' parameter with a list operation instead.
         200: meta.v1.WatchEvent

  /apis/rbac.authorization.k8s.io/v1beta1/clusterroles  (group rbac.authorization.k8s.io, kind ClusterRole, v1beta1)
    GET     listRbacAuthorizationV1beta1ClusterRole: list or watch objects of kind ClusterRole; 200: rbac.v1beta1.ClusterRoleList
    POST    createRbacAuthorizationV1beta1ClusterRole: create a ClusterRole; body: ClusterRole; query: dryRun, fieldManager; 200/201/202: ClusterRole
    DELETE  deleteRbacAuthorizationV1beta1CollectionClusterRole: delete collection of ClusterRole; body: meta.v1.DeleteOptions; query: continue, dryRun, fieldSelector, gracePeriodSeconds, labelSelector, limit, orphanDependents, propagationPolicy, resourceVersion, resourceVersionMatch, timeoutSeconds; 200: meta.v1.Status

  /api/v1/events
    GET  listCoreV1EventForAllNamespaces (core_v1; list; group "", kind Event, v1)
         list or watch objects of kind Event; 200: core.v1.EventList
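The create operation above accepts the dryRun and fieldManager query parameters. A hedged sketch of a server-side validated (dry-run) ClusterRole creation via client-go; the rule contents and the "example-operator" manager name are invented for illustration:

// Illustrative only: exercising dryRun and fieldManager on a create call.
package example

import (
	"context"

	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// dryRunCreateClusterRole validates a ClusterRole server-side without
// persisting it (dryRun=All) and records "example-operator" as the field
// manager for the request.
func dryRunCreateClusterRole(ctx context.Context, cs kubernetes.Interface) error {
	role := &rbacv1beta1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "example-reader"},
		Rules: []rbacv1beta1.PolicyRule{{
			APIGroups: []string{""},
			Resources: []string{"pods"},
			Verbs:     []string{"get", "list", "watch"},
		}},
	}
	_, err := cs.RbacV1beta1().ClusterRoles().Create(ctx, role, metav1.CreateOptions{
		DryRun:       []string{metav1.DryRunAll}, // "All": process every stage, persist nothing
		FieldManager: "example-operator",
	})
	return err
}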
  /apis/apps/v1/namespaces/{namespace}/replicasets/{name}/scale  (group autoscaling, kind Scale, v1)
    GET    readAppsV1NamespacedReplicaSetScale: read scale of the specified ReplicaSet; 200: autoscaling.v1.Scale
    PUT    replaceAppsV1NamespacedReplicaSetScale: replace scale of the specified ReplicaSet; body: Scale; query: dryRun, fieldManager; 200/201: Scale
    PATCH  patchAppsV1NamespacedReplicaSetScale: partially update scale of the specified ReplicaSet; consumes json-patch, merge-patch, strategic-merge-patch and apply-patch; body: meta.v1.Patch; query: dryRun, fieldManager, force; 200: Scale
    path: name (name of the Scale), namespace

  /apis/apps/v1/watch/namespaces/{namespace}/statefulsets/{name}
    GET  watchAppsV1NamespacedStatefulSet (apps_v1; watch; group apps, kind StatefulSet, v1)
         watch changes to an object of kind StatefulSet; deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.
         200: meta.v1.WatchEvent
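The three verbs on the scale subresource above boil down to reading an autoscaling/v1 Scale and writing it back. A small sketch of that read-modify-write, again assuming client-go and an invented function name:

// Illustrative only: GET then PUT of the .../replicasets/{name}/scale subresource.
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// resizeReplicaSet reads the autoscaling/v1 Scale for a ReplicaSet and
// writes back a new replica count.
func resizeReplicaSet(ctx context.Context, cs kubernetes.Interface, namespace, name string, replicas int32) error {
	scale, err := cs.AppsV1().ReplicaSets(namespace).GetScale(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	scale.Spec.Replicas = replicas
	_, err = cs.AppsV1().ReplicaSets(namespace).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
	return err
}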
  /apis/apps/v1/deployments
    GET  listAppsV1DeploymentForAllNamespaces (apps_v1; list; group apps, kind Deployment, v1)
         list or watch objects of kind Deployment; 200: apps.v1.DeploymentList

  /api/v1/watch/services
    GET  watchCoreV1ServiceListForAllNamespaces (core_v1; watchlist; group "", kind Service, v1)
         watch individual changes to a list of Service; deprecated: use the 'watch' parameter with a list operation instead.
         200: meta.v1.WatchEvent
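The deprecation notes above all point at the same replacement: list once, then watch from the returned resourceVersion so no modification is missed. A sketch of that pattern for Services, assuming client-go; bookmark handling follows the allowWatchBookmarks description, and servers may still ignore the flag:

// Illustrative only: list-then-watch, resuming from the list's resourceVersion.
package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
)

// watchServices lists Services once and then streams add/update/delete
// events starting from the list's resourceVersion, opting in to BOOKMARK
// events so the client can keep its resourceVersion fresh.
func watchServices(ctx context.Context, cs kubernetes.Interface, namespace string) error {
	list, err := cs.CoreV1().Services(namespace).List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	w, err := cs.CoreV1().Services(namespace).Watch(ctx, metav1.ListOptions{
		ResourceVersion:     list.ResourceVersion, // resume exactly after the list
		AllowWatchBookmarks: true,                 // servers may still ignore this
	})
	if err != nil {
		return err
	}
	defer w.Stop()
	for event := range w.ResultChan() {
		if event.Type == watch.Bookmark {
			continue // bookmarks only carry an updated resourceVersion
		}
		fmt.Println(event.Type)
	}
	return nil
}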
  /apis/apps/v1/namespaces/{namespace}/controllerrevisions  (group apps, kind ControllerRevision, v1)
    GET     listAppsV1NamespacedControllerRevision: list or watch objects of kind ControllerRevision; 200: apps.v1.ControllerRevisionList
    POST    createAppsV1NamespacedControllerRevision: create a ControllerRevision; body: ControllerRevision; query: dryRun, fieldManager; 200/201/202: ControllerRevision
    DELETE  deleteAppsV1CollectionNamespacedControllerRevision: delete collection of ControllerRevision; body: meta.v1.DeleteOptions; query: continue, dryRun, fieldSelector, gracePeriodSeconds, labelSelector, limit, orphanDependents, propagationPolicy, resourceVersion, resourceVersionMatch, timeoutSeconds; 200: meta.v1.Status
    path: namespace

  /apis/batch/v1/watch/cronjobs
    GET  watchBatchV1CronJobListForAllNamespaces (batch_v1; watchlist; group batch, kind CronJob, v1)
         watch individual changes to a list of CronJob; deprecated: use the 'watch' parameter with a list operation instead.
         200: meta.v1.WatchEvent
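The deletecollection operation above combines DeleteOptions with the usual list selectors. A sketch, assuming client-go, that maps gracePeriodSeconds, propagationPolicy and labelSelector onto a single call; the selector value is illustrative:

// Illustrative only: deletecollection with DeleteOptions plus a labelSelector.
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// pruneControllerRevisions deletes every ControllerRevision in the namespace
// matching the given label, immediately and with foreground cascading; this
// maps to ?gracePeriodSeconds=0&propagationPolicy=Foreground&labelSelector=...
func pruneControllerRevisions(ctx context.Context, cs kubernetes.Interface, namespace, selector string) error {
	var grace int64 // zero: delete immediately
	policy := metav1.DeletePropagationForeground
	return cs.AppsV1().ControllerRevisions(namespace).DeleteCollection(ctx,
		metav1.DeleteOptions{
			GracePeriodSeconds: &grace,
			PropagationPolicy:  &policy,
		},
		metav1.ListOptions{LabelSelector: selector},
	)
}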
  /apis/flowcontrol.apiserver.k8s.io/v1beta1/flowschemas/{name}/status  (group flowcontrol.apiserver.k8s.io, kind FlowSchema, v1beta1)
    GET    readFlowcontrolApiserverV1beta1FlowSchemaStatus: read status of the specified FlowSchema; 200: flowcontrol.v1beta1.FlowSchema
    PUT    replaceFlowcontrolApiserverV1beta1FlowSchemaStatus: replace status of the specified FlowSchema; body: FlowSchema; query: dryRun, fieldManager; 200/201: FlowSchema
    PATCH  patchFlowcontrolApiserverV1beta1FlowSchemaStatus: partially update status of the specified FlowSchema; consumes json-patch, merge-patch, strategic-merge-patch and apply-patch; body: meta.v1.Patch; query: dryRun, fieldManager, force; 200: FlowSchema
    path: name (name of the FlowSchema)
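The fieldManager and force parameters above are only mandatory, and force only meaningful, for apply patches. A hedged sketch of a server-side apply against the status subresource, assuming the vendored client-go exposes the flowcontrol v1beta1 client; the manager name and patch body are invented:

// Illustrative only: application/apply-patch+yaml against a status subresource.
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// applyFlowSchemaStatus sends a server-side apply patch. fieldManager is
// required for apply; force=true re-acquires fields currently owned by other
// managers instead of failing on conflict.
func applyFlowSchemaStatus(ctx context.Context, cs kubernetes.Interface, name string) error {
	patch := []byte(`{"apiVersion":"flowcontrol.apiserver.k8s.io/v1beta1","kind":"FlowSchema","metadata":{"name":"` + name + `"},"status":{"conditions":[]}}`)
	force := true
	_, err := cs.FlowcontrolV1beta1().FlowSchemas().Patch(ctx, name,
		types.ApplyPatchType, patch,
		metav1.PatchOptions{FieldManager: "example-controller", Force: &force},
		"status")
	return err
}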
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J9 -75"3pathname of the StorageClass"name*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean À' -/api/v1/watch/pods©' -core_v1owatch individual changes to a list of Pod. deprecated: use the 'watch' parameter with a list operation instead.*"watchCoreV1PodListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsjE -x-kubernetes-group-version-kind" group: "" -kind: Pod -version: v1 -j# -x-kubernetes-action  -watchlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ­ -1/api/v1/namespaces/{namespace}/pods/{name}/attach÷ – -core_v1%connect GET requests to attach of Pod*#connectCoreV1GetNamespacedPodAttach2*/*:*/*J7 - -200 - -OK - ² -string - -401 - - UnauthorizedRhttpsj! -x-kubernetes-action -connect -jR -x-kubernetes-group-version-kind/-group: "" -kind: PodAttachOptions -version: v1 -"˜ -core_v1&connect POST requests to attach of Pod*$connectCoreV1PostNamespacedPodAttach2*/*:*/*J7 - -401 - - Unauthorized - -200 - -OK - ² -stringRhttpsj! -x-kubernetes-action -connect -jR -x-kubernetes-group-version-kind/-group: "" -kind: PodAttachOptions -version: v1 -Jœ -™–“querytThe container in which to execute the command. Defaults to only container if there is only one container in the pod." container2string J= -;9"7pathname of the PodAttachOptions"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜Jƒ -€~|query_Stderr if true indicates that stderr is to be redirected for the attach call. Defaults to true."stderr2boolean J -}{query_Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false."stdin2boolean Jƒ -€~|query_Stdout if true indicates that stdout is to be redirected for the attach call. Defaults to true."stdout2boolean Jð -íêçqueryÌTTY if true indicates that a tty will be allocated for the attach call. This is passed through the container runtime so the tty is allocated on the worker node by the container runtime. Defaults to false."tty2boolean „ -#/apis/admissionregistration.k8s.io/ÜÙ -admissionregistrationget information of a group* getAdmissionregistrationAPIGroup2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJi -N -200G -E -OK? 
-= -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup - -401 - - UnauthorizedRhttps³) -7/apis/apps/v1/namespaces/{namespace}/deployments/{name}÷(ç -apps_v1read the specified Deployment*readAppsV1NamespacedDeployment2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JY - -401 - - Unauthorized -> -2007 -5 -OK/ -- -+#/definitions/io.k8s.api.apps.v1.DeploymentRhttpsj -x-kubernetes-actionget -jN -x-kubernetes-group-version-kind+)group: apps -kind: Deployment -version: v1 -° -apps_v1 replace the specified Deployment*!replaceAppsV1NamespacedDeployment2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BA -? -=bodybody *- -+#/definitions/io.k8s.api.apps.v1.DeploymentBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jž -> -2007 -5 -OK/ -- -+#/definitions/io.k8s.api.apps.v1.Deployment -C -201< -: -Created/ -- -+#/definitions/io.k8s.api.apps.v1.Deployment - -401 - - UnauthorizedRhttpsjN -x-kubernetes-group-version-kind+)group: apps -kind: Deployment -version: v1 -j -x-kubernetes-actionput -*‚ -apps_v1delete a Deployment* deleteAppsV1NamespacedDeployment2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status -R -202K -I -Accepted= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj -x-kubernetes-action delete -jN -x-kubernetes-group-version-kind+)version: v1 -group: apps -kind: Deployment -Bæ -apps_v1)partially update the specified Deployment*patchAppsV1NamespacedDeployment2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean JY -> -2007 -5 -OK/ -- -+#/definitions/io.k8s.api.apps.v1.Deployment - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpatch -jN -x-kubernetes-group-version-kind+)group: apps -kind: Deployment -version: v1 -J7 -53"1pathname of the Deployment"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ÿ -/apis/authentication.k8s.io/v1/ÛØ -authentication_v1get available resources*getAuthenticationV1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp -U -200N -L -OKF -D -B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList - -401 - - UnauthorizedRhttps¶) -//apis/node.k8s.io/v1beta1/runtimeclasses/{name}‚)€ - node_v1beta1read the specified RuntimeClass*readNodeV1beta1RuntimeClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*J` -E -200> -< -OK6 -4 -2#/definitions/io.k8s.api.node.v1beta1.RuntimeClass - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionget -j\ -x-kubernetes-group-version-kind97group: node.k8s.io -kind: RuntimeClass -version: v1beta1 -× - node_v1beta1"replace the specified RuntimeClass*replaceNodeV1beta1RuntimeClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BH -F -Dbodybody *4 -2#/definitions/io.k8s.api.node.v1beta1.RuntimeClassBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¬ -E -200> -< -OK6 -4 -2#/definitions/io.k8s.api.node.v1beta1.RuntimeClass -J -201C -A -Created6 -4 -2#/definitions/io.k8s.api.node.v1beta1.RuntimeClass - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionput -j\ -x-kubernetes-group-version-kind97version: v1beta1 -group: node.k8s.io -kind: RuntimeClass -*” - node_v1beta1delete a RuntimeClass*deleteNodeV1beta1RuntimeClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status -R -202K -I -Accepted= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj -x-kubernetes-action delete -j\ -x-kubernetes-group-version-kind97kind: RuntimeClass -version: v1beta1 -group: node.k8s.io -Bÿ - node_v1beta1+partially update the specified RuntimeClass*patchNodeV1beta1RuntimeClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean J` -E -200> -< -OK6 -4 -2#/definitions/io.k8s.api.node.v1beta1.RuntimeClass - -401 - - UnauthorizedRhttpsj\ -x-kubernetes-group-version-kind97version: v1beta1 -group: node.k8s.io -kind: RuntimeClass -j -x-kubernetes-actionpatch -J9 -75"3pathname of the RuntimeClass"name*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ( -./apis/policy/v1beta1/watch/podsecuritypoliciesÚ'Î -policy_v1beta1}watch individual changes to a list of PodSecurityPolicy. deprecated: use the 'watch' parameter with a list operation instead.*'watchPolicyV1beta1PodSecurityPolicyList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk - -401 - - Unauthorized -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEventRhttpsj# -x-kubernetes-action  -watchlist -j\ -x-kubernetes-group-version-kind97version: v1beta1 -group: policy -kind: PodSecurityPolicy -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ‚ -/logs/{logpath}o8 -logs*logFileHandlerJ - -401 - - UnauthorizedRhttpsJ3 -1/"-pathpath to the log"logpath*string˜ð& -/api/v1/limitrangesØ&Ì -core_v1(list or watch objects of kind LimitRange*$listCoreV1LimitRangeForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*J] -B -200; -9 -OK3 -1 -/#/definitions/io.k8s.api.core.v1.LimitRangeList - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionlist -jL -x-kubernetes-group-version-kind)'group: "" -kind: LimitRange -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean é -W/apis/autoscaling/v2beta2/namespaces/{namespace}/horizontalpodautoscalers/{name}/statusÛ -autoscaling_v2beta24read status of the specified HorizontalPodAutoscaler*=readAutoscalingV2beta2NamespacedHorizontalPodAutoscalerStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Jr - -401 - - Unauthorized -W -200P -N -OKH -F -D#/definitions/io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerRhttpsj -x-kubernetes-actionget -jg -x-kubernetes-group-version-kindDBgroup: autoscaling -kind: HorizontalPodAutoscaler -version: v2beta2 -Ö -autoscaling_v2beta27replace status of the specified HorizontalPodAutoscaler*@replaceAutoscalingV2beta2NamespacedHorizontalPodAutoscalerStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BZ -X -Vbodybody *F -D#/definitions/io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string JÐ -W -200P -N -OKH -F -D#/definitions/io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscaler -\ -201U -S -CreatedH -F -D#/definitions/io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscaler - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionput -jg -x-kubernetes-group-version-kindDBkind: HorizontalPodAutoscaler -version: v2beta2 -group: autoscaling -BÚ -autoscaling_v2beta2@partially update status of the specified HorizontalPodAutoscaler*>patchAutoscalingV2beta2NamespacedHorizontalPodAutoscalerStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests."force2boolean Jr -W -200P -N -OKH -F -D#/definitions/io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscaler - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpatch -jg -x-kubernetes-group-version-kindDBgroup: autoscaling -kind: HorizontalPodAutoscaler -version: v2beta2 -JD -B@">path#name of the HorizontalPodAutoscaler"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string —( -0/apis/networking.k8s.io/v1/watch/networkpoliciesâ'Ö - networking_v1ywatch individual changes to a list of NetworkPolicy. deprecated: use the 'watch' parameter with a list operation instead.*2watchNetworkingV1NetworkPolicyListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -j^ -x-kubernetes-group-version-kind;9group: networking.k8s.io -kind: NetworkPolicy -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ¯' -$/apis/policy/v1/poddisruptionbudgets†'ú - policy_v11list or watch objects of kind PodDisruptionBudget*/listPolicyV1PodDisruptionBudgetForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jh -M -200F -D -OK> -< -:#/definitions/io.k8s.api.policy.v1.PodDisruptionBudgetList - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionlist -jY -x-kubernetes-group-version-kind64version: v1 -group: policy -kind: PodDisruptionBudget -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean Ú' -4/apis/rbac.authorization.k8s.io/v1beta1/rolebindings¡'• -rbacAuthorization_v1beta1)list or watch objects of kind RoleBinding*7listRbacAuthorizationV1beta1RoleBindingForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jc -H -200A -? -OK9 -7 -5#/definitions/io.k8s.api.rbac.v1beta1.RoleBindingList - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionlist -ji -x-kubernetes-group-version-kindFDgroup: rbac.authorization.k8s.io -kind: RoleBinding -version: v1beta1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
- -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ¢* -U/apis/storage.k8s.io/v1beta1/watch/namespaces/{namespace}/csistoragecapacities/{name}È)™ -storage_v1beta1¹watch changes to an object of kind CSIStorageCapacity. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*/watchStorageV1beta1NamespacedCSIStorageCapacity2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionwatch -je -x-kubernetes-group-version-kindB@version: v1beta1 -group: storage.k8s.io -kind: CSIStorageCapacity -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
[Binary content from a vendored dependency: a protobuf-serialized Kubernetes OpenAPI v2 (swagger) document. The recoverable text is the standard generated path and parameter documentation for Kubernetes API endpoints, including /apis/authorization.k8s.io/v1beta1/subjectaccessreviews, /apis/authorization.k8s.io/v1/selfsubjectaccessreviews, /apis/autoscaling/, /apis/autoscaling/v2beta1/, /apis/batch/v1/namespaces/{namespace}/cronjobs, /apis/discovery.k8s.io/v1/, /apis/rbac.authorization.k8s.io/v1/clusterrolebindings, /apis/storage.k8s.io/v1beta1/csistoragecapacities, /api/v1/componentstatuses/{name}, /apis/flowcontrol.apiserver.k8s.io/v1beta1/prioritylevelconfigurations/{name}/status, /apis/networking.k8s.io/v1beta1/watch/namespaces/{namespace}/ingresses, /apis/autoscaling/v1/watch/horizontalpodautoscalers, /api/v1/watch/namespaces/{name}, /apis/networking.k8s.io/v1/, /apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/roles, and /apis/rbac.authorization.k8s.io/v1/watch/namespaces/{namespace}/rolebindings/{name}. Each path carries the usual list/watch/create/delete-collection operations with their shared query parameters (allowWatchBookmarks, continue, fieldSelector, labelSelector, limit, pretty, resourceVersion, resourceVersionMatch, timeoutSeconds, watch, dryRun, fieldManager, force, gracePeriodSeconds, orphanDependents, propagationPolicy) and their standard response definitions (200/201/202/401). The raw protobuf bytes are not reproduced here.]
See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ý\ -/api/v1/persistentvolumes¿\Œ& -core_v1.list or watch objects of kind PersistentVolume*listCoreV1PersistentVolume2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Jc -H -200A -? -OK9 -7 -5#/definitions/io.k8s.api.core.v1.PersistentVolumeList - -401 - - UnauthorizedRhttpsjR -x-kubernetes-group-version-kind/-group: "" -kind: PersistentVolume -version: v1 -j -x-kubernetes-actionlist -"‡ -core_v1create a PersistentVolume*createCoreV1PersistentVolume2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BG -E -Cbodybody *3 -1#/definitions/io.k8s.api.core.v1.PersistentVolumeBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." 
fieldManager2string Jö -D -200= -; -OK5 -3 -1#/definitions/io.k8s.api.core.v1.PersistentVolume -I -201B -@ -Created5 -3 -1#/definitions/io.k8s.api.core.v1.PersistentVolume -J -202C -A -Accepted5 -3 -1#/definitions/io.k8s.api.core.v1.PersistentVolume - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpost -jR -x-kubernetes-group-version-kind/-group: "" -kind: PersistentVolume -version: v1 -*Ò, -core_v1%delete collection of PersistentVolume*&deleteCoreV1CollectionPersistentVolume2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
- -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg - -401 - - Unauthorized -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusRhttpsj* -x-kubernetes-actiondeletecollection -jR -x-kubernetes-group-version-kind/-group: "" -kind: PersistentVolume -version: v1 -JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ·* -5/apis/storage.k8s.io/v1beta1/volumeattachments/{name}ý)œ -storage_v1beta1#read the specified VolumeAttachment*"readStorageV1beta1VolumeAttachment2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.api.storage.v1beta1.VolumeAttachment - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionget -jc -x-kubernetes-group-version-kind@>group: storage.k8s.io -kind: VolumeAttachment -version: v1beta1 - -storage_v1beta1&replace the specified VolumeAttachment*%replaceStorageV1beta1VolumeAttachment2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BO -M -Kbodybody *; -9#/definitions/io.k8s.api.storage.v1beta1.VolumeAttachmentBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jº -L -200E -C -OK= -; -9#/definitions/io.k8s.api.storage.v1beta1.VolumeAttachment -Q -201J -H -Created= -; -9#/definitions/io.k8s.api.storage.v1beta1.VolumeAttachment - -401 - - UnauthorizedRhttpsjc -x-kubernetes-group-version-kind@>kind: VolumeAttachment -version: v1beta1 -group: storage.k8s.io -j -x-kubernetes-actionput -*© -storage_v1beta1delete a VolumeAttachment*$deleteStorageV1beta1VolumeAttachment2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» -L -200E -C -OK= -; -9#/definitions/io.k8s.api.storage.v1beta1.VolumeAttachment -R -202K -I -Accepted= -; -9#/definitions/io.k8s.api.storage.v1beta1.VolumeAttachment - -401 - - UnauthorizedRhttpsj -x-kubernetes-action delete -jc -x-kubernetes-group-version-kind@>group: storage.k8s.io -kind: VolumeAttachment -version: v1beta1 -B› -storage_v1beta1/partially update the specified VolumeAttachment*#patchStorageV1beta1VolumeAttachment2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.api.storage.v1beta1.VolumeAttachment - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpatch -jc -x-kubernetes-group-version-kind@>group: storage.k8s.io -kind: VolumeAttachment -version: v1beta1 -J= -;9"7pathname of the VolumeAttachment"name*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ì -/apis/apiextensions.k8s.io/ÌÉ - apiextensionsget information of a group*getApiextensionsAPIGroup2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJi -N -200G -E -OK? -= -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup - -401 - - UnauthorizedRhttpsÇ] --/api/v1/namespaces/{namespace}/resourcequotas•]Š& -core_v1+list or watch objects of kind ResourceQuota*!listCoreV1NamespacedResourceQuota2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." 
labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean J` -E -200> -< -OK6 -4 -2#/definitions/io.k8s.api.core.v1.ResourceQuotaList - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionlist -jO -x-kubernetes-group-version-kind,*version: v1 -group: "" -kind: ResourceQuota -"ü -core_v1create a ResourceQuota*#createCoreV1NamespacedResourceQuota2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BD -B -@bodybody *0 -.#/definitions/io.k8s.api.core.v1.ResourceQuotaBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jí -A -200: -8 -OK2 -0 -.#/definitions/io.k8s.api.core.v1.ResourceQuota -F -201? 
-= -Created2 -0 -.#/definitions/io.k8s.api.core.v1.ResourceQuota -G -202@ -> -Accepted2 -0 -.#/definitions/io.k8s.api.core.v1.ResourceQuota - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpost -jO -x-kubernetes-group-version-kind,*version: v1 -group: "" -kind: ResourceQuota -*Ó, -core_v1"delete collection of ResourceQuota*-deleteCoreV1CollectionNamespacedResourceQuota2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
- -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg - -401 - - Unauthorized -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusRhttpsj* -x-kubernetes-actiondeletecollection -jO -x-kubernetes-group-version-kind,*group: "" -kind: ResourceQuota -version: v1 -J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string À) -D/apis/apiextensions.k8s.io/v1/watch/customresourcedefinitions/{name}÷(¤ -apiextensions_v1¿watch changes to an object of kind CustomResourceDefinition. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*,watchApiextensionsV1CustomResourceDefinition2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionwatch -jl -x-kubernetes-group-version-kindIGkind: CustomResourceDefinition -version: v1 -group: apiextensions.k8s.io -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. 
Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JE -CA"?path$name of the CustomResourceDefinition"name*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. 
See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ç( -,/api/v1/namespaces/{namespace}/events/{name}–(Ñ -core_v1read the specified Event*readCoreV1NamespacedEvent2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JT -9 -2002 -0 -OK* -( -&#/definitions/io.k8s.api.core.v1.Event - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionget -jG -x-kubernetes-group-version-kind$"group: "" -kind: Event -version: v1 - -core_v1replace the specified Event*replaceCoreV1NamespacedEvent2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B< -: -8bodybody *( -&#/definitions/io.k8s.api.core.v1.EventBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J” - -401 - - Unauthorized -9 -2002 -0 -OK* -( -&#/definitions/io.k8s.api.core.v1.Event -> -2017 -5 -Created* -( -&#/definitions/io.k8s.api.core.v1.EventRhttpsj -x-kubernetes-actionput -jG -x-kubernetes-group-version-kind$"group: "" -kind: Event -version: v1 -*ò -core_v1delete an Event*deleteCoreV1NamespacedEvent2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. 
The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status -R -202K -I -Accepted= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj -x-kubernetes-action delete -jG -x-kubernetes-group-version-kind$"group: "" -kind: Event -version: v1 -BÐ -core_v1$partially update the specified Event*patchCoreV1NamespacedEvent2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean JT -9 -2002 -0 -OK* -( -&#/definitions/io.k8s.api.core.v1.Event - -401 - - UnauthorizedRhttpsjG -x-kubernetes-group-version-kind$"kind: Event -version: v1 -group: "" -j -x-kubernetes-actionpatch -J2 -0.",pathname of the Event"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ’) -0/api/v1/namespaces/{namespace}/configmaps/{name}Ý(á -core_v1read the specified ConfigMap*readCoreV1NamespacedConfigMap2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JX -= -2006 -4 -OK. -, -*#/definitions/io.k8s.api.core.v1.ConfigMap - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionget -jK -x-kubernetes-group-version-kind(&group: "" -kind: ConfigMap -version: v1 -¨ -core_v1replace the specified ConfigMap* replaceCoreV1NamespacedConfigMap2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B@ -> -<bodybody *, -*#/definitions/io.k8s.api.core.v1.ConfigMapBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jœ -= -2006 -4 -OK. 
-, -*#/definitions/io.k8s.api.core.v1.ConfigMap -B -201; -9 -Created. -, -*#/definitions/io.k8s.api.core.v1.ConfigMap - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionput -jK -x-kubernetes-group-version-kind(&group: "" -kind: ConfigMap -version: v1 -*ý -core_v1delete a ConfigMap*deleteCoreV1NamespacedConfigMap2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status -R -202K -I -Accepted= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj -x-kubernetes-action delete -jK -x-kubernetes-group-version-kind(&group: "" -kind: ConfigMap -version: v1 -Bà -core_v1(partially update the specified ConfigMap*patchCoreV1NamespacedConfigMap2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean JX -= -2006 -4 -OK. 
[binary diff content not reproduced: this span of the patch is a protobuf-encoded Kubernetes OpenAPI (swagger v2) document carried in a vendored dependency, shown here as raw bytes. The recoverable fragments are the standard generated descriptions of list/watch/create/delete/patch operations and their query parameters (allowWatchBookmarks, continue, fieldSelector, labelSelector, limit, pretty, resourceVersion, resourceVersionMatch, timeoutSeconds, watch, dryRun, fieldManager, gracePeriodSeconds, orphanDependents, propagationPolicy) for paths under the core API and named groups such as certificates.k8s.io, rbac.authorization.k8s.io, apps, discovery.k8s.io, autoscaling, node.k8s.io, and apiextensions.k8s.io.]
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean Jq -V -200O -M -OKG -E -C#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerList - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionlist -jb -x-kubernetes-group-version-kind?=group: autoscaling -kind: HorizontalPodAutoscaler -version: v1 -"õ -autoscaling_v1 create a HorizontalPodAutoscaler*4createAutoscalingV1NamespacedHorizontalPodAutoscaler2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BU -S -Qbodybody *A -?#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J  -R -200K -I -OKC -A -?#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler -W -201P -N -CreatedC -A -?#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler -X -202Q -O -AcceptedC -A -?#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler - -401 - - UnauthorizedRhttpsjb -x-kubernetes-group-version-kind?=version: v1 -group: autoscaling -kind: HorizontalPodAutoscaler -j -x-kubernetes-actionpost -*ˆ- -autoscaling_v1,delete collection of HorizontalPodAutoscaler*>deleteAutoscalingV1CollectionNamespacedHorizontalPodAutoscaler2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. 
Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsjb -x-kubernetes-group-version-kind?=kind: HorizontalPodAutoscaler -version: v1 -group: autoscaling -j* -x-kubernetes-actiondeletecollection -J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ï -/apis/policy/v1beta1/ÕÒ -policy_v1beta1get available resources*getPolicyV1beta1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp - -401 - - Unauthorized -U -200N -L -OKF -D -B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceListRhttps¥) -1/api/v1/namespaces/{namespace}/limitranges/{name}ï(å -core_v1read the specified LimitRange*readCoreV1NamespacedLimitRange2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JY -> -2007 -5 -OK/ -- -+#/definitions/io.k8s.api.core.v1.LimitRange - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionget -jL -x-kubernetes-group-version-kind)'group: "" -kind: LimitRange -version: v1 -® -core_v1 replace the specified LimitRange*!replaceCoreV1NamespacedLimitRange2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BA -? -=bodybody *- -+#/definitions/io.k8s.api.core.v1.LimitRangeBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jž -> -2007 -5 -OK/ -- -+#/definitions/io.k8s.api.core.v1.LimitRange -C -201< -: -Created/ -- -+#/definitions/io.k8s.api.core.v1.LimitRange - -401 - - UnauthorizedRhttpsjL -x-kubernetes-group-version-kind)'group: "" -kind: LimitRange -version: v1 -j -x-kubernetes-actionput -*€ -core_v1delete a LimitRange* deleteCoreV1NamespacedLimitRange2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. 
Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status -R -202K -I -Accepted= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj -x-kubernetes-action delete -jL -x-kubernetes-group-version-kind)'kind: LimitRange -version: v1 -group: "" -Bä -core_v1)partially update the specified LimitRange*patchCoreV1NamespacedLimitRange2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean JY -> -2007 -5 -OK/ -- -+#/definitions/io.k8s.api.core.v1.LimitRange - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpatch -jL -x-kubernetes-group-version-kind)'group: "" -kind: LimitRange -version: v1 -J7 -53"1pathname of the LimitRange"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ` -I/apis/autoscaling/v2beta2/namespaces/{namespace}/horizontalpodautoscalersÏ_ä& -autoscaling_v2beta25list or watch objects of kind HorizontalPodAutoscaler*7listAutoscalingV2beta2NamespacedHorizontalPodAutoscaler2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Jv -[ -200T -R -OKL -J -H#/definitions/io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerList - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionlist -jg -x-kubernetes-group-version-kindDBgroup: autoscaling -kind: HorizontalPodAutoscaler -version: v2beta2 -"˜ - -autoscaling_v2beta2 create a HorizontalPodAutoscaler*9createAutoscalingV2beta2NamespacedHorizontalPodAutoscaler2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BZ -X -Vbodybody *F -D#/definitions/io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¯ -W -200P -N -OKH -F -D#/definitions/io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscaler -\ -201U -S -CreatedH -F -D#/definitions/io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscaler -] -202V -T -AcceptedH -F -D#/definitions/io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscaler - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpost -jg -x-kubernetes-group-version-kindDBgroup: autoscaling -kind: HorizontalPodAutoscaler -version: v2beta2 -*—- -autoscaling_v2beta2,delete collection of HorizontalPodAutoscaler*CdeleteAutoscalingV2beta2CollectionNamespacedHorizontalPodAutoscaler2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž -›˜•queryøWhen present, indicates that modifications should not be persisted. 
An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj* -x-kubernetes-actiondeletecollection -jg -x-kubernetes-group-version-kindDBgroup: autoscaling -kind: HorizontalPodAutoscaler -version: v2beta2 -J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ¸( -<":pathname of the PodDisruptionBudget"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string –( -'/apis/storage.k8s.io/v1/csinodes/{name}ê'ä - -storage_v1read the specified CSINode*readStorageV1CSINode2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JY -> -2007 -5 -OK/ -- -+#/definitions/io.k8s.api.storage.v1.CSINode - -401 - - UnauthorizedRhttpsjU -x-kubernetes-group-version-kind20group: storage.k8s.io -kind: CSINode -version: v1 -j -x-kubernetes-actionget -­ - -storage_v1replace the specified CSINode*replaceStorageV1CSINode2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BA -? -=bodybody *- -+#/definitions/io.k8s.api.storage.v1.CSINodeBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jž -> -2007 -5 -OK/ -- -+#/definitions/io.k8s.api.storage.v1.CSINode -C -201< -: -Created/ -- -+#/definitions/io.k8s.api.storage.v1.CSINode - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionput -jU -x-kubernetes-group-version-kind20group: storage.k8s.io -kind: CSINode -version: v1 -*ã - -storage_v1delete a CSINode*deleteStorageV1CSINode2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. 
Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string JŸ -> -2007 -5 -OK/ -- -+#/definitions/io.k8s.api.storage.v1.CSINode -D -202= -; -Accepted/ -- -+#/definitions/io.k8s.api.storage.v1.CSINode - -401 - - UnauthorizedRhttpsj -x-kubernetes-action delete -jU -x-kubernetes-group-version-kind20group: storage.k8s.io -kind: CSINode -version: v1 -Bã - -storage_v1&partially update the specified CSINode*patchStorageV1CSINode2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean JY -> -2007 -5 -OK/ -- -+#/definitions/io.k8s.api.storage.v1.CSINode - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpatch -jU -x-kubernetes-group-version-kind20group: storage.k8s.io -kind: CSINode -version: v1 -J4 -20".pathname of the CSINode"name*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ç( -2/apis/storage.k8s.io/v1beta1/watch/csinodes/{name}°(î -storage_v1beta1®watch changes to an object of kind CSINode. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*watchStorageV1beta1CSINode2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionwatch -jZ -x-kubernetes-group-version-kind75group: storage.k8s.io -kind: CSINode -version: v1beta1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. 
If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J4 -20".pathname of the CSINode"name*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ™. -O/apis/admissionregistration.k8s.io/v1beta1/mutatingwebhookconfigurations/{name}Å-… -admissionregistration_v1beta1/read the specified MutatingWebhookConfiguration*deleteAdmissionregistrationV1beta1MutatingWebhookConfiguration2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» - -401 - - Unauthorized -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status -R -202K -I -Accepted= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusRhttpsj} -x-kubernetes-group-version-kindZXversion: v1beta1 -group: admissionregistration.k8s.io -kind: MutatingWebhookConfiguration -j -x-kubernetes-action delete -B„ -admissionregistration_v1beta1;partially update the specified MutatingWebhookConfiguration*=patchAdmissionregistrationV1beta1MutatingWebhookConfiguration2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean J -f -200_ -] -OKW -U -S#/definitions/io.k8s.api.admissionregistration.v1beta1.MutatingWebhookConfiguration - -401 - - UnauthorizedRhttpsj} -x-kubernetes-group-version-kindZXversion: v1beta1 -group: admissionregistration.k8s.io -kind: MutatingWebhookConfiguration -j -x-kubernetes-actionpatch -JI -GE"Cpath(name of the MutatingWebhookConfiguration"name*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Ä+ -K/apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices/{name}ô*¢ -discovery_v1beta1 read the specified EndpointSlice*+readDiscoveryV1beta1NamespacedEndpointSlice2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Jf -K -200D -B -OK< -: -8#/definitions/io.k8s.api.discovery.v1beta1.EndpointSlice - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionget -jb -x-kubernetes-group-version-kind?=group: discovery.k8s.io -kind: EndpointSlice -version: v1beta1 -… -discovery_v1beta1#replace the specified EndpointSlice*.replaceDiscoveryV1beta1NamespacedEndpointSlice2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BN -L -Jbodybody *: -8#/definitions/io.k8s.api.discovery.v1beta1.EndpointSliceBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¸ -K -200D -B -OK< -: -8#/definitions/io.k8s.api.discovery.v1beta1.EndpointSlice -P -201I -G -Created< -: -8#/definitions/io.k8s.api.discovery.v1beta1.EndpointSlice - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionput -jb -x-kubernetes-group-version-kind?=group: discovery.k8s.io -kind: EndpointSlice -version: v1beta1 -*± -discovery_v1beta1delete an EndpointSlice*-deleteDiscoveryV1beta1NamespacedEndpointSlice2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. 
[Unreadable binary hunk elided: this portion of the patch carries what appears to be the protobuf-serialized Kubernetes OpenAPI v2 (swagger) document from a vendored dependency. The only recoverable content is the embedded operation and parameter descriptions for the standard read/replace/delete/patch/watch endpoints (query parameters such as dryRun, fieldManager, gracePeriodSeconds, orphanDependents, propagationPolicy, allowWatchBookmarks, continue, fieldSelector, labelSelector, limit, resourceVersion, resourceVersionMatch, timeoutSeconds, watch, and pretty), covering resources including Pod, Node, EndpointSlice, PriorityLevelConfiguration, Deployment scale, Event, PriorityClass, CSIStorageCapacity, ResourceQuota, CertificateSigningRequest, Ingress, Namespace, Job, and CSINode.]
- -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ‘ -%/apis/apiregistration.k8s.io/v1beta1/çä -apiregistration_v1beta1get available resources*%getApiregistrationV1beta1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp - -401 - - Unauthorized -U -200N -L -OKF -D -B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceListRhttpsñ] -;/apis/networking.k8s.io/v1/namespaces/{namespace}/ingresses±]“& - networking_v1%list or watch objects of kind Ingress*!listNetworkingV1NamespacedIngress2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean J` -E -200> -< -OK6 -4 -2#/definitions/io.k8s.api.networking.v1.IngressList - -401 - - UnauthorizedRhttpsjX -x-kubernetes-group-version-kind53version: v1 -group: networking.k8s.io -kind: Ingress -j -x-kubernetes-actionlist -"† - networking_v1create an Ingress*#createNetworkingV1NamespacedIngress2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BD -B -@bodybody *0 -.#/definitions/io.k8s.api.networking.v1.IngressBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jí - -401 - - Unauthorized -A -200: -8 -OK2 -0 -.#/definitions/io.k8s.api.networking.v1.Ingress -F -201? -= -Created2 -0 -.#/definitions/io.k8s.api.networking.v1.Ingress -G -202@ -> -Accepted2 -0 -.#/definitions/io.k8s.api.networking.v1.IngressRhttpsjX -x-kubernetes-group-version-kind53group: networking.k8s.io -kind: Ingress -version: v1 -j -x-kubernetes-actionpost -*Ü, - networking_v1delete collection of Ingress*-deleteNetworkingV1CollectionNamespacedIngress2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. 
Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj* -x-kubernetes-actiondeletecollection -jX -x-kubernetes-group-version-kind53version: v1 -group: networking.k8s.io -kind: Ingress -J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ü* -H/apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies/{name}¯*’ - networking_v1 read the specified NetworkPolicy*'readNetworkingV1NamespacedNetworkPolicy2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Jb -G -200@ -> -OK8 -6 -4#/definitions/io.k8s.api.networking.v1.NetworkPolicy - -401 - - UnauthorizedRhttpsj^ -x-kubernetes-group-version-kind;9group: networking.k8s.io -kind: NetworkPolicy -version: v1 -j -x-kubernetes-actionget -í - networking_v1#replace the specified NetworkPolicy**replaceNetworkingV1NamespacedNetworkPolicy2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BJ -H -Fbodybody *6 -4#/definitions/io.k8s.api.networking.v1.NetworkPolicyBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J° -G -200@ -> -OK8 -6 -4#/definitions/io.k8s.api.networking.v1.NetworkPolicy -L -201E -C -Created8 -6 -4#/definitions/io.k8s.api.networking.v1.NetworkPolicy - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionput -j^ -x-kubernetes-group-version-kind;9group: networking.k8s.io -kind: NetworkPolicy -version: v1 -*¤ - networking_v1delete a NetworkPolicy*)deleteNetworkingV1NamespacedNetworkPolicy2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» - -401 - - Unauthorized -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status -R -202K -I -Accepted= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusRhttpsj -x-kubernetes-action delete -j^ -x-kubernetes-group-version-kind;9group: networking.k8s.io -kind: NetworkPolicy -version: v1 -B‘ - networking_v1,partially update the specified NetworkPolicy*(patchNetworkingV1NamespacedNetworkPolicy2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean Jb -G -200@ -> -OK8 -6 -4#/definitions/io.k8s.api.networking.v1.NetworkPolicy - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpatch -j^ -x-kubernetes-group-version-kind;9group: networking.k8s.io -kind: NetworkPolicy -version: v1 -J: -86"4pathname of the NetworkPolicy"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ‰ -#/apis/rbac.authorization.k8s.io/v1/áÞ -rbacAuthorization_v1get available resources*"getRbacAuthorizationV1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp -U -200N -L -OKF -D -B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList - -401 - - UnauthorizedRhttps¢_ -K/apis/rbac.authorization.k8s.io/v1beta1/namespaces/{namespace}/rolebindingsÒ^Ç& -rbacAuthorization_v1beta1)list or watch objects of kind RoleBinding*1listRbacAuthorizationV1beta1NamespacedRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. 
If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Jc -H -200A -? -OK9 -7 -5#/definitions/io.k8s.api.rbac.v1beta1.RoleBindingList - -401 - - UnauthorizedRhttpsji -x-kubernetes-group-version-kindFDgroup: rbac.authorization.k8s.io -kind: RoleBinding -version: v1beta1 -j -x-kubernetes-actionlist -" -rbacAuthorization_v1beta1create a RoleBinding*3createRbacAuthorizationV1beta1NamespacedRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BG -E -Cbodybody *3 -1#/definitions/io.k8s.api.rbac.v1beta1.RoleBindingBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jö -D -200= -; -OK5 -3 -1#/definitions/io.k8s.api.rbac.v1beta1.RoleBinding -I -201B -@ -Created5 -3 -1#/definitions/io.k8s.api.rbac.v1beta1.RoleBinding -J -202C -A -Accepted5 -3 -1#/definitions/io.k8s.api.rbac.v1beta1.RoleBinding - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpost -ji -x-kubernetes-group-version-kindFDgroup: rbac.authorization.k8s.io -kind: RoleBinding -version: v1beta1 -*- -rbacAuthorization_v1beta1 delete collection of RoleBinding*=deleteRbacAuthorizationV1beta1CollectionNamespacedRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. 
- -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg - -401 - - Unauthorized -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusRhttpsj* -x-kubernetes-actiondeletecollection -ji -x-kubernetes-group-version-kindFDgroup: rbac.authorization.k8s.io -kind: RoleBinding -version: v1beta1 -J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string õ -?/apis/apps/v1/namespaces/{namespace}/statefulsets/{name}/status±û -apps_v1(read status of the specified StatefulSet*%readAppsV1NamespacedStatefulSetStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JZ -? -2008 -6 -OK0 -. -,#/definitions/io.k8s.api.apps.v1.StatefulSet - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionget -jO -x-kubernetes-group-version-kind,*group: apps -kind: StatefulSet -version: v1 -Æ -apps_v1+replace status of the specified StatefulSet*(replaceAppsV1NamespacedStatefulSetStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BB -@ ->bodybody *. -,#/definitions/io.k8s.api.apps.v1.StatefulSetBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J  -? -2008 -6 -OK0 -. -,#/definitions/io.k8s.api.apps.v1.StatefulSet -D -201= -; -Created0 -. -,#/definitions/io.k8s.api.apps.v1.StatefulSet - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionput -jO -x-kubernetes-group-version-kind,*group: apps -kind: StatefulSet -version: v1 -Bú -apps_v14partially update status of the specified StatefulSet*&patchAppsV1NamespacedStatefulSetStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." 
fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean JZ -? -2008 -6 -OK0 -. -,#/definitions/io.k8s.api.apps.v1.StatefulSet - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpatch -jO -x-kubernetes-group-version-kind,*group: apps -kind: StatefulSet -version: v1 -J8 -64"2pathname of the StatefulSet"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string è' -/api/v1/watch/resourcequotasÇ'» -core_v1ywatch individual changes to a list of ResourceQuota. deprecated: use the 'watch' parameter with a list operation instead.*,watchCoreV1ResourceQuotaListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsjO -x-kubernetes-group-version-kind,*group: "" -kind: ResourceQuota -version: v1 -j# -x-kubernetes-action  -watchlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean É( -8/apis/autoscaling/v2beta2/watch/horizontalpodautoscalersŒ(€ -autoscaling_v2beta2ƒwatch individual changes to a list of HorizontalPodAutoscaler. deprecated: use the 'watch' parameter with a list operation instead.*BwatchAutoscalingV2beta2HorizontalPodAutoscalerListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk - -401 - - Unauthorized -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEventRhttpsjg -x-kubernetes-group-version-kindDBkind: HorizontalPodAutoscaler -version: v2beta2 -group: autoscaling -j# -x-kubernetes-action  -watchlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. 
[Binary content elided: protobuf-serialized Kubernetes OpenAPI (swagger) data from a vendored dependency included in this diff. The readable fragments describe the standard list/watch/create/replace/patch/delete operations and their query parameters (allowWatchBookmarks, continue, fieldSelector, labelSelector, limit, resourceVersion, resourceVersionMatch, timeoutSeconds, watch, dryRun, fieldManager, force, gracePeriodSeconds, orphanDependents, propagationPolicy, pretty) for resources such as PriorityClass (scheduling.k8s.io/v1), ValidatingWebhookConfiguration (admissionregistration.k8s.io/v1beta1), CustomResourceDefinition and its status subresource (apiextensions.k8s.io/v1), DaemonSet (apps/v1), ConfigMap (core/v1), CronJob (batch/v1beta1), RuntimeClass (node.k8s.io/v1beta1), StorageClass (storage.k8s.io/v1beta1), TokenReview (authentication.k8s.io/v1beta1), and Event (events.k8s.io/v1beta1), along with their response schemas (200/201/202/401) and x-kubernetes-action / x-kubernetes-group-version-kind extensions.]
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg - -401 - - Unauthorized -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusRhttpsjW -x-kubernetes-group-version-kind42group: events.k8s.io -kind: Event -version: v1beta1 -j* -x-kubernetes-actiondeletecollection -J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Ü -/apis/extensions/Æà - -extensionsget information of a group*getExtensionsAPIGroup2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJi -N -200G -E -OK? -= -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup - -401 - - UnauthorizedRhttps’* -F/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/roles/{name}Ç)þ -rbacAuthorization_v1read the specified Role*%readRbacAuthorizationV1NamespacedRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JS - -401 - - Unauthorized -8 -2001 -/ -OK) -' -%#/definitions/io.k8s.api.rbac.v1.RoleRhttpsj -x-kubernetes-actionget -j] -x-kubernetes-group-version-kind:8group: rbac.authorization.k8s.io -kind: Role -version: v1 -» -rbacAuthorization_v1replace the specified Role*(replaceRbacAuthorizationV1NamespacedRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B; -9 -7bodybody *' -%#/definitions/io.k8s.api.rbac.v1.RoleBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J’ -8 -2001 -/ -OK) -' -%#/definitions/io.k8s.api.rbac.v1.Role -= -2016 -4 -Created) -' -%#/definitions/io.k8s.api.rbac.v1.Role - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionput -j] -x-kubernetes-group-version-kind:8version: v1 -group: rbac.authorization.k8s.io -kind: Role -*Ÿ -rbacAuthorization_v1 delete a Role*'deleteRbacAuthorizationV1NamespacedRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. 
Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status -R -202K -I -Accepted= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj -x-kubernetes-action delete -j] -x-kubernetes-group-version-kind:8version: v1 -group: rbac.authorization.k8s.io -kind: Role -Bý -rbacAuthorization_v1#partially update the specified Role*&patchRbacAuthorizationV1NamespacedRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean JS -8 -2001 -/ -OK) -' -%#/definitions/io.k8s.api.rbac.v1.Role - -401 - - UnauthorizedRhttpsj] -x-kubernetes-group-version-kind:8group: rbac.authorization.k8s.io -kind: Role -version: v1 -j -x-kubernetes-actionpatch -J1 -/-"+pathname of the Role"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string í( -3/apis/storage.k8s.io/v1/watch/storageclasses/{name}µ(î - -storage_v1³watch changes to an object of kind StorageClass. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*watchStorageV1StorageClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionwatch -jZ -x-kubernetes-group-version-kind75group: storage.k8s.io -kind: StorageClass -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. 
Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J9 -75"3pathname of the StorageClass"name*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. 
- -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ×) -;/api/v1/watch/namespaces/{namespace}/serviceaccounts/{name}—)ì -core_v1µwatch changes to an object of kind ServiceAccount. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*#watchCoreV1NamespacedServiceAccount2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsjP -x-kubernetes-group-version-kind-+group: "" -kind: ServiceAccount -version: v1 -j -x-kubernetes-actionwatch -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J; -97"5pathname of the ServiceAccount"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean §' -/api/v1/replicationcontrollers„'ø -core_v13list or watch objects of kind ReplicationController*/listCoreV1ReplicationControllerForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jh -M -200F -D -OK> -< -:#/definitions/io.k8s.api.core.v1.ReplicationControllerList - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionlist -jW -x-kubernetes-group-version-kind42group: "" -kind: ReplicationController -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. 
If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ó) -E/apis/apps/v1/watch/namespaces/{namespace}/controllerrevisions/{name}©)ú -apps_v1¹watch changes to an object of kind ControllerRevision. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*'watchAppsV1NamespacedControllerRevision2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionwatch -jV -x-kubernetes-group-version-kind31group: apps -kind: ControllerRevision -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J? -=;"9pathname of the ControllerRevision"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean å* -G/apis/networking.k8s.io/v1beta1/namespaces/{namespace}/ingresses/{name}™*Ž -networking_v1beta1read the specified Ingress*&readNetworkingV1beta1NamespacedIngress2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Ja -F -200? -= -OK7 -5 -3#/definitions/io.k8s.api.networking.v1beta1.Ingress - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionget -j] -x-kubernetes-group-version-kind:8group: networking.k8s.io -kind: Ingress -version: v1beta1 -ç -networking_v1beta1replace the specified Ingress*)replaceNetworkingV1beta1NamespacedIngress2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BI -G -Ebodybody *5 -3#/definitions/io.k8s.api.networking.v1beta1.IngressBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J® -F -200? 
-= -OK7 -5 -3#/definitions/io.k8s.api.networking.v1beta1.Ingress -K -201D -B -Created7 -5 -3#/definitions/io.k8s.api.networking.v1beta1.Ingress - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionput -j] -x-kubernetes-group-version-kind:8group: networking.k8s.io -kind: Ingress -version: v1beta1 -*¢ -networking_v1beta1delete an Ingress*(deleteNetworkingV1beta1NamespacedIngress2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status -R -202K -I -Accepted= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj -x-kubernetes-action delete -j] -x-kubernetes-group-version-kind:8group: networking.k8s.io -kind: Ingress -version: v1beta1 -B -networking_v1beta1&partially update the specified Ingress*'patchNetworkingV1beta1NamespacedIngress2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests."force2boolean Ja -F -200? -= -OK7 -5 -3#/definitions/io.k8s.api.networking.v1beta1.Ingress - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpatch -j] -x-kubernetes-group-version-kind:8group: networking.k8s.io -kind: Ingress -version: v1beta1 -J4 -20".pathname of the Ingress"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ¥) -A/apis/rbac.authorization.k8s.io/v1beta1/watch/clusterroles/{name}ß(™ -rbacAuthorization_v1beta1²watch changes to an object of kind ClusterRole. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*(watchRbacAuthorizationV1beta1ClusterRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionwatch -ji -x-kubernetes-group-version-kindFDgroup: rbac.authorization.k8s.io -kind: ClusterRole -version: v1beta1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J8 -64"2pathname of the ClusterRole"name*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ´ -4/api/v1/namespaces/{namespace}/services/{name}/proxyûŸ -core_v1(connect GET requests to proxy of Service*&connectCoreV1GetNamespacedServiceProxy2*/*:*/*J7 - -401 - - Unauthorized - -200 - -OK - ² -stringRhttpsj! -x-kubernetes-action -connect -jU -x-kubernetes-group-version-kind20group: "" -kind: ServiceProxyOptions -version: v1 -Ÿ -core_v1(connect PUT requests to proxy of Service*&connectCoreV1PutNamespacedServiceProxy2*/*:*/*J7 - -200 - -OK - ² -string - -401 - - UnauthorizedRhttpsj! -x-kubernetes-action -connect -jU -x-kubernetes-group-version-kind20group: "" -kind: ServiceProxyOptions -version: v1 -"¡ -core_v1)connect POST requests to proxy of Service*'connectCoreV1PostNamespacedServiceProxy2*/*:*/*J7 - -200 - -OK - ² -string - -401 - - UnauthorizedRhttpsj! -x-kubernetes-action -connect -jU -x-kubernetes-group-version-kind20group: "" -kind: ServiceProxyOptions -version: v1 -*¥ -core_v1+connect DELETE requests to proxy of Service*)connectCoreV1DeleteNamespacedServiceProxy2*/*:*/*J7 - -200 - -OK - ² -string - -401 - - UnauthorizedRhttpsj! -x-kubernetes-action -connect -jU -x-kubernetes-group-version-kind20group: "" -kind: ServiceProxyOptions -version: v1 -2§ -core_v1,connect OPTIONS requests to proxy of Service**connectCoreV1OptionsNamespacedServiceProxy2*/*:*/*J7 - -200 - -OK - ² -string - -401 - - UnauthorizedRhttpsj! 
-x-kubernetes-action -connect -jU -x-kubernetes-group-version-kind20group: "" -kind: ServiceProxyOptions -version: v1 -:¡ -core_v1)connect HEAD requests to proxy of Service*'connectCoreV1HeadNamespacedServiceProxy2*/*:*/*J7 - -401 - - Unauthorized - -200 - -OK - ² -stringRhttpsjU -x-kubernetes-group-version-kind20group: "" -kind: ServiceProxyOptions -version: v1 -j! -x-kubernetes-action -connect -B£ -core_v1*connect PATCH requests to proxy of Service*(connectCoreV1PatchNamespacedServiceProxy2*/*:*/*J7 - -200 - -OK - ² -string - -401 - - UnauthorizedRhttpsj! -x-kubernetes-action -connect -jU -x-kubernetes-group-version-kind20group: "" -kind: ServiceProxyOptions -version: v1 -J@ -><":pathname of the ServiceProxyOptions"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JÐ -ÍÊÇquery¬Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy."path2string Ã ->/apis/apiregistration.k8s.io/v1beta1/apiservices/{name}/status€É -apiregistration_v1beta1'read status of the specified APIService**readApiregistrationV1beta1APIServiceStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*J~ -c -200\ -Z -OKT -R -P#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIService - -401 - - UnauthorizedRhttpsje -x-kubernetes-group-version-kindB@group: apiregistration.k8s.io -version: v1beta1 -kind: APIService -j -x-kubernetes-actionget -Ü -apiregistration_v1beta1*replace status of the specified APIService*-replaceApiregistrationV1beta1APIServiceStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Bf -d -bbodybody *R -P#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIServiceBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jè -c -200\ -Z -OKT -R -P#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIService -h -201a -_ -CreatedT -R -P#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIService - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionput -je -x-kubernetes-group-version-kindB@group: apiregistration.k8s.io -version: v1beta1 -kind: APIService -BÈ -apiregistration_v1beta13partially update status of the specified APIService*+patchApiregistrationV1beta1APIServiceStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean J~ -c -200\ -Z -OKT -R -P#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIService - -401 - - UnauthorizedRhttpsje -x-kubernetes-group-version-kindB@group: apiregistration.k8s.io -version: v1beta1 -kind: APIService -j -x-kubernetes-actionpatch -J7 -53"1pathname of the APIService"name*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Ë) -=/apis/apps/v1/watch/namespaces/{namespace}/deployments/{name}‰)â -apps_v1±watch changes to an object of kind Deployment. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*watchAppsV1NamespacedDeployment2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionwatch -jN -x-kubernetes-group-version-kind+)group: apps -kind: Deployment -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. 
[binary content: what follows appears to be a protobuf-serialized Kubernetes OpenAPI v2 (Swagger) document carried in the vendor tree; its path entries are summarized below in place of the raw bytes.]

Every list/watch operation in this region accepts the same set of query parameters; their descriptions are given once here rather than repeated per endpoint:

- allowWatchBookmarks (boolean): requests watch events of type "BOOKMARK". Servers that do not implement bookmarks may ignore the flag, and bookmarks are sent at the server's discretion; clients must not assume they arrive at any particular interval or at all. Ignored for non-watch requests and when the WatchBookmarks feature gate is disabled in the apiserver.
- continue (string): opaque pagination token from a previous list call. It may only be reused with identical query parameters (except continue itself), and the server may reject tokens it does not recognize. If the token has expired (generally five to fifteen minutes) or the server configuration changed, the server responds with 410 ResourceExpired plus a fresh token; a client that needs a consistent list must restart without the token, while a client that can tolerate inconsistency may resume with the token from the 410 response, in which case results come from the latest snapshot and objects created, modified, or deleted after the first request are included if their keys sort after the "next key". Not supported when watch is true.
- fieldSelector / labelSelector (string): restrict the returned objects by their fields / labels; defaults to everything.
- limit (integer): maximum number of responses for a list call. If more items exist, the server sets the `continue` field on the list metadata; a limited list may return fewer items than requested (even zero), so clients should rely on the presence of `continue`, not the item count, to decide whether more results exist. Servers may choose not to support limit and return all available results. Paged results form a consistent snapshot: objects created, modified, or deleted after the first request are not included in later pages, and an object updated mid-way is returned at the version current when the first page was computed. Not supported when watch is true.
- pretty (string): if 'true', the output is pretty printed.
- resourceVersion / resourceVersionMatch (string): constrain which resource versions may serve the request; setting resourceVersionMatch is highly recommended whenever resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Both default to unset.
- timeoutSeconds (integer): limits the duration of the list/watch call, regardless of activity or inactivity.
- watch (boolean): watch the described resources and stream add, update, and remove notifications, starting from the specified resourceVersion.
- name (path): name of the object; namespace (path): object name and auth scope, such as for teams and projects.

Watch endpoints defined here (all serve https, respond 200 with io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent or 401 Unauthorized, and are marked deprecated in favor of the watch parameter on the corresponding list operation):
- GET /apis/batch/v1/watch/jobs: watchBatchV1JobListForAllNamespaces (group batch, kind Job, v1)
- GET /apis/batch/v1/watch/namespaces/{namespace}/cronjobs/{name}: watchBatchV1NamespacedCronJob (group batch, kind CronJob, v1)
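The limit/continue contract above is what drives list pagination. The sketch below pages through Jobs using plain HTTP against the (non-deprecated) list endpoint; the kubectl-proxy address 127.0.0.1:8001 and the page size of 500 are assumptions for illustration, not values taken from the spec.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// listPage models only the fields of a Kubernetes list response that
// pagination needs: the continue token and the raw items.
type listPage struct {
	Metadata struct {
		Continue string `json:"continue"`
	} `json:"metadata"`
	Items []json.RawMessage `json:"items"`
}

func main() {
	// Assumption: the API server is reachable through `kubectl proxy` on 127.0.0.1:8001.
	base := "http://127.0.0.1:8001/apis/batch/v1/jobs"
	token := ""
	for page := 1; ; page++ {
		q := url.Values{}
		q.Set("limit", "500") // ask for at most 500 items per call
		if token != "" {
			q.Set("continue", token) // resume from the previous page
		}
		resp, err := http.Get(base + "?" + q.Encode())
		if err != nil {
			panic(err)
		}
		if resp.StatusCode == http.StatusGone {
			// 410 ResourceExpired: the continue token expired (typically after
			// 5-15 minutes). Restart the list without the token, as the spec advises.
			resp.Body.Close()
			token = ""
			continue
		}
		var pl listPage
		err = json.NewDecoder(resp.Body).Decode(&pl)
		resp.Body.Close()
		if err != nil {
			panic(err)
		}
		fmt.Printf("page %d: %d items\n", page, len(pl.Items))
		if pl.Metadata.Continue == "" {
			break // an empty continue token means there are no further pages
		}
		token = pl.Metadata.Continue
	}
}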
Further watch endpoints with the same query parameters and WatchEvent/401 responses:
- GET /apis/extensions/v1beta1/watch/namespaces/{namespace}/ingresses/{name}: watchExtensionsV1beta1NamespacedIngress (group extensions, kind Ingress, v1beta1)
- GET /apis/apiregistration.k8s.io/v1/watch/apiservices/{name}: watchApiregistrationV1APIService (group apiregistration.k8s.io, kind APIService, v1)
- GET /apis/apps/v1/watch/replicasets: watchAppsV1ReplicaSetListForAllNamespaces (group apps, kind ReplicaSet, v1)
The single-object paths are deprecated in favor of a list operation with the watch parameter, filtered to one item with fieldSelector; the all-namespaces path is deprecated in favor of the watch parameter on the list operation.
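Because the dedicated watch paths are deprecated, the following sketch drives a watch through the list endpoint with watch=true and allowWatchBookmarks, resuming from a resourceVersion obtained from an earlier list; the proxy address, namespace, and resourceVersion value are assumptions.

package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"net/http"
)

// watchEvent is the wire form of io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent:
// a type (ADDED, MODIFIED, DELETED, BOOKMARK, ERROR) plus the object itself.
type watchEvent struct {
	Type   string          `json:"type"`
	Object json.RawMessage `json:"object"`
}

func main() {
	// Assumptions: kubectl proxy on 127.0.0.1:8001, namespace "default", and a
	// resourceVersion taken from the metadata of a previous list response.
	rv := "12345"
	u := "http://127.0.0.1:8001/apis/apps/v1/namespaces/default/replicasets" +
		"?watch=true&allowWatchBookmarks=true&resourceVersion=" + rv
	resp, err := http.Get(u)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The server streams one JSON-encoded watch event per line
	// (Content-Type application/json;stream=watch).
	sc := bufio.NewScanner(resp.Body)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // allow large objects per line
	for sc.Scan() {
		var ev watchEvent
		if err := json.Unmarshal(sc.Bytes(), &ev); err != nil {
			panic(err)
		}
		fmt.Println("event:", ev.Type)
	}
}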
/apis/autoscaling/v2beta1/namespaces/{namespace}/horizontalpodautoscalers/{name} (group autoscaling, kind HorizontalPodAutoscaler, v2beta1; path parameters name and namespace, query parameter pretty):
- GET readAutoscalingV2beta1NamespacedHorizontalPodAutoscaler: 200 returns io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscaler, 401 Unauthorized.
- PUT replaceAutoscalingV2beta1NamespacedHorizontalPodAutoscaler: body is a HorizontalPodAutoscaler; query parameters dryRun (when present, modifications are not persisted; the only valid value is All, and unrecognized directives are rejected) and fieldManager (name of the actor or entity making the change, at most 128 printable characters); 200 OK or 201 Created.
- DELETE deleteAutoscalingV2beta1NamespacedHorizontalPodAutoscaler: optional DeleteOptions body; query parameters dryRun, gracePeriodSeconds (non-negative; 0 deletes immediately; unset uses the per-type default), orphanDependents (deprecated in favor of PropagationPolicy), and propagationPolicy (Orphan, Background, or Foreground; at most one of orphanDependents and propagationPolicy may be set); 200 OK or 202 Accepted with a v1.Status.
- PATCH patchAutoscalingV2beta1NamespacedHorizontalPodAutoscaler: accepts application/json-patch+json, application/merge-patch+json, application/strategic-merge-patch+json, and application/apply-patch+yaml; query parameters dryRun, fieldManager (required for apply patches, optional for the others), and force (re-acquire fields owned by other managers; must be unset for non-apply patches); 200 returns the updated object.

Also defined here: GET /apis/batch/v1/watch/namespaces/{namespace}/cronjobs: watchBatchV1NamespacedCronJobList (group batch, kind CronJob, v1), a deprecated watch-list path taking the standard parameters above.
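To make the patch parameters concrete, here is a minimal sketch that sends a JSON merge patch with dryRun=All and a fieldManager; the proxy address, the namespace, and the HorizontalPodAutoscaler name "web" are assumptions.

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Assumptions: kubectl proxy on 127.0.0.1:8001, namespace "default", HPA named "web".
	// dryRun=All asks the server to validate and admit the patch without persisting it.
	u := "http://127.0.0.1:8001/apis/autoscaling/v2beta1/namespaces/default/horizontalpodautoscalers/web" +
		"?dryRun=All&fieldManager=example-editor"
	patch := []byte(`{"spec":{"maxReplicas":5}}`)

	req, err := http.NewRequest(http.MethodPatch, u, bytes.NewReader(patch))
	if err != nil {
		panic(err)
	}
	// application/merge-patch+json is one of the four accepted patch content types.
	req.Header.Set("Content-Type", "application/merge-patch+json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}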
GET /apis/batch/v1/watch/namespaces/{namespace}/jobs: watchBatchV1NamespacedJobList (group batch, kind Job, v1), another deprecated watch-list path with the standard parameters above.

/apis/certificates.k8s.io/v1/certificatesigningrequests/{name} (group certificates.k8s.io, kind CertificateSigningRequest, v1; path parameter name, query parameter pretty):
- GET readCertificatesV1CertificateSigningRequest: 200 returns io.k8s.api.certificates.v1.CertificateSigningRequest, 401 Unauthorized.
- PUT replaceCertificatesV1CertificateSigningRequest: body is a CertificateSigningRequest; query parameters dryRun and fieldManager; 200 OK or 201 Created.
- DELETE deleteCertificatesV1CertificateSigningRequest: optional DeleteOptions body; query parameters dryRun, gracePeriodSeconds, orphanDependents (deprecated), and propagationPolicy (Orphan, Background, or Foreground); 200 OK or 202 Accepted with a v1.Status.
- PATCH patchCertificatesV1CertificateSigningRequest: same patch content types and dryRun/fieldManager/force parameters as above; 200 returns the updated object.

Also:
- GET /apis/extensions/v1beta1/: getExtensionsV1beta1APIResources; returns io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList.
- GET /apis/rbac.authorization.k8s.io/v1beta1/roles: listRbacAuthorizationV1beta1RoleForAllNamespaces (group rbac.authorization.k8s.io, kind Role, v1beta1); list or watch with the standard parameters; 200 returns io.k8s.api.rbac.v1beta1.RoleList.
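The delete operations take their options either as query parameters or as a DeleteOptions body; a minimal sketch using the query form follows, with the proxy address and the CertificateSigningRequest name as assumptions.

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Assumptions: kubectl proxy on 127.0.0.1:8001 and a CSR named "example-csr".
	// propagationPolicy controls whether dependents are orphaned or garbage-collected;
	// gracePeriodSeconds bounds how long the object lingers before removal.
	u := "http://127.0.0.1:8001/apis/certificates.k8s.io/v1/certificatesigningrequests/example-csr" +
		"?propagationPolicy=Background&gracePeriodSeconds=30"

	req, err := http.NewRequest(http.MethodDelete, u, nil)
	if err != nil {
		panic(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	// The server answers 200 OK or 202 Accepted with a v1.Status describing the deletion.
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}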
GET /apis/admissionregistration.k8s.io/v1/: getAdmissionregistrationV1APIResources; returns io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList.

/apis/flowcontrol.apiserver.k8s.io/v1beta1/prioritylevelconfigurations (group flowcontrol.apiserver.k8s.io, kind PriorityLevelConfiguration, v1beta1):
- GET listFlowcontrolApiserverV1beta1PriorityLevelConfiguration: list or watch with the standard parameters above; 200 returns io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationList.
- POST createFlowcontrolApiserverV1beta1PriorityLevelConfiguration: body is a PriorityLevelConfiguration; query parameters dryRun and fieldManager; 200 OK, 201 Created, or 202 Accepted.
- DELETE deleteFlowcontrolApiserverV1beta1CollectionPriorityLevelConfiguration: delete collection; optional DeleteOptions body; takes the delete options (dryRun, gracePeriodSeconds, orphanDependents, propagationPolicy) together with the list filters (continue, fieldSelector, labelSelector, limit, resourceVersion, resourceVersionMatch, timeoutSeconds); 200 returns a v1.Status.

Also:
- GET /apis/scheduling.k8s.io/v1beta1/: getSchedulingV1beta1APIResources; returns an APIResourceList.
- GET /apis/autoscaling/v1/horizontalpodautoscalers: listAutoscalingV1HorizontalPodAutoscalerForAllNamespaces (group autoscaling, kind HorizontalPodAutoscaler, v1); list or watch with the standard parameters; 200 returns io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerList.
- GET /api/v1/watch/namespaces/{namespace}/pods: core_v1 watch of a Pod list; the entry continues past this point.
deprecated: use the 'watch' parameter with a list operation instead.*watchCoreV1NamespacedPodList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -jE -x-kubernetes-group-version-kind" group: "" -kind: Pod -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. 
This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ‰( -$/api/v1/watch/replicationcontrollersà'Ô -core_v1watch individual changes to a list of ReplicationController. deprecated: use the 'watch' parameter with a list operation instead.*4watchCoreV1ReplicationControllerListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk - -401 - - Unauthorized -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEventRhttpsjW -x-kubernetes-group-version-kind42kind: ReplicationController -version: v1 -group: "" -j# -x-kubernetes-action  -watchlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean õ' -2/apis/autoscaling/v2beta2/horizontalpodautoscalers¾'² -autoscaling_v2beta25list or watch objects of kind HorizontalPodAutoscaler*=listAutoscalingV2beta2HorizontalPodAutoscalerForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jv -[ -200T -R -OKL -J -H#/definitions/io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerList - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionlist -jg -x-kubernetes-group-version-kindDBkind: HorizontalPodAutoscaler -version: v2beta2 -group: autoscaling -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
- -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ÿ) -L/apis/rbac.authorization.k8s.io/v1/watch/namespaces/{namespace}/rolebindingsÎ(à -rbacAuthorization_v1wwatch individual changes to a list of RoleBinding. deprecated: use the 'watch' parameter with a list operation instead.*1watchRbacAuthorizationV1NamespacedRoleBindingList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsjd -x-kubernetes-group-version-kindA?group: rbac.authorization.k8s.io -kind: RoleBinding -version: v1 -j# -x-kubernetes-action  -watchlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean …* -Q/apis/rbac.authorization.k8s.io/v1beta1/watch/namespaces/{namespace}/roles/{name}¯)Ž -rbacAuthorization_v1beta1«watch changes to an object of kind Role. 
deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*+watchRbacAuthorizationV1beta1NamespacedRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionwatch -jb -x-kubernetes-group-version-kind?=group: rbac.authorization.k8s.io -kind: Role -version: v1beta1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
- -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J1 -/-"+pathname of the Role"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ê' -(/apis/storage.k8s.io/v1/watch/csidrivers½'± - -storage_v1uwatch individual changes to a list of CSIDriver. deprecated: use the 'watch' parameter with a list operation instead.*watchStorageV1CSIDriverList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk - -401 - - Unauthorized -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEventRhttpsj# -x-kubernetes-action  -watchlist -jW -x-kubernetes-group-version-kind42group: storage.k8s.io -kind: CSIDriver -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ö* -
"<path!name of the ReplicationController"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Ó] -3/apis/batch/v1beta1/namespaces/{namespace}/cronjobs›]Œ& - batch_v1beta1%list or watch objects of kind CronJob*!listBatchV1beta1NamespacedCronJob2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
- -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean J` -E -200> -< -OK6 -4 -2#/definitions/io.k8s.api.batch.v1beta1.CronJobList - -401 - - UnauthorizedRhttpsjQ -x-kubernetes-group-version-kind.,group: batch -kind: CronJob -version: v1beta1 -j -x-kubernetes-actionlist -"þ - batch_v1beta1create a CronJob*#createBatchV1beta1NamespacedCronJob2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BD -B -@bodybody *0 -.#/definitions/io.k8s.api.batch.v1beta1.CronJobBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jí -A -200: -8 -OK2 -0 -.#/definitions/io.k8s.api.batch.v1beta1.CronJob -F -201? -= -Created2 -0 -.#/definitions/io.k8s.api.batch.v1beta1.CronJob -G -202@ -> -Accepted2 -0 -.#/definitions/io.k8s.api.batch.v1beta1.CronJob - -401 - - UnauthorizedRhttpsjQ -x-kubernetes-group-version-kind.,group: batch -kind: CronJob -version: v1beta1 -j -x-kubernetes-actionpost -*Õ, - batch_v1beta1delete collection of CronJob*-deleteBatchV1beta1CollectionNamespacedCronJob2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. 
Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj* -x-kubernetes-actiondeletecollection -jQ -x-kubernetes-group-version-kind.,group: batch -kind: CronJob -version: v1beta1 -J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string »^ -?/apis/discovery.k8s.io/v1/namespaces/{namespace}/endpointslices÷]§& - discovery_v1+list or watch objects of kind EndpointSlice*&listDiscoveryV1NamespacedEndpointSlice2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
(Vendored Kubernetes OpenAPI definitions, protobuf-encoded; only the recoverable structure is summarized here.)

Endpoints for namespaced EndpointSlice (discovery.k8s.io/v1):
- list (listDiscoveryV1NamespacedEndpointSlice): 200 returns io.k8s.api.discovery.v1.EndpointSliceList, 401 Unauthorized.
- create (createDiscoveryV1NamespacedEndpointSlice): EndpointSlice body, dryRun and fieldManager query parameters; 200 OK, 201 Created, 202 Accepted, 401 Unauthorized.
- delete collection (deleteDiscoveryV1CollectionNamespacedEndpointSlice): DeleteOptions body plus the standard list parameters, dryRun, gracePeriodSeconds, orphanDependents (deprecated in favor of propagationPolicy), and propagationPolicy ('Orphan', 'Background', or 'Foreground'); 200 returns io.k8s.apimachinery.pkg.apis.meta.v1.Status.

This block also begins /apis/storage.k8s.io/v1beta1/watch/csistoragecapacities (watchStorageV1beta1CSIStorageCapacityListForAllNamespaces), a deprecated watch-list endpoint returning meta.v1.WatchEvent; callers are directed to the 'watch' parameter on the list operation instead.

The list/watch query parameters repeated on these operations carry the standard semantics: fieldSelector and labelSelector restrict the result set (default: everything); limit caps the number of items returned per call, and when more items exist the list metadata carries a continue token that can be resent with the same query to fetch the next chunk from a consistent snapshot; an expired or unrecognized continue token yields 410 ResourceExpired, after which the client must restart the list (or accept an inconsistent continuation from the latest snapshot); resourceVersion and resourceVersionMatch constrain which revision the list may be served from; timeoutSeconds bounds the call duration; watch streams add, update, and remove notifications from a given resourceVersion; allowWatchBookmarks requests optional BOOKMARK events; pretty pretty-prints the output; dryRun ('All') validates a mutation without persisting it; fieldManager names the actor making the change.
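The limit/continue description above is the generic chunked-list protocol: pass limit, and while the returned list metadata carries a non-empty continue token, re-issue the same query with that token; a 410 ResourceExpired means the token is stale and the list must be restarted. A minimal client-go sketch of that loop, offered only as an illustration of the parameters documented here (the clientset, namespace, and page size are assumptions, and the EndpointSlice client is just one typed client this applies to):

package example

import (
	"context"

	discoveryv1 "k8s.io/api/discovery/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listAllEndpointSlices pages through EndpointSlices with limit/continue,
// accumulating items until the server returns an empty continue token.
func listAllEndpointSlices(ctx context.Context, cs kubernetes.Interface, namespace string) ([]discoveryv1.EndpointSlice, error) {
	var all []discoveryv1.EndpointSlice
	opts := metav1.ListOptions{Limit: 100} // the server may return fewer items, or ignore limit entirely
	for {
		list, err := cs.DiscoveryV1().EndpointSlices(namespace).List(ctx, opts)
		if err != nil {
			// A 410 ResourceExpired here means the continue token expired;
			// the caller would need to restart the list without the token.
			return nil, err
		}
		all = append(all, list.Items...)
		if list.Continue == "" { // empty continue token: no more results
			return all, nil
		}
		opts.Continue = list.Continue // same query, next chunk
	}
}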
The CSIStorageCapacity watch-list parameters continue (pretty, resourceVersion, resourceVersionMatch, timeoutSeconds, watch), followed by /apis/batch/v1/cronjobs (listBatchV1CronJobForAllNamespaces): list or watch objects of kind CronJob across all namespaces, with the same list/watch query parameters described above; 200 returns io.k8s.api.batch.v1.CronJobList, 401 Unauthorized.
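The watch, resourceVersion, and allowWatchBookmarks parameters above describe the usual list-then-watch pattern: list once, then start a watch from the list's resourceVersion so no modifications are missed, optionally requesting BOOKMARK events. A hedged client-go sketch under those assumptions (the CronJob client and namespace are illustrative choices, not something this spec mandates):

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listThenWatchCronJobs lists CronJobs once, then watches for changes starting
// from the returned resourceVersion, as the parameter documentation above suggests.
func listThenWatchCronJobs(ctx context.Context, cs kubernetes.Interface, ns string) error {
	list, err := cs.BatchV1().CronJobs(ns).List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	w, err := cs.BatchV1().CronJobs(ns).Watch(ctx, metav1.ListOptions{
		ResourceVersion:     list.ResourceVersion, // start where the list left off
		AllowWatchBookmarks: true,                 // the server may send BOOKMARK events
	})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Printf("%s: %v\n", ev.Type, ev.Object.GetObjectKind().GroupVersionKind())
	}
	return nil
}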
Deprecated watch endpoints, each returning a stream of meta.v1.WatchEvent and taking the standard list/watch parameters:
- /api/v1/watch/namespaces (watchCoreV1NamespaceList): watch individual changes to a list of Namespace; deprecated in favor of the 'watch' parameter on the list operation.
- /apis/rbac.authorization.k8s.io/v1beta1/watch/namespaces/{namespace}/rolebindings/{name} (watchRbacAuthorizationV1beta1NamespacedRoleBinding): watch changes to a single RoleBinding; deprecated in favor of a list watch filtered to a single item with the 'fieldSelector' parameter.
- /apis/storage.k8s.io/v1/watch/csidrivers/{name} (watchStorageV1CSIDriver): watch changes to a single CSIDriver, deprecated the same way (definition continues below).
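The deprecation notes above name the replacement pattern themselves: a watch on the collection, restricted to one object with fieldSelector=metadata.name=<name>. A minimal sketch of that pattern, assuming client-go and the rbac.authorization.k8s.io/v1 RoleBinding client (the v1beta1 version described in this spec is itself deprecated):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
)

// watchSingleRoleBinding replaces the deprecated per-object watch endpoint with a
// collection watch filtered to one object via fieldSelector, as the spec recommends.
func watchSingleRoleBinding(ctx context.Context, cs kubernetes.Interface, ns, name string) (watch.Interface, error) {
	sel := fields.OneTermEqualSelector("metadata.name", name).String()
	return cs.RbacV1().RoleBindings(ns).Watch(ctx, metav1.ListOptions{
		FieldSelector: sel, // restrict the watch to the single named object
	})
}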
The CSIDriver watch definition completes (storage.k8s.io/v1, action watch, name path parameter, standard list/watch parameters), followed by:
- /api/v1/namespaces/{namespace}/replicationcontrollers/{name}/scale: readCoreV1NamespacedReplicationControllerScale (GET), replaceCoreV1NamespacedReplicationControllerScale (PUT with a Scale body, dryRun and fieldManager), and patchCoreV1NamespacedReplicationControllerScale (PATCH accepting JSON patch, merge patch, strategic merge patch, or apply patch, with dryRun, fieldManager (required for apply patches) and force). All return io.k8s.api.autoscaling.v1.Scale on 200, 201 Created for replace, 401 Unauthorized otherwise.
- /apis/apiextensions.k8s.io/v1/customresourcedefinitions/{name}: readApiextensionsV1CustomResourceDefinition (GET) and the beginning of replaceApiextensionsV1CustomResourceDefinition (PUT with a CustomResourceDefinition body, dryRun and fieldManager), both returning io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinition.
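The scale operations above expose the autoscaling/v1 Scale subresource; in client-go the typed ReplicationController client surfaces them as GetScale and UpdateScale rather than separate HTTP verbs. A hedged sketch of scaling through that subresource (the namespace, object name, and replica count are placeholders):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// scaleReplicationController reads the Scale subresource, adjusts the replica
// count, and writes it back, mirroring the read/replace scale operations above.
func scaleReplicationController(ctx context.Context, cs kubernetes.Interface, ns, name string, replicas int32) error {
	scale, err := cs.CoreV1().ReplicationControllers(ns).GetScale(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	scale.Spec.Replicas = replicas
	_, err = cs.CoreV1().ReplicationControllers(ns).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
	return err
}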
The CustomResourceDefinition endpoints complete, followed by two core/v1 endpoints:
- replaceApiextensionsV1CustomResourceDefinition: 200 OK or 201 Created with the CustomResourceDefinition. deleteApiextensionsV1CustomResourceDefinition: DeleteOptions body plus dryRun, gracePeriodSeconds, orphanDependents (deprecated), and propagationPolicy; 200 or 202 Accepted with meta.v1.Status. patchApiextensionsV1CustomResourceDefinition: the same patch content types and dryRun, fieldManager, force parameters as the scale patch above; 200 returns the CustomResourceDefinition.
- /api/v1/componentstatuses (listCoreV1ComponentStatus): list objects of kind ComponentStatus with the standard list/watch parameters; 200 returns io.k8s.api.core.v1.ComponentStatusList.
- /api/v1/watch/limitranges (watchCoreV1LimitRangeListForAllNamespaces): deprecated watch-list for LimitRange returning meta.v1.WatchEvent (parameters continue below).
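The patch operations summarized above (scale, CustomResourceDefinition, and later PodTemplate) all accept the same content types (JSON patch, merge patch, strategic merge patch, apply patch) and the fieldManager/force parameters that server-side apply requires. A minimal sketch of an apply-style patch of a CustomResourceDefinition label; it assumes the apiextensions clientset, and the manager name and label key are placeholders:

package example

import (
	"context"

	apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	apiextclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

// applyPatchCRDLabel sends an application/apply-patch+yaml request with a
// fieldManager, matching the patch parameters described above. Force re-acquires
// fields owned by other managers, as the force parameter documentation states.
func applyPatchCRDLabel(ctx context.Context, cs apiextclientset.Interface, crdName string) (*apiextv1.CustomResourceDefinition, error) {
	patch := []byte(`{"apiVersion":"apiextensions.k8s.io/v1","kind":"CustomResourceDefinition","metadata":{"name":"` + crdName + `","labels":{"example.com/managed":"true"}}}`)
	force := true
	return cs.ApiextensionsV1().CustomResourceDefinitions().Patch(ctx, crdName, types.ApplyPatchType, patch,
		metav1.PatchOptions{
			FieldManager: "example-manager", // required for apply patches
			Force:        &force,            // take ownership of conflicting fields
		})
}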
The LimitRange watch-list parameters complete, followed by:
- /apis/autoscaling/v1/ (getAutoscalingV1APIResources): get available resources; 200 returns meta.v1.APIResourceList.
- /api/v1/namespaces/{namespace}/podtemplates/{name}: readCoreV1NamespacedPodTemplate (GET), replaceCoreV1NamespacedPodTemplate (PUT with a PodTemplate body, dryRun and fieldManager; 200 or 201 Created), deleteCoreV1NamespacedPodTemplate (DeleteOptions body with dryRun, gracePeriodSeconds, orphanDependents (deprecated), and propagationPolicy; 200 or 202 Accepted), and patchCoreV1NamespacedPodTemplate (the usual patch content types with dryRun, fieldManager, force). Object responses are io.k8s.api.core.v1.PodTemplate.
- /apis/autoscaling/v2beta1/namespaces/{namespace}/horizontalpodautoscalers: the beginning of listAutoscalingV2beta1NamespacedHorizontalPodAutoscaler (list or watch objects of kind HorizontalPodAutoscaler), with the standard list/watch parameters.
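The delete operations above take the same DeleteOptions knobs everywhere: dryRun, gracePeriodSeconds, and propagationPolicy (the non-deprecated replacement for orphanDependents). A hedged sketch of a foreground delete; the resource choice and values are illustrative only:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deletePodTemplateForeground deletes a PodTemplate with an explicit grace period
// and foreground cascading, i.e. the propagationPolicy described above.
func deletePodTemplateForeground(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
	grace := int64(0)                            // zero means delete immediately
	policy := metav1.DeletePropagationForeground // delete dependents before the object itself
	return cs.CoreV1().PodTemplates(ns).Delete(ctx, name, metav1.DeleteOptions{
		GracePeriodSeconds: &grace,
		PropagationPolicy:  &policy,
		// DryRun: []string{metav1.DryRunAll}, // uncomment to validate without persisting
	})
}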
The HorizontalPodAutoscaler (autoscaling/v2beta1) collection endpoints complete:
- list (listAutoscalingV2beta1NamespacedHorizontalPodAutoscaler): 200 returns io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerList.
- create (createAutoscalingV2beta1NamespacedHorizontalPodAutoscaler): HorizontalPodAutoscaler body, dryRun and fieldManager parameters; 200 OK, 201 Created, 202 Accepted.
- delete collection (deleteAutoscalingV2beta1CollectionNamespacedHorizontalPodAutoscaler): DeleteOptions body plus the standard list parameters, dryRun, gracePeriodSeconds, orphanDependents (deprecated), propagationPolicy, resourceVersion, resourceVersionMatch, and timeoutSeconds (truncated here).
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsjg -x-kubernetes-group-version-kindDBversion: v2beta1 -group: autoscaling -kind: HorizontalPodAutoscaler -j* -x-kubernetes-actiondeletecollection -J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string é -/apis/certificates.k8s.io/ÊÇ - certificatesget information of a group*getCertificatesAPIGroup2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJi - -401 - - Unauthorized -N -200G -E -OK? -= -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroupRhttpsÈ) -H/apis/rbac.authorization.k8s.io/v1beta1/watch/clusterrolebindings/{name}û(® -rbacAuthorization_v1beta1¹watch changes to an object of kind ClusterRoleBinding. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*/watchRbacAuthorizationV1beta1ClusterRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionwatch -jp -x-kubernetes-group-version-kindMKgroup: rbac.authorization.k8s.io -kind: ClusterRoleBinding -version: v1beta1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." 
fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J? -=;"9pathname of the ClusterRoleBinding"name*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ÿ& -/api/v1/resourcequotasä&Ø -core_v1+list or watch objects of kind ResourceQuota*'listCoreV1ResourceQuotaForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*J` - -401 - - Unauthorized -E -200> -< -OK6 -4 -2#/definitions/io.k8s.api.core.v1.ResourceQuotaListRhttpsj -x-kubernetes-actionlist -jO -x-kubernetes-group-version-kind,*version: v1 -group: "" -kind: ResourceQuota -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. 
If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ¨, -K/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name}Ø+· -autoscaling_v1*read the specified HorizontalPodAutoscaler*2readAutoscalingV1NamespacedHorizontalPodAutoscaler2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Jm -R -200K -I -OKC -A -?#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionget -jb -x-kubernetes-group-version-kind?=group: autoscaling -kind: HorizontalPodAutoscaler -version: v1 -¨ -autoscaling_v1-replace the specified HorizontalPodAutoscaler*5replaceAutoscalingV1NamespacedHorizontalPodAutoscaler2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BU -S -Qbodybody *A -?#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string JÆ -W -201P -N -CreatedC -A -?#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler - -401 - - Unauthorized -R -200K -I -OKC -A -?#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerRhttpsjb -x-kubernetes-group-version-kind?=group: autoscaling -kind: HorizontalPodAutoscaler -version: v1 -j -x-kubernetes-actionput -*¾ -autoscaling_v1 delete a HorizontalPodAutoscaler*4deleteAutoscalingV1NamespacedHorizontalPodAutoscaler2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. 
Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status -R -202K -I -Accepted= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj -x-kubernetes-action delete -jb -x-kubernetes-group-version-kind?=group: autoscaling -kind: HorizontalPodAutoscaler -version: v1 -B¶ -autoscaling_v16partially update the specified HorizontalPodAutoscaler*3patchAutoscalingV1NamespacedHorizontalPodAutoscaler2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests."force2boolean Jm -R -200K -I -OKC -A -?#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler - -401 - - UnauthorizedRhttpsjb -x-kubernetes-group-version-kind?=group: autoscaling -kind: HorizontalPodAutoscaler -version: v1 -j -x-kubernetes-actionpatch -JD -B@">path#name of the HorizontalPodAutoscaler"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ó -/apis/scheduling.k8s.io/v1/ÓÐ - scheduling_v1get available resources*getSchedulingV1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp -U -200N -L -OKF -D -B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList - -401 - - UnauthorizedRhttpsÍ - /version/¿¼ -versionget the code version*getCodeVersion2application/json:application/jsonJ` -E -200> -< -OK6 -4 -2#/definitions/io.k8s.apimachinery.pkg.version.Info - -401 - - UnauthorizedRhttps© -5/api/v1/namespaces/{namespace}/services/{name}/statusïé -core_v1$read status of the specified Service*!readCoreV1NamespacedServiceStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JV -; -2004 -2 -OK, -* -(#/definitions/io.k8s.api.core.v1.Service - -401 - - UnauthorizedRhttpsjI -x-kubernetes-group-version-kind&$group: "" -kind: Service -version: v1 -j -x-kubernetes-actionget -¬ -core_v1'replace status of the specified Service*$replaceCoreV1NamespacedServiceStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B> -< -:bodybody ** -(#/definitions/io.k8s.api.core.v1.ServiceBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J˜ -; -2004 -2 -OK, -* -(#/definitions/io.k8s.api.core.v1.Service -@ -2019 -7 -Created, -* -(#/definitions/io.k8s.api.core.v1.Service - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionput -jI -x-kubernetes-group-version-kind&$version: v1 -group: "" -kind: Service -Bè -core_v10partially update status of the specified Service*"patchCoreV1NamespacedServiceStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean JV -; -2004 -2 -OK, -* -(#/definitions/io.k8s.api.core.v1.Service - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpatch -jI -x-kubernetes-group-version-kind&$group: "" -kind: Service -version: v1 -J4 -20".pathname of the Service"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ø -9/apis/apiregistration.k8s.io/v1/apiservices/{name}/statusºµ -apiregistration_v1'read status of the specified APIService*%readApiregistrationV1APIServiceStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Jy -^ -200W -U -OKO -M -K#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionget -j` -x-kubernetes-group-version-kind=;group: apiregistration.k8s.io -version: v1 -kind: APIService -¾ -apiregistration_v1*replace status of the specified APIService*(replaceApiregistrationV1APIServiceStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Ba -_ -]bodybody *M -K#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string JÞ -^ -200W -U -OKO -M -K#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService -c -201\ -Z -CreatedO -M -K#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionput -j` -x-kubernetes-group-version-kind=;group: apiregistration.k8s.io -version: v1 -kind: APIService -B´ -apiregistration_v13partially update status of the specified APIService*&patchApiregistrationV1APIServiceStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." 
fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean Jy -^ -200W -U -OKO -M -K#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService - -401 - - UnauthorizedRhttpsj` -x-kubernetes-group-version-kind=;group: apiregistration.k8s.io -version: v1 -kind: APIService -j -x-kubernetes-actionpatch -J7 -53"1pathname of the APIService"name*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string „ -8/apis/batch/v1/namespaces/{namespace}/jobs/{name}/statusÇß -batch_v1 read status of the specified Job*readBatchV1NamespacedJobStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JS - -401 - - Unauthorized -8 -2001 -/ -OK) -' -%#/definitions/io.k8s.api.batch.v1.JobRhttpsj -x-kubernetes-actionget -jH -x-kubernetes-group-version-kind%#group: batch -kind: Job -version: v1 -œ -batch_v1#replace status of the specified Job*!replaceBatchV1NamespacedJobStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B; -9 -7bodybody *' -%#/definitions/io.k8s.api.batch.v1.JobBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J’ -8 -2001 -/ -OK) -' -%#/definitions/io.k8s.api.batch.v1.Job -= -2016 -4 -Created) -' -%#/definitions/io.k8s.api.batch.v1.Job - -401 - - UnauthorizedRhttpsjH -x-kubernetes-group-version-kind%#group: batch -kind: Job -version: v1 -j -x-kubernetes-actionput -BÞ -batch_v1,partially update status of the specified Job*patchBatchV1NamespacedJobStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests."force2boolean JS - -401 - - Unauthorized -8 -2001 -/ -OK) -' -%#/definitions/io.k8s.api.batch.v1.JobRhttpsj -x-kubernetes-actionpatch -jH -x-kubernetes-group-version-kind%#group: batch -kind: Job -version: v1 -J0 -.,"*pathname of the Job"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ç -/apis/events.k8s.io/v1/ËÈ - events_v1get available resources*getEventsV1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp -U -200N -L -OKF -D -B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList - -401 - - UnauthorizedRhttps‡+ -=/apis/flowcontrol.apiserver.k8s.io/v1beta1/flowschemas/{name}Å*° -flowcontrolApiserver_v1beta1read the specified FlowSchema*)readFlowcontrolApiserverV1beta1FlowSchema2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Je - -401 - - Unauthorized -J -200C -A -OK; -9 -7#/definitions/io.k8s.api.flowcontrol.v1beta1.FlowSchemaRhttpsj -x-kubernetes-actionget -jk -x-kubernetes-group-version-kindHFgroup: flowcontrol.apiserver.k8s.io -kind: FlowSchema -version: v1beta1 -‘ -flowcontrolApiserver_v1beta1 replace the specified FlowSchema*,replaceFlowcontrolApiserverV1beta1FlowSchema2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BM -K -Ibodybody *9 -7#/definitions/io.k8s.api.flowcontrol.v1beta1.FlowSchemaBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¶ -J -200C -A -OK; -9 -7#/definitions/io.k8s.api.flowcontrol.v1beta1.FlowSchema -O -201H -F -Created; -9 -7#/definitions/io.k8s.api.flowcontrol.v1beta1.FlowSchema - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionput -jk -x-kubernetes-group-version-kindHFgroup: flowcontrol.apiserver.k8s.io -kind: FlowSchema -version: v1beta1 -*¿ -flowcontrolApiserver_v1beta1delete a FlowSchema*+deleteFlowcontrolApiserverV1beta1FlowSchema2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. 
If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status -R -202K -I -Accepted= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj -x-kubernetes-action delete -jk -x-kubernetes-group-version-kindHFkind: FlowSchema -version: v1beta1 -group: flowcontrol.apiserver.k8s.io -B¯ -flowcontrolApiserver_v1beta1)partially update the specified FlowSchema**patchFlowcontrolApiserverV1beta1FlowSchema2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean Je -J -200C -A -OK; -9 -7#/definitions/io.k8s.api.flowcontrol.v1beta1.FlowSchema - -401 - - UnauthorizedRhttpsjk -x-kubernetes-group-version-kindHFgroup: flowcontrol.apiserver.k8s.io -kind: FlowSchema -version: v1beta1 -j -x-kubernetes-actionpatch -J7 -53"1pathname of the FlowSchema"name*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ê& -/api/v1/endpointsÔ&È -core_v1'list or watch objects of kind Endpoints*#listCoreV1EndpointsForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*J\ - -401 - - Unauthorized -A -200: -8 -OK2 -0 -.#/definitions/io.k8s.api.core.v1.EndpointsListRhttpsjK -x-kubernetes-group-version-kind(&group: "" -kind: Endpoints -version: v1 -j -x-kubernetes-actionlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. 
If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ¦) -N/apis/storage.k8s.io/v1beta1/watch/namespaces/{namespace}/csistoragecapacitiesÓ(å -storage_v1beta1~watch individual changes to a list of CSIStorageCapacity. deprecated: use the 'watch' parameter with a list operation instead.*3watchStorageV1beta1NamespacedCSIStorageCapacityList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -je -x-kubernetes-group-version-kindB@kind: CSIStorageCapacity -version: v1beta1 -group: storage.k8s.io -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ´) -4/api/v1/watch/namespaces/{namespace}/services/{name}û(× -core_v1®watch changes to an object of kind Service. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*watchCoreV1NamespacedService2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsjI -x-kubernetes-group-version-kind&$group: "" -kind: Service -version: v1 -j -x-kubernetes-actionwatch -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
[Binary content: a protobuf-serialized Kubernetes OpenAPI v2 (swagger) document; the raw bytes are not representable as text. The recoverable fragments describe standard Kubernetes API paths, for example /apis/certificates.k8s.io/v1beta1/, /api/v1/watch/namespaces/{namespace}/pods/{name}, /apis/apiextensions.k8s.io/v1beta1/customresourcedefinitions/{name}/status, /apis/networking.k8s.io/, /apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets/{name}/status, /apis/rbac.authorization.k8s.io/v1beta1/clusterroles/{name}, /api/v1/namespaces/{namespace}/endpoints/{name}, /apis/policy/v1beta1/podsecuritypolicies/{name}, /apis/rbac.authorization.k8s.io/v1/watch/namespaces/{namespace}/roles/{name}, /api/v1/watch/namespaces/{namespace}/podtemplates/{name}, /apis/discovery.k8s.io/v1beta1/, /apis/rbac.authorization.k8s.io/v1beta1/namespaces/{namespace}/roles/{name}, /apis/storage.k8s.io/v1beta1/storageclasses, /api/v1/namespaces/{namespace}/replicationcontrollers, and /apis/extensions/v1beta1/watch/namespaces/{namespace}/ingresses, together with the common list/watch query parameters (allowWatchBookmarks, continue, fieldSelector, labelSelector, limit, resourceVersion, resourceVersionMatch, timeoutSeconds, watch) and write/delete parameters (dryRun, fieldManager, force, gracePeriodSeconds, orphanDependents, propagationPolicy).]
-=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -jV -x-kubernetes-group-version-kind31group: extensions -kind: Ingress -version: v1beta1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean »( -+/api/v1/watch/namespaces/{namespace}/events‹( -core_v1qwatch individual changes to a list of Event. deprecated: use the 'watch' parameter with a list operation instead.*watchCoreV1NamespacedEventList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsjG -x-kubernetes-group-version-kind$"group: "" -kind: Event -version: v1 -j# -x-kubernetes-action  -watchlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ÷( -K/apis/admissionregistration.k8s.io/v1/watch/validatingwebhookconfigurations§(› -admissionregistration_v1Šwatch individual changes to a list of ValidatingWebhookConfiguration. deprecated: use the 'watch' parameter with a list operation instead.*>watchAdmissionregistrationV1ValidatingWebhookConfigurationList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? 
-=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsjz -x-kubernetes-group-version-kindWUkind: ValidatingWebhookConfiguration -version: v1 -group: admissionregistration.k8s.io -j# -x-kubernetes-action  -watchlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ¢( -6/apis/apiregistration.k8s.io/v1beta1/watch/apiservicesç'Û -apiregistration_v1beta1vwatch individual changes to a list of APIService. deprecated: use the 'watch' parameter with a list operation instead.*)watchApiregistrationV1beta1APIServiceList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -je -x-kubernetes-group-version-kindB@group: apiregistration.k8s.io -version: v1beta1 -kind: APIService -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean Ò -E/apis/certificates.k8s.io/v1/certificatesigningrequests/{name}/statusˆÐ -certificates_v16read status of the specified CertificateSigningRequest*1readCertificatesV1CertificateSigningRequestStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Jp - -401 - - Unauthorized -U -200N -L -OKF -D -B#/definitions/io.k8s.api.certificates.v1.CertificateSigningRequestRhttpsjl -x-kubernetes-group-version-kindIGgroup: certificates.k8s.io -kind: CertificateSigningRequest -version: v1 -j -x-kubernetes-actionget -Ç -certificates_v19replace status of the specified CertificateSigningRequest*4replaceCertificatesV1CertificateSigningRequestStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BX -V -Tbodybody *D -B#/definitions/io.k8s.api.certificates.v1.CertificateSigningRequestBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string JÌ -U -200N -L -OKF -D -B#/definitions/io.k8s.api.certificates.v1.CertificateSigningRequest -Z -201S -Q -CreatedF -D -B#/definitions/io.k8s.api.certificates.v1.CertificateSigningRequest - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionput -jl -x-kubernetes-group-version-kindIGkind: CertificateSigningRequest -version: v1 -group: certificates.k8s.io -BÏ -certificates_v1Bpartially update status of the specified CertificateSigningRequest*2patchCertificatesV1CertificateSigningRequestStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests."force2boolean Jp -U -200N -L -OKF -D -B#/definitions/io.k8s.api.certificates.v1.CertificateSigningRequest - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpatch -jl -x-kubernetes-group-version-kindIGversion: v1 -group: certificates.k8s.io -kind: CertificateSigningRequest -JF -DB"@path%name of the CertificateSigningRequest"name*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string £* -5/apis/networking.k8s.io/v1beta1/ingressclasses/{name}é)˜ -networking_v1beta1read the specified IngressClass*!readNetworkingV1beta1IngressClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Jf -K -200D -B -OK< -: -8#/definitions/io.k8s.api.networking.v1beta1.IngressClass - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionget -jb -x-kubernetes-group-version-kind?=kind: IngressClass -version: v1beta1 -group: networking.k8s.io -û -networking_v1beta1"replace the specified IngressClass*$replaceNetworkingV1beta1IngressClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BN -L -Jbodybody *: -8#/definitions/io.k8s.api.networking.v1beta1.IngressClassBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¸ -P -201I -G -Created< -: -8#/definitions/io.k8s.api.networking.v1beta1.IngressClass - -401 - - Unauthorized -K -200D -B -OK< -: -8#/definitions/io.k8s.api.networking.v1beta1.IngressClassRhttpsjb -x-kubernetes-group-version-kind?=version: v1beta1 -group: networking.k8s.io -kind: IngressClass -j -x-kubernetes-actionput -*§ -networking_v1beta1delete an IngressClass*#deleteNetworkingV1beta1IngressClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status -R -202K -I -Accepted= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsjb -x-kubernetes-group-version-kind?=version: v1beta1 -group: networking.k8s.io -kind: IngressClass -j -x-kubernetes-action delete -B— -networking_v1beta1+partially update the specified IngressClass*"patchNetworkingV1beta1IngressClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean Jf -K -200D -B -OK< -: -8#/definitions/io.k8s.api.networking.v1beta1.IngressClass - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpatch -jb -x-kubernetes-group-version-kind?=version: v1beta1 -group: networking.k8s.io -kind: IngressClass -J9 -75"3pathname of the IngressClass"name*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ÷) -M/apis/networking.k8s.io/v1beta1/watch/namespaces/{namespace}/ingresses/{name}¥) -networking_v1beta1®watch changes to an object of kind Ingress. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*'watchNetworkingV1beta1NamespacedIngress2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj] -x-kubernetes-group-version-kind:8group: networking.k8s.io -kind: Ingress -version: v1beta1 -j -x-kubernetes-actionwatch -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J4 -20".pathname of the Ingress"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ¤( -3/apis/rbac.authorization.k8s.io/v1beta1/watch/rolesì'à -rbacAuthorization_v1beta1pwatch individual changes to a list of Role. deprecated: use the 'watch' parameter with a list operation instead.*5watchRbacAuthorizationV1beta1RoleListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -jb -x-kubernetes-group-version-kind?=group: rbac.authorization.k8s.io -kind: Role -version: v1beta1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean æ( - /api/v1/persistentvolumes/{name}Á(ó -core_v1#read the specified PersistentVolume*readCoreV1PersistentVolume2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*J_ -D -200= -; -OK5 -3 -1#/definitions/io.k8s.api.core.v1.PersistentVolume - -401 - - UnauthorizedRhttpsjR -x-kubernetes-group-version-kind/-kind: PersistentVolume -version: v1 -group: "" -j -x-kubernetes-actionget -È -core_v1&replace the specified PersistentVolume*replaceCoreV1PersistentVolume2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BG -E -Cbodybody *3 -1#/definitions/io.k8s.api.core.v1.PersistentVolumeBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." 
fieldManager2string Jª -D -200= -; -OK5 -3 -1#/definitions/io.k8s.api.core.v1.PersistentVolume -I -201B -@ -Created5 -3 -1#/definitions/io.k8s.api.core.v1.PersistentVolume - -401 - - UnauthorizedRhttpsjR -x-kubernetes-group-version-kind/-group: "" -kind: PersistentVolume -version: v1 -j -x-kubernetes-actionput -*ø -core_v1delete a PersistentVolume*deleteCoreV1PersistentVolume2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J« -D -200= -; -OK5 -3 -1#/definitions/io.k8s.api.core.v1.PersistentVolume -J -202C -A -Accepted5 -3 -1#/definitions/io.k8s.api.core.v1.PersistentVolume - -401 - - UnauthorizedRhttpsj -x-kubernetes-action delete -jR -x-kubernetes-group-version-kind/-version: v1 -group: "" -kind: PersistentVolume -Bò -core_v1/partially update the specified PersistentVolume*patchCoreV1PersistentVolume2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
[Binary content omitted: this span is a protobuf-serialized Kubernetes OpenAPI (swagger) document from a vendored dependency and is not human-readable in the diff. The recoverable portion consists of generated path definitions for the core (""), apps, apiregistration.k8s.io, autoscaling, rbac.authorization.k8s.io, scheduling.k8s.io, discovery.k8s.io, and coordination.k8s.io API groups (PersistentVolumes, StatefulSets and their scale subresource, Secrets, HorizontalPodAutoscalers, ClusterRoles, ClusterRoleBindings, Endpoints, ReplicaSets, PriorityClasses, EndpointSlices, Leases), covering the standard get/list/watch/create/replace/delete/deletecollection/patch operations and their shared query parameters (pretty, dryRun, fieldManager, force, gracePeriodSeconds, orphanDependents, propagationPolicy, labelSelector, fieldSelector, continue, limit, resourceVersion, resourceVersionMatch, timeoutSeconds, watch, allowWatchBookmarks). Nothing beyond the generated specification is recoverable from this span.]
fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ÷& -/apis/events.k8s.io/v1/eventsÕ&É - events_v1#list or watch objects of kind Event*!listEventsV1EventForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*JZ -? -2008 -6 -OK0 -. -,#/definitions/io.k8s.api.events.v1.EventList - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionlist -jR -x-kubernetes-group-version-kind/-group: events.k8s.io -kind: Event -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. 
If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Š) -G/apis/networking.k8s.io/v1/watch/namespaces/{namespace}/networkpolicies¾(Ð - networking_v1ywatch individual changes to a list of NetworkPolicy. deprecated: use the 'watch' parameter with a list operation instead.*,watchNetworkingV1NamespacedNetworkPolicyList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -j^ -x-kubernetes-group-version-kind;9group: networking.k8s.io -kind: NetworkPolicy -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ¼^ -@/apis/networking.k8s.io/v1beta1/namespaces/{namespace}/ingresses÷]§& -networking_v1beta1%list or watch objects of kind Ingress*&listNetworkingV1beta1NamespacedIngress2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean Je -J -200C -A -OK; -9 -7#/definitions/io.k8s.api.networking.v1beta1.IngressList - -401 - - UnauthorizedRhttpsj] -x-kubernetes-group-version-kind:8group: networking.k8s.io -kind: Ingress -version: v1beta1 -j -x-kubernetes-actionlist -"© -networking_v1beta1create an Ingress*(createNetworkingV1beta1NamespacedIngress2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BI -G -Ebodybody *5 -3#/definitions/io.k8s.api.networking.v1beta1.IngressBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jü -F -200? -= -OK7 -5 -3#/definitions/io.k8s.api.networking.v1beta1.Ingress -K -201D -B -Created7 -5 -3#/definitions/io.k8s.api.networking.v1beta1.Ingress -L -202E -C -Accepted7 -5 -3#/definitions/io.k8s.api.networking.v1beta1.Ingress - -401 - - UnauthorizedRhttpsj] -x-kubernetes-group-version-kind:8group: networking.k8s.io -kind: Ingress -version: v1beta1 -j -x-kubernetes-actionpost -*ë, -networking_v1beta1delete collection of Ingress*2deleteNetworkingV1beta1CollectionNamespacedIngress2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. 
If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj* -x-kubernetes-actiondeletecollection -j] -x-kubernetes-group-version-kind:8group: networking.k8s.io -kind: Ingress -version: v1beta1 -J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ‡ -5/apis/authorization.k8s.io/v1/selfsubjectrulesreviewsÍ -"¿ -authorization_v1create a SelfSubjectRulesReview*+createAuthorizationV1SelfSubjectRulesReview2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BV -T -Rbodybody *B -@#/definitions/io.k8s.api.authorization.v1.SelfSubjectRulesReviewJ£ -S -200L -J -OKD -B -@#/definitions/io.k8s.api.authorization.v1.SelfSubjectRulesReview -X -201Q -O -CreatedD -B -@#/definitions/io.k8s.api.authorization.v1.SelfSubjectRulesReview -Y -202R -P -AcceptedD -B -@#/definitions/io.k8s.api.authorization.v1.SelfSubjectRulesReview - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpost -jj -x-kubernetes-group-version-kindGEkind: SelfSubjectRulesReview -version: v1 -group: authorization.k8s.io -Jž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string J– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ø& -/apis/apps/v1/replicasetsÚ&Î -apps_v1(list or watch objects of kind ReplicaSet*$listAppsV1ReplicaSetForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*J] -B -200; -9 -OK3 -1 -/#/definitions/io.k8s.api.apps.v1.ReplicaSetList - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionlist -jN -x-kubernetes-group-version-kind+)group: apps -kind: ReplicaSet -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ë' -)/apis/node.k8s.io/v1/watch/runtimeclasses½'± -node_v1xwatch individual changes to a list of RuntimeClass. 
deprecated: use the 'watch' parameter with a list operation instead.*watchNodeV1RuntimeClassList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsjW -x-kubernetes-group-version-kind42group: node.k8s.io -kind: RuntimeClass -version: v1 -j# -x-kubernetes-action  -watchlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
- -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean È' -)/apis/policy/v1beta1/poddisruptionbudgetsš'Ž -policy_v1beta11list or watch objects of kind PodDisruptionBudget*4listPolicyV1beta1PodDisruptionBudgetForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jm -R -200K -I -OKC -A -?#/definitions/io.k8s.api.policy.v1beta1.PodDisruptionBudgetList - -401 - - UnauthorizedRhttpsj^ -x-kubernetes-group-version-kind;9group: policy -kind: PodDisruptionBudget -version: v1beta1 -j -x-kubernetes-actionlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean Ý) -5/api/v1/namespaces/{namespace}/serviceaccounts/{name}£)õ -core_v1!read the specified ServiceAccount*"readCoreV1NamespacedServiceAccount2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*J] - -401 - - Unauthorized -B -200; -9 -OK3 -1 -/#/definitions/io.k8s.api.core.v1.ServiceAccountRhttpsj -x-kubernetes-actionget -jP -x-kubernetes-group-version-kind-+group: "" -kind: ServiceAccount -version: v1 -Æ -core_v1$replace the specified ServiceAccount*%replaceCoreV1NamespacedServiceAccount2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BE -C -Abodybody *1 -/#/definitions/io.k8s.api.core.v1.ServiceAccountBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¦ -B -200; -9 -OK3 -1 -/#/definitions/io.k8s.api.core.v1.ServiceAccount -G -201@ -> -Created3 -1 -/#/definitions/io.k8s.api.core.v1.ServiceAccount - -401 - - UnauthorizedRhttpsjP -x-kubernetes-group-version-kind-+group: "" -kind: ServiceAccount -version: v1 -j -x-kubernetes-actionput -*ø -core_v1delete a ServiceAccount*$deleteCoreV1NamespacedServiceAccount2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J§ -B -200; -9 -OK3 -1 -/#/definitions/io.k8s.api.core.v1.ServiceAccount -H -202A -? 
[binary content omitted: protobuf-serialized Kubernetes OpenAPI (swagger) specification from a vendored dependency — operation, parameter, and response definitions for the core/v1, apps/v1, autoscaling, certificates.k8s.io, events.k8s.io, networking.k8s.io, policy, scheduling.k8s.io, and authentication.k8s.io API groups]
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. 
Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj* -x-kubernetes-actiondeletecollection -jM -x-kubernetes-group-version-kind*(group: apps -kind: DaemonSet -version: v1 -J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string  ) -6/apis/apps/v1/namespaces/{namespace}/daemonsets/{name}å(ã -apps_v1read the specified DaemonSet*readAppsV1NamespacedDaemonSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JX -= -2006 -4 -OK. -, -*#/definitions/io.k8s.api.apps.v1.DaemonSet - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionget -jM -x-kubernetes-group-version-kind*(group: apps -kind: DaemonSet -version: v1 -ª -apps_v1replace the specified DaemonSet* replaceAppsV1NamespacedDaemonSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B@ -> -<bodybody *, -*#/definitions/io.k8s.api.apps.v1.DaemonSetBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jœ -= -2006 -4 -OK. -, -*#/definitions/io.k8s.api.apps.v1.DaemonSet -B -201; -9 -Created. -, -*#/definitions/io.k8s.api.apps.v1.DaemonSet - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionput -jM -x-kubernetes-group-version-kind*(group: apps -kind: DaemonSet -version: v1 -*ÿ -apps_v1delete a DaemonSet*deleteAppsV1NamespacedDaemonSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. 
An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status -R -202K -I -Accepted= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj -x-kubernetes-action delete -jM -x-kubernetes-group-version-kind*(group: apps -kind: DaemonSet -version: v1 -Bâ -apps_v1(partially update the specified DaemonSet*patchAppsV1NamespacedDaemonSet2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean JX -= -2006 -4 -OK. -, -*#/definitions/io.k8s.api.apps.v1.DaemonSet - -401 - - UnauthorizedRhttpsjM -x-kubernetes-group-version-kind*(version: v1 -group: apps -kind: DaemonSet -j -x-kubernetes-actionpatch -J6 -42"0pathname of the DaemonSet"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ×( -6/apis/apps/v1/watch/namespaces/{namespace}/replicasetsœ(® -apps_v1vwatch individual changes to a list of ReplicaSet. 
deprecated: use the 'watch' parameter with a list operation instead.*#watchAppsV1NamespacedReplicaSetList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -jN -x-kubernetes-group-version-kind+)group: apps -kind: ReplicaSet -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
- -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ™) -J/apis/discovery.k8s.io/v1beta1/watch/namespaces/{namespace}/endpointslicesÊ(Ü -discovery_v1beta1ywatch individual changes to a list of EndpointSlice. deprecated: use the 'watch' parameter with a list operation instead.*0watchDiscoveryV1beta1NamespacedEndpointSliceList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -jb -x-kubernetes-group-version-kind?=group: discovery.k8s.io -kind: EndpointSlice -version: v1beta1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ï( -I/apis/admissionregistration.k8s.io/v1/watch/mutatingwebhookconfigurations¡(• -admissionregistration_v1ˆwatch individual changes to a list of MutatingWebhookConfiguration. 
deprecated: use the 'watch' parameter with a list operation instead.* -OK8 -6 -4#/definitions/io.k8s.api.storage.v1beta1.CSINodeList - -401 - - UnauthorizedRhttpsjZ -x-kubernetes-group-version-kind75kind: CSINode -version: v1beta1 -group: storage.k8s.io -j -x-kubernetes-actionlist -"‰ -storage_v1beta1create a CSINode*createStorageV1beta1CSINode2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BF -D -Bbodybody *2 -0#/definitions/io.k8s.api.storage.v1beta1.CSINodeBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jó -C -200< -: -OK4 -2 -0#/definitions/io.k8s.api.storage.v1beta1.CSINode -H -201A -? -Created4 -2 -0#/definitions/io.k8s.api.storage.v1beta1.CSINode -I -202B -@ -Accepted4 -2 -0#/definitions/io.k8s.api.storage.v1beta1.CSINode - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpost -jZ -x-kubernetes-group-version-kind75group: storage.k8s.io -kind: CSINode -version: v1beta1 -*Ø, -storage_v1beta1delete collection of CSINode*%deleteStorageV1beta1CollectionCSINode2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. 
If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsjZ -x-kubernetes-group-version-kind75version: v1beta1 -group: storage.k8s.io -kind: CSINode -j* -x-kubernetes-actiondeletecollection -JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ð* -
"<path!name of the PersistentVolumeClaim"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Ž -$/apis/authentication.k8s.io/v1beta1/åâ -authentication_v1beta1get available resources*$getAuthenticationV1beta1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp -U -200N -L -OKF -D -B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList - -401 - - UnauthorizedRhttps‹ -#/apis/apiextensions.k8s.io/v1beta1/ãà -apiextensions_v1beta1get available resources*#getApiextensionsV1beta1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp - -401 - - Unauthorized -U -200N -L -OKF -D -B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceListRhttps× -/apis/events.k8s.io/¾» -eventsget information of a group*getEventsAPIGroup2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJi -N -200G -E -OK? -= -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup - -401 - - UnauthorizedRhttps¸( - -2007 -5 -OK/ -- -+#/definitions/io.k8s.api.core.v1.SecretList - -401 - - UnauthorizedRhttpsjH -x-kubernetes-group-version-kind%#group: "" -kind: Secret -version: v1 -j -x-kubernetes-actionlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." 
labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean „( -'/apis/apps/v1/watch/controllerrevisionsØ'Ì -apps_v1~watch individual changes to a list of ControllerRevision. deprecated: use the 'watch' parameter with a list operation instead.*1watchAppsV1ControllerRevisionListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk - -401 - - Unauthorized -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEventRhttpsjV -x-kubernetes-group-version-kind31version: v1 -group: apps -kind: ControllerRevision -j# -x-kubernetes-action  -watchlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. 
If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean  ^ -9/apis/extensions/v1beta1/namespaces/{namespace}/ingressesâ] & -extensions_v1beta1%list or watch objects of kind Ingress*&listExtensionsV1beta1NamespacedIngress2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Je - -401 - - Unauthorized -J -200C -A -OK; -9 -7#/definitions/io.k8s.api.extensions.v1beta1.IngressListRhttpsj -x-kubernetes-actionlist -jV -x-kubernetes-group-version-kind31group: extensions -kind: Ingress -version: v1beta1 -"¢ -extensions_v1beta1create an Ingress*(createExtensionsV1beta1NamespacedIngress2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BI -G -Ebodybody *5 -3#/definitions/io.k8s.api.extensions.v1beta1.IngressBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jü -F -200? -= -OK7 -5 -3#/definitions/io.k8s.api.extensions.v1beta1.Ingress -K -201D -B -Created7 -5 -3#/definitions/io.k8s.api.extensions.v1beta1.Ingress -L -202E -C -Accepted7 -5 -3#/definitions/io.k8s.api.extensions.v1beta1.Ingress - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpost -jV -x-kubernetes-group-version-kind31group: extensions -kind: Ingress -version: v1beta1 -*ä, -extensions_v1beta1delete collection of Ingress*2deleteExtensionsV1beta1CollectionNamespacedIngress2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
[Protobuf-encoded Kubernetes OpenAPI v2 (swagger) document, carried here as vendored binary content; the raw bytes are not human-editable. The span covers the tail of the extensions/v1beta1 Ingress delete-collection operation and the operation definitions for /apis/node.k8s.io/v1/watch/runtimeclasses/{name}, /apis/apps/v1/, /api/v1/watch/namespaces/{namespace}/services, /api/v1/watch/persistentvolumes/{name}, /apis/apiregistration.k8s.io/, /apis/coordination.k8s.io/v1/watch/namespaces/{namespace}/leases/{name}, /apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets, /api/v1/watch/namespaces/{namespace}/secrets/{name}, the storage.k8s.io/v1 VolumeAttachment collection, /apis/admissionregistration.k8s.io/v1/mutatingwebhookconfigurations, /apis/coordination.k8s.io/v1beta1/leases, and /apis/scheduling.k8s.io/v1/watch/priorityclasses/{name}.

Each list and watch operation documents the standard query parameters (allowWatchBookmarks, continue, fieldSelector, labelSelector, limit, pretty, resourceVersion, resourceVersionMatch, timeoutSeconds, watch); create operations add dryRun and fieldManager; delete-collection operations add dryRun, gracePeriodSeconds, orphanDependents, and propagationPolicy. The shared parameter descriptions spell out the limit/continue pagination contract: the server returns a continue token on the list metadata, paging stays consistent with the snapshot taken at the first request, the token expires (generally after five to fifteen minutes) or becomes invalid on configuration change, and an expired token produces a 410 ResourceExpired error whose accompanying token may be used to resume from the latest, inconsistent snapshot; a client that needs consistency must restart the list without a continue value.]
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ó) -0/apis/storage.k8s.io/v1/volumeattachments/{name}ž)ˆ - -storage_v1#read the specified VolumeAttachment*readStorageV1VolumeAttachment2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Jb -G -200@ -> -OK8 -6 -4#/definitions/io.k8s.api.storage.v1.VolumeAttachment - -401 - - UnauthorizedRhttpsj^ -x-kubernetes-group-version-kind;9group: storage.k8s.io -kind: VolumeAttachment -version: v1 -j -x-kubernetes-actionget -ã - -storage_v1&replace the specified VolumeAttachment* replaceStorageV1VolumeAttachment2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BJ -H -Fbodybody *6 -4#/definitions/io.k8s.api.storage.v1.VolumeAttachmentBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J° -G -200@ -> -OK8 -6 -4#/definitions/io.k8s.api.storage.v1.VolumeAttachment -L -201E -C -Created8 -6 -4#/definitions/io.k8s.api.storage.v1.VolumeAttachment - -401 - - UnauthorizedRhttpsj^ -x-kubernetes-group-version-kind;9group: storage.k8s.io -kind: VolumeAttachment -version: v1 -j -x-kubernetes-actionput -* - -storage_v1delete a VolumeAttachment*deleteStorageV1VolumeAttachment2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J± -G -200@ -> -OK8 -6 -4#/definitions/io.k8s.api.storage.v1.VolumeAttachment -M -202F -D -Accepted8 -6 -4#/definitions/io.k8s.api.storage.v1.VolumeAttachment - -401 - - UnauthorizedRhttpsj -x-kubernetes-action delete -j^ -x-kubernetes-group-version-kind;9group: storage.k8s.io -kind: VolumeAttachment -version: v1 -B‡ - -storage_v1/partially update the specified VolumeAttachment*patchStorageV1VolumeAttachment2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean Jb -G -200@ -> -OK8 -6 -4#/definitions/io.k8s.api.storage.v1.VolumeAttachment - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpatch -j^ -x-kubernetes-group-version-kind;9group: storage.k8s.io -kind: VolumeAttachment -version: v1 -J= -;9"7pathname of the VolumeAttachment"name*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string þ' --/apis/storage.k8s.io/v1beta1/watch/csidriversÌ'À -storage_v1beta1uwatch individual changes to a list of CSIDriver. deprecated: use the 'watch' parameter with a list operation instead.* watchStorageV1beta1CSIDriverList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -j\ -x-kubernetes-group-version-kind97version: v1beta1 -group: storage.k8s.io -kind: CSIDriver -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ÷( ->/apis/apps/v1/watch/namespaces/{namespace}/controllerrevisions´(Æ -apps_v1~watch individual changes to a list of ControllerRevision. deprecated: use the 'watch' parameter with a list operation instead.*+watchAppsV1NamespacedControllerRevisionList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -jV -x-kubernetes-group-version-kind31group: apps -kind: ControllerRevision -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ' -"/apis/events.k8s.io/v1beta1/eventsé&Ý -events_v1beta1#list or watch objects of kind Event*&listEventsV1beta1EventForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*J_ -D -200= -; -OK5 -3 -1#/definitions/io.k8s.api.events.v1beta1.EventList - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionlist -jW -x-kubernetes-group-version-kind42group: events.k8s.io -kind: Event -version: v1beta1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion."watch2boolean Ù( --/api/v1/namespaces/{namespace}/secrets/{name}§(Õ -core_v1read the specified Secret*readCoreV1NamespacedSecret2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JU - -401 - - Unauthorized -: -2003 -1 -OK+ -) -'#/definitions/io.k8s.api.core.v1.SecretRhttpsjH -x-kubernetes-group-version-kind%#group: "" -kind: Secret -version: v1 -j -x-kubernetes-actionget -– -core_v1replace the specified Secret*replaceCoreV1NamespacedSecret2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B= -; -9bodybody *) -'#/definitions/io.k8s.api.core.v1.SecretBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J– - -401 - - Unauthorized -: -2003 -1 -OK+ -) -'#/definitions/io.k8s.api.core.v1.Secret -? -2018 -6 -Created+ -) -'#/definitions/io.k8s.api.core.v1.SecretRhttpsj -x-kubernetes-actionput -jH -x-kubernetes-group-version-kind%#group: "" -kind: Secret -version: v1 -*ô -core_v1delete a Secret*deleteCoreV1NamespacedSecret2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status -R -202K -I -Accepted= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj -x-kubernetes-action delete -jH -x-kubernetes-group-version-kind%#group: "" -kind: Secret -version: v1 -BÔ -core_v1%partially update the specified Secret*patchCoreV1NamespacedSecret2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean JU -: -2003 -1 -OK+ -) -'#/definitions/io.k8s.api.core.v1.Secret - -401 - - UnauthorizedRhttpsjH -x-kubernetes-group-version-kind%#group: "" -kind: Secret -version: v1 -j -x-kubernetes-actionpatch -J3 -1/"-pathname of the Secret"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ú) -B/api/v1/watch/namespaces/{namespace}/replicationcontrollers/{name}³) -core_v1¼watch changes to an object of kind ReplicationController. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.**watchCoreV1NamespacedReplicationController2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionwatch -jW -x-kubernetes-group-version-kind42group: "" -kind: ReplicationController -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JB -@>"<path!name of the ReplicationController"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ì' -/api/v1/watch/serviceaccountsÊ'¾ -core_v1zwatch individual changes to a list of ServiceAccount. deprecated: use the 'watch' parameter with a list operation instead.*-watchCoreV1ServiceAccountListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -jP -x-kubernetes-group-version-kind-+group: "" -kind: ServiceAccount -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Œ) -5/apis/batch/v1/namespaces/{namespace}/cronjobs/{name}Ò(ß -batch_v1read the specified CronJob*readBatchV1NamespacedCronJob2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JW - -401 - - Unauthorized -< -2005 -3 -OK- -+ -)#/definitions/io.k8s.api.batch.v1.CronJobRhttpsj -x-kubernetes-actionget -jL -x-kubernetes-group-version-kind)'kind: CronJob -version: v1 -group: batch -¤ -batch_v1replace the specified CronJob*replaceBatchV1NamespacedCronJob2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B? -= -;bodybody *+ -)#/definitions/io.k8s.api.batch.v1.CronJobBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." 
fieldManager2string Jš -< -2005 -3 -OK- -+ -)#/definitions/io.k8s.api.batch.v1.CronJob -A -201: -8 -Created- -+ -)#/definitions/io.k8s.api.batch.v1.CronJob - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionput -jL -x-kubernetes-group-version-kind)'group: batch -kind: CronJob -version: v1 -*ü -batch_v1delete a CronJob*deleteBatchV1NamespacedCronJob2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status -R -202K -I -Accepted= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj -x-kubernetes-action delete -jL -x-kubernetes-group-version-kind)'group: batch -kind: CronJob -version: v1 -BÞ -batch_v1&partially update the specified CronJob*patchBatchV1NamespacedCronJob2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests."force2boolean JW -< -2005 -3 -OK- -+ -)#/definitions/io.k8s.api.batch.v1.CronJob - -401 - - UnauthorizedRhttpsjL -x-kubernetes-group-version-kind)'group: batch -kind: CronJob -version: v1 -j -x-kubernetes-actionpatch -J4 -20".pathname of the CronJob"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ù( -6/apis/networking.k8s.io/v1/watch/ingressclasses/{name}¾(÷ - networking_v1³watch changes to an object of kind IngressClass. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*watchNetworkingV1IngressClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionwatch -j] -x-kubernetes-group-version-kind:8group: networking.k8s.io -kind: IngressClass -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
[Binary protobuf payload, not readable as text: this span appears to be vendored Kubernetes OpenAPI v2 (swagger) path definitions carried in the patch. The recoverable operation IDs are watchNetworkingV1beta1IngressClass, readStorageV1VolumeAttachmentStatus, replaceStorageV1VolumeAttachmentStatus, patchStorageV1VolumeAttachmentStatus, watchCoreV1Node, watchPolicyV1NamespacedPodDisruptionBudget, watchStorageV1CSINodeList, getNetworkingV1beta1APIResources, listBatchV1NamespacedJob, createBatchV1NamespacedJob, deleteBatchV1CollectionNamespacedJob, watchCoreV1NodeList, watchPolicyV1PodDisruptionBudgetListForAllNamespaces, watchPolicyV1beta1NamespacedPodDisruptionBudget, watchEventsV1beta1NamespacedEvent, watchAppsV1DeploymentListForAllNamespaces, getCoordinationV1APIResources, listDiscoveryV1beta1NamespacedEndpointSlice, createDiscoveryV1beta1NamespacedEndpointSlice, and deleteDiscoveryV1beta1CollectionNamespacedEndpointSlice. Each operation embeds the standard upstream list/watch query parameters (allowWatchBookmarks, continue, fieldSelector, labelSelector, limit, pretty, resourceVersion, resourceVersionMatch, timeoutSeconds, watch) and, for mutating verbs, dryRun, fieldManager, force, gracePeriodSeconds, orphanDependents, and propagationPolicy, together with the usual descriptions of list chunking via limit/continue and of resuming a watch from a returned resourceVersion.]
The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj* -x-kubernetes-actiondeletecollection -jb -x-kubernetes-group-version-kind?=group: discovery.k8s.io -kind: EndpointSlice -version: v1beta1 -J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ã -/apis/scheduling.k8s.io/Æà - -schedulingget information of a group*getSchedulingAPIGroup2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJi -N -200G -E -OK? -= -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup - -401 - - UnauthorizedRhttps‰ -C/api/v1/namespaces/{namespace}/replicationcontrollers/{name}/statusÁ¡ -core_v12read status of the specified ReplicationController*/readCoreV1NamespacedReplicationControllerStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Jd - -401 - - Unauthorized -I -200B -@ -OK: -8 -6#/definitions/io.k8s.api.core.v1.ReplicationControllerRhttpsj -x-kubernetes-actionget -jW -x-kubernetes-group-version-kind42group: "" -kind: ReplicationController -version: v1 -€ -core_v15replace status of the specified ReplicationController*2replaceCoreV1NamespacedReplicationControllerStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BL -J -Hbodybody *8 -6#/definitions/io.k8s.api.core.v1.ReplicationControllerBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J´ -I -200B -@ -OK: -8 -6#/definitions/io.k8s.api.core.v1.ReplicationController -N -201G -E -Created: -8 -6#/definitions/io.k8s.api.core.v1.ReplicationController - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionput -jW -x-kubernetes-group-version-kind42group: "" -kind: ReplicationController -version: v1 -B  -core_v1>partially update status of the specified ReplicationController*0patchCoreV1NamespacedReplicationControllerStatus2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." 
fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean Jd -I -200B -@ -OK: -8 -6#/definitions/io.k8s.api.core.v1.ReplicationController - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpatch -jW -x-kubernetes-group-version-kind42group: "" -kind: ReplicationController -version: v1 -JB -@>"<path!name of the ReplicationController"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ú) -B/api/v1/watch/namespaces/{namespace}/persistentvolumeclaims/{name}³) -core_v1¼watch changes to an object of kind PersistentVolumeClaim. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.**watchCoreV1NamespacedPersistentVolumeClaim2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionwatch -jW -x-kubernetes-group-version-kind42group: "" -kind: PersistentVolumeClaim -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JB -@>"<path!name of the PersistentVolumeClaim"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ´' -*/apis/networking.k8s.io/v1/networkpolicies…'ù - networking_v1+list or watch objects of kind NetworkPolicy*-listNetworkingV1NetworkPolicyForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jf - -401 - - Unauthorized -K -200D -B -OK< -: -8#/definitions/io.k8s.api.networking.v1.NetworkPolicyListRhttpsj -x-kubernetes-actionlist -j^ -x-kubernetes-group-version-kind;9group: networking.k8s.io -kind: NetworkPolicy -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. 
If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ž -6/api/v1/namespaces/{namespace}/pods/{name}/portforwardÓ¥ -core_v1*connect GET requests to portforward of Pod*(connectCoreV1GetNamespacedPodPortforward2*/*:*/*J7 - -200 - -OK - ² -string - -401 - - UnauthorizedRhttpsjW -x-kubernetes-group-version-kind42group: "" -kind: PodPortForwardOptions -version: v1 -j! -x-kubernetes-action -connect -"§ -core_v1+connect POST requests to portforward of Pod*)connectCoreV1PostNamespacedPodPortforward2*/*:*/*J7 - -200 - -OK - ² -string - -401 - - UnauthorizedRhttpsjW -x-kubernetes-group-version-kind42group: "" -kind: PodPortForwardOptions -version: v1 -j! -x-kubernetes-action -connect -JB -@>"<path!name of the PodPortForwardOptions"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JY -WUSquery7List of ports to forward Required when using WebSockets"ports2integer ‰( -$/api/v1/watch/persistentvolumeclaimsà'Ô -core_v1watch individual changes to a list of PersistentVolumeClaim. deprecated: use the 'watch' parameter with a list operation instead.*4watchCoreV1PersistentVolumeClaimListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -jW -x-kubernetes-group-version-kind42group: "" -kind: PersistentVolumeClaim -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ì -/apis/authorization.k8s.io/ÌÉ - authorizationget information of a group*getAuthorizationAPIGroup2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJi -N -200G -E -OK? 
-= -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup - -401 - - UnauthorizedRhttpsÊ\ -#/apis/node.k8s.io/v1/runtimeclasses¢\…& -node_v1*list or watch objects of kind RuntimeClass*listNodeV1RuntimeClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. 
This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean J_ -D -200= -; -OK5 -3 -1#/definitions/io.k8s.api.node.v1.RuntimeClassList - -401 - - UnauthorizedRhttpsjW -x-kubernetes-group-version-kind42group: node.k8s.io -kind: RuntimeClass -version: v1 -j -x-kubernetes-actionlist -"ô -node_v1create a RuntimeClass*createNodeV1RuntimeClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BC -A -?bodybody */ --#/definitions/io.k8s.api.node.v1.RuntimeClassBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jê -@ -2009 -7 -OK1 -/ --#/definitions/io.k8s.api.node.v1.RuntimeClass -E -201> -< -Created1 -/ --#/definitions/io.k8s.api.node.v1.RuntimeClass -F -202? -= -Accepted1 -/ --#/definitions/io.k8s.api.node.v1.RuntimeClass - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpost -jW -x-kubernetes-group-version-kind42group: node.k8s.io -kind: RuntimeClass -version: v1 -*Ï, -node_v1!delete collection of RuntimeClass*"deleteNodeV1CollectionRuntimeClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj* -x-kubernetes-actiondeletecollection -jW -x-kubernetes-group-version-kind42group: node.k8s.io -kind: RuntimeClass -version: v1 -JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Ð - /apis/policy/¾» -policyget information of a group*getPolicyAPIGroup2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJi -N -200G -E -OK? -= -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup - -401 - - UnauthorizedRhttps­ -3/api/v1/namespaces/{namespace}/pods/{name}/evictionõ -"Î -core_v1create eviction of a Pod*!createCoreV1NamespacedPodEviction2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BF -D -Bbodybody *2 -0#/definitions/io.k8s.api.policy.v1beta1.EvictionJó -C -200< -: -OK4 -2 -0#/definitions/io.k8s.api.policy.v1beta1.Eviction -H -201A -? -Created4 -2 -0#/definitions/io.k8s.api.policy.v1beta1.Eviction -I -202B -@ -Accepted4 -2 -0#/definitions/io.k8s.api.policy.v1beta1.Eviction - -401 - - UnauthorizedRhttpsjS -x-kubernetes-group-version-kind0.kind: Eviction -version: v1beta1 -group: policy -j -x-kubernetes-actionpost -Jž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string J– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J5 -31"/pathname of the Eviction"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string „' -/api/v1/serviceaccountsè&Ü -core_v1,list or watch objects of kind ServiceAccount*(listCoreV1ServiceAccountForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Ja -F -200? 
-= -OK7 -5 -3#/definitions/io.k8s.api.core.v1.ServiceAccountList - -401 - - UnauthorizedRhttpsjP -x-kubernetes-group-version-kind-+group: "" -kind: ServiceAccount -version: v1 -j -x-kubernetes-actionlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean „a -H/apis/admissionregistration.k8s.io/v1beta1/mutatingwebhookconfigurations·`ž' -admissionregistration_v1beta1:list or watch objects of kind MutatingWebhookConfiguration*createAdmissionregistrationV1beta1MutatingWebhookConfiguration2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Bi -g -ebodybody *U -S#/definitions/io.k8s.api.admissionregistration.v1beta1.MutatingWebhookConfigurationBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string JÜ - -401 - - Unauthorized -f -200_ -] -OKW -U -S#/definitions/io.k8s.api.admissionregistration.v1beta1.MutatingWebhookConfiguration -k -201d -b -CreatedW -U -S#/definitions/io.k8s.api.admissionregistration.v1beta1.MutatingWebhookConfiguration -l -202e -c -AcceptedW -U -S#/definitions/io.k8s.api.admissionregistration.v1beta1.MutatingWebhookConfigurationRhttpsj} -x-kubernetes-group-version-kindZXgroup: admissionregistration.k8s.io -kind: MutatingWebhookConfiguration -version: v1beta1 -j -x-kubernetes-actionpost -*Á- -admissionregistration_v1beta11delete collection of MutatingWebhookConfiguration*HdeleteAdmissionregistrationV1beta1CollectionMutatingWebhookConfiguration2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. 
[Protobuf-serialized Kubernetes OpenAPI (swagger) data: the raw bytes of this hunk are binary and are not reproducible as text. The readable fragments are the standard descriptions of the list/watch/delete-collection query parameters (allowWatchBookmarks, continue, dryRun, fieldManager, fieldSelector, force, gracePeriodSeconds, labelSelector, limit, orphanDependents, pretty, propagationPolicy, resourceVersion, resourceVersionMatch, timeoutSeconds, watch) together with request/response schemas for endpoints including /apis/authorization.k8s.io/v1/subjectaccessreviews, /apis/extensions/v1beta1/namespaces/{namespace}/ingresses/{name}/status, /apis/storage.k8s.io/v1beta1/volumeattachments, /api/v1/namespaces/{namespace}/podtemplates, /apis/networking.k8s.io/v1/ingressclasses, /apis/networking.k8s.io/v1beta1/watch/ingressclasses, /apis/apps/v1/watch/namespaces/{namespace}/deployments, /apis/apiextensions.k8s.io/v1/customresourcedefinitions, /apis/storage.k8s.io/v1beta1/, /apis/policy/v1/, and /logs/. The recoverable parameter text describes chunked listing: when more results exist the server sets a continue token on the list metadata; a stale token is rejected with 410 ResourceExpired, after which the client must restart the list without the token (or accept an inconsistent continuation served from the latest snapshot), and an empty continue field with a specified limit means no further results are available.]
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ³_ -H/apis/storage.k8s.io/v1beta1/namespaces/{namespace}/csistoragecapacitiesæ^Ç& -storage_v1beta10list or watch objects of kind CSIStorageCapacity*.listStorageV1beta1NamespacedCSIStorageCapacity2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Jm -R -200K -I -OKC -A -?#/definitions/io.k8s.api.storage.v1beta1.CSIStorageCapacityList - -401 - - UnauthorizedRhttpsje -x-kubernetes-group-version-kindB@group: storage.k8s.io -kind: CSIStorageCapacity -version: v1beta1 -j -x-kubernetes-actionlist -"à -storage_v1beta1create a CSIStorageCapacity*0createStorageV1beta1NamespacedCSIStorageCapacity2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BQ -O -Mbodybody *= -;#/definitions/io.k8s.api.storage.v1beta1.CSIStorageCapacityBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J” - -401 - - Unauthorized -N -200G -E -OK? -= -;#/definitions/io.k8s.api.storage.v1beta1.CSIStorageCapacity -S -201L -J -Created? -= -;#/definitions/io.k8s.api.storage.v1beta1.CSIStorageCapacity -T -202M -K -Accepted? -= -;#/definitions/io.k8s.api.storage.v1beta1.CSIStorageCapacityRhttpsj -x-kubernetes-actionpost -je -x-kubernetes-group-version-kindB@group: storage.k8s.io -kind: CSIStorageCapacity -version: v1beta1 -*ƒ- -storage_v1beta1'delete collection of CSIStorageCapacity*:deleteStorageV1beta1CollectionNamespacedCSIStorageCapacity2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. 
Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj* -x-kubernetes-actiondeletecollection -je -x-kubernetes-group-version-kindB@group: storage.k8s.io -kind: CSIStorageCapacity -version: v1beta1 -J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ´ - -'/api/v1/namespaces/{namespace}/bindingsˆ -"˜ -core_v1create a Binding*createCoreV1NamespacedBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B> -< -:bodybody ** -(#/definitions/io.k8s.api.core.v1.BindingJÛ -; -2004 -2 -OK, -* -(#/definitions/io.k8s.api.core.v1.Binding -@ -2019 -7 -Created, -* -(#/definitions/io.k8s.api.core.v1.Binding -A -202: -8 -Accepted, -* -(#/definitions/io.k8s.api.core.v1.Binding - -401 - - UnauthorizedRhttpsjI -x-kubernetes-group-version-kind&$group: "" -kind: Binding -version: v1 -j -x-kubernetes-actionpost -Jž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string J– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." 
fieldManager2string J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ú( -,/apis/storage.k8s.io/v1beta1/csinodes/{name}É(ø -storage_v1beta1read the specified CSINode*readStorageV1beta1CSINode2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*J^ -C -200< -: -OK4 -2 -0#/definitions/io.k8s.api.storage.v1beta1.CSINode - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionget -jZ -x-kubernetes-group-version-kind75group: storage.k8s.io -kind: CSINode -version: v1beta1 -Ë -storage_v1beta1replace the specified CSINode*replaceStorageV1beta1CSINode2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BF -D -Bbodybody *2 -0#/definitions/io.k8s.api.storage.v1beta1.CSINodeBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¨ -C -200< -: -OK4 -2 -0#/definitions/io.k8s.api.storage.v1beta1.CSINode -H -201A -? -Created4 -2 -0#/definitions/io.k8s.api.storage.v1beta1.CSINode - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionput -jZ -x-kubernetes-group-version-kind75group: storage.k8s.io -kind: CSINode -version: v1beta1 -*ü -storage_v1beta1delete a CSINode*deleteStorageV1beta1CSINode2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J© -C -200< -: -OK4 -2 -0#/definitions/io.k8s.api.storage.v1beta1.CSINode -I -202B -@ -Accepted4 -2 -0#/definitions/io.k8s.api.storage.v1beta1.CSINode - -401 - - UnauthorizedRhttpsj -x-kubernetes-action delete -jZ -x-kubernetes-group-version-kind75group: storage.k8s.io -kind: CSINode -version: v1beta1 -B÷ -storage_v1beta1&partially update the specified CSINode*patchStorageV1beta1CSINode2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean J^ - -401 - - Unauthorized -C -200< -: -OK4 -2 -0#/definitions/io.k8s.api.storage.v1beta1.CSINodeRhttpsj -x-kubernetes-actionpatch -jZ -x-kubernetes-group-version-kind75group: storage.k8s.io -kind: CSINode -version: v1beta1 -J4 -20".pathname of the CSINode"name*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Ö] -./api/v1/namespaces/{namespace}/serviceaccounts£]Ž& -core_v1,list or watch objects of kind ServiceAccount*"listCoreV1NamespacedServiceAccount2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. 
If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ja - -401 - - Unauthorized -F -200? 
-= -OK7 -5 -3#/definitions/io.k8s.api.core.v1.ServiceAccountListRhttpsjP -x-kubernetes-group-version-kind-+group: "" -kind: ServiceAccount -version: v1 -j -x-kubernetes-actionlist -"ƒ -core_v1create a ServiceAccount*$createCoreV1NamespacedServiceAccount2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BE -C -Abodybody *1 -/#/definitions/io.k8s.api.core.v1.ServiceAccountBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jð - -401 - - Unauthorized -B -200; -9 -OK3 -1 -/#/definitions/io.k8s.api.core.v1.ServiceAccount -G -201@ -> -Created3 -1 -/#/definitions/io.k8s.api.core.v1.ServiceAccount -H -202A -? -Accepted3 -1 -/#/definitions/io.k8s.api.core.v1.ServiceAccountRhttpsj -x-kubernetes-actionpost -jP -x-kubernetes-group-version-kind-+group: "" -kind: ServiceAccount -version: v1 -*Ö, -core_v1#delete collection of ServiceAccount*.deleteCoreV1CollectionNamespacedServiceAccount2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. 
zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj* -x-kubernetes-actiondeletecollection -jP -x-kubernetes-group-version-kind-+kind: ServiceAccount -version: v1 -group: "" -J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ó -;/api/v1/namespaces/{namespace}/serviceaccounts/{name}/token³ "ˆ -core_v1 create token of a ServiceAccount*)createCoreV1NamespacedServiceAccountToken2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BM -K -Ibodybody *9 -7#/definitions/io.k8s.api.authentication.v1.TokenRequestJˆ -J -200C -A -OK; -9 -7#/definitions/io.k8s.api.authentication.v1.TokenRequest -O -201H -F -Created; -9 -7#/definitions/io.k8s.api.authentication.v1.TokenRequest -P -202I -G -Accepted; -9 -7#/definitions/io.k8s.api.authentication.v1.TokenRequest - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpost -ja -x-kubernetes-group-version-kind> -2007 -5 -OK/ -- -+#/definitions/io.k8s.api.storage.v1.CSINode -C -201< -: -Created/ -- -+#/definitions/io.k8s.api.storage.v1.CSINode -D -202= -; -Accepted/ -- -+#/definitions/io.k8s.api.storage.v1.CSINode - -401 - - UnauthorizedRhttpsjU -x-kubernetes-group-version-kind20version: v1 -group: storage.k8s.io -kind: CSINode -j -x-kubernetes-actionpost -*É, - -storage_v1delete collection of CSINode* deleteStorageV1CollectionCSINode2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. 
If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsjU -x-kubernetes-group-version-kind20group: storage.k8s.io -kind: CSINode -version: v1 -j* -x-kubernetes-actiondeletecollection -JO -MKIquery-If 'true', then the output is pretty printed."pretty2string š( -4/apis/storage.k8s.io/v1beta1/watch/volumeattachmentsá'Õ -storage_v1beta1|watch individual changes to a list of VolumeAttachment. deprecated: use the 'watch' parameter with a list operation instead.*'watchStorageV1beta1VolumeAttachmentList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -jc -x-kubernetes-group-version-kind@>group: storage.k8s.io -kind: VolumeAttachment -version: v1beta1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ¿^ -5/api/v1/namespaces/{namespace}/persistentvolumeclaims…^ª& -core_v13list or watch objects of kind PersistentVolumeClaim*)listCoreV1NamespacedPersistentVolumeClaim2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. 
[Unrecoverable binary content: a protobuf-serialized Kubernetes OpenAPI v2 (swagger) specification carried in this patch's vendored dependencies. The readable fragments are the standard descriptions of the list/watch/create/patch/delete query parameters (continue, limit, fieldSelector, labelSelector, resourceVersion, resourceVersionMatch, timeoutSeconds, watch, allowWatchBookmarks, dryRun, fieldManager, force, gracePeriodSeconds, orphanDependents, propagationPolicy, pretty), repeated per operation, plus operation definitions for paths including:
    core v1 namespaced PersistentVolumeClaim (list, create, deletecollection)
    /apis/apiextensions.k8s.io/v1beta1/watch/customresourcedefinitions
    /apis/extensions/v1beta1/watch/ingresses
    /apis/storage.k8s.io/v1beta1/csidrivers/{name}
    /api/v1/namespaces/{namespace}/pods/{name}/log
    /apis/apps/v1/controllerrevisions
    /apis/coordination.k8s.io/v1/namespaces/{namespace}/leases/{name}
    /apis/discovery.k8s.io/v1/endpointslices
    /apis/rbac.authorization.k8s.io/v1beta1/watch/clusterrolebindings
    /apis/apps/ and /apis/apps/v1/statefulsets
    /apis/authorization.k8s.io/v1/ and .../namespaces/{namespace}/localsubjectaccessreviews
    /api/v1/namespaces/{namespace}/pods/{name}/status
    /apis/admissionregistration.k8s.io/v1beta1/watch/mutatingwebhookconfigurations
    /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}/status
    /apis/coordination.k8s.io/v1/watch/leases]
Specify resourceVersion."watch2boolean * -N/apis/networking.k8s.io/v1/watch/namespaces/{namespace}/networkpolicies/{name}®)„ - networking_v1´watch changes to an object of kind NetworkPolicy. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*(watchNetworkingV1NamespacedNetworkPolicy2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionwatch -j^ -x-kubernetes-group-version-kind;9kind: NetworkPolicy -version: v1 -group: networking.k8s.io -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
- -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J: -86"4pathname of the NetworkPolicy"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ž' -(/apis/rbac.authorization.k8s.io/v1/rolesñ&å -rbacAuthorization_v1"list or watch objects of kind Role*+listRbacAuthorizationV1RoleForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*JW -< -2005 -3 -OK- -+ -)#/definitions/io.k8s.api.rbac.v1.RoleList - -401 - - UnauthorizedRhttpsj] -x-kubernetes-group-version-kind:8kind: Role -version: v1 -group: rbac.authorization.k8s.io -j -x-kubernetes-actionlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ¬( -5/apis/rbac.authorization.k8s.io/v1/watch/rolebindingsò'æ -rbacAuthorization_v1wwatch individual changes to a list of RoleBinding. 
deprecated: use the 'watch' parameter with a list operation instead.*7watchRbacAuthorizationV1RoleBindingListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -jd -x-kubernetes-group-version-kindA?group: rbac.authorization.k8s.io -kind: RoleBinding -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
- -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ì( -./api/v1/namespaces/{namespace}/services/{name}¹(Ù -core_v1read the specified Service*readCoreV1NamespacedService2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JV -; -2004 -2 -OK, -* -(#/definitions/io.k8s.api.core.v1.Service - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionget -jI -x-kubernetes-group-version-kind&$group: "" -kind: Service -version: v1 -œ -core_v1replace the specified Service*replaceCoreV1NamespacedService2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B> -< -:bodybody ** -(#/definitions/io.k8s.api.core.v1.ServiceBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J˜ -@ -2019 -7 -Created, -* -(#/definitions/io.k8s.api.core.v1.Service - -401 - - Unauthorized -; -2004 -2 -OK, -* -(#/definitions/io.k8s.api.core.v1.ServiceRhttpsj -x-kubernetes-actionput -jI -x-kubernetes-group-version-kind&$group: "" -kind: Service -version: v1 -*÷ -core_v1delete a Service*deleteCoreV1NamespacedService2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» - -401 - - Unauthorized -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status -R -202K -I -Accepted= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusRhttpsjI -x-kubernetes-group-version-kind&$group: "" -kind: Service -version: v1 -j -x-kubernetes-action delete -BØ -core_v1&partially update the specified Service*patchCoreV1NamespacedService2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests."force2boolean JV -; -2004 -2 -OK, -* -(#/definitions/io.k8s.api.core.v1.Service - -401 - - UnauthorizedRhttpsjI -x-kubernetes-group-version-kind&$group: "" -kind: Service -version: v1 -j -x-kubernetes-actionpatch -J4 -20".pathname of the Service"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string †_ -0/apis/apiregistration.k8s.io/v1beta1/apiservicesÑ^Ó& -apiregistration_v1beta1(list or watch objects of kind APIService*$listApiregistrationV1beta1APIService2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. 
This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean J‚ -g -200` -^ -OKX -V -T#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIServiceList - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionlist -je -x-kubernetes-group-version-kindB@group: apiregistration.k8s.io -version: v1beta1 -kind: APIService -"« - -apiregistration_v1beta1create an APIService*&createApiregistrationV1beta1APIService2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*Bf -d -bbodybody *R -P#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIServiceBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string JÓ - -401 - - Unauthorized -c -200\ -Z -OKT -R -P#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIService -h -201a -_ -CreatedT -R -P#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIService -i -202b -` -AcceptedT -R -P#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIServiceRhttpsje -x-kubernetes-group-version-kindB@version: v1beta1 -kind: APIService -group: apiregistration.k8s.io -j -x-kubernetes-actionpost -*ù, -apiregistration_v1beta1delete collection of APIService*0deleteApiregistrationV1beta1CollectionAPIService2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. 
Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj* -x-kubernetes-actiondeletecollection -je -x-kubernetes-group-version-kindB@group: apiregistration.k8s.io -version: v1beta1 -kind: APIService -JO -MKIquery-If 'true', then the output is pretty printed."pretty2string œ) -;/apis/events.k8s.io/v1/namespaces/{namespace}/events/{name}Ü(â - events_v1read the specified Event*readEventsV1NamespacedEvent2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JV -; -2004 -2 -OK, -* -(#/definitions/io.k8s.api.events.v1.Event - -401 - - UnauthorizedRhttpsjR -x-kubernetes-group-version-kind/-group: events.k8s.io -kind: Event -version: v1 -j -x-kubernetes-actionget -¥ - events_v1replace the specified Event*replaceEventsV1NamespacedEvent2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B> -< -:bodybody ** -(#/definitions/io.k8s.api.events.v1.EventBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J˜ -@ -2019 -7 -Created, -* -(#/definitions/io.k8s.api.events.v1.Event - -401 - - Unauthorized -; -2004 -2 -OK, -* -(#/definitions/io.k8s.api.events.v1.EventRhttpsj -x-kubernetes-actionput -jR -x-kubernetes-group-version-kind/-version: v1 -group: events.k8s.io -kind: Event -* - events_v1delete an Event*deleteEventsV1NamespacedEvent2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. 
An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J» - -401 - - Unauthorized -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status -R -202K -I -Accepted= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusRhttpsjR -x-kubernetes-group-version-kind/-version: v1 -group: events.k8s.io -kind: Event -j -x-kubernetes-action delete -Bá - events_v1$partially update the specified Event*patchEventsV1NamespacedEvent2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests."force2boolean JV -; -2004 -2 -OK, -* -(#/definitions/io.k8s.api.events.v1.Event - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpatch -jR -x-kubernetes-group-version-kind/-group: events.k8s.io -kind: Event -version: v1 -J2 -0.",pathname of the Event"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ˜ -(/apis/rbac.authorization.k8s.io/v1beta1/ëè -rbacAuthorization_v1beta1get available resources*'getRbacAuthorizationV1beta1APIResources2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json:application/yaml:#application/vnd.kubernetes.protobufJp -U -200N -L -OKF -D -B#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList - -401 - - UnauthorizedRhttps¿( -,/api/v1/watch/namespaces/{namespace}/secretsŽ(  -core_v1rwatch individual changes to a list of Secret. deprecated: use the 'watch' parameter with a list operation instead.*watchCoreV1NamespacedSecretList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -jH -x-kubernetes-group-version-kind%#group: "" -kind: Secret -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." 
labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean Ò) -:/api/v1/watch/namespaces/{namespace}/resourcequotas/{name}“)é -core_v1´watch changes to an object of kind ResourceQuota. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*"watchCoreV1NamespacedResourceQuota2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsjO -x-kubernetes-group-version-kind,*group: "" -kind: ResourceQuota -version: v1 -j -x-kubernetes-actionwatch -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. 
[Binary protobuf payload of a vendored Kubernetes OpenAPI (swagger) specification, not part of this patch's substantive changes. The legible fragments are the upstream list/watch parameter descriptions (allowWatchBookmarks, continue, fieldSelector, labelSelector, limit, resourceVersion, resourceVersionMatch, timeoutSeconds, watch, dryRun, fieldManager, force, gracePeriodSeconds, orphanDependents, propagationPolicy, pretty) for endpoints such as /apis/certificates.k8s.io/v1/certificatesigningrequests/{name}/approval, /apis/networking.k8s.io/v1beta1/watch/ingresses, /api/v1/, /.well-known/openid-configuration/, /apis/coordination.k8s.io/v1beta1/namespaces/{namespace}/leases, /apis/networking.k8s.io/v1/ingressclasses/{name}, /apis/batch/v1/namespaces/{namespace}/jobs/{name}, /apis/flowcontrol.apiserver.k8s.io/v1beta1/flowschemas, /api/v1/nodes/{name}/status, /apis/certificates.k8s.io/v1beta1/certificatesigningrequests/{name}, /apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name}/status, /apis/node.k8s.io/v1/runtimeclasses/{name}, and /apis/rbac.authorization.k8s.io/v1beta1/namespaces/{namespace}/roles.]
If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsjb -x-kubernetes-group-version-kind?=group: rbac.authorization.k8s.io -kind: Role -version: v1beta1 -j* -x-kubernetes-actiondeletecollection -J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ß( -4/api/v1/watch/namespaces/{namespace}/serviceaccounts¦(¸ -core_v1zwatch individual changes to a list of ServiceAccount. deprecated: use the 'watch' parameter with a list operation instead.*'watchCoreV1NamespacedServiceAccountList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk - -401 - - Unauthorized -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEventRhttpsjP -x-kubernetes-group-version-kind-+group: "" -kind: ServiceAccount -version: v1 -j# -x-kubernetes-action  -watchlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ½) -5/api/v1/watch/namespaces/{namespace}/endpoints/{name}ƒ)Ý -core_v1°watch changes to an object of kind Endpoints. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.*watchCoreV1NamespacedEndpoints2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsjK -x-kubernetes-group-version-kind(&group: "" -kind: Endpoints -version: v1 -j -x-kubernetes-actionwatch -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J6 -42"0pathname of the Endpoints"name*string˜J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ‹) -P/apis/admissionregistration.k8s.io/v1beta1/watch/validatingwebhookconfigurations¶(ª -admissionregistration_v1beta1Šwatch individual changes to a list of ValidatingWebhookConfiguration. deprecated: use the 'watch' parameter with a list operation instead.*CwatchAdmissionregistrationV1beta1ValidatingWebhookConfigurationList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -j -x-kubernetes-group-version-kind\Zgroup: admissionregistration.k8s.io -kind: ValidatingWebhookConfiguration -version: v1beta1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ¨) -J/apis/autoscaling/v1/watch/namespaces/{namespace}/horizontalpodautoscalersÙ(ë -autoscaling_v1ƒwatch individual changes to a list of HorizontalPodAutoscaler. deprecated: use the 'watch' parameter with a list operation instead.*7watchAutoscalingV1NamespacedHorizontalPodAutoscalerList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -jb -x-kubernetes-group-version-kind?=group: autoscaling -kind: HorizontalPodAutoscaler -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. 
If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ñ( -@/apis/coordination.k8s.io/v1/watch/namespaces/{namespace}/leases¬(¾ -coordination_v1qwatch individual changes to a list of Lease. deprecated: use the 'watch' parameter with a list operation instead.*&watchCoordinationV1NamespacedLeaseList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -jX -x-kubernetes-group-version-kind53group: coordination.k8s.io -kind: Lease -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean °( -:/apis/rbac.authorization.k8s.io/v1beta1/watch/clusterrolesñ'å -rbacAuthorization_v1beta1wwatch individual changes to a list of ClusterRole. deprecated: use the 'watch' parameter with a list operation instead.*,watchRbacAuthorizationV1beta1ClusterRoleList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsji -x-kubernetes-group-version-kindFDgroup: rbac.authorization.k8s.io -kind: ClusterRole -version: v1beta1 -j# -x-kubernetes-action  -watchlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean õ& -/api/v1/podtemplatesÜ&Ð -core_v1)list or watch objects of kind PodTemplate*%listCoreV1PodTemplateForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*J^ -C -200< -: -OK4 -2 -0#/definitions/io.k8s.api.core.v1.PodTemplateList - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionlist -jM -x-kubernetes-group-version-kind*(kind: PodTemplate -version: v1 -group: "" -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
- -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean É( -8/apis/autoscaling/v2beta1/watch/horizontalpodautoscalersŒ(€ -autoscaling_v2beta1ƒwatch individual changes to a list of HorizontalPodAutoscaler. deprecated: use the 'watch' parameter with a list operation instead.*BwatchAutoscalingV2beta1HorizontalPodAutoscalerListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -jg -x-kubernetes-group-version-kindDBgroup: autoscaling -kind: HorizontalPodAutoscaler -version: v2beta1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean —+ -M/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/rolebindings/{name}Å*š -rbacAuthorization_v1read the specified RoleBinding*,readRbacAuthorizationV1NamespacedRoleBinding2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JZ -? -2008 -6 -OK0 -. 
-[binary/protobuf content elided: deletion hunks of a vendored, protobuf-encoded Kubernetes OpenAPI (Swagger v2) document. The removed data contains serialized path and parameter descriptors for core/v1 (Namespace, Service, Event, ConfigMap, Endpoints watch/list), rbac.authorization.k8s.io/v1 (RoleBinding get/replace/delete/patch), admissionregistration.k8s.io v1/v1beta1 (ValidatingWebhookConfiguration, MutatingWebhookConfiguration), networking.k8s.io/v1 (Ingress), events.k8s.io/v1 (Event), storage.k8s.io/v1 (StorageClass), node.k8s.io/v1beta1 (RuntimeClass), flowcontrol.apiserver.k8s.io/v1beta1 (PriorityLevelConfiguration), coordination.k8s.io, discovery.k8s.io, autoscaling/v2beta2, and apiextensions.k8s.io/v1, including the standard dryRun, fieldManager, continue, limit, resourceVersion, resourceVersionMatch, timeoutSeconds, and watch query parameters. The raw bytes are not reproducible as text and are omitted here.]
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string J¨ -C -200< -: -OK4 -2 -0#/definitions/io.k8s.api.storage.v1.StorageClass -H -201A -? -Created4 -2 -0#/definitions/io.k8s.api.storage.v1.StorageClass - -401 - - UnauthorizedRhttpsjZ -x-kubernetes-group-version-kind75group: storage.k8s.io -kind: StorageClass -version: v1 -j -x-kubernetes-actionput -*ü - -storage_v1delete a StorageClass*deleteStorageV1StorageClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string J© -C -200< -: -OK4 -2 -0#/definitions/io.k8s.api.storage.v1.StorageClass -I -202B -@ -Accepted4 -2 -0#/definitions/io.k8s.api.storage.v1.StorageClass - -401 - - UnauthorizedRhttpsj -x-kubernetes-action delete -jZ -x-kubernetes-group-version-kind75group: storage.k8s.io -kind: StorageClass -version: v1 -B÷ - -storage_v1+partially update the specified StorageClass*patchStorageV1StorageClass2application/json2application/yaml2#application/vnd.kubernetes.protobuf:application/json-patch+json:application/merge-patch+json:&application/strategic-merge-patch+json:application/apply-patch+yamlBN -L -Jbodybody *: -8#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.PatchBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B¯ -¬©¦queryƒfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch)." 
fieldManager2string BÎ -ËÈÅquery¨Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests."force2boolean J^ -C -200< -: -OK4 -2 -0#/definitions/io.k8s.api.storage.v1.StorageClass - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionpatch -jZ -x-kubernetes-group-version-kind75group: storage.k8s.io -kind: StorageClass -version: v1 -J9 -75"3pathname of the StorageClass"name*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Ó( -1/api/v1/watch/namespaces/{namespace}/podtemplates(¯ -core_v1wwatch individual changes to a list of PodTemplate. deprecated: use the 'watch' parameter with a list operation instead.*$watchCoreV1NamespacedPodTemplateList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk - -401 - - Unauthorized -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEventRhttpsj# -x-kubernetes-action  -watchlist -jM -x-kubernetes-group-version-kind*(kind: PodTemplate -version: v1 -group: "" -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ƒ) -A/apis/policy/v1/watch/namespaces/{namespace}/poddisruptionbudgets½(Ï - policy_v1watch individual changes to a list of PodDisruptionBudget. deprecated: use the 'watch' parameter with a list operation instead.*.watchPolicyV1NamespacedPodDisruptionBudgetList2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj# -x-kubernetes-action  -watchlist -jY -x-kubernetes-group-version-kind64group: policy -kind: PodDisruptionBudget -version: v1 -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer J` -^\"Zpath:object name and auth scope, such as for teams and projects" namespace*string˜JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. 
- -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean ©[ - /api/v1/nodes—[Ü% -core_v1"list or watch objects of kind Node*listCoreV1Node2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
- -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean JW -< -2005 -3 -OK- -+ -)#/definitions/io.k8s.api.core.v1.NodeList - -401 - - UnauthorizedRhttpsjF -x-kubernetes-group-version-kind#!group: "" -kind: Node -version: v1 -j -x-kubernetes-actionlist -"³ -core_v1 create a Node*createCoreV1Node2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*B; -9 -7bodybody *' -%#/definitions/io.k8s.api.core.v1.NodeBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string JÒ -8 -2001 -/ -OK) -' -%#/definitions/io.k8s.api.core.v1.Node -= -2016 -4 -Created) -' -%#/definitions/io.k8s.api.core.v1.Node -> -2027 -5 -Accepted) -' -%#/definitions/io.k8s.api.core.v1.Node - -401 - - UnauthorizedRhttpsjF -x-kubernetes-group-version-kind#!group: "" -kind: Node -version: v1 -j -x-kubernetes-actionpost -*®, -core_v1delete collection of Node*deleteCoreV1CollectionNode2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. 
Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg - -401 - - Unauthorized -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusRhttpsj* -x-kubernetes-actiondeletecollection -jF -x-kubernetes-group-version-kind#!group: "" -kind: Node -version: v1 -JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Á -/api/v1/nodes/{name}/proxy¢Œ -core_v1%connect GET requests to proxy of Node*connectCoreV1GetNodeProxy2*/*:*/*J7 - -200 - -OK - ² -string - -401 - - UnauthorizedRhttpsjR -x-kubernetes-group-version-kind/-group: "" -kind: NodeProxyOptions -version: v1 -j! -x-kubernetes-action -connect -Œ -core_v1%connect PUT requests to proxy of Node*connectCoreV1PutNodeProxy2*/*:*/*J7 - -200 - -OK - ² -string - -401 - - UnauthorizedRhttpsj! -x-kubernetes-action -connect -jR -x-kubernetes-group-version-kind/-group: "" -kind: NodeProxyOptions -version: v1 -"Ž -core_v1&connect POST requests to proxy of Node*connectCoreV1PostNodeProxy2*/*:*/*J7 - -200 - -OK - ² -string - -401 - - UnauthorizedRhttpsjR -x-kubernetes-group-version-kind/-version: v1 -group: "" -kind: NodeProxyOptions -j! -x-kubernetes-action -connect -*’ -core_v1(connect DELETE requests to proxy of Node*connectCoreV1DeleteNodeProxy2*/*:*/*J7 - -200 - -OK - ² -string - -401 - - UnauthorizedRhttpsjR -x-kubernetes-group-version-kind/-group: "" -kind: NodeProxyOptions -version: v1 -j! -x-kubernetes-action -connect -2” -core_v1)connect OPTIONS requests to proxy of Node*connectCoreV1OptionsNodeProxy2*/*:*/*J7 - -200 - -OK - ² -string - -401 - - UnauthorizedRhttpsjR -x-kubernetes-group-version-kind/-group: "" -kind: NodeProxyOptions -version: v1 -j! -x-kubernetes-action -connect -:Ž -core_v1&connect HEAD requests to proxy of Node*connectCoreV1HeadNodeProxy2*/*:*/*J7 - -200 - -OK - ² -string - -401 - - UnauthorizedRhttpsj! -x-kubernetes-action -connect -jR -x-kubernetes-group-version-kind/-group: "" -kind: NodeProxyOptions -version: v1 -B -core_v1'connect PATCH requests to proxy of Node*connectCoreV1PatchNodeProxy2*/*:*/*J7 - -200 - -OK - ² -string - -401 - - UnauthorizedRhttpsjR -x-kubernetes-group-version-kind/-kind: NodeProxyOptions -version: v1 -group: "" -j! 
-x-kubernetes-action -connect -J= -;9"7pathname of the NodeProxyOptions"name*string˜Jb -`^\queryBPath is the URL path to use for the current proxy request to node."path2string ¤( -//apis/policy/v1beta1/watch/poddisruptionbudgetsð'ä -policy_v1beta1watch individual changes to a list of PodDisruptionBudget. deprecated: use the 'watch' parameter with a list operation instead.*9watchPolicyV1beta1PodDisruptionBudgetListForAllNamespaces2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*Jk -P -200I -G -OKA -? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent - -401 - - UnauthorizedRhttpsj^ -x-kubernetes-group-version-kind;9group: policy -kind: PodDisruptionBudget -version: v1beta1 -j# -x-kubernetes-action  -watchlist -J‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Jï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string J‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string J‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Jú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer JO -MKIquery-If 'true', then the output is pretty printed."pretty2string Jû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string JÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Jž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer J± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean À] -//apis/rbac.authorization.k8s.io/v1/clusterrolesŒ]©& -rbacAuthorization_v1)list or watch objects of kind ClusterRole*"listRbacAuthorizationV1ClusterRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf2application/json;stream=watch20application/vnd.kubernetes.protobuf;stream=watch:*/*B‚ -ÿüùqueryÎallowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored."allowWatchBookmarks2boolean Bï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer B± -®«¨query‹Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion."watch2boolean J^ -C -200< -: -OK4 -2 -0#/definitions/io.k8s.api.rbac.v1.ClusterRoleList - -401 - - UnauthorizedRhttpsj -x-kubernetes-actionlist -jd -x-kubernetes-group-version-kindA?group: rbac.authorization.k8s.io -kind: ClusterRole -version: v1 -"• -rbacAuthorization_v1create a ClusterRole*$createRbacAuthorizationV1ClusterRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BB -@ ->bodybody *. 
-,#/definitions/io.k8s.api.rbac.v1.ClusterRoleBž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B– -“queryêfieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint." fieldManager2string Jç - -401 - - Unauthorized -? -2008 -6 -OK0 -. -,#/definitions/io.k8s.api.rbac.v1.ClusterRole -D -201= -; -Created0 -. -,#/definitions/io.k8s.api.rbac.v1.ClusterRole -E -202> -< -Accepted0 -. -,#/definitions/io.k8s.api.rbac.v1.ClusterRoleRhttpsj -x-kubernetes-actionpost -jd -x-kubernetes-group-version-kindA?version: v1 -group: rbac.authorization.k8s.io -kind: ClusterRole -*ô, -rbacAuthorization_v1 delete collection of ClusterRole*.deleteRbacAuthorizationV1CollectionClusterRole2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*BT -R -Pbodybody*B -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptionsBï -ì é æ queryÇ The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". - -This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications."continue2string Bž -›˜•queryøWhen present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed"dryRun2string B‡ -„query\A selector to restrict the list of returned objects by their fields. Defaults to everything." fieldSelector2string Bä -áÞÛquery±The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately."gracePeriodSeconds2integer B‡ -„query\A selector to restrict the list of returned objects by their labels. Defaults to everything." labelSelector2string Bú - -÷ -ô -ñ -queryÔ -limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. - -The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned."limit2integer BÑ -ÎËÈquery Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both."orphanDependents2boolean Bˆ -…‚ÿquery×Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground."propagationPolicy2string Bû -øõòqueryÌresourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersion2string BÚ -×ÔÑquery¦resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. - -Defaults to unset"resourceVersionMatch2string Bž -›˜•querypTimeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity."timeoutSeconds2integer Jg -L -200E -C -OK= -; -9#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status - -401 - - UnauthorizedRhttpsj* -x-kubernetes-actiondeletecollection -jd -x-kubernetes-group-version-kindA?kind: ClusterRole -version: v1 -group: rbac.authorization.k8s.io -JO -MKIquery-If 'true', then the output is pretty printed."pretty2string ñ' -/api/v1/namespaces/{name}Ó'× -core_v1read the specified Namespace*readCoreV1Namespace2application/json2application/yaml2#application/vnd.kubernetes.protobuf:*/*JX -= -2006 -4 -OK. 
[binary/protobuf content omitted: vendored, serialized Kubernetes OpenAPI (swagger) definitions — API paths and schemas such as core/v1 Namespace operations, policy/v1beta1 PodSecurityPolicy, scheduling.k8s.io PriorityClass and storage.k8s.io CSIDriver watch endpoints, admissionregistration MutatingWebhook, StorageClass, ConfigMap, ServiceAccount, and related list/watch parameter descriptions. This generated vendor data is not human-readable in diff form and is unrelated to the referrers API change itself.]
Default to 10 seconds.² -integer -ì -+io.k8s.api.autoscaling.v2beta2.MetricStatus¼">MetricStatus describes the last-read state of a single metric.štype² -objectÊæ -Ë -containerResourceµ -J#/definitions/io.k8s.api.autoscaling.v2beta2.ContainerResourceMetricStatus"æcontainer resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. -ô -externalç -A#/definitions/io.k8s.api.autoscaling.v2beta2.ExternalMetricStatus"¡external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). - -object· -?#/definitions/io.k8s.api.autoscaling.v2beta2.ObjectMetricStatus"tobject refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object). -• -podsŒ -=#/definitions/io.k8s.api.autoscaling.v2beta2.PodsMetricStatus"Êpods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value. -™ -resourceŒ -A#/definitions/io.k8s.api.autoscaling.v2beta2.ResourceMetricStatus"Æresource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. -¥ -typeœ"Žtype is the type of metric source. It will be one of "ContainerResource", "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object. Note: "ContainerResource" type is available on when the feature-gate HPAContainerMetrics is enabled² -string -™ -1io.k8s.api.policy.v1beta1.PodDisruptionBudgetListã"@PodDisruptionBudgetList is a collection of PodDisruptionBudgets.šitems² -objectÊŸ -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -I -metadata= -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -U -itemsL² -arrayº? 
-= -;#/definitions/io.k8s.api.policy.v1beta1.PodDisruptionBudgetúh -x-kubernetes-group-version-kindEC- group: policy - kind: PodDisruptionBudgetList - version: v1beta1 - -ÿ -5io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerSpecÅ"-specification of a horizontal pod autoscaler.šscaleTargetRefš maxReplicas² -objectÊè - - maxReplicas~int32"iupper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.² -integer -ò - minReplicasâint32"ÌminReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.² -integer -† -scaleTargetRefó -C#/definitions/io.k8s.api.autoscaling.v1.CrossVersionObjectReference"«reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource. -× -targetCPUUtilizationPercentage´int32"žtarget average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.² -integer -Í -io.k8s.api.core.v1.EventList¬"EventList is a list of events.šitems² -objectÊ¡ -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -P -itemsG"List of events² -arrayº* -( -&#/definitions/io.k8s.api.core.v1.Event -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ï -metadata -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"‚Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kindsúQ -x-kubernetes-group-version-kind.,- group: "" - kind: EventList - version: v1 - -ë -=io.k8s.api.networking.v1beta1.IngressClassParametersReference©"}IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource.škindšname² -objectÊ -Ú -apiGroupÍ"¿APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.² -string -C -kind;".Kind is the type of resource being referenced.² -string -C -name;".Name is the name of resource being referenced.² -string - - namespace´"¦Namespace is the namespace of the resource being referenced. This field is required when scope is set to "Namespace" and must be unset when scope is set to "Cluster".² -string -Þ -scopeÔ"ÆScope represents if this refers to a cluster or namespace scoped resource. This may be set to "Cluster" (default) or "Namespace". Field can be enabled with IngressClassNamespacedParams feature gate.² -string -‹ -#io.k8s.api.storage.v1.CSIDriverListã"3CSIDriverList is a collection of CSIDriver objects.šitems² -objectʳ -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ê -metadata½ -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"~Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -g -items^"items is the list of CSIDriver² -arrayº1 -/ --#/definitions/io.k8s.api.storage.v1.CSIDriverúa -x-kubernetes-group-version-kind><- version: v1 - group: storage.k8s.io - kind: CSIDriverList - -• -0io.k8s.apimachinery.pkg.apis.meta.v1.APIVersionsà "APIVersions lists the versions that are available, to allow clients to discover the API at /api, which is the root path of the legacy v1 API.šversionsšserverAddressByClientCIDRs² -objectÊà -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -è -serverAddressByClientCIDRsÉ"éa map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.² -arrayºP -N -L#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR -Y -versionsM"1versions are the api versions that are available.² -arrayº - ² -string -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -stringúS -x-kubernetes-group-version-kind0.- group: "" - kind: APIVersions - version: v1 - -ž -io.k8s.api.apps.v1.StatefulSetû -"ŸStatefulSet represents a set of pods with consistent identities. Identities are defined as: - - Network: A single stable DNS and hostname. - - Storage: As many VolumeClaims as requested. -The StatefulSet guarantees that a given network identity will always map to the same storage identity.² -objectÊò -¯ -status¤ -2#/definitions/io.k8s.api.apps.v1.StatefulSetStatus"nStatus is the current status of Pods in this StatefulSet. This data may be out of date by some window of time. -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -K -metadata? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta -t -specl -0#/definitions/io.k8s.api.apps.v1.StatefulSetSpec"8Spec defines the desired identities of pods in this set.úU -x-kubernetes-group-version-kind20- kind: StatefulSet - version: v1 - group: apps - -Ð - -.io.k8s.api.autoscaling.v2beta2.HPAScalingRules -"”HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.² -objectÊ÷ -‘ -policies„"³policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid² -arrayºA -? -=#/definitions/io.k8s.api.autoscaling.v2beta2.HPAScalingPolicy -‘ - selectPolicy€"sselectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used.² -string -Ì -stabilizationWindowSeconds­int32"—StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).² -integer -¼ -(io.k8s.api.core.v1.ReplicationController"OReplicationController represents the configuration of a replication controller.² -objectÊÏ -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -ß -metadataÒ -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -Œ -specƒ -:#/definitions/io.k8s.api.core.v1.ReplicationControllerSpec"ÄSpec defines the specification of the desired behavior of the replication controller. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status -Þ -statusÓ -<#/definitions/io.k8s.api.core.v1.ReplicationControllerStatus"’Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -stringú] -x-kubernetes-group-version-kind:8- group: "" - kind: ReplicationController - version: v1 - -Ù -Bio.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationCondition’"LPriorityLevelConfigurationCondition defines the condition of priority level.² -objectʵ -® -lastTransitionTime— -7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"\`lastTransitionTime` is the last time the condition transitioned from one status to another. -g -message\"O`message` is a human-readable message indicating details about last transition.² -string -l -reasonb"U`reason` is a unique, one-word, CamelCase reason for the condition's last transition.² -string -f -status\"O`status` is the status of the condition. Can be True, False, Unknown. Required.² -string -C -type;".`type` is the type of the condition. Required.² -string -Ò -)io.k8s.api.networking.v1beta1.IngressSpec¤ ";IngressSpec describes the Ingress the user wishes to exist.² -objectÊØ -º -backend® -:#/definitions/io.k8s.api.networking.v1beta1.IngressBackend"ïA default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default. -´ -ingressClassNameŸ"‘IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.² -string -Ø -rulesÎ"ƒA list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.² -arrayº; -9 -7#/definitions/io.k8s.api.networking.v1beta1.IngressRule -† -tlsþ"´TLS configuration. Currently the Ingress only supports a single TLS port, 443. 
If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.² -arrayº: -8 -6#/definitions/io.k8s.api.networking.v1beta1.IngressTLS -ç -io.k8s.api.rbac.v1beta1.RoleRefÃ"?RoleRef contains information that points to the role being usedšapiGroupškindšname² -objectÊÚ -P -apiGroupD"7APIGroup is the group for the resource being referenced² -string -B -kind:"-Kind is the type of resource being referenced² -string -B -name:"-Name is the name of resource being referenced² -string -Ü -,io.k8s.api.storage.v1.VolumeAttachmentStatus«"CVolumeAttachmentStatus is the status of a VolumeAttachment request.šattached² -objectÊÌ -æ - attachErrorÖ -/#/definitions/io.k8s.api.storage.v1.VolumeError"¢The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher. -± -attached¤"•Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.² -boolean -à -attachmentMetadata¬"ŽUpon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.ª - ² -string² -object -æ - detachErrorÖ -/#/definitions/io.k8s.api.storage.v1.VolumeError"¢The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher. -Ý -$io.k8s.api.storage.v1beta1.CSIDriver´"¹CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. CSI drivers do not need to create the CSIDriver object directly. Instead they may use the cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically creates a CSIDriver object representing the driver. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.šspec² -objectÊý -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -™ -metadataŒ -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"ÊStandard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -b -specZ -6#/definitions/io.k8s.api.storage.v1beta1.CSIDriverSpec" Specification of the CSI Driver.úb -x-kubernetes-group-version-kind?=- group: storage.k8s.io - kind: CSIDriver - version: v1beta1 - -Ú -8io.k8s.api.authorization.v1beta1.SubjectAccessReviewSpec"¢SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set² -objectÊé -Æ -extra¼"Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here.ª -² -arrayº - ² -string² -object -M -groupD"(Groups is the groups you're testing for.² -arrayº - ² -string -¯ -nonResourceAttributes• -D#/definitions/io.k8s.api.authorization.v1beta1.NonResourceAttributes"MNonResourceAttributes describes information for a non-resource access request -¯ -resourceAttributes˜ -A#/definitions/io.k8s.api.authorization.v1beta1.ResourceAttributes"SResourceAuthorizationAttributes describes information for a resource access request -> -uid7"*UID information about the requesting user.² -string -ª -user¡"“User is the user you're testing for. If you specify "User" but not "Group", then is it interpreted as "What if User were not a member of any groups² -string -ë -io.k8s.api.core.v1.EventSourceÈ".EventSource contains information for an event.² -objectʉ -F - component9",Component from which the event is generated.² -string -? -host7"*Node name on which the event is generated.² -string -æ -(io.k8s.api.core.v1.GlusterfsVolumeSource¹"‹Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.š endpointsšpath² -objectʉ -« - endpoints"EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod² -string -ƒ -path{"nPath is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod² -string -Ò -readOnlyÅ"¶ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod² -boolean -¨ -#io.k8s.api.core.v1.NodeConfigSource€"uNodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.² -objectÊ{ -y - configMapl -:#/definitions/io.k8s.api.core.v1.ConfigMapNodeConfigSource".ConfigMap is a reference to a Node's ConfigMap -Œ -#io.k8s.api.core.v1.NodeSelectorTermä"¥A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.² -objectÊ­ -– -matchExpressions"6A list of node selector requirements by node's labels.² -arrayº< -: -8#/definitions/io.k8s.api.core.v1.NodeSelectorRequirement -‘ - matchFields"6A list of node selector requirements by node's fields.² -arrayº< -: -8#/definitions/io.k8s.api.core.v1.NodeSelectorRequirement -’5 -'io.k8s.api.core.v1.PersistentVolumeSpecæ4"APersistentVolumeSpec is the specification of a persistent volume.² -objectÊ”4 -¦ -scaleIOš ->#/definitions/io.k8s.api.core.v1.ScaleIOPersistentVolumeSource"XScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. 
- - storageosó -@#/definitions/io.k8s.api.core.v1.StorageOSPersistentVolumeSource"®StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://examples.k8s.io/volumes/storageos/README.md -¼ - accessModes¬"AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes² -arrayº - ² -string -© - azureFile› -@#/definitions/io.k8s.api.core.v1.AzureFilePersistentVolumeSource"WAzureFile represents an Azure File Service mount on the host and bind mount to the pod. -– -cephfs‹ -=#/definitions/io.k8s.api.core.v1.CephFSPersistentVolumeSource"JCephFS represents a Ceph FS mount on the host that shares a pod's lifetime -| -locals -2#/definitions/io.k8s.api.core.v1.LocalVolumeSource"=Local represents directly-attached storage with node affinity -Ó -rbdË -:#/definitions/io.k8s.api.core.v1.RBDPersistentVolumeSource"ŒRBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md -Ú -cinderÏ -=#/definitions/io.k8s.api.core.v1.CinderPersistentVolumeSource"Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md -õ -hostPathè -5#/definitions/io.k8s.api.core.v1.HostPathVolumeSource"®HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath -¥ -portworxVolume’ -5#/definitions/io.k8s.api.core.v1.PortworxVolumeSource"YPortworxVolume represents a portworx volume attached and mounted on kubelets host machine -ð -capacityã"“A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacityª? -= -;#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity² -object -… - glusterfs÷ -@#/definitions/io.k8s.api.core.v1.GlusterfsPersistentVolumeSource"²Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://examples.k8s.io/volumes/glusterfs/README.md -Î -photonPersistentDiskµ -A#/definitions/io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource"pPhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine -– -csiŽ -:#/definitions/io.k8s.api.core.v1.CSIPersistentVolumeSource"PCSI represents storage that is handled by an external CSI driver (Beta feature). -Õ -iscsiË -<#/definitions/io.k8s.api.core.v1.ISCSIPersistentVolumeSource"ŠISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. -¨ -storageClassName“"…Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.² -string -± -awsElasticBlockStore˜ -A#/definitions/io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource"ÒAWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore -Ö -claimRefÉ -0#/definitions/io.k8s.api.core.v1.ObjectReference"”ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding -ä - nodeAffinityÓ -3#/definitions/io.k8s.api.core.v1.VolumeNodeAffinity"›NodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume. -¼ -persistentVolumeReclaimPolicyš"ŒWhat happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming² -string -É - -volumeModeº"¬volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.² -string -ª -fc£ -/#/definitions/io.k8s.api.core.v1.FCVolumeSource"pFC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. -í -flockerá -4#/definitions/io.k8s.api.core.v1.FlockerVolumeSource"¨Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running -Á -nfs¹ -0#/definitions/io.k8s.api.core.v1.NFSVolumeSource"„NFS represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - -quobyteƒ -4#/definitions/io.k8s.api.core.v1.QuobyteVolumeSource"KQuobyte represents a Quobyte mount on the host that shares a pod's lifetime -¬ - vsphereVolumeš -?#/definitions/io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource"WVsphereVolume represents a vSphere volume attached and mounted on kubelets host machine -œ - azureDiskŽ -6#/definitions/io.k8s.api.core.v1.AzureDiskVolumeSource"TAzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. -¶ - -flexVolume§ -;#/definitions/io.k8s.api.core.v1.FlexPersistentVolumeSource"hFlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. -î - mountOptionsÝ"ÀA list of mount options, e.g. ["ro", "soft"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options² -arrayº - ² -string -½ -gcePersistentDisk§ ->#/definitions/io.k8s.api.core.v1.GCEPersistentDiskVolumeSource"äGCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk -¨ -%io.k8s.api.core.v1.PodDNSConfigOption"9PodDNSConfigOption defines DNS resolver options of a pod.² -objectÊ6 - -name" Required.² -string - -value ² -string -« -io.k8s.api.core.v1.SecretList‰ "SecretList is a list of Secret.šitems² -objectÊü -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -ª -items "fItems is a list of secret objects. More info: https://kubernetes.io/docs/concepts/configuration/secret² -arrayº+ -) -'#/definitions/io.k8s.api.core.v1.Secret -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ï -metadata -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"‚Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kindsúR -x-kubernetes-group-version-kind/-- group: "" - kind: SecretList - version: v1 - -Ô -'io.k8s.api.apps.v1.StatefulSetCondition¨"MStatefulSetCondition describes the state of a statefulset at a certain point.štypešstatus² -objectʺ -F -reason<"/The reason for the condition's last transition.² -string -L -statusB"5Status of the condition, one of True, False, Unknown.² -string -3 -type+"Type of statefulset condition.² -string -‘ -lastTransitionTime{ -7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"@Last time the condition transitioned from one status to another. -Y -messageN"AA human readable message indicating details about the transition.² -string -» -1io.k8s.api.authentication.v1beta1.TokenReviewSpec…"ETokenReviewSpec is a description of the token authentication request.² -objectʯ -ó - audienceså"ÈAudiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver.² -arrayº - ² -string -7 -token."!Token is the opaque bearer token.² -string -º - -NodeSpec describes the attributes that a node is created with.² -objectÊŸ -Ö - configSourceÅ -1#/definitions/io.k8s.api.core.v1.NodeConfigSource"If specified, the source to get node configuration from The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field -† - -externalIDx"kDeprecated. Not all kubelets will set this field. Remove field after 1.13. see: https://issues.k8s.io/61966² -string -Q -podCIDRF"9PodCIDR represents the pod IP range assigned to the node.² -string -¬ -podCIDRsŸ"ØpodCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for each of IPv4 and IPv6.² -arrayº - ² -stringú' -x-kubernetes-patch-strategymerge - - - -providerIDs"fID of the node assigned by the cloud provider in the format: ://² -string -c -taintsY" If specified, the node's taints.² -arrayº* -( -&#/definitions/io.k8s.api.core.v1.Taint -Ï - unschedulable½"®Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration² -boolean -œ -$io.k8s.api.core.v1.SecretKeySelectoró",SecretKeySelector selects a key of a Secret.škey² -objectÊ° -V -keyO"BThe key of the secret to select from. Must be a valid secret key.² -string -„ -name|"oName of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names² -string -O -optionalC"5Specify whether the Secret or its key must be defined² -boolean -ž -Fio.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIServiceSpecÓ"£APIServiceSpec contains information for locating and communicating with a server. Only https is supported, though you are able to disable certificate verification.šgroupPriorityMinimumšversionPriority² -objectÊõ -± -groupPriorityMinimum˜int32"‚GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s² -integer -Í -insecureSkipTLSVerify³"¤InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server. This is strongly discouraged. You should use the CABundle instead.² -boolean -ù -serviceí -V#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.ServiceReference"’Service is a reference to the service for this API server. It must communicate on port 443. If the Service is nil, that means the handling for the API groupversion is handled locally on this server. The call will simply delegate to the normal handler chain to be fulfilled. -X -versionM"@Version is the API version this server hosts. For example, "v1"² -string -ä -versionPriorityÐint32"ºVersionPriority controls the ordering of this API version inside of its group. Must be greater than zero. The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10). Since it's inside of a group, the number can be small, probably in the 10s. In case of equal version priorities, the version string will be used to compute the order inside a group. If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major version, then minor version. An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.² -integer -ì -caBundleßbyte"¥CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate. 
If unspecified, system trust roots on the apiserver are used.² -stringú# -x-kubernetes-list-type atomic - -C -group:"-Group is the API group name this server hosts² -string -Ç -#io.k8s.api.autoscaling.v1.ScaleSpecŸ":ScaleSpec describes the attributes of a scale subresource.² -objectÊU -S -replicasGint32"2desired number of instances for the scaled object.² -integer -€ -/io.k8s.api.autoscaling.v2beta2.PodsMetricStatusÌ"šPodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).šmetricšcurrent² -objectÊ -ƒ -metricy -=#/definitions/io.k8s.api.autoscaling.v2beta2.MetricIdentifier"8metric identifies the target metric by name and selector -„ -currenty ->#/definitions/io.k8s.api.autoscaling.v2beta2.MetricValueStatus"7current contains the current value for the given metric -” -%io.k8s.api.core.v1.NamespaceConditionê"=NamespaceCondition contains details about state of namespace.štypešstatus² -objectÊŒ -L -statusB"5Status of the condition, one of True, False, Unknown.² -string -< -type4"'Type of namespace controller condition.² -string -O -lastTransitionTime9 -7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time - -message ² -string - -reason ² -string -º -io.k8s.api.rbac.v1.ClusterRole— "ˆClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.² -objectÊ -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -h -metadata\ -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"Standard object's metadata. -{ -rulesr"4Rules holds all the PolicyRules for this ClusterRole² -arrayº/ -- -+#/definitions/io.k8s.api.rbac.v1.PolicyRule -© -aggregationRule• -0#/definitions/io.k8s.api.rbac.v1.AggregationRule"àAggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller. -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -stringúj -x-kubernetes-group-version-kindGE- group: rbac.authorization.k8s.io - kind: ClusterRole - version: v1 - -¶ - -!io.k8s.api.apps.v1.ReplicaSetSpec -"4ReplicaSetSpec is the specification of a ReplicaSet.šselector² -objectÊÀ -ƒ -minReadySecondsïint32"ÙMinimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)² -integer -¡ -replicas”int32"þReplicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller² -integer -ý -selectorð -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"«Selector is a label query over pods that should match the replica count. 
Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors -“ -template† -0#/definitions/io.k8s.api.core.v1.PodTemplateSpec"ÑTemplate is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template -’ -1io.k8s.api.autoscaling.v2beta1.ObjectMetricStatusÜ"ŽObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).štargetš -metricNameš currentValue² -objectÊ— -· - averageValue¦ -;#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity"gaverageValue is the current value of the average of the metric across all relevant pods (as a quantity) - - currentValue -;#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity"@currentValue is the current value of the metric (as a quantity). -L - -metricName>"1metricName is the name of the metric in question.² -string -÷ -selectorê -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"¥selector is the string-encoded form of a standard kubernetes label selector for the given metric When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. -€ -targetv -H#/definitions/io.k8s.api.autoscaling.v2beta1.CrossVersionObjectReference"*target is the described Kubernetes object. -æ -8io.k8s.api.networking.v1.IngressClassParametersReference©"}IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource.škindšname² -objectÊ -C -name;".Name is the name of resource being referenced.² -string - - namespace´"¦Namespace is the namespace of the resource being referenced. This field is required when scope is set to "Namespace" and must be unset when scope is set to "Cluster".² -string -Þ -scopeÔ"ÆScope represents if this refers to a cluster or namespace scoped resource. This may be set to "Cluster" (default) or "Namespace". Field can be enabled with IngressClassNamespacedParams feature gate.² -string -Ú -apiGroupÍ"¿APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.² -string -C -kind;".Kind is the type of resource being referenced.² -string -¡ - io.k8s.api.core.v1.EnvFromSourceü":EnvFromSource represents the source of a set of ConfigMaps² -objectʱ -c - configMapRefS -3#/definitions/io.k8s.api.core.v1.ConfigMapEnvSource"The ConfigMap to select from -n -prefixd"WAn optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.² -string -Z - secretRefM -0#/definitions/io.k8s.api.core.v1.SecretEnvSource"The Secret to select from -¨A -/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMetaô@"lObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.² -objectÊ÷? -Þ - -finalizersÏ"ˆMust be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. 
[Vendored binary payload not reproduced: protobuf-serialized Kubernetes OpenAPI v2 definitions document. It carries the generated model descriptions for types such as io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta, io.k8s.api.apps.v1.DeploymentList, io.k8s.api.authorization.v1.SubjectAccessReview, io.k8s.api.certificates.v1.CertificateSigningRequest, io.k8s.api.core.v1.Endpoints, io.k8s.api.core.v1.Volume, io.k8s.api.core.v1.Container, io.k8s.api.storage.v1.VolumeAttachmentList, and related apiextensions, autoscaling, batch, networking, policy, and rbac definitions, each with its field documentation and x-kubernetes-* extensions.]
user and groups must be emptyúq -x-kubernetes-group-version-kindNL- kind: SelfSubjectAccessReview - version: v1 - group: authorization.k8s.io - -£ -:io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerListä"KHorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects.šitems² -objectÊŒ -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -˜ -itemsŽ"7items is the list of horizontal pod autoscaler objects.² -arrayºH -F -D#/definitions/io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscaler -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -r -metadataf -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"'metadata is the standard list metadata.úq -x-kubernetes-group-version-kindNL- kind: HorizontalPodAutoscalerList - version: v2beta2 - group: autoscaling - -¹ -#io.k8s.api.rbac.v1beta1.RoleBinding‘ "¨RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.22.šroleRef² -objectÊÛ -å -roleRefÙ --#/definitions/io.k8s.api.rbac.v1beta1.RoleRef"§RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. -‰ -subjects}"=Subjects holds references to the objects the role applies to.² -arrayº1 -/ --#/definitions/io.k8s.api.rbac.v1beta1.Subject -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -h -metadata\ -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"Standard object's metadata.úo -x-kubernetes-group-version-kindLJ- kind: RoleBinding - version: v1beta1 - group: rbac.authorization.k8s.io - -ô -io.k8s.api.core.v1.Event×"‰Event is a report of an event somewhere in the cluster. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. 
Events should be treated as informative, best-effort, supplemental data.šmetadatašinvolvedObject² -objectÊÐ -J -countAint32",The number of times this event has occurred.² -integer -u - eventTimeh -<#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime"(Time when this Event was first observed. -¦ -firstTimestamp“ -7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"XThe time at which the event was first recorded. (Time of server receipt is in TypeMeta.) -• - lastTimestampƒ -7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"HThe time at which the most recent occurrence of this event was recorded. -U -messageJ"=A human-readable description of the status of this operation.² -string -U -reportingInstance@"3ID of the controller instance, e.g. `kubelet-xyzf`.² -string -Ž -seriesƒ -,#/definitions/io.k8s.api.core.v1.EventSeries"SData about the Event series this event represents or nil if it's a singleton Event. -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -j -involvedObjectX -0#/definitions/io.k8s.api.core.v1.ObjectReference"$The object that this event is about. -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -™ -reasonŽ"€This should be a short, machine understandable string that gives the reason for the transition into the object's current status.² -string -Ò -metadataÅ -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"ƒStandard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - -source„ -,#/definitions/io.k8s.api.core.v1.EventSource"TThe component reporting this event. Should be a short machine understandable string. -V -actionL"?What action was taken/failed regarding to the Regarding object.² -string -r -relatedg -0#/definitions/io.k8s.api.core.v1.ObjectReference"3Optional secondary object for more complex actions. -p -reportingComponentZ"MName of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.² -string -a -typeY"LType of this event (Normal, Warning), new types could be added in the future² -stringúM -x-kubernetes-group-version-kind*(- group: "" - kind: Event - version: v1 - -ð - io.k8s.api.discovery.v1.EndpointË"FEndpoint represents a single logical "backend" implementing a service.š addresses² -objectÊè -” - -conditions… -8#/definitions/io.k8s.api.discovery.v1.EndpointConditions"Iconditions contains information about the current status of the endpoint. -¾ -deprecatedTopology§"‰deprecatedTopology contains topology information part of the v1beta1 API. This field is deprecated, and will be removed when the v1beta1 API is removed (no sooner than kubernetes v1.24). While this field can hold values, it is not writable through the v1 API, and any attempts to write to it will be silently ignored. Topology information can be found in the zone and nodeName fields instead.ª - ² -string² -object - -hints… -3#/definitions/io.k8s.api.discovery.v1.EndpointHints"Nhints contains information associated with how an endpoint should be consumed. 
-Î -hostnameÁ"³hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS Label (RFC 1123) validation.² -string -Ú -nodeNameÍ"¿nodeName represents the name of the Node hosting this endpoint. This can be used to determine endpoints local to a Node. This field can be enabled with the EndpointSliceNodeName feature gate.² -string - - targetRef‚ -0#/definitions/io.k8s.api.core.v1.ObjectReference"NtargetRef is a reference to a Kubernetes object that represents this endpoint. -J -zoneB"5zone is the name of the Zone this endpoint exists in.² -string -î - addressesà" addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.² -arrayº - ² -stringú -x-kubernetes-list-typeset - -ê - -(io.k8s.api.policy.v1.PodDisruptionBudget½ -"hPodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods² -objectÊâ -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ò -metadataÅ -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"ƒStandard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -‡ -spec -:#/definitions/io.k8s.api.policy.v1.PodDisruptionBudgetSpec"ASpecification of the desired behavior of the PodDisruptionBudget. -ƒ -statusy -<#/definitions/io.k8s.api.policy.v1.PodDisruptionBudgetStatus"9Most recently observed status of the PodDisruptionBudget.ú_ -x-kubernetes-group-version-kind<:- group: policy - kind: PodDisruptionBudget - version: v1 - -› -2io.k8s.api.policy.v1beta1.RunAsUserStrategyOptionsä"_RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.šrule² -objectÊí -k -rulec"Vrule is the strategy that will dictate the allowable RunAsUser values that may be set.² -string -ý -rangesò"¯ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs.² -arrayº3 -1 -/#/definitions/io.k8s.api.policy.v1beta1.IDRange -§ -6io.k8s.api.admissionregistration.v1.RuleWithOperationsì "‚RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.² -objectÊØ -´ - apiGroups¦"‰APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.² -arrayº - ² -string -¼ - apiVersions¬"APIVersions is the API versions the resources belong to. '*' is all versions. 
If '*' is present, the length of the slice must be one. Required.² -arrayº - ² -string -  - -operations‘"ôOperations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required.² -arrayº - ² -string -– - resourcesˆ"ëResources is a list of resources this rule applies to. - -For example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources. - -If wildcard is present, the validation rule will ensure resources do not overlap with each other. - -Depending on the enclosing object, subresources might not be allowed. Required.² -arrayº - ² -string -£ -scope™"‹scope specifies the scope of this rule. Valid values are "Cluster", "Namespaced", and "*" "Cluster" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. "Namespaced" means that only namespaced resources will match this rule. "*" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is "*".² -string -„ -io.k8s.api.core.v1.KeyToPathã",Maps a string key to a path within a volume.škeyšpath² -objectÊ™ -' -key "The key to project.² -string -» -mode²int32"œOptional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.² -integer -¯ -path¦"˜The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.² -string -ú -io.k8s.api.core.v1.NodeListÚ"ONodeList is the whole list of all Nodes which have been registered with master.šitems² -objectÊŸ -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ï -metadata -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"‚Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -N -itemsE" List of nodes² -arrayº) -' -%#/definitions/io.k8s.api.core.v1.NodeúP -x-kubernetes-group-version-kind-+- group: "" - kind: NodeList - version: v1 - -± -,io.k8s.api.core.v1.ReplicationControllerSpec€ "KReplicationControllerSpec is the specification of a replication controller.² -objectʤ - -½ -template° -0#/definitions/io.k8s.api.core.v1.PodTemplateSpec"ûTemplate is the object that describes the pod that will be created if insufficient replicas are detected. 
This takes precedence over a TemplateRef. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template -ƒ -minReadySecondsïint32"ÙMinimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)² -integer -  -replicas“int32"ýReplicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller² -integer -¸ -selector«"Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectorsª - ² -string² -object -á -&io.k8s.api.scheduling.v1.PriorityClass¶"{PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.švalue² -objectÊ» -Ò -metadataÅ -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"ƒStandard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -„ -preemptionPolicyï"áPreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate.² -string -¨ -valuežint32"ˆThe value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.² -integer -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -‹ - description|"odescription is an arbitrary string that usually provides guidelines on when this priority class should be used.² -string -¦ - globalDefault”"…globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.² -boolean -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -stringúd -x-kubernetes-group-version-kindA?- group: scheduling.k8s.io - kind: PriorityClass - version: v1 - -Ò -+io.k8s.api.scheduling.v1beta1.PriorityClass¢"áDEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. 
The value can be any valid integer.švalue² -objectÊ» -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -‹ - description|"odescription is an arbitrary string that usually provides guidelines on when this priority class should be used.² -string -¦ - globalDefault”"…globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.² -boolean -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ò -metadataÅ -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"ƒStandard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -„ -preemptionPolicyï"áPreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate.² -string -¨ -valuežint32"ˆThe value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.² -integerúi -x-kubernetes-group-version-kindFD- group: scheduling.k8s.io - kind: PriorityClass - version: v1beta1 - -š -0io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceå"JAPIResource specifies the name of a resource and whether it is namespaced.šnameš singularNameš -namespacedškindšverbs² -objectÊØ -} - -categorieso"Scategories is a list of the grouped resources this resource belongs to (e.g. 'all')² -arrayº - ² -string -= -name5"(name is the plural name of the resource.² -string -ž - singularName"ÿsingularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.² -string -« -verbs¡"„verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)² -arrayº - ² -string -Ç -group½"¯group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale".² -string -d -kind\"Okind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')² -string -T - -namespacedF"8namespaced indicates if a resource is namespaced or not.² -boolean -h - -shortNamesZ">shortNames is a list of suggested short names of the resource.² -arrayº - ² -string -Ì -storageVersionHashµ"§The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. 
This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates.² -string -‰ -versioný"ïversion is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)".² -string -… -6io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerÊ "ÎHorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.² -objectÊú -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -à -metadataÓ -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"‘metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -„ -specû -H#/definitions/io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerSpec"®spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. - -status… -J#/definitions/io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerStatus"7status is the current information about the autoscaler.úm -x-kubernetes-group-version-kindJH- group: autoscaling - kind: HorizontalPodAutoscaler - version: v2beta1 - -Ù -io.k8s.api.core.v1.HTTPHeader·">HTTPHeader describes a custom header to be used in HTTP probesšnamešvalue² -objectÊZ -* -name""The header field name² -string -, -value#"The header field value² -string - -!io.k8s.api.core.v1.LimitRangeSpec÷"NLimitRangeSpec defines a min/max usage limit for resources that match on kind.šlimits² -objectÊ -Œ -limits"?Limits is the list of LimitRangeItem objects that are enforced.² -arrayº3 -1 -/#/definitions/io.k8s.api.core.v1.LimitRangeItem -î+ -(io.k8s.api.storage.v1beta1.CSIDriverSpecÁ+"2CSIDriverSpec is the specification of a CSIDriver.² -objectÊþ* -É -attachRequired¶"§attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called. 
- -This field is immutable.² -boolean -Û - fsGroupPolicyÉ"»Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is alpha-level, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate. - -This field is immutable.² -string -© -podInfoOnMount– "‡ If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. "csi.storage.k8s.io/pod.name": pod.Name "csi.storage.k8s.io/pod.namespace": pod.Namespace "csi.storage.k8s.io/pod.uid": string(pod.UID) "csi.storage.k8s.io/ephemeral": "true" if the volume is an ephemeral inline volume - defined by a CSIVolumeSource, otherwise "false" - -"csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver. - -This field is immutable.² -boolean -ÿ -requiresRepublishé"ÚRequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false. - -Note: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container. - -This is a beta feature and only available when the CSIServiceAccountToken feature is enabled.² -boolean -€ -storageCapacityì"ÝIf set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information. - -The check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object. - -Alternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published. - -This field is immutable. - -This is a beta field and only available when the CSIStorageCapacity feature is enabled. The default is false.² -boolean -ê - tokenRequestsØ"éTokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: "csi.storage.k8s.io/serviceAccount.tokens": { - "": { - "token": , - "expirationTimestamp": , - }, - ... 
-} - -Note: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically. - -This is a beta feature and only available when the CSIServiceAccountToken feature is enabled.² -arrayº9 -7 -5#/definitions/io.k8s.api.storage.v1beta1.TokenRequestú# -x-kubernetes-list-type atomic - -³ -volumeLifecycleModesš"ýVolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is "Persistent", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. - -This field is immutable.² -arrayº - ² -string -× -Bio.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.JSON"JSON represents any valid JSON value. These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil. - -Fio.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceConditionÒ"NAPIServiceCondition describes the state of an APIService at a particular pointštypešstatus² -objectÊã -7 -type/""Type is the type of the condition.² -string -‘ -lastTransitionTime{ -7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"@Last time the condition transitioned from one status to another. -X -messageM"@Human-readable message indicating details about last transition.² -string -^ -reasonT"GUnique, one-word, CamelCase reason for the condition's last transition.² -string -Z -statusP"CStatus is the status of the condition. Can be True, False, Unknown.² -string -î -#io.k8s.api.core.v1.FlexVolumeSourceÆ"hFlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.šdriver² -objectÊÄ -O -driverE"8Driver is the name of the driver to use for this volume.² -string -Ä -fsType¹"«Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.² -string -O -optionsD"'Optional: Extra command options if any.ª - ² -string² -object -‚ -readOnlyv"hOptional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.² -boolean -Ó - secretRefÅ -5#/definitions/io.k8s.api.core.v1.LocalObjectReference"‹Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts. -¦ - io.k8s.api.networking.v1.Ingress "€Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.² -objectÊŽ - -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ò -metadataÅ -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"ƒStandard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -Ø -specÏ -2#/definitions/io.k8s.api.networking.v1.IngressSpec"˜Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status -Þ -statusÓ -4#/definitions/io.k8s.api.networking.v1.IngressStatus"šStatus is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-statusú^ -x-kubernetes-group-version-kind;9- group: networking.k8s.io - kind: Ingress - version: v1 - -Û -"io.k8s.api.policy.v1beta1.Eviction´ "ÒEviction evicts a pod from its node subject to certain policies and safety constraints. This is a subresource of Pod. A request to cause such an eviction is created by POSTing to .../pods//evictions.² -objectÊô -€ -metadatat -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"3ObjectMeta describes the pod that is being evicted. -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -r - deleteOptionsa -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"DeleteOptions may be provided -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -stringúY -x-kubernetes-group-version-kind64- group: policy - kind: Eviction - version: v1beta1 - -ð -&io.k8s.api.core.v1.NodeDaemonEndpointsÅ"FNodeDaemonEndpoints lists ports opened by daemons running on the Node.² -objectÊo -m -kubeletEndpointZ -/#/definitions/io.k8s.api.core.v1.DaemonEndpoint"'Endpoint on which Kubelet is listening. 
-î -?io.k8s.api.certificates.v1beta1.CertificateSigningRequestStatusª² -objectÊ› -˜ - certificateˆbyte"OIf request was approved, the controller will place the issued certificate here.² -stringú# -x-kubernetes-list-type atomic - -ý - -conditionsî">Conditions applied to the request, such as approval or denial.² -arrayºT -R -P#/definitions/io.k8s.api.certificates.v1beta1.CertificateSigningRequestConditionú' -x-kubernetes-list-map-keys - type -ú -x-kubernetes-list-typemap - -Ý - -&io.k8s.api.core.v1.ScaleIOVolumeSource² -":ScaleIOVolumeSource represents a persistent ScaleIO volumešgatewayšsystemš secretRef² -objectÊÈ -` - -sslEnabledR"DFlag to enable/disable SSL communication with Gateway, default false² -boolean - - -volumeNames"fThe name of a volume already created in the ScaleIO system that is associated with this volume source.² -string -D -gateway9",The host address of the ScaleIO API Gateway.² -string -Ð - secretRef -5#/definitions/io.k8s.api.core.v1.LocalObjectReference"ˆSecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail. -x -readOnlyl"^Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.² -boolean -’ - storageMode‚"uIndicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.² -string -[ - storagePoolL"?The ScaleIO Storage Pool associated with the protection domain.² -string -O -systemE"8The name of the storage system as configured in ScaleIO.² -string -¡ -fsType–"ˆFilesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Default is "xfs".² -string -f -protectionDomainR"EThe name of the ScaleIO Protection Domain for the configured storage.² -string -Ÿ -=io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirementÝ"xA label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.škeyšoperator² -objectÊà -’ -values‡"êvalues is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.² -arrayº - ² -string -š -key’"2key is the label key that the selector applies to.² -stringú' -x-kubernetes-patch-strategymerge -ú& -x-kubernetes-patch-merge-keykey - -Ž -operator"toperator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.² -string -Æ -2io.k8s.api.core.v1.AzureFilePersistentVolumeSource"WAzureFile represents an Azure File Service mount on the host and bind mount to the pod.š -secretNameš shareName² -objectÊŽ -x -readOnlyl"^Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.² -boolean -^ - -secretNameP"Cthe name of secret that contains Azure Storage Account Name and Key² -string -‹ -secretNamespacex"kthe namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod² -string -$ - shareName" -Share Name² -string -ù -io.k8s.api.core.v1.LimitRange× "OLimitRange sets resource usage limits for each kind of resource in a Namespace.² -objectÊ¢ -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ò -metadataÅ -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"ƒStandard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -Í -specÄ -/#/definitions/io.k8s.api.core.v1.LimitRangeSpec"Spec defines the limits enforced. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-statusúR -x-kubernetes-group-version-kind/-- group: "" - kind: LimitRange - version: v1 - -Ò -=io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSON"JSON represents any valid JSON value. These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil. -Ò -)io.k8s.api.extensions.v1beta1.IngressSpec¤ ";IngressSpec describes the Ingress the user wishes to exist.² -objectÊØ -º -backend® -:#/definitions/io.k8s.api.extensions.v1beta1.IngressBackend"ïA default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default. -´ -ingressClassNameŸ"‘IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.² -string -Ø -rulesÎ"ƒA list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.² -arrayº; -9 -7#/definitions/io.k8s.api.extensions.v1beta1.IngressRule -† -tlsþ"´TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.² -arrayº: -8 -6#/definitions/io.k8s.api.extensions.v1beta1.IngressTLS -ð -/io.k8s.api.flowcontrol.v1beta1.FlowSchemaStatus¼">FlowSchemaStatus represents the current state of a FlowSchema.² -objectÊí -ê - -conditionsÛ";`conditions` is a list of the current states of FlowSchema.² -arrayºD -B -@#/definitions/io.k8s.api.flowcontrol.v1beta1.FlowSchemaConditionú -x-kubernetes-list-typemap -ú' -x-kubernetes-list-map-keys - type - -® -"io.k8s.api.rbac.v1.ClusterRoleList‡"/ClusterRoleList is a collection of ClusterRolesšitems² -objectÊÎ -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -f -metadataZ -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"Standard object's metadata. -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -g -items^"Items is a list of ClusterRoles² -arrayº0 -. -,#/definitions/io.k8s.api.rbac.v1.ClusterRoleún -x-kubernetes-group-version-kindKI- group: rbac.authorization.k8s.io - kind: ClusterRoleList - version: v1 - -‰ -!io.k8s.api.apps.v1.DeploymentSpecã "NDeploymentSpec is the specification of the desired behavior of the Deployment.šselectorštemplate² -objectÊî -  -progressDeadlineSeconds„int32"îThe maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.² -integer -’ -replicas…int32"pNumber of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.² -integer -à -revisionHistoryLimitªint32"”The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.² -integer -ö -selectoré -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"¤Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels. -¹ -strategy¬ -3#/definitions/io.k8s.api.apps.v1.DeploymentStrategy"FThe deployment strategy to use to replace existing pods with new ones.ú, -x-kubernetes-patch-strategy  retainKeys - -q -templatee -0#/definitions/io.k8s.api.core.v1.PodTemplateSpec"1Template describes the pods that will be created. -ƒ -minReadySecondsïint32"ÙMinimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)² -integer -@ -paused6"(Indicates that the deployment is paused.² -boolean -Ü - --io.k8s.api.authentication.v1beta1.TokenReviewª -"§TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.šspec² -objectÊü -© -statusž -A#/definitions/io.k8s.api.authentication.v1beta1.TokenReviewStatus"YStatus is filled in by the server and indicates whether the request can be authenticated. -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
[binary content omitted: this hunk of the diff is a protobuf-serialized Kubernetes OpenAPI v2 document (io.k8s.* schema definitions) embedded in a vendored dependency; the wire-format bytes are not reproducible as readable text]
-= -;#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity² -object -Ó -defaultRequestÀ"qDefaultRequest is the default resource requirement request value by resource name if resource request is omitted.ª? -= -;#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity² -object -‹ -maxƒ"4Max usage constraints on this kind by resource name.ª? -= -;#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity² -object -× -maxLimitRequestRatio¾"îMaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.ª? -= -;#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity² -object -‹ -minƒ"4Min usage constraints on this kind by resource name.ª? -= -;#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity² -object -å -'io.k8s.api.storage.v1beta1.TokenRequest¹"#/definitions/io.k8s.api.policy.v1beta1.SELinuxStrategyOptions"OseLinux is the strategy that will dictate the allowable labels that may be set. -” -allowedCSIDriversþ"²AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes. This is a beta field, and is only honored if the API server enables the CSIInlineVolume feature gate.² -arrayº< -: -8#/definitions/io.k8s.api.policy.v1beta1.AllowedCSIDriver -¿ -allowedHostPathsª"`allowedHostPaths is an allowlist of host paths. Empty indicates that all host paths may be used.² -arrayº; -9 -7#/definitions/io.k8s.api.policy.v1beta1.AllowedHostPath -· -defaultAllowPrivilegeEscalation“"„defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.² -boolean -œ - hostPortsŽ"FhostPorts determines which host port ranges are allowed to be exposed.² -arrayº9 -7 -5#/definitions/io.k8s.api.policy.v1beta1.HostPortRange -p - hostNetworka"ShostNetwork determines if the policy allows the use of HostNetwork in the pod spec.² -boolean -Í - runtimeClass¼ -C#/definitions/io.k8s.api.policy.v1beta1.RuntimeClassStrategyOptions"ôruntimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. If this field is omitted, the pod's runtimeClassName field is unrestricted. Enforcement of this field depends on the RuntimeClass feature gate being enabled. -d -hostPIDY"KhostPID determines if the policy allows the use of HostPID in the pod spec.² -boolean -­ - runAsUserŸ -@#/definitions/io.k8s.api.policy.v1beta1.RunAsUserStrategyOptions"[runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. -¥ -volumes™"}volumes is an allowlist of volume plugins. Empty indicates that no volumes may be used. To allow all volumes you may use '*'.² -arrayº - ² -string -² -allowedCapabilitiesš"ýallowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.² -arrayº - ² -string -· -allowedFlexVolumes "ÓallowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. 
This parameter is effective only when the usage of the Flexvolumes is allowed in the "volumes" field.² -arrayº= -; -9#/definitions/io.k8s.api.policy.v1beta1.AllowedFlexVolume -’ -defaultAddCapabilities÷"ÚdefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list.² -arrayº - ² -string - -forbiddenSysctlsø"ÛforbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in "*" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - -Examples: e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. e.g. "foo.*" forbids "foo.bar", "foo.baz", etc.² -arrayº - ² -string -d -hostIPCY"KhostIPC determines if the policy allows the use of HostIPC in the pod spec.² -boolean -ö -readOnlyRootFilesystemÛ"ÌreadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.² -boolean -Ç -requiredDropCapabilitiesª"requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.² -arrayº - ² -string -´ - -runAsGroup¥ -A#/definitions/io.k8s.api.policy.v1beta1.RunAsGroupStrategyOptions"ßRunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. This field requires the RunAsGroup feature gate to be enabled. -¤ -allowPrivilegeEscalation‡"yallowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.² -boolean -ú -allowedProcMountTypesà"ÃAllowedProcMountTypes is an allowlist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled.² -arrayº - ² -string -î -allowedUnsafeSysctlsÕ"¸allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in "*" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection. - -Examples: e.g. "foo/*" allows "foo/bar", "foo/baz", etc. e.g. "foo.*" allows "foo.bar", "foo.baz", etc.² -arrayº - ² -string -¥ -fsGroup™ ->#/definitions/io.k8s.api.policy.v1beta1.FSGroupStrategyOptions"WfsGroup is the strategy that will dictate what fs group is used by the SecurityContext. -Ò -supplementalGroups» -I#/definitions/io.k8s.api.policy.v1beta1.SupplementalGroupsStrategyOptions"nsupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. -Õ -Qio.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceConversionÿ"MCustomResourceConversion describes how to convert different versions of a CR.šstrategy² -objectÊ– -Å -strategy¸"ªstrategy specifies how custom resources are converted between versions. 
Allowed values are: - `None`: The converter only change the apiVersion and would not touch any other field in the custom resource. - `Webhook`: API Server will call to an external webhook to do the conversion. Additional information - is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhook to be set.² -string -Ë -webhook¿ -X#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.WebhookConversion"cwebhook describes how to call the conversion webhook. Required when `strategy` is set to `Webhook`. -í@ -Mio.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps›@"[JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/).² -objectʯ? -Ô -defaultÈ -P#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.JSON"ódefault is a default value for undefined object fields. Defaulting is a beta feature under the CustomResourceDefaulting feature gate. CustomResourceDefinitions with defaults must be created using the v1 (or newer) CustomResourceDefinition API. -| - definitionsmª_ -] -[#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps² -object -s - externalDocsc -a#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.ExternalDocumentation -¶ -format«"format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated: - -- bsonobjectid: a bson object ID, i.e. a 24 characters hex string - uri: an URI as parsed by Golang net/url.ParseRequestURI - email: an email address as parsed by Golang net/mail.ParseAddress - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. - ipv4: an IPv4 IP as parsed by Golang net.ParseIP - ipv6: an IPv6 IP as parsed by Golang net.ParseIP - cidr: a CIDR as parsed by Golang net.ParseCIDR - mac: a MAC address as parsed by Golang net.ParseMAC - uuid: an UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid3: an UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid4: an UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - uuid5: an UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - isbn: an ISBN10 or ISBN13 number string like "0321751043" or "978-0321751041" - isbn10: an ISBN10 number string like "0321751043" - isbn13: an ISBN13 number string like "978-0321751041" - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\d{3})\d{11})$ with any non digit characters mixed in - ssn: a U.S. 
social security number following the regex ^\d{3}[- ]?\d{2}[- ]?\d{4}$ - hexcolor: an hexadecimal color code like "#FFFFFF: following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$ - rgbcolor: an RGB color code like rgb like "rgb(255,255,2559" - byte: base64 encoded binary data - password: any kind of string - date: a date string like "2006-01-02" as defined by full-date in RFC3339 - duration: a duration string like "22 ns" as parsed by Golang time.ParseDuration or compatible with Scala duration format - datetime: a date time string like "2014-12-15T19:30:20.000Z" as defined by date-time in RFC3339.² -string - -id ² -string -u -oneOfl² -arrayº_ -] -[#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps - -title ² -string -u -anyOfl² -arrayº_ -] -[#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps - - minLengthint64² -integer - -pattern ² -string - -type ² -string - - uniqueItems ² -boolean -Ó -x-kubernetes-embedded-resource°"¡x-kubernetes-embedded-resource defines that the value is an embedded Kubernetes runtime.Object, with TypeMeta and ObjectMeta. The type must be object. It is allowed to further restrict the embedded object. kind, apiVersion and metadata are validated automatically. x-kubernetes-preserve-unknown-fields is allowed to be true, but does not have to be if the object is fully specified (up to kind, apiVersion, metadata).² -boolean -˜ -$x-kubernetes-preserve-unknown-fieldsï"àx-kubernetes-preserve-unknown-fields stops the API server decoding step from pruning fields which are not specified in the validation schema. This affects fields recursively, but switches back to normal pruning behaviour if nested properties or additionalProperties are specified in the schema. This can either be true or undefined. False is forbidden.² -boolean -Š - dependencieszªl -j -h#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrStringArray² -object -i -enuma² -arrayºT -R -P#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.JSON -{ - -propertiesmª_ -] -[#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps² -object -{ -additionalPropertiesc -a#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrBool - - description ² -string - -exclusiveMaximum ² -boolean - -exclusiveMinimum ² -boolean - -maximumdouble² -number -  -x-kubernetes-map-type†"øx-kubernetes-map-type annotates an object to further describe its topology. This extension must only be used when type is object and may have 2 possible values: - -1) `granular`: - These maps are actual maps (key-value pairs) and each fields are independent - from each other (they can each be manipulated by separate actors). This is - the default behaviour for all maps. -2) `atomic`: the list is treated as a single entity, like a scalar. - Atomic maps will be entirely replaced when updated.² -string - -$schema ² -string -v -additionalItemsc -a#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrBool -u -allOfl² -arrayº_ -] -[#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps - - maxLengthint64² -integer -! 
- -multipleOfdouble² -number - -$ref ² -string -$ - maxPropertiesint64² -integer -$ - minPropertiesint64² -integer - -minimumdouble² -number - -nullable ² -boolean -‚ -patternPropertiesmª_ -] -[#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps² -object -& -required² -arrayº - ² -string -m -itemsd -b#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrArray - -maxItemsint64² -integer - -minItemsint64² -integer -d -not] -[#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaProps -] -exampleR -P#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.JSON -« -x-kubernetes-list-map-keysŒ"ïx-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used as the index of the map. - -This tag MUST only be used on lists that have the "x-kubernetes-list-type" extension set to "map". Also, the values specified for this attribute must be a scalar typed field of the child structure (no nesting is supported). - -The properties specified must either be required or have a default value, to ensure those properties are present for all list items.² -arrayº - ² -string -Þ -x-kubernetes-list-typeÃ"µx-kubernetes-list-type annotates an array to further describe its topology. This extension must only be used on lists and may have 3 possible values: - -1) `atomic`: the list is treated as a single entity, like a scalar. - Atomic lists will be entirely replaced when updated. This extension - may be used on any type of list (struct, scalar, ...). -2) `set`: - Sets are lists that must not have multiple items with the same value. Each - value must be a scalar, an object with x-kubernetes-map-type `atomic` or an - array with x-kubernetes-list-type `atomic`. -3) `map`: - These lists are like maps in that their elements have a non-index key - used to identify them. Order is preserved upon merge. The map tag - must only be used on a list with elements of type object. -Defaults to atomic for arrays.² -string -‡ -x-kubernetes-int-or-stringè"Ùx-kubernetes-int-or-string specifies that this value is either an integer or a string. If this is true, an empty type is allowed and type as child of anyOf is permitted if following one of the following patterns: - -1) anyOf: - - type: integer - - type: string -2) allOf: - - anyOf: - - type: integer - - type: string - - ... zero or more² -boolean -Ø -$io.k8s.apimachinery.pkg.version.Info¯"TInfo contains versioning information. how we'll want to distribute that information.šmajoršminorš -gitVersionš gitCommitš gitTreeStateš buildDateš goVersionšcompileršplatform² -objectÊä - - -gitVersion ² -string - - goVersion ² -string - -major ² -string - - buildDate ² -string - -compiler ² -string - - gitCommit ² -string - - gitTreeState ² -string - -minor ² -string - -platform ² -string -¼ -)io.k8s.api.coordination.v1beta1.LeaseSpecŽ"(LeaseSpec is a specification of a Lease.² -objectÊÕ -‰ - acquireTimez -<#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime":acquireTime is a time when the current lease was acquired. -e -holderIdentityS"FholderIdentity contains the identity of the holder of a current lease.² -string -È -leaseDurationSeconds¯int32"™leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. 
This is measure against time of last observed RenewTime.² -integer -r -leaseTransitions^int32"IleaseTransitions is the number of transitions of a lease between holders.² -integer -  - renewTime’ -<#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime"RrenewTime is a time when the current holder of a lease has last updated the lease. -ú -(io.k8s.api.core.v1.ConfigMapVolumeSourceÍ "ÅAdapts a ConfigMap into a volume. - -The contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.² -objectÊö -S -optionalG"9Specify whether the ConfigMap or its keys must be defined² -boolean -ê - defaultModeÚint32"ÄOptional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.² -integer -ª -items "âIf unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.² -arrayº. -, -*#/definitions/io.k8s.api.core.v1.KeyToPath -„ -name|"oName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names² -string -– -3io.k8s.api.core.v1.PhotonPersistentDiskVolumeSourceÞ"8Represents a Photon Controller persistent disk resource.špdID² -objectÊŽ -I -pdIDA"4ID that identifies Photon Controller persistent disk² -string -À -fsTypeµ"§Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.² -string -· -1io.k8s.api.storage.v1beta1.VolumeAttachmentSource"ÝVolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.² -objectÊ’ -À -inlineVolumeSpec« -5#/definitions/io.k8s.api.core.v1.PersistentVolumeSpec"ñinlineVolumeSpec contains all the information necessary to attach a persistent volume defined by a pod's inline VolumeSource. This field is populated only for the CSIMigration feature. It contains translated fields from a pod's inline VolumeSource to a PersistentVolumeSpec. This field is beta-level and is only honored by servers that enabled the CSIMigration feature. -M -persistentVolumeName5"(Name of the persistent volume to attach.² -string -û -%io.k8s.api.discovery.v1.EndpointSliceÑ"ÜEndpointSlice represents a subset of the endpoints that implement a service. 
For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints.š addressTypeš endpoints² -objectÊã -¤ -portsš"®ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates "all ports". Each slice may include a maximum of 100 ports.² -arrayº6 -4 -2#/definitions/io.k8s.api.discovery.v1.EndpointPortú# -x-kubernetes-list-type atomic - -ð - addressTypeà"ÒaddressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name.² -string -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -ß - endpointsÑ"jendpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints.² -arrayº2 -0 -.#/definitions/io.k8s.api.discovery.v1.Endpointú# -x-kubernetes-list-type atomic - -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -h -metadata\ -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"Standard object's metadata.úc -x-kubernetes-group-version-kind@>- group: discovery.k8s.io - kind: EndpointSlice - version: v1 - -“ -io.k8s.api.events.v1.Eventô"¾Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. Events should be treated as informative, best-effort, supplemental data.š eventTime² -objectʽ -— - eventTime‰ -<#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime"IeventTime is the time when this Event was first observed. It is required. -¶ - regarding¨ -0#/definitions/io.k8s.api.core.v1.ObjectReference"óregarding contains the object this Event is about. In most cases it's an Object reporting controller implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object. -Ñ -relatedÅ -0#/definitions/io.k8s.api.core.v1.ObjectReference"related is the optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object. -š -series -.#/definitions/io.k8s.api.events.v1.EventSeries"]series is data about the Event series this event represents or nil if it's a singleton Event. -Ì -actionÁ"³action is what action was taken/failed regarding to the regarding object. It is machine-readable. 
This field cannot be empty for new Events and it can have at most 128 characters.² -string -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -¿ -deprecatedLastTimestamp£ -7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"hdeprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type. -¦ -deprecatedSource‘ -,#/definitions/io.k8s.api.core.v1.EventSource"adeprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type. -³ -typeª"œtype is the type of this event (Normal, Warning), new types could be added in the future. It is machine-readable. This field cannot be empty for new Events.² -string -ˆ -deprecatedCountuint32"`deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type.² -integer -Á -deprecatedFirstTimestamp¤ -7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"ideprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type. -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ò -metadataÅ -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"ƒStandard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -½ -note´"¦note is a human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.² -string -¤ -reason™"‹reason is why the action was taken. It is human-readable. This field cannot be empty for new Events and it can have at most 128 characters.² -string -¹ -reportingController¡"“reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. This field cannot be empty for new Events.² -string - -reportingInstance¬"žreportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. This field cannot be empty for new Events and it can have at most 128 characters.² -stringúX -x-kubernetes-group-version-kind53- version: v1 - group: events.k8s.io - kind: Event - -Ï -*io.k8s.api.networking.v1.NetworkPolicyPort "6NetworkPolicyPort describes a port to allow traffic on² -objectÊÙ - -protocolu"hThe protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.² -string -™ -endPortint32"÷If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Alpha state and should be enabled using the Feature Gate "NetworkPolicyEndPort".² -integer -¶ -port­ -=#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString"ëThe port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. 
If present, only traffic on the specified protocol AND port will be matched. -È -$io.k8s.api.discovery.v1beta1.ForZoneŸ"LForZone provides information about which zones should consume this endpoint.šname² -objectÊ< -: -name2"%name represents the name of the zone.² -string - -6io.k8s.api.flowcontrol.v1beta1.FlowDistinguisherMethodâ"EFlowDistinguisherMethod specifies the method of a flow distinguisher.štype² -objectÊ… -‚ -typez"m`type` is the type of flow distinguisher method The supported types are "ByUser" and "ByNamespace". Required.² -string -Ï -!io.k8s.api.storage.v1.CSINodeSpec©"\CSINodeSpec holds information about the specification of all CSI drivers installed on a nodešdrivers² -objectʲ -¯ -drivers£"Šdrivers is a list of information of all CSI Drivers existing on a node. If all drivers in the list are uninstalled, this can become empty.² -arrayº5 -3 -1#/definitions/io.k8s.api.storage.v1.CSINodeDriverú' -x-kubernetes-patch-merge-keyname -ú' -x-kubernetes-patch-strategymerge - -” --io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1â"ÔFieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. - -Each key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:', where is the name of a field in a struct, or key in a map 'v:', where is the exact json formatted value of a list item 'i:', where is position of a item in a list 'k:', where is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set. - -The exact format is defined in sigs.k8s.io/structured-merge-diff² -object -õ -!io.k8s.api.core.v1.FCVolumeSourceÏ"«Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.² -objectÊ’ -: -lun3int32"Optional: FC target lun number² -integer -‚ -readOnlyv"hOptional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.² -boolean -T - -targetWWNsF"*Optional: FC target worldwide names (WWNs)² -arrayº - ² -string -µ -wwids«"ŽOptional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.² -arrayº - ² -string -À -fsTypeµ"§Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.² -string -Ÿ -io.k8s.api.core.v1.PodIP‚"¦IP address information for entries in the (plural) PodIPs field. Each entry includes: - IP: An IP address allocated to the pod. Routable at least within the cluster.² -objectÊK -I -ipC"6ip is an IP address (IPv4 or IPv6) assigned to the pod² -string -Ê -io.k8s.api.core.v1.Sysctl¬"+Sysctl defines a kernel parameter to be setšnamešvalue² -objectÊb -. -name&"Name of a property to set² -string -0 -value'"Value of a property to set² -string -® -(io.k8s.api.core.v1.AzureFileVolumeSource"WAzureFile represents an Azure File Service mount on the host and bind mount to the pod.š -secretNameš shareName² -objectÊ€ -x -readOnlyl"^Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.² -boolean -^ - -secretNameP"Cthe name of secret that contains Azure Storage Account Name and Key² -string -$ - shareName" -Share Name² -string -ý -io.k8s.api.core.v1.Taintà"`The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint.škeyšeffect² -objectÊà -  -effect•"‡Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.² -string -D -key="0Required. The taint key to be applied to a node.² -string -­ - timeAddedŸ -7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"dTimeAdded represents the time at which the taint was added. It is only written for NoExecute taints. -E -value<"/The taint value corresponding to the taint key.² -string -Ó -$io.k8s.api.networking.v1.IngressRuleª "ìIngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.² -objectʬ -â - -hostÙ -"Ë -Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the "host" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to - the IP in the Spec of the parent Ingress. -2. The `:` delimiter is not respected because ports are not allowed. - Currently the port of an Ingress is implicitly :80 for http and - :443 for https. -Both these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue. - -Host can be "precise" which is a domain name without the terminating dot of a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name prefixed with a single wildcard label (e.g. "*.foo.com"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.² -string -E -http= -;#/definitions/io.k8s.api.networking.v1.HTTPIngressRuleValue -µ -#io.k8s.api.networking.v1.IngressTLS"MIngressTLS describes the transport layer security associated with an Ingress.² -objectʯ -À -hosts¶"óHosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.² -arrayº - ² -stringú# -x-kubernetes-list-type atomic - -é - -secretNameÚ"ÌSecretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. 
If the SNI host in a listener conflicts with the "Host" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.² -string -ì -Nio.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.ExternalDocumentation™"YExternalDocumentation allows referencing an external resource for extended documentation.² -objectÊ0 - - description ² -string - -url ² -string -è -"io.k8s.api.core.v1.ContainerStatusÁ "JContainerStatus contains details for the current status of this container.šnamešreadyš restartCountšimagešimageID² -objectʶ - -r -namej"]This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated.² -string -S - containerIDD"7Container's ID in the format 'docker://'.² -string -z -imageq"dThe image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images² -string -9 -imageID."!ImageID of the container's image.² -string -° -started¤"•Specifies whether the container has passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. Is always true when no startupProbe is defined.² -boolean -l -statec -/#/definitions/io.k8s.api.core.v1.ContainerState"0Details about the container's current condition. -y - lastStatel -/#/definitions/io.k8s.api.core.v1.ContainerState"9Details about the container's last termination condition. -V -readyM"?Specifies whether the container has passed its readiness probe.² -boolean -¿ - restartCount®int32"˜The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC.² -integer -ç -.io.k8s.api.core.v1.ISCSIPersistentVolumeSource´ "®ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.š targetPortalšiqnšlun² -objectÊÙ -V -chapAuthDiscoveryA"3whether support iSCSI Discovery CHAP authentication² -boolean -Ô - initiatorNameÂ"´Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.² -string -z - secretRefm -0#/definitions/io.k8s.api.core.v1.SecretReference"9CHAP Secret for iSCSI target and initiator authentication -¤ - targetPortal“"…iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).² -string -R -chapAuthSession?"1whether support iSCSI Session CHAP authentication² -boolean -µ -fsTypeª"œFilesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi² -string -0 -iqn)"Target iSCSI Qualified Name.² -string -n -iscsiInterface\"OiSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).² -string -4 -lun-int32"iSCSI Target Lun number.² -integer -³ -portals§"ŠiSCSI Target Portal List. 
The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).² -arrayº - ² -string -k -readOnly_"QReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.² -boolean -· -'io.k8s.api.core.v1.LocalObjectReference‹"sLocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.² -objectʇ -„ -name|"oName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names² -string -° -)io.k8s.api.discovery.v1.EndpointSliceList‚"6EndpointSliceList represents a list of endpoint slicesšitems² -objectÊÉ -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -f -items]"List of endpoint slices² -arrayº7 -5 -3#/definitions/io.k8s.api.discovery.v1.EndpointSlice -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -b -metadataV -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"Standard list metadata.úg -x-kubernetes-group-version-kindDB- version: v1 - group: discovery.k8s.io - kind: EndpointSliceList - -ù -Xio.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceSubresourcesœ"YCustomResourceSubresources defines the status and scale subresources for CustomResources.² -objectʲ -¾ -status³ -k#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceSubresourceStatus"Ãstatus indicates the custom resource should serve a `/status` subresource. When enabled: 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. -î -scaleä -j#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceSubresourceScale"vscale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object. -Ö -4io.k8s.api.admissionregistration.v1.ServiceReference";ServiceReference holds a reference to Service.legacy.k8s.ioš namespacešname² -objectʾ -@ -name8"+`name` is the name of the service. Required² -string -O - namespaceB"5`namespace` is the namespace of the service. Required² -string -f -path^"Q`path` is an optional URL path which will be sent in any request to this service.² -string -À -port·int32"¡If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).² -integer -º - -MicroTime is version of Time with microsecond level precision.² -string -ò -)io.k8s.apimachinery.pkg.apis.meta.v1.TimeÄ date-time"«Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.² -string -² -=io.k8s.api.certificates.v1beta1.CertificateSigningRequestSpecð"¾This information is immutable after the request is created. 
Only the Request and Usages fields can be set on creation, other fields are derived by Kubernetes and cannot be modified by users.šrequest² -objectÊ– -¤ -usages™"ÖallowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 - https://tools.ietf.org/html/rfc5280#section-4.2.1.12 -Valid values are: - "signing", - "digital signature", - "content commitment", - "key encipherment", - "key agreement", - "data encipherment", - "cert sign", - "crl sign", - "encipher only", - "decipher only", - "any", - "server auth", - "client auth", - "code signing", - "email protection", - "s/mime", - "ipsec end system", - "ipsec tunnel", - "ipsec user", - "timestamping", - "ocsp signing", - "microsoft sgc", - "netscape sgc"² -arrayº - ² -stringú# -x-kubernetes-list-type atomic - -d -usernameX"KInformation about the requesting user. See user.Info interface for details.² -string -† -extra}"QExtra information about the requesting user. See user.Info interface for details.ª -² -arrayº - ² -string² -object -ž -groups“"QGroup information about the requesting user. See user.Info interface for details.² -arrayº - ² -stringú# -x-kubernetes-list-type atomic - -c -requestXbyte"Base64-encoded PKCS#10 CSR data² -stringú# -x-kubernetes-list-type atomic - -’ - -signerNameƒ"õRequested signer for the request. It is a qualified name in the form: `scope-hostname.io/name`. If empty, it will be defaulted: - 1. If it's a kubelet client certificate, it is assigned - "kubernetes.io/kube-apiserver-client-kubelet". - 2. If it's a kubelet serving certificate, it is assigned - "kubernetes.io/kubelet-serving". - 3. Otherwise, it is assigned "kubernetes.io/legacy-unknown". -Distribution of trust for signers happens out of band. You can select on this field using `spec.signerName`.² -string -c -uid\"OUID information about the requesting user. See user.Info interface for details.² -string -Ë -,io.k8s.api.extensions.v1beta1.IngressBackendš"DIngressBackend describes all endpoints for a given service and port.² -objectÊÅ -ø -resourceë -:#/definitions/io.k8s.api.core.v1.TypedLocalObjectReference"¬Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, serviceName and servicePort must not be specified. -I - serviceName:"-Specifies the name of the referenced service.² -string -} - servicePortn -=#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString"-Specifies the port of the referenced service. -¤ - -=io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationListâ "OPriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects.šitems² -objectÊñ -Œ -items‚"(`items` is a list of request-priorities.² -arrayºK -I -G#/definitions/io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfiguration -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -â -metadataÕ -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"•`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. 
[binary content: protobuf-serialized Kubernetes OpenAPI v2 definitions (io.k8s.api.* / io.k8s.apiextensions-apiserver.* schemas) from a vendored file — not human-readable diff content, omitted]
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.² -stringúá -x-kubernetes-group-version-kind½º- kind: DeleteOptions - version: v1 - group: "" -- group: admission.k8s.io - kind: DeleteOptions - version: v1 -- group: admission.k8s.io - kind: DeleteOptions - version: v1beta1 -- kind: DeleteOptions - version: v1 - group: admissionregistration.k8s.io -- kind: DeleteOptions - version: v1beta1 - group: admissionregistration.k8s.io -- group: apiextensions.k8s.io - kind: DeleteOptions - version: v1 -- group: apiextensions.k8s.io - kind: DeleteOptions - version: v1beta1 -- group: apiregistration.k8s.io - kind: DeleteOptions - version: v1 -- group: apiregistration.k8s.io - kind: DeleteOptions - version: v1beta1 -- group: apps - kind: DeleteOptions - version: v1 -- group: apps - kind: DeleteOptions - version: v1beta1 -- group: apps - kind: DeleteOptions - version: v1beta2 -- group: authentication.k8s.io - kind: DeleteOptions - version: v1 -- group: authentication.k8s.io - kind: DeleteOptions - version: v1beta1 -- group: authorization.k8s.io - kind: DeleteOptions - version: v1 -- group: authorization.k8s.io - kind: DeleteOptions - version: v1beta1 -- kind: DeleteOptions - version: v1 - group: autoscaling -- group: autoscaling - kind: DeleteOptions - version: v2beta1 -- group: autoscaling - kind: DeleteOptions - version: v2beta2 -- version: v1 - group: batch - kind: DeleteOptions -- group: batch - kind: DeleteOptions - version: v1beta1 -- group: certificates.k8s.io - kind: DeleteOptions - version: v1 -- group: certificates.k8s.io - kind: DeleteOptions - version: v1beta1 -- group: coordination.k8s.io - kind: DeleteOptions - version: v1 -- group: coordination.k8s.io - kind: DeleteOptions - version: v1beta1 -- group: discovery.k8s.io - kind: DeleteOptions - version: v1 -- group: discovery.k8s.io - kind: DeleteOptions - version: v1beta1 -- group: events.k8s.io - kind: DeleteOptions - version: v1 -- group: events.k8s.io - kind: DeleteOptions - version: v1beta1 -- group: extensions - kind: DeleteOptions - version: v1beta1 -- group: flowcontrol.apiserver.k8s.io - kind: DeleteOptions - version: v1alpha1 -- group: flowcontrol.apiserver.k8s.io - kind: DeleteOptions - version: v1beta1 -- version: v1alpha1 - group: imagepolicy.k8s.io - kind: DeleteOptions -- group: internal.apiserver.k8s.io - kind: DeleteOptions - version: v1alpha1 -- version: v1 - group: networking.k8s.io - kind: DeleteOptions -- version: v1beta1 - group: networking.k8s.io - kind: DeleteOptions -- group: node.k8s.io - kind: DeleteOptions - version: v1 -- group: node.k8s.io - kind: DeleteOptions - version: v1alpha1 -- group: node.k8s.io - kind: DeleteOptions - version: v1beta1 -- group: policy - kind: DeleteOptions - version: v1 -- kind: DeleteOptions - version: v1beta1 - group: policy -- group: rbac.authorization.k8s.io - kind: DeleteOptions - version: v1 -- group: rbac.authorization.k8s.io - kind: DeleteOptions - version: v1alpha1 -- group: rbac.authorization.k8s.io - kind: DeleteOptions - version: v1beta1 -- group: scheduling.k8s.io - kind: DeleteOptions - version: v1 -- group: scheduling.k8s.io - kind: DeleteOptions - version: v1alpha1 -- group: scheduling.k8s.io - kind: DeleteOptions - version: v1beta1 -- kind: DeleteOptions - version: v1 - group: storage.k8s.io -- group: storage.k8s.io - kind: DeleteOptions - version: v1alpha1 -- group: storage.k8s.io - kind: 
DeleteOptions - version: v1beta1 - -È -&io.k8s.api.apps.v1.DeploymentCondition"KDeploymentCondition describes the state of a deployment at a certain point.štypešstatus² -objectʱ -2 -type*"Type of deployment condition.² -string -‘ -lastTransitionTime{ -7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"@Last time the condition transitioned from one status to another. -v -lastUpdateTimed -7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time")The last time this condition was updated. -Y -messageN"AA human readable message indicating details about the transition.² -string -F -reason<"/The reason for the condition's last transition.² -string -L -statusB"5Status of the condition, one of True, False, Unknown.² -string -¾ --io.k8s.api.authentication.v1.TokenRequestSpecŒ "HTokenRequestSpec contains client provided parameters of a token request.š audiences² -objectʧ -• - audiences‡"êAudiences are the intendend audiences of the token. A recipient of a token must identitfy themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences.² -arrayº - ² -string -Ž -boundObjectRefû -?#/definitions/io.k8s.api.authentication.v1.BoundObjectReference"·BoundObjectRef is a reference to an object that the token will be bound to. The token will only be valid for as long as the bound object exists. NOTE: The API server's TokenReview endpoint will validate the BoundObjectRef, but other audiences may not. Keep ExpirationSeconds small if you want prompt revocation. -û -expirationSecondsåint64"ÏExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response.² -integer -Í -/io.k8s.api.autoscaling.v2beta2.HPAScalingPolicy™"WHPAScalingPolicy is a single policy which must hold true for a specified past interval.štypešvalueš periodSeconds² -objectÊ’ -Ê - periodSeconds¸int32"¢PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).² -integer -@ -type8"+Type is used to specify the scaling policy.² -string -€ -valuewint32"bValue contains the amount of change which is permitted by the policy. It must be greater than zero² -integer -Ÿ -:io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerListà"GHorizontalPodAutoscaler is a list of horizontal pod autoscaler objects.šitems² -objectÊŒ -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -r -metadataf -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"'metadata is the standard list metadata. -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -˜ -itemsŽ"7items is the list of horizontal pod autoscaler objects.² -arrayºH -F -D#/definitions/io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerúq -x-kubernetes-group-version-kindNL- group: autoscaling - kind: HorizontalPodAutoscalerList - version: v2beta1 - -è -%io.k8s.api.discovery.v1beta1.Endpoint¾"FEndpoint represents a single logical "backend" implementing a service.š addresses² -objectÊÛ -Ú -nodeNameÍ"¿nodeName represents the name of the Node hosting this endpoint. This can be used to determine endpoints local to a Node. This field can be enabled with the EndpointSliceNodeName feature gate.² -string - - targetRef‚ -0#/definitions/io.k8s.api.core.v1.ObjectReference"NtargetRef is a reference to a Kubernetes object that represents this endpoint. -ó -topologyæ"Ètopology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node - where the endpoint is located. This should match the corresponding - node label. -* topology.kubernetes.io/zone: the value indicates the zone where the - endpoint is located. This should match the corresponding node label. -* topology.kubernetes.io/region: the value indicates the region where the - endpoint is located. This should match the corresponding node label. -This field is deprecated and will be removed in future api versions.ª - ² -string² -object -î - addressesà" addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.² -arrayº - ² -stringú -x-kubernetes-list-typeset - -™ - -conditionsŠ -=#/definitions/io.k8s.api.discovery.v1beta1.EndpointConditions"Iconditions contains information about the current status of the endpoint. -” -hintsŠ -8#/definitions/io.k8s.api.discovery.v1beta1.EndpointHints"Nhints contains information associated with how an endpoint should be consumed. -Î -hostnameÁ"³hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS Label (RFC 1123) validation.² -string -• -/io.k8s.api.storage.v1beta1.VolumeAttachmentSpecá"HVolumeAttachmentSpec is the specification of a VolumeAttachment request.šattacheršsourcešnodeName² -objectÊé -‚ -sourcex -?#/definitions/io.k8s.api.storage.v1beta1.VolumeAttachmentSource"5Source represents the volume that should be attached. -— -attacherŠ"}Attacher indicates the name of the volume driver that MUST handle this request. 
This is the name returned by GetPluginName().² -string -H -nodeName<"/The node that the volume should be attached to.² -string -· - io.k8s.api.apps.v1.DaemonSetSpec’ "3DaemonSetSpec is the specification of a daemon set.šselectorštemplate² -objectʸ - -’ -minReadySecondsþint32"èThe minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).² -integer -¿ -revisionHistoryLimit¦int32"The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.² -integer -Á -selector´ -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"ïA label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors -„ -template÷ -0#/definitions/io.k8s.api.core.v1.PodTemplateSpec"ÂAn object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template -“ -updateStrategy€ -8#/definitions/io.k8s.api.apps.v1.DaemonSetUpdateStrategy"DAn update strategy to replace existing DaemonSet pods with new pods. -Ó -3io.k8s.api.authentication.v1beta1.TokenReviewStatus›"DTokenReviewStatus is the result of the token authentication request.² -objectÊÆ -’ - audiences„"çAudiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audience field where status.authenticated is "true", the token is valid against the audience of the Kubernetes API server.² -arrayº - ² -string -g - authenticatedV"HAuthenticated indicates that the token was associated with a known user.² -boolean -H -error?"2Error indicates that the token couldn't be checked² -string -| -usert -8#/definitions/io.k8s.api.authentication.v1beta1.UserInfo"8User is the UserInfo associated with the provided token. -˜ -.io.k8s.api.authorization.v1.ResourceAttributeså"tResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface² -objectÊà -‚ -namez"mName is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all.² -string -ô - namespaceæ"ØNamespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces "" (empty) is defaulted for LocalSubjectAccessReviews "" (empty) is empty for cluster-scoped resources "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview² -string -X -resourceL"?Resource is one of the existing resource types. "*" means all.² -string -^ - subresourceO"BSubresource is one of the existing resource types. 
"" means none.² -string -ƒ -verb{"nVerb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all.² -string -S -versionH";Version is the API Version of the Resource. "*" means all.² -string -M -groupD"7Group is the API Group of the Resource. "*" means all.² -string -Ò -Cio.k8s.kube-aggregator.pkg.apis.apiregistration.v1.ServiceReferenceŠ";ServiceReference holds a reference to Service.legacy.k8s.io² -objectʾ -4 -name,"Name is the name of the service² -string -C - namespace6")Namespace is the namespace of the service² -string -À -port·int32"¡If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).² -integer -å -,io.k8s.api.flowcontrol.v1beta1.LimitResponse´"PLimitResponse defines how to handle requests that can not be executed right now.štype² -objectÊé -à -queuing· -A#/definitions/io.k8s.api.flowcontrol.v1beta1.QueuingConfiguration"r`queuing` holds the configuration parameters for queuing. This field may be non-empty only if `type` is `"Queue"`. -  -type—"‰`type` is "Queue" or "Reject". "Queue" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. "Reject" means that requests that can not be executed upon arrival are rejected. Required.² -stringú` -x-kubernetes-unionsIG- discriminator: type - fields-to-discriminateBy: - queuing: Queuing - -Ÿ --io.k8s.api.networking.v1.HTTPIngressRuleValueí"£HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http:///? -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.špaths² -objectÊ° -­ -paths£"4A collection of paths that map requests to backends.² -arrayº: -8 -6#/definitions/io.k8s.api.networking.v1.HTTPIngressPathú# -x-kubernetes-list-type atomic - -ü -&io.k8s.api.networking.v1.IngressStatusÑ"8IngressStatus describe the current state of the Ingress.² -objectʈ -… - loadBalanceru -3#/definitions/io.k8s.api.core.v1.LoadBalancerStatus">LoadBalancer contains the current status of the load-balancer. -Ÿ -(io.k8s.api.node.v1beta1.RuntimeClassListò"3RuntimeClassList is a list of RuntimeClass objects.šitems² -objectʽ -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -p -itemsg""Items is a list of schema objects.² -arrayº6 -4 -2#/definitions/io.k8s.api.node.v1beta1.RuntimeClass -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ë -metadata¾ -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadataúf -x-kubernetes-group-version-kindCA- group: node.k8s.io - kind: RuntimeClassList - version: v1beta1 - - -/io.k8s.apimachinery.pkg.util.intstr.IntOrStringŽ int-or-string"ñIntOrString is a type that can hold an int32 or a string. 
When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.² -string -‰ -1io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerÓ -"-configuration of a horizontal pod autoscaler.² -objectʪ -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ð -metadataà -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -Ù -specÐ -C#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerSpec"ˆbehaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. -| -statusr -E#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerStatus")current information about the autoscaler.úh -x-kubernetes-group-version-kindEC- kind: HorizontalPodAutoscaler - version: v1 - group: autoscaling - -ž -io.k8s.api.batch.v1.Job‚ "1Job represents the configuration of a single job.² -objectÊï -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ò -metadataÅ -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"ƒStandard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -Õ -specÌ -)#/definitions/io.k8s.api.batch.v1.JobSpec"žSpecification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - -status· -+#/definitions/io.k8s.api.batch.v1.JobStatus"‡Current status of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -stringúN -x-kubernetes-group-version-kind+)- version: v1 - group: batch - kind: Job - -¯ -$io.k8s.api.discovery.v1.EndpointPort† "7EndpointPort represents a Port used by an EndpointSlice² -objectʾ -b -protocolV"IThe IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.² -string -Ù - appProtocolÉ"»The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). 
Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.² -string -Å -name¼"®The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.² -string -³ -portªint32"”The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.² -integer -º -)io.k8s.api.networking.v1.IngressClassListŒ"3IngressClassList is a collection of IngressClasses.šitems² -objectÊÖ -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -b -metadataV -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"Standard list metadata. -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -s -itemsj"$Items is the list of IngressClasses.² -arrayº7 -5 -3#/definitions/io.k8s.api.networking.v1.IngressClassúg -x-kubernetes-group-version-kindDB- group: networking.k8s.io - kind: IngressClassList - version: v1 - -Õ -5io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerList›"*list of horizontal pod autoscaler objects.šitems² -objectÊé -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -… -items|"*list of horizontal pod autoscaler objects.² -arrayºC -A -?#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -b -metadataV -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"Standard list metadata.úl -x-kubernetes-group-version-kindIG- group: autoscaling - kind: HorizontalPodAutoscalerList - version: v1 - -Æ -,io.k8s.api.core.v1.ConfigMapNodeConfigSource•"lConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.š namespacešnameškubeletConfigKey² -objectÊò -p -nameh"[Name is the metadata.name of the referenced ConfigMap. This field is required in all cases.² -string - - namespacer"eNamespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases.² -string -± -resourceVersion"ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. 
This field is forbidden in Node.Spec, and required in Node.Status.² -string -Œ -uid„"wUID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.² -string -¹ -kubeletConfigKey¤"–KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases.² -string -³ -,io.k8s.api.core.v1.TypedLocalObjectReference‚"~TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.škindšname² -objectÊå -Ú -apiGroupÍ"¿APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.² -string -B -kind:"-Kind is the type of resource being referenced² -string -B -name:"-Name is the name of resource being referenced² -string -Ñ -!io.k8s.api.apps.v1.ReplicaSetList« ".ReplicaSetList is a collection of ReplicaSets.šitems² -objectʉ -· -items­"oList of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller² -arrayº/ -- -+#/definitions/io.k8s.api.apps.v1.ReplicaSet -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ï -metadata -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"‚Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -stringúX -x-kubernetes-group-version-kind53- group: apps - kind: ReplicaSetList - version: v1 - -Œ -7io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerStatusÐ"-current status of a horizontal pod autoscaleršcurrentReplicasšdesiredReplicas² -objectÊî -ß -currentCPUUtilizationPercentage»int32"¥current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.² -integer -f -currentReplicasSint32">current number of replicas of pods managed by this autoscaler.² -integer -f -desiredReplicasSint32">desired number of replicas of pods managed by this autoscaler.² -integer -Ù - lastScaleTimeÇ -7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"‹last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed. -^ -observedGenerationHint64"3most recent generation observed by this autoscaler.² -integer -ž -.io.k8s.api.core.v1.PersistentVolumeClaimStatusë"OPersistentVolumeClaimStatus is the current status of a persistent volume claim.² -objectÊ‹ -Ò - accessModesÂ"¥AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1² -arrayº - ² -string -• -capacityˆ"9Represents the actual resources of the underlying volume.ª? 
-= -;#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity² -object -Ç - -conditions¸"‘Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.² -arrayºC -A -?#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimConditionú' -x-kubernetes-patch-strategymerge -ú' -x-kubernetes-patch-merge-keytype - -R -phaseI" -<#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Conditionú' -x-kubernetes-list-map-keys - type -ú -x-kubernetes-list-typemap -ú' -x-kubernetes-patch-merge-keytype -ú' -x-kubernetes-patch-strategymerge - -E -currentHealthy3int32"current number of healthy pods² -integer -M -desiredHealthy;int32"&minimum desired number of healthy pods² -integer - - disruptedPodsû"¯DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. Large number of entries in the map may indicate problems with pod deletions.ª; -9 -7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time² -object -` -disruptionsAllowedJint32"5Number of pod disruptions that are currently allowed.² -integer -[ - expectedPodsKint32"6total number of pods counted by this disruption budget² -integer - -/io.k8s.api.autoscaling.v2beta1.PodsMetricStatusÜ"šPodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).š -metricNamešcurrentAverageValue² -objectÊ -õ -selectorè -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"£selector is the string-encoded form of a standard kubernetes label selector for the given metric When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics. -Å -currentAverageValue­ -;#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity"ncurrentAverageValue is the current value of the average of the metric across all relevant pods (as a quantity) -K - -metricName="0metricName is the name of the metric in question² -string -õ - -io.k8s.api.batch.v1.CronJobSpecÑ -"YCronJobSpec describes how the job execution will look like and when it will actually run.šscheduleš jobTemplate² -objectÊÎ -  -suspend”"…This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.² -boolean -Ì -concurrencyPolicy¶"¨Specifies how to treat concurrent executions of a Job. Valid values are: - "Allow" (default): allows CronJobs to run concurrently; - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - "Replace": cancels currently running job and replaces it with a new one² -string - -failedJobsHistoryLimituint32"`The number of failed finished jobs to retain. Value must be non-negative integer. 
Defaults to 1.² -integer -„ - jobTemplateu -1#/definitions/io.k8s.api.batch.v1.JobTemplateSpec"@Specifies the job that will be created when executing a CronJob. -] -scheduleQ"DThe schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.² -string -Ç -startingDeadlineSeconds«int64"•Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.² -integer -— -successfulJobsHistoryLimityint32"dThe number of successful finished jobs to retain. Value must be non-negative integer. Defaults to 3.² -integer -« -&io.k8s.api.core.v1.ConfigMapProjection€ "åAdapts a ConfigMap into a projected volume. - -The contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.² -objectʉ -ª -items "âIf unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.² -arrayº. -, -*#/definitions/io.k8s.api.core.v1.KeyToPath -„ -name|"oName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names² -string -S -optionalG"9Specify whether the ConfigMap or its keys must be defined² -boolean - -3io.k8s.api.authorization.v1beta1.ResourceAttributeså"tResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface² -objectÊà -ƒ -verb{"nVerb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all.² -string -S -versionH";Version is the API Version of the Resource. "*" means all.² -string -M -groupD"7Group is the API Group of the Resource. "*" means all.² -string -‚ -namez"mName is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all.² -string -ô - namespaceæ"ØNamespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces "" (empty) is defaulted for LocalSubjectAccessReviews "" (empty) is empty for cluster-scoped resources "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview² -string -X -resourceL"?Resource is one of the existing resource types. "*" means all.² -string -^ - subresourceO"BSubresource is one of the existing resource types. "" means none.² -string -ƒ -3io.k8s.api.autoscaling.v2beta2.ResourceMetricStatusË"ÝResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.šnamešcurrent² -objectÊË -„ -currenty ->#/definitions/io.k8s.api.autoscaling.v2beta2.MetricValueStatus"7current contains the current value for the given metric -B -name:"-Name is the name of the resource in question.² -string -à -"io.k8s.api.core.v1.PodAntiAffinity¹ "IPod anti affinity is a group of inter pod anti affinity scheduling rules.² -objectÊß - -ö -/preferredDuringSchedulingIgnoredDuringExecutionÂ"öThe scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.² -arrayº< -: -8#/definitions/io.k8s.api.core.v1.WeightedPodAffinityTerm -ã -.requiredDuringSchedulingIgnoredDuringExecution°"ìIf the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.² -arrayº4 -2 -0#/definitions/io.k8s.api.core.v1.PodAffinityTerm -à -!io.k8s.api.batch.v1.CronJobStatus"9CronJobStatus represents the current state of a cron job.² -objectÊÓ -¡ -active–"-A list of pointers to currently running jobs.² -arrayº4 -2 -0#/definitions/io.k8s.api.core.v1.ObjectReferenceú# -x-kubernetes-list-type atomic - -– -lastScheduleTime -7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"FInformation when was the last time the job was successfully scheduled. -“ -lastSuccessfulTime} -7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"BInformation when was the last time the job successfully completed. -‰ -'io.k8s.api.core.v1.HostPathVolumeSourceÝ"vRepresents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.špath² -objectÊÏ -Å -path¼"®Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath² -string -„ -type|"oType for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath² -string -² -0io.k8s.api.core.v1.WindowsSecurityContextOptionsý"OWindowsSecurityContextOptions contain Windows-specific options and credentials.² -objectÊ -é -gmsaCredentialSpecÒ"ÄGMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.² -string -m -gmsaCredentialSpecNameS"FGMSACredentialSpecName is the name of the GMSA credential spec to use.² -string -¿ - runAsUserName­"ŸThe UserName in Windows to run the entrypoint of the container process. 
Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.² -string - -)io.k8s.api.extensions.v1beta1.IngressListá"'IngressList is a collection of Ingress.šitems² -objectʾ -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -l -itemsc"Items is the list of Ingress.² -arrayº7 -5 -3#/definitions/io.k8s.api.extensions.v1beta1.Ingress -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ð -metadataà -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"ƒStandard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadataú` -x-kubernetes-group-version-kind=;- group: extensions - kind: IngressList - version: v1beta1 - -† - io.k8s.api.networking.v1.IPBlocká"ãIPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.šcidr² -objectÊå -v -cidrn"aCIDR is a string representing the IP Block Valid examples are "192.168.1.1/24" or "2001:db9::/64"² -string -ê -exceptß"ÂExcept is a slice of CIDRs that should not be included within an IP Block Valid examples are "192.168.1.1/24" or "2001:db9::/64" Except values will be rejected if they are outside the CIDR range² -arrayº - ² -string -¼ -3io.k8s.api.apps.v1.RollingUpdateStatefulSetStrategy„"kRollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.² -objectʈ -… - partitionxint32"cPartition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.² -integer - -io.k8s.api.autoscaling.v1.Scalež "2Scale represents a scaling request for a resource.² -objectÊ‚ - -Ñ -specÈ -1#/definitions/io.k8s.api.autoscaling.v1.ScaleSpec"’defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. -Ú -statusÏ -3#/definitions/io.k8s.api.autoscaling.v1.ScaleStatus"—current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ñ -metadataÄ -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"‚Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.úV -x-kubernetes-group-version-kind31- kind: Scale - version: v1 - group: autoscaling - -ð - -//.../`. Must match the name of the CustomResourceDefinition (in the form `.`). Must be all lowercase.² -string -á - -shortNamesÒ"µshortNames are short names for the resource, exposed in API discovery documents, and used by clients to support invocations like `kubectl get `. It must be all lowercase.² -arrayº - ² -string -€ -singulart"gsingular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`.² -string -ô - -categorieså"Ècategories is a list of grouped resources this custom resource belongs to (e.g. 'all'). This is published in API discovery documents, and used by clients to support invocations like `kubectl get all`.² -arrayº - ² -string -Ó -1io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource"%Represents a vSphere volume resource.š -volumePath² -objectÊÚ -À -fsTypeµ"§Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.² -string -x -storagePolicyIDe"XStorage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.² -string -V -storagePolicyNameA"4Storage Policy Based Management (SPBM) profile name.² -string -C - -volumePath5"(Path that identifies vSphere volume vmdk² -string -Ÿ -(io.k8s.api.core.v1.AzureDiskVolumeSourceò"TAzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.šdiskNamešdiskURI² -objectÊø -K - cachingMode<"/Host Caching mode: None, Read Only, Read Write.² -string -F -diskName:"-The Name of the data disk in the blob storage² -string -A -diskURI6")The URI the data disk in the blob storage² -string -À -fsTypeµ"§Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.² -string -à -kind×"ÉExpected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared² -string -x -readOnlyl"^Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.² -boolean -² - io.k8s.api.core.v1.NodeCondition"8NodeCondition contains condition information for a node.štypešstatus² -objectÊ´ -€ -lastHeartbeatTimek -7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"0Last time we got an update on a given condition. -Œ -lastTransitionTimev -7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time";Last time the condition transit from one status to another. -X -messageM"@Human readable message indicating details about last transition.² -string -J -reason@"3(brief) reason for the condition's last transition.² -string -L -statusB"5Status of the condition, one of True, False, Unknown.² -string -, -type$"Type of node condition.² -string -à -(io.k8s.api.core.v1.StorageOSVolumeSource³ "2Represents a StorageOS persistent volume resource.² -objectÊð -À -fsTypeµ"§Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". 
Implicitly inferred to be "ext4" if unspecified.² -string -x -readOnlyl"^Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.² -boolean -Ï - secretRefÁ -5#/definitions/io.k8s.api.core.v1.LocalObjectReference"‡SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted. -‹ - -volumeName}"pVolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.² -string -Ð -volumeNamespace¼"®VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to "default" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.² -string -Ë -,io.k8s.api.networking.v1beta1.IngressBackendš"DIngressBackend describes all endpoints for a given service and port.² -objectÊÅ -} - servicePortn -=#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString"-Specifies the port of the referenced service. -ø -resourceë -:#/definitions/io.k8s.api.core.v1.TypedLocalObjectReference"¬Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, serviceName and servicePort must not be specified. -I - serviceName:"-Specifies the name of the referenced service.² -string -ç -3io.k8s.api.autoscaling.v2beta2.ResourceMetricSource¯"ÊResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. Only one "target" type should be set.šnameštarget² -objectÊà -B -name:"-name is the name of the resource in question.² -string -} -targets -9#/definitions/io.k8s.api.autoscaling.v2beta2.MetricTarget"6target specifies the target value for the given metric -ô - io.k8s.api.core.v1.ConfigMapListÏ"CConfigMapList is a resource containing a list of ConfigMap objects.šitems² -objectÊ› -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -f -items]" Items is the list of ConfigMaps.² -arrayº. -, -*#/definitions/io.k8s.api.core.v1.ConfigMap -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
[Binary vendored-file content omitted. This hunk appears to be a protobuf-serialized Kubernetes OpenAPI v2 (Swagger) document: the recoverable fragments are schema definitions such as io.k8s.api.core.v1.Lifecycle, io.k8s.api.core.v1.PodSpec, io.k8s.api.core.v1.SecurityContext, io.k8s.api.core.v1.PersistentVolumeClaimSpec, and io.k8s.api.admissionregistration.v1.ValidatingWebhook, together with their field descriptions, "#/definitions/..." references, and x-kubernetes-group-version-kind extensions. The raw wire-format bytes are not reviewable as text and are left out here.]
After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.² -integer -ì -7io.k8s.api.admissionregistration.v1.WebhookClientConfig° "VWebhookClientConfig contains the information to make a TLS connection with the webhook² -objectÊÉ -Å -caBundle¸byte"¤`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.² -string -‡ -serviceû -B#/definitions/io.k8s.api.admissionregistration.v1.ServiceReference"´`service` is a reference to the service for this webhook. Either `service` or `url` must be specified. - -If the webhook is running within the cluster, then you should use `service`. -ô -urlì"Þ`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified. - -The `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address. - -Please note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster. - -The scheme must be "https"; the URL must begin with "https://". - -A path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier. - -Attempting to use a user or basic auth e.g. "user:password@" is not allowed. Fragments ("#...") and query parameters ("?...") are not allowed, either.² -string -Ó -"io.k8s.api.apps.v1.StatefulSetSpec¬"8A StatefulSetSpec is the specification of a StatefulSet.šselectorštemplateš serviceName² -objectÊ¿ - -replicas‚int32"ìreplicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.² -integer -­ -revisionHistoryLimit”int32"þrevisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.² -integer -¨ -selector› -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"Öselector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors -û - serviceNameë"ÝserviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where "pod-specific-string" is managed by the StatefulSet controller.² -string -¬ -templateŸ -0#/definitions/io.k8s.api.core.v1.PodTemplateSpec"êtemplate is the object that describes the pod that will be created if insufficient replicas are detected. 
Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. -å -updateStrategyÒ -:#/definitions/io.k8s.api.apps.v1.StatefulSetUpdateStrategy"“updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template. -þ -volumeClaimTemplateså"›volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.² -arrayº: -8 -6#/definitions/io.k8s.api.core.v1.PersistentVolumeClaim -¹ -podManagementPolicy¡"“podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.² -string -¾ -%io.k8s.api.extensions.v1beta1.Ingress”"‹Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc. DEPRECATED - This group version of Ingress is deprecated by networking.k8s.io/v1beta1 Ingress. See the release notes for more information.² -objectʘ - -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ò -metadataÅ -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"ƒStandard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -Ý -specÔ -7#/definitions/io.k8s.api.extensions.v1beta1.IngressSpec"˜Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status -ã -statusØ -9#/definitions/io.k8s.api.extensions.v1beta1.IngressStatus"šStatus is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-statusú\ -x-kubernetes-group-version-kind97- group: extensions - kind: Ingress - version: v1beta1 - -® -"io.k8s.api.rbac.v1.RoleBindingList‡"/RoleBindingList is a collection of RoleBindingsšitems² -objectÊÎ -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -g -items^"Items is a list of RoleBindings² -arrayº0 -. -,#/definitions/io.k8s.api.rbac.v1.RoleBinding -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -f -metadataZ -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"Standard object's metadata.ún -x-kubernetes-group-version-kindKI- group: rbac.authorization.k8s.io - kind: RoleBindingList - version: v1 - -¿ -"io.k8s.api.core.v1.SecretEnvSource˜"¹SecretEnvSource selects a Secret to populate the environment variables with. - -The contents of the target Secret's Data field will represent the key-value pairs as environment variables.² -objectÊÍ -„ -name|"oName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names² -string -D -optional8"*Specify whether the Secret must be defined² -boolean -¡ - -Fio.k8s.api.admissionregistration.v1.ValidatingWebhookConfigurationListÖ "OValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.šitems² -objectÊæ -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -” -itemsŠ"'List of ValidatingWebhookConfiguration.² -arrayºT -R -P#/definitions/io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ï -metadata -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"‚Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kindsú„ -x-kubernetes-group-version-kinda_- kind: ValidatingWebhookConfigurationList - version: v1 - group: admissionregistration.k8s.io - -‚ -*io.k8s.api.core.v1.DownwardAPIVolumeSourceÓ"“DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling.² -objectÊ® -¬ - defaultModeœint32"†Optional: mode bits to use on created files by default. Must be a Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.² -integer -} -itemst"+Items is a list of downward API volume file² -arrayº: -8 -6#/definitions/io.k8s.api.core.v1.DownwardAPIVolumeFile -â -&io.k8s.api.core.v1.QuobyteVolumeSource·"‡Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling.šregistryšvolume² -objectÊŠ -Q -userI""1Group to map volume access to Default is no group² -string -‚ -readOnlyv"hReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.² -boolean -á -registryÔ"ÆRegistry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes² -string -œ -tenant‘"ƒTenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin² -string -æ -0io.k8s.api.policy.v1beta1.SELinuxStrategyOptions±"]SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.šrule² -objectʼ -a -ruleY"Lrule is the strategy that will dictate the allowable labels that may be set.² -string -Ö -seLinuxOptionsà -/#/definitions/io.k8s.api.core.v1.SELinuxOptions"seLinuxOptions required to run as; required for MustRunAs More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ -Õ& -io.k8s.api.batch.v1.JobSpecµ&"7JobSpec describes how the job execution will look like.štemplate² -objectÊâ% -¦ - parallelism–int32"€Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/² -integer -” -suspendˆ"ùSuspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. This is an alpha field and requires the SuspendJob feature gate to be enabled; otherwise this field may not be set to true. Defaults to false.² -boolean -æ -activeDeadlineSecondsÌint64"¶Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.² -integer -¿ -completionMode¬"žCompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`. - -`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other. - -`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. 
The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. - -This field is alpha-level and is only honored by servers that enable the IndexedJob feature gate. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.² -string -Ð - completionsÀint32"ªSpecifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/² -integer -Û -templateÎ -0#/definitions/io.k8s.api.core.v1.PodTemplateSpec"™Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ -ó -ttlSecondsAfterFinished×int32"ÁttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.² -integer -r - backoffLimitbint32"MSpecifies the number of retries before marking this job failed. Defaults to 6² -integer -¹ -manualSelector¦"—manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector² -boolean - -selector -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"ËA label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors -· - io.k8s.api.coordination.v1.Lease’ "Lease defines a lease concept.² -objectÊ‚ -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -µ -metadata¨ -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"gMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -Ê -specÁ -2#/definitions/io.k8s.api.coordination.v1.LeaseSpec"ŠSpecification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-statusú^ -x-kubernetes-group-version-kind;9- group: coordination.k8s.io - kind: Lease - version: v1 - -› - -,io.k8s.api.core.v1.PersistentVolumeClaimListê "CPersistentVolumeClaimList is a list of PersistentVolumeClaim items.šitems² -objectʪ -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ï -metadata -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"‚Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -Ø -itemsÎ"„A list of persistent volume claims. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims² -arrayº: -8 -6#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimúa -x-kubernetes-group-version-kind><- group: "" - kind: PersistentVolumeClaimList - version: v1 - -± -3io.k8s.api.autoscaling.v2beta2.ExternalMetricSourceù"ÐExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).šmetricštarget² -objectÊ… -ƒ -metricy -=#/definitions/io.k8s.api.autoscaling.v2beta2.MetricIdentifier"8metric identifies the target metric by name and selector -} -targets -9#/definitions/io.k8s.api.autoscaling.v2beta2.MetricTarget"6target specifies the target value for the given metric -Å --io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta“"£ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.² -objectÊÞ -ê -selfLinkÝ"ÏselfLink is a URL representing this object. Populated by the system. Read-only. - -DEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.² -string -ä -continue×"Écontinue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. 
The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.² -string -ß -remainingItemCountÈint64"²remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.² -integer -¥ -resourceVersion‘"ƒString that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency² -string -‡ -3io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReferenceÏ"ÇOwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.š -apiVersionškindšnamešuid² -objectÊÕ -— -kindŽ"€Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -l -named"WName of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names² -string -i -uidb"UUID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids² -string -7 - -apiVersion)"API version of the referent.² -string -Î -blockOwnerDeletion·"¨If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.² -boolean -V - -controllerH":If true, this reference points to the managing controller.² -boolean -Ÿ -$io.k8s.api.node.v1beta1.RuntimeClassö"ØRuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.mdšhandler² -objectÊ -› - -schedulingŒ -0#/definitions/io.k8s.api.node.v1beta1.Scheduling"×Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes. -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -Ð -handlerÄ"¶Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called "runc" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.² -string -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -µ -metadata¨ -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"gMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -ô -overheadç -.#/definitions/io.k8s.api.node.v1beta1.Overhead"´Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.úb -x-kubernetes-group-version-kind?=- group: node.k8s.io - kind: RuntimeClass - version: v1beta1 - -Ä -"io.k8s.api.storage.v1.StorageClass"ãStorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned. - -StorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.š provisioner² -objectÊ· -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -ì - mountOptionsÛ"¾Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. ["ro", "soft"]. Not validated - mount of the PVs will simply fail if one is invalid.² -arrayº - ² -string -N - provisioner?"2Provisioner indicates the type of the provisioner.² -string -ø -volumeBindingModeâ"ÔVolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.² -string -n -allowVolumeExpansionV"HAllowVolumeExpansion shows whether the storage class allow volume expand² -boolean -Š -allowedTopologiesô"«Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. 
This field is only honored by servers that enable the VolumeScheduling feature.² -arrayº9 -7 -5#/definitions/io.k8s.api.core.v1.TopologySelectorTerm -Ò -metadataÅ -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"ƒStandard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -‘ - -parameters‚"eParameters holds the parameters for the provisioner that should create volumes of this storage class.ª - ² -string² -object -— - reclaimPolicy…"xDynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.² -stringú` -x-kubernetes-group-version-kind=;- group: storage.k8s.io - kind: StorageClass - version: v1 - -° - -Kio.k8s.api.admissionregistration.v1beta1.ValidatingWebhookConfigurationListà "OValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.šitems² -objectÊë -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -™ -items"'List of ValidatingWebhookConfiguration.² -arrayºY -W -U#/definitions/io.k8s.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ï -metadata -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"‚Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kindsú‰ -x-kubernetes-group-version-kindfd- group: admissionregistration.k8s.io - kind: ValidatingWebhookConfigurationList - version: v1beta1 - -ˆ -%io.k8s.api.apps.v1.ControllerRevisionÞ"±ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.šrevision² -objectʱ -Ò -metadataÅ -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"ƒStandard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -b -revisionVint64"ARevision indicates the revision of the state represented by Data.² -integer -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -y -dataq -:#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension"3Data is the serialized representation of the state. -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -stringú\ -x-kubernetes-group-version-kind97- group: apps - kind: ControllerRevision - version: v1 - -ð -8io.k8s.api.authorization.v1beta1.SelfSubjectAccessReview³ "öSelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means "in all namespaces". Self is a special case, because users should always be able to check whether they can perform an actionšspec² -objectÊ« -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -K -metadata? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta -¯ -spec¦ -J#/definitions/io.k8s.api.authorization.v1beta1.SelfSubjectAccessReviewSpec"XSpec holds information about the request being evaluated. user and groups must be empty -¬ -status¡ -H#/definitions/io.k8s.api.authorization.v1beta1.SubjectAccessReviewStatus"UStatus is filled in by the server and indicates whether the request is allowed or notúv -x-kubernetes-group-version-kindSQ- group: authorization.k8s.io - kind: SelfSubjectAccessReview - version: v1beta1 - -Ù -&io.k8s.api.storage.v1beta1.CSINodeSpec®"\CSINodeSpec holds information about the specification of all CSI drivers installed on a nodešdrivers² -objectÊ· -´ -drivers¨"Šdrivers is a list of information of all CSI Drivers existing on a node. If all drivers in the list are uninstalled, this can become empty.² -arrayº: -8 -6#/definitions/io.k8s.api.storage.v1beta1.CSINodeDriverú' -x-kubernetes-patch-merge-keyname -ú' -x-kubernetes-patch-strategymerge - -© -+io.k8s.api.storage.v1beta1.StorageClassListù"4StorageClassList is a collection of storage classes.šitems² -objectÊÀ -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -t -itemsk"#Items is the list of StorageClasses² -arrayº9 -7 -5#/definitions/io.k8s.api.storage.v1beta1.StorageClass -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ê -metadata½ -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"~Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadataúi -x-kubernetes-group-version-kindFD- version: v1beta1 - group: storage.k8s.io - kind: StorageClassList - -å -2io.k8s.api.core.v1.StorageOSPersistentVolumeSource® "2Represents a StorageOS persistent volume resource.² -objectÊë -Ð -volumeNamespace¼"®VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to "default" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.² -string -À -fsTypeµ"§Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.² -string -x -readOnlyl"^Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.² -boolean -Ê - secretRef¼ -0#/definitions/io.k8s.api.core.v1.ObjectReference"‡SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted. -‹ - -volumeName}"pVolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.² -string -Ý -)io.k8s.api.extensions.v1beta1.IngressRule¯ "ìIngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.² -objectʱ -J -httpB -@#/definitions/io.k8s.api.extensions.v1beta1.HTTPIngressRuleValue -â - -hostÙ -"Ë -Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the "host" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to - the IP in the Spec of the parent Ingress. -2. The `:` delimiter is not respected because ports are not allowed. - Currently the port of an Ingress is implicitly :80 for http and - :443 for https. -Both these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue. - -Host can be "precise" which is a domain name without the terminating dot of a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name prefixed with a single wildcard label (e.g. "*.foo.com"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. 
If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.² -string -Ü -*io.k8s.api.policy.v1beta1.AllowedCSIDriver­"RAllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.šname² -objectÊD -B -name:"-Name is the registered name of the CSI driver² -string -¥ -%io.k8s.api.core.v1.SecretVolumeSourceû "íAdapts a Secret into a volume. - -The contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.² -objectÊü -ê - defaultModeÚint32"ÄOptional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.² -integer -¤ -itemsš"ÜIf unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.² -arrayº. -, -*#/definitions/io.k8s.api.core.v1.KeyToPath -P -optionalD"6Specify whether the Secret or its keys must be defined² -boolean -“ - -secretName„"wName of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret² -string -ê -Sio.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresources’"YCustomResourceSubresources defines the status and scale subresources for CustomResources.² -objectʨ -é -scaleß -e#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresourceScale"vscale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object. -¹ -status® -f#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresourceStatus"Ãstatus indicates the custom resource should serve a `/status` subresource. When enabled: 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object. -Ø -Cio.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceStatus"AAPIServiceStatus contains derived information about an API server² -objectʾ -» - -conditions¬"$Current service state of apiService.² -arrayºX -V -T#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceConditionú' -x-kubernetes-list-map-keys - type -ú -x-kubernetes-list-typemap -ú' -x-kubernetes-patch-merge-keytype -ú' -x-kubernetes-patch-strategymerge - -ª -9io.k8s.api.authorization.v1beta1.LocalSubjectAccessReviewì "çLocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. 
Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.šspec² -objectÊò -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -K -metadata? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta -ö -specí -F#/definitions/io.k8s.api.authorization.v1beta1.SubjectAccessReviewSpec"¢Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. If empty, it is defaulted. -¬ -status¡ -H#/definitions/io.k8s.api.authorization.v1beta1.SubjectAccessReviewStatus"UStatus is filled in by the server and indicates whether the request is allowed or notúw -x-kubernetes-group-version-kindTR- group: authorization.k8s.io - kind: LocalSubjectAccessReview - version: v1beta1 - -ª - io.k8s.api.batch.v1.JobCondition…".JobCondition describes current state of a job.štypešstatus² -objectʶ -Œ -lastTransitionTimev -7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time";Last time the condition transit from one status to another. -X -messageM"@Human readable message indicating details about last transition.² -string -J -reason@"3(brief) reason for the condition's last transition.² -string -L -statusB"5Status of the condition, one of True, False, Unknown.² -string -? -type7"*Type of job condition, Complete or Failed.² -string -p - lastProbeTime_ -7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"$Last time the condition was checked. -ö6 -%io.k8s.api.core.v1.EphemeralContainerÌ6"ƒAn EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag.šname² -objectÊ°1 -ß -env×"IList of environment variables to set in the container. Cannot be updated.² -arrayº+ -) -'#/definitions/io.k8s.api.core.v1.EnvVarú' -x-kubernetes-patch-merge-keyname -ú' -x-kubernetes-patch-strategymerge - -© -name "’Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers.² -string -y -portsp"/Ports are not allowed for ephemeral containers.² -arrayº2 -0 -.#/definitions/io.k8s.api.core.v1.ContainerPort -l -readinessProbeZ -&#/definitions/io.k8s.api.core.v1.Probe"0Probes are not allowed for ephemeral containers. - -securityContextl -0#/definitions/io.k8s.api.core.v1.SecurityContext"8SecurityContext is not allowed for ephemeral containers. 
[Binary vendored-file content omitted: this span is a protobuf-serialized Kubernetes OpenAPI v2 document (io.k8s.api.*, io.k8s.apiextensions-apiserver.*, io.k8s.apimachinery.* schema definitions with their x-kubernetes-* extensions). The data is not human-readable in the diff and is left out of this listing; only the surrounding Go source and go.mod/go.sum changes are meaningful to review.]
This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.² -string -ù -rulesï"ƒA list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.² -arrayº6 -4 -2#/definitions/io.k8s.api.networking.v1.IngressRuleú# -x-kubernetes-list-type atomic - -Š -*io.k8s.api.storage.v1.VolumeAttachmentSpecÛ"HVolumeAttachmentSpec is the specification of a VolumeAttachment request.šattacheršsourcešnodeName² -objectÊã -— -attacherŠ"}Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().² -string -H -nodeName<"/The node that the volume should be attached to.² -string -} -sources -:#/definitions/io.k8s.api.storage.v1.VolumeAttachmentSource"5Source represents the volume that should be attached. -µ -6io.k8s.api.authorization.v1beta1.NonResourceAttributesú"{NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface² -objectÊo -8 -path0"#Path is the URL path of the request² -string -3 -verb+"Verb is the standard HTTP verb² -string -Á ->io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerBehaviorþ"™HorizontalPodAutoscalerBehavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively).² -objectÊÓ -° - scaleDown¢ -<#/definitions/io.k8s.api.autoscaling.v2beta2.HPAScalingRules"áscaleDown is scaling policy for scaling Down. If not set, the default value is to allow to scale down to minReplicas pods, with a 300 second stabilization window (i.e., the highest recommendation for the last 300sec is used). - -scaleUp‘ -<#/definitions/io.k8s.api.autoscaling.v2beta2.HPAScalingRules"ÐscaleUp is scaling policy for scaling Up. If not set, the default value is the higher of: - * increase no more than 4 pods per 60 seconds - * double the number of pods per 60 seconds -No stabilization is used. -Æ -$io.k8s.api.core.v1.ISCSIVolumeSource "’Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.š targetPortalšiqnšlun² -objectÊÞ -¤ - targetPortal“"…iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).² -string -V -chapAuthDiscoveryA"3whether support iSCSI Discovery CHAP authentication² -boolean -µ -fsTypeª"œFilesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi² -string -Ô - initiatorNameÂ"´Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.² -string -4 -lun-int32"iSCSI Target Lun number.² -integer -k -readOnly_"QReadOnly here will force the ReadOnly setting in VolumeMounts. 
Defaults to false.² -boolean - - secretRefr -5#/definitions/io.k8s.api.core.v1.LocalObjectReference"9CHAP Secret for iSCSI target and initiator authentication -R -chapAuthSession?"1whether support iSCSI Session CHAP authentication² -boolean -0 -iqn)"Target iSCSI Qualified Name.² -string -n -iscsiInterface\"OiSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).² -string -³ -portals§"ŠiSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).² -arrayº - ² -string -ò -"io.k8s.api.core.v1.PodTemplateListË"*PodTemplateList is a list of PodTemplates.šitems² -objectÊ® -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -] -itemsT"List of pod templates² -arrayº0 -. -,#/definitions/io.k8s.api.core.v1.PodTemplate -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ï -metadata -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"‚Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kindsúW -x-kubernetes-group-version-kind42- version: v1 - group: "" - kind: PodTemplateList - - -*io.k8s.api.discovery.v1beta1.EndpointSliceà"ÜEndpointSlice represents a subset of the endpoints that implement a service. For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints.š addressTypeš endpoints² -objectÊí -h -metadata\ -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"Standard object's metadata. -© -portsŸ"®ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates "all ports". Each slice may include a maximum of 100 ports.² -arrayº; -9 -7#/definitions/io.k8s.api.discovery.v1beta1.EndpointPortú# -x-kubernetes-list-type atomic - -ð - addressTypeà"ÒaddressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name.² -string -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -ä - endpointsÖ"jendpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints.² -arrayº7 -5 -3#/definitions/io.k8s.api.discovery.v1beta1.Endpointú# -x-kubernetes-list-type atomic - -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -stringúh -x-kubernetes-group-version-kindEC- version: v1beta1 - group: discovery.k8s.io - kind: EndpointSlice - -‹ -&io.k8s.api.flowcontrol.v1beta1.Subjectà"´Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.škind² -objectÊ -B -user: -8#/definitions/io.k8s.api.flowcontrol.v1beta1.UserSubject -D -group; -9#/definitions/io.k8s.api.flowcontrol.v1beta1.GroupSubject - -kind"Required² -string -V -serviceAccountD -B#/definitions/io.k8s.api.flowcontrol.v1beta1.ServiceAccountSubjectúŽ -x-kubernetes-unionswu- discriminator: kind - fields-to-discriminateBy: - group: Group - serviceAccount: ServiceAccount - user: User - -Ö -.io.k8s.api.policy.v1.PodDisruptionBudgetStatus£"ŠPodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.šdisruptionsAllowedšcurrentHealthyšdesiredHealthyš expectedPods² -objectÊÁ -[ - expectedPodsKint32"6total number of pods counted by this disruption budget² -integer -â -observedGenerationËint64"µMost recent generation observed when updating this PDB status. DisruptionsAllowed and other status information is valid only if observedGeneration equals to PDB's object generation.² -integer -ô - -conditionså"ôConditions contain conditions for PDB. The disruption controller sets the DisruptionAllowed condition. The following are known values for the reason field (additional reasons could be added in the future): - SyncFailed: The controller encountered an error and wasn't able to compute - the number of allowed disruptions. Therefore no disruptions are - allowed and the status of the condition will be False. -- InsufficientPods: The number of pods are either at or below the number - required by the PodDisruptionBudget. No disruptions are - allowed and the status of the condition will be False. -- SufficientPods: There are more pods than required by the PodDisruptionBudget. - The condition will be True, and the number of allowed - disruptions are provided by the disruptionsAllowed property.² -arrayº@ -> -<#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Conditionú' -x-kubernetes-list-map-keys - type -ú -x-kubernetes-list-typemap -ú' -x-kubernetes-patch-merge-keytype -ú' -x-kubernetes-patch-strategymerge - -E -currentHealthy3int32"current number of healthy pods² -integer -M -desiredHealthy;int32"&minimum desired number of healthy pods² -integer - - disruptedPodsû"¯DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. 
Large number of entries in the map may indicate problems with pod deletions.ª; -9 -7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time² -object -` -disruptionsAllowedJint32"5Number of pod disruptions that are currently allowed.² -integer -š -(io.k8s.api.storage.v1beta1.CSIDriverListí"3CSIDriverList is a collection of CSIDriver objects.šitems² -objectʸ -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -l -itemsc"items is the list of CSIDriver² -arrayº6 -4 -2#/definitions/io.k8s.api.storage.v1beta1.CSIDriver -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ê -metadata½ -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"~Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadataúf -x-kubernetes-group-version-kindCA- kind: CSIDriverList - version: v1beta1 - group: storage.k8s.io - -á -&io.k8s.api.core.v1.ComponentStatusList¶ "Status of all the conditions for the component as a list of ComponentStatus objects. Deprecated: This API is deprecated in v1.19+šitems² -objectʽ -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -l -itemsc" List of ComponentStatus objects.² -arrayº4 -2 -0#/definitions/io.k8s.api.core.v1.ComponentStatus -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ï -metadata -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"‚Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kindsú[ -x-kubernetes-group-version-kind86- kind: ComponentStatusList - version: v1 - group: "" - -ä -io.k8s.api.core.v1.PodDNSConfigÀ"_PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.² -objectÊÐ -Ä - nameservers´"—A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.² -arrayº - ² -string -µ -options©"âA list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.² -arrayº7 -5 -3#/definitions/io.k8s.api.core.v1.PodDNSConfigOption -Î -searchesÁ"¤A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. 
Duplicated search paths will be removed.² -arrayº - ² -string -£ -io.k8s.api.core.v1.PodList„ "PodList is a list of Pods.šitems² -objectÊÿ -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -­ -items£"lList of pods. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md² -arrayº( -& -$#/definitions/io.k8s.api.core.v1.Pod -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ï -metadata -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"‚Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kindsúO -x-kubernetes-group-version-kind,*- group: "" - kind: PodList - version: v1 - -½ -Wio.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionStatusá"RCustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition² -objectÊþ -Ü -storedVersionsÉ"¬storedVersions lists all versions of CustomResources that were ever persisted. Tracking these versions allows a migration path for stored versions in etcd. The field is mutable so a migration controller can finish a migration to another version (ensuring no old objects are left in storage), and then remove the rest of the versions from this list. Versions may not be removed from `spec.versions` while they exist in this list.² -arrayº - ² -string -ô - acceptedNamesâ -d#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionNames"zacceptedNames are the names that are actually being used to serve discovery. They may be different than the names in spec. -¥ - -conditions–"Nconditions indicate state for particular aspects of a CustomResourceDefinition² -arrayºl -j -h#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionConditionú' -x-kubernetes-list-map-keys - type -ú -x-kubernetes-list-typemap - -å -2io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails® "éStatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.² -objectʳ -¥ -uid"UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids² -string -à -causesÕ"ƒThe Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.² -arrayºB -@ ->#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause -b -groupY"LThe group attribute of the resource associated with the status StatusReason.² -string - -kind„"öThe kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -— -nameŽ"€The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).² -string -— -retryAfterSecondsint32"ëIf specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.² -integer -Æ -#io.k8s.api.core.v1.SecretProjectionž"Adapts a secret into a projected volume. - -The contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.² -objectÊÿ -¤ -itemsš"ÜIf unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.² -arrayº. -, -*#/definitions/io.k8s.api.core.v1.KeyToPath -„ -name|"oName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names² -string -O -optionalC"5Specify whether the Secret or its key must be defined² -boolean -´ -io.k8s.api.rbac.v1.RoleBinding‘ "·RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.šroleRef² -objectÊÑ -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -h -metadata\ -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"Standard object's metadata. -à -roleRefÔ -(#/definitions/io.k8s.api.rbac.v1.RoleRef"§RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. -„ -subjectsx"=Subjects holds references to the objects the role applies to.² -arrayº, -* -(#/definitions/io.k8s.api.rbac.v1.Subjectúj -x-kubernetes-group-version-kindGE- group: rbac.authorization.k8s.io - kind: RoleBinding - version: v1 - -È -Sio.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrBoolq"oJSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. Defaults to true for the boolean property. -Ì -2io.k8s.api.authorization.v1.SelfSubjectRulesReview•"ˆSelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. 
The returned list of actions may be incomplete depending on the server's authorization mode, and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.šspec² -objectÊ -K -metadata? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta -Š -spec -D#/definitions/io.k8s.api.authorization.v1.SelfSubjectRulesReviewSpec"9Spec holds information about the request being evaluated. -§ -statusœ -B#/definitions/io.k8s.api.authorization.v1.SubjectRulesReviewStatus"VStatus is filled in by the server and indicates the set of actions a user can perform. -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -stringúp -x-kubernetes-group-version-kindMK- group: authorization.k8s.io - kind: SelfSubjectRulesReview - version: v1 - -Þ - io.k8s.api.core.v1.ContainerPort¹">ContainerPort represents a network port in a single container.š containerPort² -objectÊÚ -ˆ - containerPortwint32"bNumber of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.² -integer -A -hostIP7"*What host IP to bind the external port to.² -string -å -hostPortØint32"ÂNumber of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.² -integer -Æ -name½"¯If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.² -string -Y -protocolM"@Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP".² -string -‚ -'io.k8s.api.core.v1.ResourceRequirementsÖ"AResourceRequirements describes the compute resource requirements.² -objectÊ„ -ö -limitsë"›Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ª? -= -;#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity² -object -ˆ -requestsû"«Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ª? -= -;#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity² -object -Ê -#io.k8s.api.apps.v1.ReplicaSetStatus¢"?ReplicaSetStatus represents the current status of a ReplicaSet.šreplicas² -objectÊÇ -Ø -replicasËint32"µReplicas is the most recently oberved number of replicas. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller² -integer -… -availableReplicaspint32"[The number of available replicas (ready for at least minReadySeconds) for this replica set.² -integer -ø - -conditionsé"NRepresents the latest available observations of a replica set's current state.² -arrayº8 -6 -4#/definitions/io.k8s.api.apps.v1.ReplicaSetConditionú' -x-kubernetes-patch-merge-keytype -ú' -x-kubernetes-patch-strategymerge - -‹ -fullyLabeledReplicassint32"^The number of pods that have labels matching the labels of the pod template of the replicaset.² -integer - -observedGenerationiint64"TObservedGeneration reflects the generation of the most recently observed ReplicaSet.² -integer -X - readyReplicasGint32"2The number of ready replicas for this replica set.² -integer -Œ -&io.k8s.api.storage.v1.VolumeAttachmentá "—VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node. - -VolumeAttachment objects are non-namespaced.šspec² -objectÊÊ -Ð -statusÅ -:#/definitions/io.k8s.api.storage.v1.VolumeAttachmentStatus"†Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher. -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ð -metadataà -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata -¤ -spec› -8#/definitions/io.k8s.api.storage.v1.VolumeAttachmentSpec"_Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.úd -x-kubernetes-group-version-kindA?- version: v1 - group: storage.k8s.io - kind: VolumeAttachment - -ð -"io.k8s.api.core.v1.PodAffinityTermÉ "ßDefines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is runningš topologyKey² -objectÊÊ - -ö - topologyKeyæ"ØThis pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.² -string -Ž - labelSelector} -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"9A label query over a set of resources, in this case pods. -õ -namespaceSelectorß -@#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"šA label query over the set of namespaces that the term applies to. 
The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled. -Å - -namespaces¶"™namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace"² -arrayº - ² -string -Å -\io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceSubresourceScaleä "^CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources.šspecReplicasPathšstatusReplicasPath² -objectÊÍ -á -labelSelectorPathË"½labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.status` or `.spec`. Must be set to work with HorizontalPodAutoscaler. The field pointed by this JSON path must be a string field (not a complex selector struct) which contains a serialized label selector in string form. More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` subresource will default to the empty string.² -string -ã -specReplicasPathÎ"ÀspecReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.spec`. If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET.² -string -€ -statusReplicasPathé"ÛstatusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.status`. If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource will default to 0.² -string -œ -io.k8s.api.core.v1.EnvVarSourceø"']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. -¤ -resourceFieldRef -6#/definitions/io.k8s.api.core.v1.ResourceFieldSelector"ÔSelects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. -v - secretKeyReff -2#/definitions/io.k8s.api.core.v1.SecretKeySelector"0Selects a key of a secret in the pod's namespace -i -configMapKeyRefV -5#/definitions/io.k8s.api.core.v1.ConfigMapKeySelector"Selects a key of a ConfigMap. -½ --io.k8s.api.networking.v1beta1.HTTPIngressPath‹ "oHTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.šbackend² -objectÊ -¤ -backend˜ -:#/definitions/io.k8s.api.networking.v1beta1.IngressBackend"ZBackend defines the referenced service endpoint to which the traffic will be forwarded to. -ž -path•"‡Path is matched against the path of an incoming request. 
Currently it can contain characters disallowed from the conventional "path" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.² -string -¶ -pathType©"›PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is - done on a path element by element basis. A path element refers is the - list of labels in the path split by the '/' separator. A request is a - match for path p if every p is an element-wise prefix of p of the - request path. Note that if the last element of the path is a substring - of the last element in request path, it is not a match (e.g. /foo/bar - matches /foo/bar/baz, but does not match /foo/barbaz). -* ImplementationSpecific: Interpretation of the Path matching is up to - the IngressClass. Implementations can treat this as a separate PathType - or treat it identically to Prefix or Exact path types. -Implementations are required to support all path types. Defaults to ImplementationSpecific.² -string -Ø -)io.k8s.api.rbac.v1.ClusterRoleBindingListª"=ClusterRoleBindingList is a collection of ClusterRoleBindingsšitems² -objectÊÜ -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -u -itemsl"&Items is a list of ClusterRoleBindings² -arrayº7 -5 -3#/definitions/io.k8s.api.rbac.v1.ClusterRoleBinding -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -f -metadataZ -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"Standard object's metadata.úu -x-kubernetes-group-version-kindRP- group: rbac.authorization.k8s.io - kind: ClusterRoleBindingList - version: v1 - -ý - -io.k8s.api.rbac.v1.PolicyRuleÛ -"¡PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.šverbs² -objectÊ  -¨ - resourceNames–"zResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.² -arrayº - ² -string -† - resourcesy"]Resources is a list of resources this rule applies to. ResourceAll represents all resources.² -arrayº - ² -string -´ -verbsª"Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.² -arrayº - ² -string -ó - apiGroupså"ÈAPIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.² -arrayº - ² -string -¼ -nonResourceURLs¨"‹NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. 
Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both.² -arrayº - ² -string -Æ -)io.k8s.api.authentication.v1.TokenRequest˜":TokenRequest requests a token for a given service account.šspec² -objectÊÜ -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -K -metadata? -=#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta -E -spec= -;#/definitions/io.k8s.api.authentication.v1.TokenRequestSpec -I -status? -=#/definitions/io.k8s.api.authentication.v1.TokenRequestStatusúg -x-kubernetes-group-version-kindDB- kind: TokenRequest - version: v1 - group: authentication.k8s.io - -É -.io.k8s.api.authentication.v1.TokenReviewStatus–"DTokenReviewStatus is the result of the token authentication request.² -objectÊÁ -w -usero -3#/definitions/io.k8s.api.authentication.v1.UserInfo"8User is the UserInfo associated with the provided token. -’ - audiences„"çAudiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audience field where status.authenticated is "true", the token is valid against the audience of the Kubernetes API server.² -arrayº - ² -string -g - authenticatedV"HAuthenticated indicates that the token was associated with a known user.² -boolean -H -error?"2Error indicates that the token couldn't be checked² -string -Î -7io.k8s.api.authorization.v1.SelfSubjectAccessReviewSpec’"¦SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set² -objectÊÚ -ª -nonResourceAttributes -?#/definitions/io.k8s.api.authorization.v1.NonResourceAttributes"MNonResourceAttributes describes information for a non-resource access request -ª -resourceAttributes“ -<#/definitions/io.k8s.api.authorization.v1.ResourceAttributes"SResourceAuthorizationAttributes describes information for a resource access request -Ä -"io.k8s.api.core.v1.ObjectReference "]ObjectReference contains enough information to let you inspect or modify the referred object.² -objectʯ -æ -resourceVersionÒ"ÄSpecific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency² -string - -uidz"mUID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids² -string -7 - -apiVersion)"API version of the referent.² -string -Ô - fieldPathÆ"¸If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.² -string -— -kindŽ"€Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -„ -name|"oName of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names² -string -Ž - namespace€"sNamespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/² -string -‚ - io.k8s.api.rbac.v1beta1.RoleListÝ"RoleList is a collection of Roles Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.22.šitems² -objectÊÅ -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -^ -itemsU"Items is a list of Roles² -arrayº. -, -*#/definitions/io.k8s.api.rbac.v1beta1.Role -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -f -metadataZ -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"Standard object's metadata.úl -x-kubernetes-group-version-kindIG- group: rbac.authorization.k8s.io - kind: RoleList - version: v1beta1 - -¤ - -Iio.k8s.api.admissionregistration.v1beta1.MutatingWebhookConfigurationListÖ "KMutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.šitems² -objectÊç -¾ - -apiVersion¯"¡APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources² -string -• -items‹"%List of MutatingWebhookConfiguration.² -arrayºW -U -S#/definitions/io.k8s.api.admissionregistration.v1beta1.MutatingWebhookConfiguration -¹ -kind°"¢Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds² -string -Ï -metadata -;#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"‚Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kindsú‡ -x-kubernetes-group-version-kinddb- group: admissionregistration.k8s.io - kind: MutatingWebhookConfigurationList - version: v1beta1 - -¬ -;io.k8s.api.admissionregistration.v1beta1.RuleWithOperationsì "‚RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.² -objectÊØ -¼ - apiVersions¬"APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.² -arrayº - ² -string -  - -operations‘"ôOperations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required.² -arrayº - ² -string -– - resourcesˆ"ëResources is a list of resources this rule applies to. - -For example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources. - -If wildcard is present, the validation rule will ensure resources do not overlap with each other. - -Depending on the enclosing object, subresources might not be allowed. Required.² -arrayº - ² -string -£ -scope™"‹scope specifies the scope of this rule. Valid values are "Cluster", "Namespaced", and "*" "Cluster" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. "Namespaced" means that only namespaced resources will match this rule. "*" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is "*".² -string -´ - apiGroups¦"‰APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.² -arrayº - ² -string -ã -Bio.k8s.api.certificates.v1beta1.CertificateSigningRequestConditionœštype² -objectʆ -| -lastUpdateTimej -7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"/timestamp for the last update to this condition -S -messageH";human readable message with details about the request state² -string -9 -reason/""brief reason for the request state² -string -Ò -statusÇ"¹Status of the condition, one of True, False, Unknown. Approved, Denied, and Failed conditions may not be "False" or "Unknown". Defaults to "True". If unset, should be treated as "True".² -string -h -type`"Stype of the condition. Known conditions include "Approved", "Denied", and "Failed".² -string -¶ -lastTransitionTimeŸ -7#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time"ãlastTransitionTime is the time the condition last transitioned from one status to another. If unset, when a new condition type is added or an existing condition's status is changed, the server defaults this to the current time. -Ò -4io.k8s.api.core.v1.ScopedResourceSelectorRequirement™"A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator that relates the scope name and values.š scopeNamešoperator² -objectÊà -ƒ -operatorw"jRepresents a scope's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist.² -string -M - scopeName@"3The name of the scope that the selector applies to.² -string -ˆ -valuesý"àAn array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.² -arrayº - ² -string -í -Nio.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.ServiceReferenceš";ServiceReference holds a reference to Service.legacy.k8s.ioš namespacešname² -objectÊ» -> -name6")name is the name of the service. Required² -string -M - namespace@"3namespace is the namespace of the service. Required² -string -Y -pathQ"Dpath is an optional URL path at which the webhook will be contacted.² -string -Î -portÅint32"¯port is an optional service port at which the webhook will be contacted. `port` should be a valid port number (1-65535, inclusive). Defaults to 443 for backward compatibility.² -integerb - - BearerTokenjO -M - BearerToken>< -apiKey authorizationheader"Bearer Token authentication \ No newline at end of file diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi/swagger.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi/swagger.go deleted file mode 100644 index 4f4ec3e4a..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi/swagger.go +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Code generated for package kustomizationapi by go-bindata DO NOT EDIT. (@generated) -// sources: -// kustomizationapi/swagger.json -package kustomizationapi - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" -) - -func bindataRead(data []byte, name string) ([]byte, error) { - gz, err := gzip.NewReader(bytes.NewBuffer(data)) - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - - var buf bytes.Buffer - _, err = io.Copy(&buf, gz) - clErr := gz.Close() - - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - if clErr != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -type asset struct { - bytes []byte - info os.FileInfo -} - -type bindataFileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time -} - -// Name return file name -func (fi bindataFileInfo) Name() string { - return fi.name -} - -// Size return file size -func (fi bindataFileInfo) Size() int64 { - return fi.size -} - -// Mode return file mode -func (fi bindataFileInfo) Mode() os.FileMode { - return fi.mode -} - -// Mode return file modify time -func (fi bindataFileInfo) ModTime() time.Time { - return fi.modTime -} - -// IsDir return file whether a directory -func (fi bindataFileInfo) IsDir() bool { - return fi.mode&os.ModeDir != 0 -} - -// Sys return file is sys mode -func (fi bindataFileInfo) Sys() interface{} { - return nil -} - -var _kustomizationapiSwaggerJson = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xe4\x56\xc1\x6e\xdb\x30\x0c\xbd\xe7\x2b\x04\x6d\xc7\xd8\x45\x6e\x43\x6e\xc3\x0e\x3b\x14\x05\x0a\x74\xb7\xa1\x07\xc6\xa1\x5d\xce\x8e\xa4\x51\xb4\xb1\x6c\xc8\xbf\x0f\xd6\x62\xd7\x4a\xec\x75\x0b\x1a\xac\x4b\x0f\x06\x0c\x99\x7c\x4f\xe4\x7b\x24\xfc\x63\xa6\x94\x5e\x63\x4e\x86\x84\xac\xf1\x7a\xa9\xda\x23\xa5\x34\xd9\xb4\x7c\xe7\x53\x70\x94\x82\x73\x3e\x6d\x16\xe9\x07\x6b\x72\x2a\x6e\xc0\xbd\xe7\xe2\x31\x52\x29\xed\xd8\x3a\x64\x21\x1c\x9e\x2a\xa5\x3f\xa2\x41\x06\xb1\x7c\x90\x10\x3e\xbe\x65\xcc\xf5\x52\xe9\x37\x57\x03\xfe\xab\x11\xda\x18\xa5\x87\xd8\xed\xdf\x76\xf3\xee\x1a\xb0\x5e\x07\x14\xa8\x6e\x87\x17\xca\xa1\xf2\xd8\x07\xc9\xd6\x61\x4b\x6b\x57\x5f\x30\x13\xdd\x9f\x7f\x4b\xca\x7a\x85\x6c\x50\xd0\x27\x05\xdb\xda\x25\x0d\xb2\x27\x6b\x92\x92\xcc\x5a\x2f\xd5\xe7\x9e\x3a\xaa\x23\xc4\xb6\x88\x65\xed\xc5\x6e\xe8\x3b\xa6\x59\x68\x54\x28\x84\x6c\x4f\x11\xa2\xf7\x58\x3a\xee\x65\x14\xb2\xa7\x6d\xa3\x9a\xc5\x0a\x05\x16\xc7\x45\xdf\xcf\x06\xa5\x8f\x69\x75\x87\x19\xa3\xbc\x0c\xa1\x1e\xab\xeb\xba\x1f\xe1\x77\x8a\x78\x61\x32\xc5\xa5\x08\x3c\x10\xe0\xf9\xd5\x9d\xd2\x6b\x52\x60\x03\x1b\xf4\x0e\xb2\x3f\x6f\xfe\x3c\x4e\x3e\x25\x6f\x85\x0f\xd0\x90\xe5\x53\x72\xaf\x9b\x5b\x20\xbe\xb3\x35\x67\x78\xba\x23\x63\x94\x0b\x71\x56\x2c\xfe\xf3\x9b\xeb\x7a\x7f\x19\x90\x5f\x50\xbd\xb9\x18\xbf\xd6\xc4\x18\x17\xa4\x3f\x6d\x1d\xde\xa0\x40\xc7\x74\x3f\x7f\xca\x8c\x59\xb7\xfb\xfa\x4a\x0e\x05\x26\xc1\xcd\xa1\xea\x7f\xa3\x7b\xbc\x5d\x07\x20\xbb\xf9\x98\x11\x81\x19\xb6\x71\x27\x23\x4d\x1d\x48\xf6\x90\x6c\x90\x0b\x4c\x4a\xdc\xb6\x29\x61\x26\x9e\xca\xf0\xc2\x20\x58\x84\x84\x90\x3d\xee\x75\x1f\x56\xc5\xd9\x9a\x31\xd8\x44\x2f\xb2\x13\xff\xf5\x30\xc6\xc3\x72\x86\x61\x9c\xd8\x83\x93\xc3\x55\x91\x20\x43\x75\xb4\x33\x27\x5c\x34\xb5\x8b\x7f\x6f\x90\x51\x1b\xe7\x54\x1d\xaf\xea\xf3\xd3\xa2\x69\xfe\x0d\xeb\xeb\xf8\x8f\x89\x0d\x78\xaa\xc1\x67\xed\xb3\xfb\x19\x00\x00\xff\xff\x2f\x39\x79\xd0\x6e\x0c\x00\x00") - -func kustomizationapiSwaggerJsonBytes() ([]byte, error) { - return bindataRead( - _kustomizationapiSwaggerJson, - "kustomizationapi/swagger.json", - ) -} - -func kustomizationapiSwaggerJson() (*asset, error) { - bytes, err := kustomizationapiSwaggerJsonBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "kustomizationapi/swagger.json", size: 3182, mode: os.FileMode(420), modTime: time.Unix(1615228558, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -// Asset loads and returns the asset for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func Asset(name string) ([]byte, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) - } - return a.bytes, nil - } - return nil, fmt.Errorf("Asset %s not found", name) -} - -// MustAsset is like Asset but panics when Asset would return an error. -// It simplifies safe initialization of global variables. -func MustAsset(name string) []byte { - a, err := Asset(name) - if err != nil { - panic("asset: Asset(" + name + "): " + err.Error()) - } - - return a -} - -// AssetInfo loads and returns the asset info for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. 
-func AssetInfo(name string) (os.FileInfo, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) - } - return a.info, nil - } - return nil, fmt.Errorf("AssetInfo %s not found", name) -} - -// AssetNames returns the names of the assets. -func AssetNames() []string { - names := make([]string, 0, len(_bindata)) - for name := range _bindata { - names = append(names, name) - } - return names -} - -// _bindata is a table, holding each asset generator, mapped to its name. -var _bindata = map[string]func() (*asset, error){ - "kustomizationapi/swagger.json": kustomizationapiSwaggerJson, -} - -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"} -// AssetDir("data/img") would return []string{"a.png", "b.png"} -// AssetDir("foo.txt") and AssetDir("notexist") would return an error -// AssetDir("") will return []string{"data"}. -func AssetDir(name string) ([]string, error) { - node := _bintree - if len(name) != 0 { - cannonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(cannonicalName, "/") - for _, p := range pathList { - node = node.Children[p] - if node == nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - } - } - if node.Func != nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - rv := make([]string, 0, len(node.Children)) - for childName := range node.Children { - rv = append(rv, childName) - } - return rv, nil -} - -type bintree struct { - Func func() (*asset, error) - Children map[string]*bintree -} - -var _bintree = &bintree{nil, map[string]*bintree{ - "kustomizationapi": &bintree{nil, map[string]*bintree{ - "swagger.json": &bintree{kustomizationapiSwaggerJson, map[string]*bintree{}}, - }}, -}} - -// RestoreAsset restores an asset under the given directory -func RestoreAsset(dir, name string) error { - data, err := Asset(name) - if err != nil { - return err - } - info, err := AssetInfo(name) - if err != nil { - return err - } - err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) - if err != nil { - return err - } - err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) - if err != nil { - return err - } - err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) - if err != nil { - return err - } - return nil -} - -// RestoreAssets restores an asset under the given directory recursively -func RestoreAssets(dir, name string) error { - children, err := AssetDir(name) - // File - if err != nil { - return RestoreAsset(dir, name) - } - // Dir - for _, child := range children { - err = RestoreAssets(dir, filepath.Join(name, child)) - if err != nil { - return err - } - } - return nil -} - -func _filePath(dir, name string) string { - cannonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
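For context, the go-bindata accessors deleted above expose embedded files through Asset, MustAsset and AssetDir. A minimal sketch of how that generated API is typically consumed, assuming the vendored sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi package as it existed before this patch (the sketch itself is not part of the patch):

package main

import (
	"fmt"
	"log"

	"sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi"
)

func main() {
	// Asset returns the embedded bytes for a canonical asset name.
	data, err := kustomizationapi.Asset("kustomizationapi/swagger.json")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("swagger.json: %d bytes\n", len(data))

	// AssetDir lists the entries directly under an embedded "directory".
	names, err := kustomizationapi.AssetDir("kustomizationapi")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(names) // [swagger.json]
}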
-} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi/swagger.json b/src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi/swagger.json deleted file mode 100644 index 7441a5ee6..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi/swagger.json +++ /dev/null @@ -1,130 +0,0 @@ -{ - "definitions": { - "io.k8s.api.apps.v1.ConfigMapArgs": { - "properties": { - "GeneratorArgs": { - "$ref": "#/definitions/io.k8s.api.apps.v1.GeneratorArgs" - } - }, - "additionalProperties": false, - "type": "object", - "x-kubernetes-group-version-kind": [ - { - "group": "kustomize.config.k8s.io", - "kind": "ConfigMapArgs", - "version": "v1beta1" - } - ] - }, - "io.k8s.api.apps.v1.SecretArgs": { - "properties": { - "GeneratorArgs": { - "$ref": "#/definitions/io.k8s.api.apps.v1.GeneratorArgs" - }, - "type": { - "type": "string" - } - }, - "additionalProperties": false, - "type": "object", - "x-kubernetes-group-version-kind": [ - { - "group": "kustomize.config.k8s.io", - "kind": "SecretArgs", - "version": "v1beta1" - } - ] - }, - "io.k8s.api.apps.v1.GeneratorArgs": { - "properties": { - "namespace": { - "type": "string" - }, - "name": { - "type": "string" - }, - "behavior": { - "type": "string" - }, - "KvPairSources": { - "$ref": "#/definitions/io.k8s.api.apps.v1.KvPairSources" - } - }, - "additionalProperties": false, - "type": "object", - "x-kubernetes-group-version-kind": [ - { - "group": "kustomize.config.k8s.io", - "kind": "GeneratorArgs", - "version": "v1beta1" - } - ] - }, - "io.k8s.api.apps.v1.Kustomization": { - "required": [ - "TypeMeta" - ], - "properties": { - "configMapGenerator": { - "items": { - "$ref": "#/definitions/io.k8s.api.apps.v1.ConfigMapArgs" - }, - "type": "array", - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge" - }, - "secretGenerator": { - "items": { - "$ref": "#/definitions/io.k8s.api.apps.v1.SecretArgs" - }, - "type": "array", - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge" - } - }, - "additionalProperties": false, - "type": "object", - "x-kubernetes-group-version-kind": [ - { - "group": "kustomize.config.k8s.io", - "kind": "Kustomization", - "version": "v1beta1" - } - ] - }, - "io.k8s.api.apps.v1.KvPairSources": { - "properties": { - "literals": { - "items": { - "type": "string" - }, - "type": "array" - }, - "files": { - "items": { - "type": "string" - }, - "type": "array" - }, - "envs": { - "items": { - "type": "string" - }, - "type": "array" - }, - "env": { - "type": "string" - } - }, - "additionalProperties": false, - "type": "object", - "x-kubernetes-group-version-kind": [ - { - "group": "kustomize.config.k8s.io", - "kind": "KvPairSources", - "version": "v1beta1" - } - ] - } - } -} \ No newline at end of file diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/openapi.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/openapi.go deleted file mode 100644 index 475905b19..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/openapi/openapi.go +++ /dev/null @@ -1,749 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package openapi - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "path/filepath" - "reflect" - "strings" - - openapi_v2 "github.com/google/gnostic/openapiv2" - "google.golang.org/protobuf/proto" - "k8s.io/kube-openapi/pkg/validation/spec" - "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/openapi/kubernetesapi" - "sigs.k8s.io/kustomize/kyaml/openapi/kustomizationapi" - "sigs.k8s.io/kustomize/kyaml/yaml" - k8syaml "sigs.k8s.io/yaml" -) - -// globalSchema contains global state information about the openapi -var globalSchema openapiData - -// kubernetesOpenAPIVersion specifies which builtin kubernetes schema to use -var kubernetesOpenAPIVersion string - -// customSchemaFile stores the custom OpenApi schema if it is provided -var customSchema []byte - -// openapiData contains the parsed openapi state. this is in a struct rather than -// a list of vars so that it can be reset from tests. -type openapiData struct { - // schema holds the OpenAPI schema data - schema spec.Schema - - // schemaForResourceType is a map of Resource types to their schemas - schemaByResourceType map[yaml.TypeMeta]*spec.Schema - - // namespaceabilityByResourceType stores whether a given Resource type - // is namespaceable or not - namespaceabilityByResourceType map[yaml.TypeMeta]bool - - // noUseBuiltInSchema stores whether we want to prevent using the built-n - // Kubernetes schema as part of the global schema - noUseBuiltInSchema bool - - // schemaInit stores whether or not we've parsed the schema already, - // so that we only reparse the when necessary (to speed up performance) - schemaInit bool -} - -type format string - -const ( - JsonOrYaml format = "jsonOrYaml" - Proto format = "proto" -) - -// precomputedIsNamespaceScoped precomputes IsNamespaceScoped for known types. This avoids Schema creation, -// which is expensive -// The test output from TestIsNamespaceScopedPrecompute shows the expected map in go syntax,and can be copy and pasted -// from the failure if it changes. 
-var precomputedIsNamespaceScoped = map[yaml.TypeMeta]bool{ - {APIVersion: "admissionregistration.k8s.io/v1", Kind: "MutatingWebhookConfiguration"}: false, - {APIVersion: "admissionregistration.k8s.io/v1", Kind: "ValidatingWebhookConfiguration"}: false, - {APIVersion: "admissionregistration.k8s.io/v1beta1", Kind: "MutatingWebhookConfiguration"}: false, - {APIVersion: "admissionregistration.k8s.io/v1beta1", Kind: "ValidatingWebhookConfiguration"}: false, - {APIVersion: "apiextensions.k8s.io/v1", Kind: "CustomResourceDefinition"}: false, - {APIVersion: "apiextensions.k8s.io/v1beta1", Kind: "CustomResourceDefinition"}: false, - {APIVersion: "apiregistration.k8s.io/v1", Kind: "APIService"}: false, - {APIVersion: "apiregistration.k8s.io/v1beta1", Kind: "APIService"}: false, - {APIVersion: "apps/v1", Kind: "ControllerRevision"}: true, - {APIVersion: "apps/v1", Kind: "DaemonSet"}: true, - {APIVersion: "apps/v1", Kind: "Deployment"}: true, - {APIVersion: "apps/v1", Kind: "ReplicaSet"}: true, - {APIVersion: "apps/v1", Kind: "StatefulSet"}: true, - {APIVersion: "autoscaling/v1", Kind: "HorizontalPodAutoscaler"}: true, - {APIVersion: "autoscaling/v1", Kind: "Scale"}: true, - {APIVersion: "autoscaling/v2beta1", Kind: "HorizontalPodAutoscaler"}: true, - {APIVersion: "autoscaling/v2beta2", Kind: "HorizontalPodAutoscaler"}: true, - {APIVersion: "batch/v1", Kind: "CronJob"}: true, - {APIVersion: "batch/v1", Kind: "Job"}: true, - {APIVersion: "batch/v1beta1", Kind: "CronJob"}: true, - {APIVersion: "certificates.k8s.io/v1", Kind: "CertificateSigningRequest"}: false, - {APIVersion: "certificates.k8s.io/v1beta1", Kind: "CertificateSigningRequest"}: false, - {APIVersion: "coordination.k8s.io/v1", Kind: "Lease"}: true, - {APIVersion: "coordination.k8s.io/v1beta1", Kind: "Lease"}: true, - {APIVersion: "discovery.k8s.io/v1", Kind: "EndpointSlice"}: true, - {APIVersion: "discovery.k8s.io/v1beta1", Kind: "EndpointSlice"}: true, - {APIVersion: "events.k8s.io/v1", Kind: "Event"}: true, - {APIVersion: "events.k8s.io/v1beta1", Kind: "Event"}: true, - {APIVersion: "extensions/v1beta1", Kind: "Ingress"}: true, - {APIVersion: "flowcontrol.apiserver.k8s.io/v1beta1", Kind: "FlowSchema"}: false, - {APIVersion: "flowcontrol.apiserver.k8s.io/v1beta1", Kind: "PriorityLevelConfiguration"}: false, - {APIVersion: "networking.k8s.io/v1", Kind: "Ingress"}: true, - {APIVersion: "networking.k8s.io/v1", Kind: "IngressClass"}: false, - {APIVersion: "networking.k8s.io/v1", Kind: "NetworkPolicy"}: true, - {APIVersion: "networking.k8s.io/v1beta1", Kind: "Ingress"}: true, - {APIVersion: "networking.k8s.io/v1beta1", Kind: "IngressClass"}: false, - {APIVersion: "node.k8s.io/v1", Kind: "RuntimeClass"}: false, - {APIVersion: "node.k8s.io/v1beta1", Kind: "RuntimeClass"}: false, - {APIVersion: "policy/v1", Kind: "PodDisruptionBudget"}: true, - {APIVersion: "policy/v1beta1", Kind: "PodDisruptionBudget"}: true, - {APIVersion: "policy/v1beta1", Kind: "PodSecurityPolicy"}: false, - {APIVersion: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole"}: false, - {APIVersion: "rbac.authorization.k8s.io/v1", Kind: "ClusterRoleBinding"}: false, - {APIVersion: "rbac.authorization.k8s.io/v1", Kind: "Role"}: true, - {APIVersion: "rbac.authorization.k8s.io/v1", Kind: "RoleBinding"}: true, - {APIVersion: "rbac.authorization.k8s.io/v1beta1", Kind: "ClusterRole"}: false, - {APIVersion: "rbac.authorization.k8s.io/v1beta1", Kind: "ClusterRoleBinding"}: false, - {APIVersion: "rbac.authorization.k8s.io/v1beta1", Kind: "Role"}: true, - {APIVersion: 
"rbac.authorization.k8s.io/v1beta1", Kind: "RoleBinding"}: true, - {APIVersion: "scheduling.k8s.io/v1", Kind: "PriorityClass"}: false, - {APIVersion: "scheduling.k8s.io/v1beta1", Kind: "PriorityClass"}: false, - {APIVersion: "storage.k8s.io/v1", Kind: "CSIDriver"}: false, - {APIVersion: "storage.k8s.io/v1", Kind: "CSINode"}: false, - {APIVersion: "storage.k8s.io/v1", Kind: "StorageClass"}: false, - {APIVersion: "storage.k8s.io/v1", Kind: "VolumeAttachment"}: false, - {APIVersion: "storage.k8s.io/v1beta1", Kind: "CSIDriver"}: false, - {APIVersion: "storage.k8s.io/v1beta1", Kind: "CSINode"}: false, - {APIVersion: "storage.k8s.io/v1beta1", Kind: "CSIStorageCapacity"}: true, - {APIVersion: "storage.k8s.io/v1beta1", Kind: "StorageClass"}: false, - {APIVersion: "storage.k8s.io/v1beta1", Kind: "VolumeAttachment"}: false, - {APIVersion: "v1", Kind: "ComponentStatus"}: false, - {APIVersion: "v1", Kind: "ConfigMap"}: true, - {APIVersion: "v1", Kind: "Endpoints"}: true, - {APIVersion: "v1", Kind: "Event"}: true, - {APIVersion: "v1", Kind: "LimitRange"}: true, - {APIVersion: "v1", Kind: "Namespace"}: false, - {APIVersion: "v1", Kind: "Node"}: false, - {APIVersion: "v1", Kind: "NodeProxyOptions"}: false, - {APIVersion: "v1", Kind: "PersistentVolume"}: false, - {APIVersion: "v1", Kind: "PersistentVolumeClaim"}: true, - {APIVersion: "v1", Kind: "Pod"}: true, - {APIVersion: "v1", Kind: "PodAttachOptions"}: true, - {APIVersion: "v1", Kind: "PodExecOptions"}: true, - {APIVersion: "v1", Kind: "PodPortForwardOptions"}: true, - {APIVersion: "v1", Kind: "PodProxyOptions"}: true, - {APIVersion: "v1", Kind: "PodTemplate"}: true, - {APIVersion: "v1", Kind: "ReplicationController"}: true, - {APIVersion: "v1", Kind: "ResourceQuota"}: true, - {APIVersion: "v1", Kind: "Secret"}: true, - {APIVersion: "v1", Kind: "Service"}: true, - {APIVersion: "v1", Kind: "ServiceAccount"}: true, - {APIVersion: "v1", Kind: "ServiceProxyOptions"}: true, -} - -// ResourceSchema wraps the OpenAPI Schema. -type ResourceSchema struct { - // Schema is the OpenAPI schema for a Resource or field - Schema *spec.Schema -} - -// IsEmpty returns true if the ResourceSchema is empty -func (rs *ResourceSchema) IsMissingOrNull() bool { - if rs == nil || rs.Schema == nil { - return true - } - return reflect.DeepEqual(*rs.Schema, spec.Schema{}) -} - -// SchemaForResourceType returns the Schema for the given Resource -// TODO(pwittrock): create a version of this function that will return a schema -// which can be used for duck-typed Resources -- e.g. contains common fields such -// as metadata, replicas and spec.template.spec -func SchemaForResourceType(t yaml.TypeMeta) *ResourceSchema { - initSchema() - rs, found := globalSchema.schemaByResourceType[t] - if !found { - return nil - } - return &ResourceSchema{Schema: rs} -} - -// SupplementaryOpenAPIFieldName is the conventional field name (JSON/YAML) containing -// supplementary OpenAPI definitions. 
-const SupplementaryOpenAPIFieldName = "openAPI" - -const Definitions = "definitions" - -// AddSchemaFromFile reads the file at path and parses the OpenAPI definitions -// from the field "openAPI", also returns a function to clean the added definitions -// The returned clean function is a no-op on error, or else it's a function -// that the caller should use to remove the added openAPI definitions from -// global schema -func SchemaFromFile(path string) (*spec.Schema, error) { - object, err := parseOpenAPI(path) - if err != nil { - return nil, err - } - - return schemaUsingField(object, SupplementaryOpenAPIFieldName) -} - -// DefinitionRefs returns the list of openAPI definition references present in the -// input openAPIPath -func DefinitionRefs(openAPIPath string) ([]string, error) { - object, err := parseOpenAPI(openAPIPath) - if err != nil { - return nil, err - } - return definitionRefsFromRNode(object) -} - -// definitionRefsFromRNode returns the list of openAPI definitions keys from input -// yaml RNode -func definitionRefsFromRNode(object *yaml.RNode) ([]string, error) { - definitions, err := object.Pipe(yaml.Lookup(SupplementaryOpenAPIFieldName, Definitions)) - if definitions == nil { - return nil, err - } - if err != nil { - return nil, err - } - return definitions.Fields() -} - -// parseOpenAPI reads openAPIPath yaml and converts it to RNode -func parseOpenAPI(openAPIPath string) (*yaml.RNode, error) { - b, err := ioutil.ReadFile(openAPIPath) - if err != nil { - return nil, err - } - - object, err := yaml.Parse(string(b)) - if err != nil { - return nil, errors.Errorf("invalid file %q: %v", openAPIPath, err) - } - return object, nil -} - -// addSchemaUsingField parses the OpenAPI definitions from the specified field. -// If field is the empty string, use the whole document as OpenAPI. -func schemaUsingField(object *yaml.RNode, field string) (*spec.Schema, error) { - if field != "" { - // get the field containing the openAPI - m := object.Field(field) - if m.IsNilOrEmpty() { - // doesn't contain openAPI definitions - return nil, nil - } - object = m.Value - } - - oAPI, err := object.String() - if err != nil { - return nil, err - } - - // convert the yaml openAPI to a JSON string by unmarshalling it to an - // interface{} and the marshalling it to a string - var o interface{} - err = yaml.Unmarshal([]byte(oAPI), &o) - if err != nil { - return nil, err - } - j, err := json.Marshal(o) - if err != nil { - return nil, err - } - - var sc spec.Schema - err = sc.UnmarshalJSON(j) - if err != nil { - return nil, err - } - - return &sc, nil -} - -// AddSchema parses s, and adds definitions from s to the global schema. -func AddSchema(s []byte) error { - return parse(s, JsonOrYaml) -} - -// ResetOpenAPI resets the openapi data to empty -func ResetOpenAPI() { - globalSchema = openapiData{} - kubernetesOpenAPIVersion = "" - customSchema = nil -} - -// AddDefinitions adds the definitions to the global schema. 
-func AddDefinitions(definitions spec.Definitions) { - // initialize values if they have not yet been set - if globalSchema.schemaByResourceType == nil { - globalSchema.schemaByResourceType = map[yaml.TypeMeta]*spec.Schema{} - } - if globalSchema.schema.Definitions == nil { - globalSchema.schema.Definitions = spec.Definitions{} - } - - // index the schema definitions so we can lookup them up for Resources - for k := range definitions { - // index by GVK, if no GVK is found then it is the schema for a subfield - // of a Resource - d := definitions[k] - - // copy definitions to the schema - globalSchema.schema.Definitions[k] = d - gvk, found := d.VendorExtensible.Extensions[kubernetesGVKExtensionKey] - if !found { - continue - } - // cast the extension to a []map[string]string - exts, ok := gvk.([]interface{}) - if !ok { - continue - } - - for i := range exts { - typeMeta, ok := toTypeMeta(exts[i]) - if !ok { - continue - } - globalSchema.schemaByResourceType[typeMeta] = &d - } - } -} - -func toTypeMeta(ext interface{}) (yaml.TypeMeta, bool) { - m, ok := ext.(map[string]interface{}) - if !ok { - return yaml.TypeMeta{}, false - } - - apiVersion := m[versionKey].(string) - if g, ok := m[groupKey].(string); ok && g != "" { - apiVersion = g + "/" + apiVersion - } - return yaml.TypeMeta{Kind: m[kindKey].(string), APIVersion: apiVersion}, true -} - -// Resolve resolves the reference against the global schema -func Resolve(ref *spec.Ref, schema *spec.Schema) (*spec.Schema, error) { - return resolve(schema, ref) -} - -// Schema returns the global schema -func Schema() *spec.Schema { - return rootSchema() -} - -// GetSchema parses s into a ResourceSchema, resolving References within the -// global schema. -func GetSchema(s string, schema *spec.Schema) (*ResourceSchema, error) { - var sc spec.Schema - if err := sc.UnmarshalJSON([]byte(s)); err != nil { - return nil, errors.Wrap(err) - } - if sc.Ref.String() != "" { - r, err := Resolve(&sc.Ref, schema) - if err != nil { - return nil, errors.Wrap(err) - } - sc = *r - } - - return &ResourceSchema{Schema: &sc}, nil -} - -// IsNamespaceScoped determines whether a resource is namespace or -// cluster-scoped by looking at the information in the openapi schema. -// The second return value tells whether the provided type could be found -// in the openapi schema. If the value is false here, the scope of the -// resource is not known. If the type is found, the first return value will -// be true if the resource is namespace-scoped, and false if the type is -// cluster-scoped. -func IsNamespaceScoped(typeMeta yaml.TypeMeta) (bool, bool) { - if res, f := precomputedIsNamespaceScoped[typeMeta]; f { - return res, true - } - return isNamespaceScopedFromSchema(typeMeta) -} - -func isNamespaceScopedFromSchema(typeMeta yaml.TypeMeta) (bool, bool) { - initSchema() - isNamespaceScoped, found := globalSchema.namespaceabilityByResourceType[typeMeta] - return isNamespaceScoped, found -} - -// IsCertainlyClusterScoped returns true for Node, Namespace, etc. and -// false for Pod, Deployment, etc. and kinds that aren't recognized in the -// openapi data. See: -// https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces -func IsCertainlyClusterScoped(typeMeta yaml.TypeMeta) bool { - nsScoped, found := IsNamespaceScoped(typeMeta) - return found && !nsScoped -} - -// SuppressBuiltInSchemaUse can be called to prevent using the built-in Kubernetes -// schema as part of the global schema. -// Must be called before the schema is used. 
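The deleted openapi package answers namespace-scoping questions from the precomputed table above before falling back to the parsed schema. A rough usage sketch, assuming the kyaml openapi and yaml packages as vendored before this change:

package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/kyaml/openapi"
	"sigs.k8s.io/kustomize/kyaml/yaml"
)

func main() {
	// Deployment is answered from the precomputed table: namespace-scoped and known.
	nsScoped, known := openapi.IsNamespaceScoped(yaml.TypeMeta{APIVersion: "apps/v1", Kind: "Deployment"})
	fmt.Println(nsScoped, known) // true true

	// Node is cluster-scoped, so IsCertainlyClusterScoped reports true.
	fmt.Println(openapi.IsCertainlyClusterScoped(yaml.TypeMeta{APIVersion: "v1", Kind: "Node"})) // true
}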
-func SuppressBuiltInSchemaUse() { - globalSchema.noUseBuiltInSchema = true -} - -// Elements returns the Schema for the elements of an array. -func (rs *ResourceSchema) Elements() *ResourceSchema { - // load the schema from swagger.json - initSchema() - - if len(rs.Schema.Type) != 1 || rs.Schema.Type[0] != "array" { - // either not an array, or array has multiple types - return nil - } - if rs == nil || rs.Schema == nil || rs.Schema.Items == nil { - // no-scheme for the items - return nil - } - s := *rs.Schema.Items.Schema - for s.Ref.String() != "" { - sc, e := Resolve(&s.Ref, Schema()) - if e != nil { - return nil - } - s = *sc - } - return &ResourceSchema{Schema: &s} -} - -const Elements = "[]" - -// Lookup calls either Field or Elements for each item in the path. -// If the path item is "[]", then Elements is called, otherwise -// Field is called. -// If any Field or Elements call returns nil, then Lookup returns -// nil immediately. -func (rs *ResourceSchema) Lookup(path ...string) *ResourceSchema { - s := rs - for _, p := range path { - if s == nil { - break - } - if p == Elements { - s = s.Elements() - continue - } - s = s.Field(p) - } - return s -} - -// Field returns the Schema for a field. -func (rs *ResourceSchema) Field(field string) *ResourceSchema { - // load the schema from swagger.json - initSchema() - - // locate the Schema - s, found := rs.Schema.Properties[field] - switch { - case found: - // no-op, continue with s as the schema - case rs.Schema.AdditionalProperties != nil && rs.Schema.AdditionalProperties.Schema != nil: - // map field type -- use Schema of the value - // (the key doesn't matter, they all have the same value type) - s = *rs.Schema.AdditionalProperties.Schema - default: - // no Schema found from either swagger.json or line comments - return nil - } - - // resolve the reference to the Schema if the Schema has one - for s.Ref.String() != "" { - sc, e := Resolve(&s.Ref, Schema()) - if e != nil { - return nil - } - s = *sc - } - - // return the merged Schema - return &ResourceSchema{Schema: &s} -} - -// PatchStrategyAndKeyList returns the patch strategy and complete merge key list -func (rs *ResourceSchema) PatchStrategyAndKeyList() (string, []string) { - ps, found := rs.Schema.Extensions[kubernetesPatchStrategyExtensionKey] - if !found { - // empty patch strategy - return "", []string{} - } - mkList, found := rs.Schema.Extensions[kubernetesMergeKeyMapList] - if found { - // mkList is []interface, convert to []string - mkListStr := make([]string, len(mkList.([]interface{}))) - for i, v := range mkList.([]interface{}) { - mkListStr[i] = v.(string) - } - return ps.(string), mkListStr - } - mk, found := rs.Schema.Extensions[kubernetesMergeKeyExtensionKey] - if !found { - // no mergeKey -- may be a primitive associative list (e.g. finalizers) - return ps.(string), []string{} - } - return ps.(string), []string{mk.(string)} -} - -// PatchStrategyAndKey returns the patch strategy and merge key extensions -func (rs *ResourceSchema) PatchStrategyAndKey() (string, string) { - ps, found := rs.Schema.Extensions[kubernetesPatchStrategyExtensionKey] - if !found { - // empty patch strategy - return "", "" - } - - mk, found := rs.Schema.Extensions[kubernetesMergeKeyExtensionKey] - if !found { - // no mergeKey -- may be a primitive associative list (e.g. 
finalizers) - mk = "" - } - return ps.(string), mk.(string) -} - -const ( - // kubernetesOpenAPIDefaultVersion is the latest version number of the statically compiled in - // OpenAPI schema for kubernetes built-in types - kubernetesOpenAPIDefaultVersion = kubernetesapi.DefaultOpenAPI - - // kustomizationAPIAssetName is the name of the asset containing the statically compiled in - // OpenAPI definitions for Kustomization built-in types - kustomizationAPIAssetName = "kustomizationapi/swagger.json" - - // kubernetesGVKExtensionKey is the key to lookup the kubernetes group version kind extension - // -- the extension is an array of objects containing a gvk - kubernetesGVKExtensionKey = "x-kubernetes-group-version-kind" - - // kubernetesMergeKeyExtensionKey is the key to lookup the kubernetes merge key extension - // -- the extension is a string - kubernetesMergeKeyExtensionKey = "x-kubernetes-patch-merge-key" - - // kubernetesPatchStrategyExtensionKey is the key to lookup the kubernetes patch strategy - // extension -- the extension is a string - kubernetesPatchStrategyExtensionKey = "x-kubernetes-patch-strategy" - - // kubernetesMergeKeyMapList is the list of merge keys when there needs to be multiple - // -- the extension is an array of strings - kubernetesMergeKeyMapList = "x-kubernetes-list-map-keys" - - // groupKey is the key to lookup the group from the GVK extension - groupKey = "group" - // versionKey is the key to lookup the version from the GVK extension - versionKey = "version" - // kindKey is the the to lookup the kind from the GVK extension - kindKey = "kind" -) - -// SetSchema sets the kubernetes OpenAPI schema version to use -func SetSchema(openAPIField map[string]string, schema []byte, reset bool) error { - // this should only be set once - schemaIsSet := (kubernetesOpenAPIVersion != "") || customSchema != nil - if schemaIsSet && !reset { - return nil - } - - version, exists := openAPIField["version"] - if exists && schema != nil { - return fmt.Errorf("builtin version and custom schema provided, cannot use both") - } - - if schema != nil { // use custom schema - customSchema = schema - kubernetesOpenAPIVersion = "custom" - // if the schema is changed, initSchema should parse the new schema - globalSchema.schemaInit = false - return nil - } - - // use builtin version - kubernetesOpenAPIVersion = strings.ReplaceAll(version, ".", "") - if kubernetesOpenAPIVersion == "" { - return nil - } - if _, ok := kubernetesapi.OpenAPIMustAsset[kubernetesOpenAPIVersion]; !ok { - return fmt.Errorf("the specified OpenAPI version is not built in") - } - customSchema = nil - // if the schema is changed, initSchema should parse the new schema - globalSchema.schemaInit = false - return nil -} - -// GetSchemaVersion returns what kubernetes OpenAPI version is being used -func GetSchemaVersion() string { - switch { - case kubernetesOpenAPIVersion == "" && customSchema == nil: - return kubernetesOpenAPIDefaultVersion - case customSchema != nil: - return "using custom schema from file provided" - default: - return kubernetesOpenAPIVersion - } -} - -// initSchema parses the json schema -func initSchema() { - if globalSchema.schemaInit { - return - } - globalSchema.schemaInit = true - - // TODO(natasha41575): Accept proto-formatted schema files - if customSchema != nil { - err := parse(customSchema, JsonOrYaml) - if err != nil { - panic("invalid schema file") - } - } else { - if kubernetesOpenAPIVersion == "" { - parseBuiltinSchema(kubernetesOpenAPIDefaultVersion) - } else { - 
parseBuiltinSchema(kubernetesOpenAPIVersion) - } - } - - if err := parse(kustomizationapi.MustAsset(kustomizationAPIAssetName), JsonOrYaml); err != nil { - // this should never happen - panic(err) - } -} - -// parseBuiltinSchema calls parse to parse the json or proto schemas -func parseBuiltinSchema(version string) { - if globalSchema.noUseBuiltInSchema { - // don't parse the built in schema - return - } - - // parse the swagger, this should never fail - assetName := filepath.Join( - "kubernetesapi", - version, - "swagger.pb") - - if err := parse(kubernetesapi.OpenAPIMustAsset[version](assetName), Proto); err != nil { - // this should never happen - panic(err) - } -} - -// parse parses and indexes a single json or proto schema -func parse(b []byte, format format) error { - var swagger spec.Swagger - switch { - case format == Proto: - doc := &openapi_v2.Document{} - // We parse protobuf and get an openapi_v2.Document here. - if err := proto.Unmarshal(b, doc); err != nil { - return fmt.Errorf("openapi proto unmarshalling failed: %w", err) - } - // convert the openapi_v2.Document back to Swagger - _, err := swagger.FromGnostic(doc) - if err != nil { - return errors.Wrap(err) - } - - case format == JsonOrYaml: - if len(b) > 0 && b[0] != byte('{') { - var err error - b, err = k8syaml.YAMLToJSON(b) - if err != nil { - return errors.Wrap(err) - } - } - if err := swagger.UnmarshalJSON(b); err != nil { - return errors.Wrap(err) - } - } - - AddDefinitions(swagger.Definitions) - findNamespaceability(swagger.Paths) - return nil -} - -// findNamespaceability looks at the api paths for the resource to determine -// if it is cluster-scoped or namespace-scoped. The gvk of the resource -// for each path is found by looking at the x-kubernetes-group-version-kind -// extension. If a path exists for the resource that contains a namespace path -// parameter, the resource is namespace-scoped. -func findNamespaceability(paths *spec.Paths) { - if globalSchema.namespaceabilityByResourceType == nil { - globalSchema.namespaceabilityByResourceType = make(map[yaml.TypeMeta]bool) - } - - if paths == nil { - return - } - - for path, pathInfo := range paths.Paths { - if pathInfo.Get == nil { - continue - } - gvk, found := pathInfo.Get.VendorExtensible.Extensions[kubernetesGVKExtensionKey] - if !found { - continue - } - typeMeta, found := toTypeMeta(gvk) - if !found { - continue - } - - if strings.Contains(path, "namespaces/{namespace}") { - // if we find a namespace path parameter, we just update the map - // directly - globalSchema.namespaceabilityByResourceType[typeMeta] = true - } else if _, found := globalSchema.namespaceabilityByResourceType[typeMeta]; !found { - // if the resource doesn't have the namespace path parameter, we - // only add it to the map if it doesn't already exist. 
- globalSchema.namespaceabilityByResourceType[typeMeta] = false - } - } -} - -func resolve(root interface{}, ref *spec.Ref) (*spec.Schema, error) { - if s, ok := root.(*spec.Schema); ok && s == nil { - return nil, nil - } - res, _, err := ref.GetPointer().Get(root) - if err != nil { - return nil, errors.Wrap(err) - } - switch sch := res.(type) { - case spec.Schema: - return &sch, nil - case *spec.Schema: - return sch, nil - case map[string]interface{}: - b, err := json.Marshal(sch) - if err != nil { - return nil, err - } - newSch := new(spec.Schema) - if err = json.Unmarshal(b, newSch); err != nil { - return nil, err - } - return newSch, nil - default: - return nil, errors.Wrap(fmt.Errorf("unknown type for the resolved reference")) - } -} - -func rootSchema() *spec.Schema { - initSchema() - return &globalSchema.schema -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/order/syncorder.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/order/syncorder.go deleted file mode 100644 index 57bc86503..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/order/syncorder.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2021 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package order - -import ( - "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// SyncOrder recursively sorts the map node keys in 'to' node to match the order of -// map node keys in 'from' node at same tree depth, additional keys are moved to the end -// Field order might be altered due to round-tripping in arbitrary functions. -// This functionality helps to retain the original order of fields to avoid unnecessary diffs. -func SyncOrder(from, to *yaml.RNode) error { - // from node should not be modified, it should be just used as a reference - fromCopy := from.Copy() - if err := syncOrder(fromCopy, to); err != nil { - return errors.Errorf("failed to sync field order: %q", err.Error()) - } - rearrangeHeadCommentOfSeqNode(to.YNode()) - return nil -} - -func syncOrder(from, to *yaml.RNode) error { - if from.IsNilOrEmpty() || to.IsNilOrEmpty() { - return nil - } - switch from.YNode().Kind { - case yaml.DocumentNode: - // Traverse the child of the documents - return syncOrder(yaml.NewRNode(from.YNode()), yaml.NewRNode(to.YNode())) - case yaml.MappingNode: - return VisitFields(from, to, func(fNode, tNode *yaml.MapNode) error { - // Traverse each field value - if fNode == nil || tNode == nil { - return nil - } - return syncOrder(fNode.Value, tNode.Value) - }) - case yaml.SequenceNode: - return VisitElements(from, to, syncOrder) // Traverse each list element - } - return nil -} - -// VisitElements calls fn for each element in a SequenceNode. -// Returns an error for non-SequenceNodes -func VisitElements(from, to *yaml.RNode, fn func(fNode, tNode *yaml.RNode) error) error { - fElements, err := from.Elements() - if err != nil { - return errors.Wrap(err) - } - - tElements, err := to.Elements() - if err != nil { - return errors.Wrap(err) - } - for i := range fElements { - if i >= len(tElements) { - return nil - } - if err := fn(fElements[i], tElements[i]); err != nil { - return errors.Wrap(err) - } - } - return nil -} - -// VisitFields calls fn for each field in the RNode. -// Returns an error for non-MappingNodes. 
-func VisitFields(from, to *yaml.RNode, fn func(fNode, tNode *yaml.MapNode) error) error { - srcFieldNames, err := from.Fields() - if err != nil { - return nil - } - yaml.SyncMapNodesOrder(from, to) - // visit each field - for _, fieldName := range srcFieldNames { - if err := fn(from.Field(fieldName), to.Field(fieldName)); err != nil { - return errors.Wrap(err) - } - } - return nil -} - -// rearrangeHeadCommentOfSeqNode addresses a remote corner case due to moving a -// map node in a sequence node with a head comment to the top -func rearrangeHeadCommentOfSeqNode(node *yaml.Node) { - if node == nil { - return - } - switch node.Kind { - case yaml.DocumentNode: - for _, node := range node.Content { - rearrangeHeadCommentOfSeqNode(node) - } - - case yaml.MappingNode: - for _, node := range node.Content { - rearrangeHeadCommentOfSeqNode(node) - } - - case yaml.SequenceNode: - for _, node := range node.Content { - // for each child mapping node, transfer the head comment of it's - // first child scalar node to the head comment of itself - if len(node.Content) > 0 && node.Content[0].Kind == yaml.ScalarNode { - if node.HeadComment == "" { - node.HeadComment = node.Content[0].HeadComment - continue - } - - if node.Content[0].HeadComment != "" { - node.HeadComment += "\n" + node.Content[0].HeadComment - node.Content[0].HeadComment = "" - } - } - } - } -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/resid/gvk.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/resid/gvk.go deleted file mode 100644 index 2c8cbb1f5..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/resid/gvk.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resid - -import ( - "strings" - - "sigs.k8s.io/kustomize/kyaml/openapi" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// Gvk identifies a Kubernetes API type. -// https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/api-group.md -type Gvk struct { - Group string `json:"group,omitempty" yaml:"group,omitempty"` - Version string `json:"version,omitempty" yaml:"version,omitempty"` - Kind string `json:"kind,omitempty" yaml:"kind,omitempty"` - // isClusterScoped is true if the object is known, per the openapi - // data in use, to be cluster scoped, and false otherwise. - isClusterScoped bool -} - -func NewGvk(g, v, k string) Gvk { - result := Gvk{Group: g, Version: v, Kind: k} - result.isClusterScoped = - openapi.IsCertainlyClusterScoped(result.AsTypeMeta()) - return result -} - -func GvkFromNode(r *yaml.RNode) Gvk { - g, v := ParseGroupVersion(r.GetApiVersion()) - return NewGvk(g, v, r.GetKind()) -} - -// FromKind makes a Gvk with only the kind specified. -func FromKind(k string) Gvk { - return NewGvk("", "", k) -} - -// ParseGroupVersion parses a KRM metadata apiVersion field. -func ParseGroupVersion(apiVersion string) (group, version string) { - if i := strings.Index(apiVersion, "/"); i > -1 { - return apiVersion[:i], apiVersion[i+1:] - } - return "", apiVersion -} - -// GvkFromString makes a Gvk from the output of Gvk.String(). -func GvkFromString(s string) Gvk { - values := strings.Split(s, fieldSep) - if len(values) < 3 { - // ...then the string didn't come from Gvk.String(). 
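The order.SyncOrder helper removed above re-sorts map keys in a round-tripped node to match a reference node. A small sketch of that behavior, assuming kyaml's yaml.MustParse and the vendored order package:

package main

import (
	"fmt"
	"log"

	"sigs.k8s.io/kustomize/kyaml/order"
	"sigs.k8s.io/kustomize/kyaml/yaml"
)

func main() {
	from := yaml.MustParse(`
metadata:
  name: app
  namespace: prod
`)
	// Same content as "from", but with the keys in a different order.
	to := yaml.MustParse(`
metadata:
  namespace: prod
  name: app
`)
	if err := order.SyncOrder(from, to); err != nil {
		log.Fatal(err)
	}
	out, err := to.String()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(out) // "name" is ordered before "namespace" again, matching "from"
}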
- return Gvk{ - Group: noGroup, - Version: noVersion, - Kind: noKind, - } - } - k := values[0] - if k == noKind { - k = "" - } - v := values[1] - if v == noVersion { - v = "" - } - g := strings.Join(values[2:], fieldSep) - if g == noGroup { - g = "" - } - return NewGvk(g, v, k) -} - -// Values that are brief but meaningful in logs. -const ( - noGroup = "[noGrp]" - noVersion = "[noVer]" - noKind = "[noKind]" - fieldSep = "." -) - -// String returns a string representation of the GVK. -func (x Gvk) String() string { - g := x.Group - if g == "" { - g = noGroup - } - v := x.Version - if v == "" { - v = noVersion - } - k := x.Kind - if k == "" { - k = noKind - } - return strings.Join([]string{k, v, g}, fieldSep) -} - -// legacySortString returns an older version of String() that LegacyOrderTransformer depends on -// to keep its ordering stable across Kustomize versions -func (x Gvk) legacySortString() string { - legacyNoGroup := "~G" - legacyNoVersion := "~V" - legacyNoKind := "~K" - legacyFieldSeparator := "_" - - g := x.Group - if g == "" { - g = legacyNoGroup - } - v := x.Version - if v == "" { - v = legacyNoVersion - } - k := x.Kind - if k == "" { - k = legacyNoKind - } - return strings.Join([]string{g, v, k}, legacyFieldSeparator) -} - -// ApiVersion returns the combination of Group and Version -func (x Gvk) ApiVersion() string { - var sb strings.Builder - if x.Group != "" { - sb.WriteString(x.Group) - sb.WriteString("/") - } - sb.WriteString(x.Version) - return sb.String() -} - -// StringWoEmptyField returns a string representation of the GVK. Non-exist -// fields will be omitted. This is called when generating a filename for the -// resource. -func (x Gvk) StringWoEmptyField() string { - var s []string - if x.Group != "" { - s = append(s, x.Group) - } - if x.Version != "" { - s = append(s, x.Version) - } - if x.Kind != "" { - s = append(s, x.Kind) - } - return strings.Join(s, "_") -} - -// Equals returns true if the Gvk's have equal fields. -func (x Gvk) Equals(o Gvk) bool { - return x.Group == o.Group && x.Version == o.Version && x.Kind == o.Kind -} - -// An attempt to order things to help k8s, e.g. -// a Service should come before things that refer to it. -// Namespace should be first. -// In some cases order just specified to provide determinism. -var orderFirst = []string{ - "Namespace", - "ResourceQuota", - "StorageClass", - "CustomResourceDefinition", - "ServiceAccount", - "PodSecurityPolicy", - "Role", - "ClusterRole", - "RoleBinding", - "ClusterRoleBinding", - "ConfigMap", - "Secret", - "Endpoints", - "Service", - "LimitRange", - "PriorityClass", - "PersistentVolume", - "PersistentVolumeClaim", - "Deployment", - "StatefulSet", - "CronJob", - "PodDisruptionBudget", -} -var orderLast = []string{ - "MutatingWebhookConfiguration", - "ValidatingWebhookConfiguration", -} -var typeOrders = func() map[string]int { - m := map[string]int{} - for i, n := range orderFirst { - m[n] = -len(orderFirst) + i - } - for i, n := range orderLast { - m[n] = 1 + i - } - return m -}() - -// IsLessThan returns true if self is less than the argument. -func (x Gvk) IsLessThan(o Gvk) bool { - indexI := typeOrders[x.Kind] - indexJ := typeOrders[o.Kind] - if indexI != indexJ { - return indexI < indexJ - } - return x.legacySortString() < o.legacySortString() -} - -// IsSelected returns true if `selector` selects `x`; otherwise, false. -// If `selector` and `x` are the same, return true. -// If `selector` is nil, it is considered a wildcard match, returning true. 
-// If selector fields are empty, they are considered wildcards matching -// anything in the corresponding fields, e.g. -// -// this item: -// -// -// is selected by -// -// -// but rejected by -// -// -func (x Gvk) IsSelected(selector *Gvk) bool { - if selector == nil { - return true - } - if len(selector.Group) > 0 { - if x.Group != selector.Group { - return false - } - } - if len(selector.Version) > 0 { - if x.Version != selector.Version { - return false - } - } - if len(selector.Kind) > 0 { - if x.Kind != selector.Kind { - return false - } - } - return true -} - -// AsTypeMeta returns a yaml.TypeMeta from x's information. -func (x Gvk) AsTypeMeta() yaml.TypeMeta { - return yaml.TypeMeta{ - APIVersion: x.ApiVersion(), - Kind: x.Kind, - } -} - -// IsClusterScoped returns true if the Gvk is certainly cluster scoped -// with respect to the available openapi data. -func (x Gvk) IsClusterScoped() bool { - return x.isClusterScoped -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/resid/resid.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/resid/resid.go deleted file mode 100644 index f786f1366..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/resid/resid.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package resid - -import ( - "reflect" - "strings" - - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// ResId is an identifier of a k8s resource object. -type ResId struct { - // Gvk of the resource. - Gvk `json:",inline,omitempty" yaml:",inline,omitempty"` - - // Name of the resource. - Name string `json:"name,omitempty" yaml:"name,omitempty"` - - // Namespace the resource belongs to, if it can belong to a namespace. - Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` -} - -// NewResIdWithNamespace creates new ResId -// in a given namespace. -func NewResIdWithNamespace(k Gvk, n, ns string) ResId { - return ResId{Gvk: k, Name: n, Namespace: ns} -} - -// NewResId creates new ResId. -func NewResId(k Gvk, n string) ResId { - return NewResIdWithNamespace(k, n, "") -} - -// NewResIdKindOnly creates a new ResId. 
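The resid.Gvk type removed above encodes group/version/kind and formats missing fields with [noGrp]/[noVer]/[noKind] placeholders. A short illustrative sketch, assuming the vendored sigs.k8s.io/kustomize/kyaml/resid package:

package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/kyaml/resid"
)

func main() {
	group, version := resid.ParseGroupVersion("apps/v1")
	gvk := resid.NewGvk(group, version, "Deployment")

	fmt.Println(gvk.String())          // Deployment.v1.apps
	fmt.Println(gvk.ApiVersion())      // apps/v1
	fmt.Println(gvk.IsClusterScoped()) // false, per the openapi data
}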
-func NewResIdKindOnly(k string, n string) ResId { - return NewResId(FromKind(k), n) -} - -const ( - noNamespace = "[noNs]" - noName = "[noName]" - separator = "/" - TotallyNotANamespace = "_non_namespaceable_" - DefaultNamespace = "default" -) - -// String of ResId based on GVK, name and prefix -func (id ResId) String() string { - ns := id.Namespace - if ns == "" { - ns = noNamespace - } - nm := id.Name - if nm == "" { - nm = noName - } - return strings.Join( - []string{id.Gvk.String(), strings.Join([]string{nm, ns}, fieldSep)}, separator) -} - -// LegacySortString returns an older version of String() that LegacyOrderTransformer depends on -// to keep its ordering stable across Kustomize versions -func (id ResId) LegacySortString() string { - legacyNoNamespace := "~X" - legacyNoName := "~N" - legacySeparator := "|" - - ns := id.Namespace - if ns == "" { - ns = legacyNoNamespace - } - nm := id.Name - if nm == "" { - nm = legacyNoName - } - return strings.Join( - []string{id.Gvk.String(), ns, nm}, legacySeparator) -} - -func FromString(s string) ResId { - values := strings.Split(s, separator) - gvk := GvkFromString(values[0]) - - values = strings.Split(values[1], fieldSep) - last := len(values) - 1 - - ns := values[last] - if ns == noNamespace { - ns = "" - } - nm := strings.Join(values[:last], fieldSep) - if nm == noName { - nm = "" - } - return ResId{ - Gvk: gvk, - Namespace: ns, - Name: nm, - } -} - -// FromRNode returns the ResId for the RNode -func FromRNode(rn *yaml.RNode) ResId { - group, version := ParseGroupVersion(rn.GetApiVersion()) - return NewResIdWithNamespace( - Gvk{Group: group, Version: version, Kind: rn.GetKind()}, rn.GetName(), rn.GetNamespace()) -} - -// GvknEquals returns true if the other id matches -// Group/Version/Kind/name. -func (id ResId) GvknEquals(o ResId) bool { - return id.Name == o.Name && id.Gvk.Equals(o.Gvk) -} - -// IsSelectedBy returns true if self is selected by the argument. -func (id ResId) IsSelectedBy(selector ResId) bool { - return (selector.Name == "" || selector.Name == id.Name) && - (selector.Namespace == "" || selector.IsNsEquals(id)) && - id.Gvk.IsSelected(&selector.Gvk) -} - -// Equals returns true if the other id matches -// namespace/Group/Version/Kind/name. -func (id ResId) Equals(o ResId) bool { - return id.IsNsEquals(o) && id.GvknEquals(o) -} - -// IsNsEquals returns true if the id is in -// the same effective namespace. -func (id ResId) IsNsEquals(o ResId) bool { - return id.EffectiveNamespace() == o.EffectiveNamespace() -} - -// IsInDefaultNs returns true if id is a namespaceable -// ResId and the Namespace is either not set or set -// to DefaultNamespace. -func (id ResId) IsInDefaultNs() bool { - return !id.IsClusterScoped() && id.isPutativelyDefaultNs() -} - -func (id ResId) isPutativelyDefaultNs() bool { - return id.Namespace == "" || id.Namespace == DefaultNamespace -} - -// EffectiveNamespace returns a non-ambiguous, non-empty -// namespace for use in reporting and equality tests. -func (id ResId) EffectiveNamespace() string { - // The order of these checks matters. 
- if id.IsClusterScoped() { - return TotallyNotANamespace - } - if id.isPutativelyDefaultNs() { - return DefaultNamespace - } - return id.Namespace -} - -// IsEmpty returns true of all of the id's fields are -// empty strings -func (id ResId) IsEmpty() bool { - return reflect.DeepEqual(id, ResId{}) -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/runfn/runfn.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/runfn/runfn.go deleted file mode 100644 index c855144d7..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/runfn/runfn.go +++ /dev/null @@ -1,546 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package runfn - -import ( - "fmt" - "io" - "os" - "os/user" - "path" - "path/filepath" - "sort" - "strconv" - "strings" - "sync/atomic" - - "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/fn/runtime/container" - "sigs.k8s.io/kustomize/kyaml/fn/runtime/exec" - "sigs.k8s.io/kustomize/kyaml/fn/runtime/runtimeutil" - "sigs.k8s.io/kustomize/kyaml/fn/runtime/starlark" - "sigs.k8s.io/kustomize/kyaml/kio" - "sigs.k8s.io/kustomize/kyaml/kio/kioutil" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// RunFns runs the set of configuration functions in a local directory against -// the Resources in that directory -type RunFns struct { - StorageMounts []runtimeutil.StorageMount - - // Path is the path to the directory containing functions - Path string - - // FunctionPaths Paths allows functions to be specified outside the configuration - // directory. - // Functions provided on FunctionPaths are globally scoped. - // If FunctionPaths length is > 0, then NoFunctionsFromInput defaults to true - FunctionPaths []string - - // Functions is an explicit list of functions to run against the input. - // Functions provided on Functions are globally scoped. - // If Functions length is > 0, then NoFunctionsFromInput defaults to true - Functions []*yaml.RNode - - // GlobalScope if true, functions read from input will be scoped globally rather - // than only to Resources under their subdirs. - GlobalScope bool - - // Input can be set to read the Resources from Input rather than from a directory - Input io.Reader - - // Network enables network access for functions that declare it - Network bool - - // Output can be set to write the result to Output rather than back to the directory - Output io.Writer - - // NoFunctionsFromInput if set to true will not read any functions from the input, - // and only use explicit sources - NoFunctionsFromInput *bool - - // EnableStarlark will enable functions run as starlark scripts - EnableStarlark bool - - // EnableExec will enable exec functions - EnableExec bool - - // DisableContainers will disable functions run as containers - DisableContainers bool - - // ResultsDir is where to write each functions results - ResultsDir string - - // LogSteps enables logging the function that is running. - LogSteps bool - - // LogWriter can be set to write the logs to LogWriter rather than stderr if LogSteps is enabled. - LogWriter io.Writer - - // resultsCount is used to generate the results filename for each container - resultsCount uint32 - - // functionFilterProvider provides a filter to perform the function. 
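ResId, deleted just above, builds on Gvk and adds name and namespace, with String and FromString acting as inverse operations. A minimal sketch of that round trip, again assuming the vendored resid package:

package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/kyaml/resid"
)

func main() {
	id := resid.NewResIdWithNamespace(resid.FromKind("ConfigMap"), "app-config", "prod")
	fmt.Println(id.String()) // ConfigMap.[noVer].[noGrp]/app-config.prod

	// The string form round-trips through FromString.
	back := resid.FromString(id.String())
	fmt.Println(back.Equals(id)) // true
}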
- // this is a variable so it can be mocked in tests - functionFilterProvider func( - filter runtimeutil.FunctionSpec, api *yaml.RNode, currentUser currentUserFunc) (kio.Filter, error) - - // AsCurrentUser is a boolean to indicate whether docker container should use - // the uid and gid that run the command - AsCurrentUser bool - - // Env contains environment variables that will be exported to container - Env []string - - // ContinueOnEmptyResult configures what happens when the underlying pipeline - // returns an empty result. - // If it is false (default), subsequent functions will be skipped and the - // result will be returned immediately. - // If it is true, the empty result will be provided as input to the next - // function in the list. - ContinueOnEmptyResult bool - - // WorkingDir specifies which working directory an exec function should run in. - WorkingDir string -} - -// Execute runs the command -func (r RunFns) Execute() error { - // make the path absolute so it works on mac - var err error - r.Path, err = filepath.Abs(r.Path) - if err != nil { - return errors.Wrap(err) - } - - // default the containerFilterProvider if it hasn't been override. Split out for testing. - (&r).init() - nodes, fltrs, output, err := r.getNodesAndFilters() - if err != nil { - return err - } - return r.runFunctions(nodes, output, fltrs) -} - -func (r RunFns) getNodesAndFilters() ( - *kio.PackageBuffer, []kio.Filter, *kio.LocalPackageReadWriter, error) { - // Read Resources from Directory or Input - buff := &kio.PackageBuffer{} - p := kio.Pipeline{Outputs: []kio.Writer{buff}} - // save the output dir because we will need it to write back - // the same one for reading must be used for writing if deleting Resources - var outputPkg *kio.LocalPackageReadWriter - if r.Path != "" { - outputPkg = &kio.LocalPackageReadWriter{PackagePath: r.Path, MatchFilesGlob: kio.MatchAll} - } - - if r.Input == nil { - p.Inputs = []kio.Reader{outputPkg} - } else { - p.Inputs = []kio.Reader{&kio.ByteReader{Reader: r.Input}} - } - if err := p.Execute(); err != nil { - return nil, nil, outputPkg, err - } - - fltrs, err := r.getFilters(buff.Nodes) - if err != nil { - return nil, nil, outputPkg, err - } - return buff, fltrs, outputPkg, nil -} - -func (r RunFns) getFilters(nodes []*yaml.RNode) ([]kio.Filter, error) { - var fltrs []kio.Filter - - // fns from annotations on the input resources - f, err := r.getFunctionsFromInput(nodes) - if err != nil { - return nil, err - } - fltrs = append(fltrs, f...) - - // fns from directories specified on the struct - f, err = r.getFunctionsFromFunctionPaths() - if err != nil { - return nil, err - } - fltrs = append(fltrs, f...) - - // explicit fns specified on the struct - f, err = r.getFunctionsFromFunctions() - if err != nil { - return nil, err - } - fltrs = append(fltrs, f...) 
- - return fltrs, nil -} - -// runFunctions runs the fltrs against the input and writes to either r.Output or output -func (r RunFns) runFunctions( - input kio.Reader, output kio.Writer, fltrs []kio.Filter) error { - // use the previously read Resources as input - var outputs []kio.Writer - if r.Output == nil { - // write back to the package - outputs = append(outputs, output) - } else { - // write to the output instead of the directory if r.Output is specified or - // the output is nil (reading from Input) - outputs = append(outputs, kio.ByteWriter{Writer: r.Output}) - } - - var err error - pipeline := kio.Pipeline{ - Inputs: []kio.Reader{input}, - Filters: fltrs, - Outputs: outputs, - ContinueOnEmptyResult: r.ContinueOnEmptyResult, - } - if r.LogSteps { - err = pipeline.ExecuteWithCallback(func(op kio.Filter) { - var identifier string - - switch filter := op.(type) { - case *container.Filter: - identifier = filter.Image - case *exec.Filter: - identifier = filter.Path - case *starlark.Filter: - identifier = filter.String() - default: - identifier = "unknown-type function" - } - - _, _ = fmt.Fprintf(r.LogWriter, "Running %s\n", identifier) - }) - } else { - err = pipeline.Execute() - } - if err != nil { - return err - } - - // check for deferred function errors - var errs []string - for i := range fltrs { - cf, ok := fltrs[i].(runtimeutil.DeferFailureFunction) - if !ok { - continue - } - if cf.GetExit() != nil { - errs = append(errs, cf.GetExit().Error()) - } - } - if len(errs) > 0 { - return fmt.Errorf(strings.Join(errs, "\n---\n")) - } - return nil -} - -// getFunctionsFromInput scans the input for functions and runs them -func (r RunFns) getFunctionsFromInput(nodes []*yaml.RNode) ([]kio.Filter, error) { - if *r.NoFunctionsFromInput { - return nil, nil - } - - buff := &kio.PackageBuffer{} - err := kio.Pipeline{ - Inputs: []kio.Reader{&kio.PackageBuffer{Nodes: nodes}}, - Filters: []kio.Filter{&runtimeutil.IsReconcilerFilter{}}, - Outputs: []kio.Writer{buff}, - }.Execute() - if err != nil { - return nil, err - } - err = sortFns(buff) - if err != nil { - return nil, err - } - return r.getFunctionFilters(false, buff.Nodes...) -} - -// getFunctionsFromFunctionPaths returns the set of functions read from r.FunctionPaths -// as a slice of Filters -func (r RunFns) getFunctionsFromFunctionPaths() ([]kio.Filter, error) { - buff := &kio.PackageBuffer{} - for i := range r.FunctionPaths { - err := kio.Pipeline{ - Inputs: []kio.Reader{ - kio.LocalPackageReader{PackagePath: r.FunctionPaths[i]}, - }, - Outputs: []kio.Writer{buff}, - }.Execute() - if err != nil { - return nil, err - } - } - return r.getFunctionFilters(true, buff.Nodes...) -} - -// getFunctionsFromFunctions returns the set of explicitly provided functions as -// Filters -func (r RunFns) getFunctionsFromFunctions() ([]kio.Filter, error) { - return r.getFunctionFilters(true, r.Functions...) -} - -// mergeContainerEnv will merge the envs specified by command line (imperative) and config -// file (declarative). If they have same key, the imperative value will be respected. 
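RunFns, whose fields and pipeline are removed in this hunk, drives configuration functions over a local package directory. A hedged sketch of a typical invocation, assuming the vendored runfn package; the ./example-pkg path is hypothetical:

package main

import (
	"log"
	"os"

	"sigs.k8s.io/kustomize/kyaml/runfn"
)

func main() {
	r := runfn.RunFns{
		Path:      "./example-pkg", // hypothetical directory of resources and functions
		LogSteps:  true,
		LogWriter: os.Stderr,
	}
	// Reads resources from Path, applies the functions it finds, and writes
	// the results back to the same directory because Output is nil.
	if err := r.Execute(); err != nil {
		log.Fatal(err)
	}
}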
-func (r RunFns) mergeContainerEnv(envs []string) []string { - imperative := runtimeutil.NewContainerEnvFromStringSlice(r.Env) - declarative := runtimeutil.NewContainerEnvFromStringSlice(envs) - for key, value := range imperative.EnvVars { - declarative.AddKeyValue(key, value) - } - - for _, key := range imperative.VarsToExport { - declarative.AddKey(key) - } - - return declarative.Raw() -} - -func (r RunFns) getFunctionFilters(global bool, fns ...*yaml.RNode) ( - []kio.Filter, error) { - var fltrs []kio.Filter - for i := range fns { - api := fns[i] - spec := runtimeutil.GetFunctionSpec(api) - if spec == nil { - // resource doesn't have function spec - continue - } - if spec.Container.Network && !r.Network { - // TODO(eddiezane): Provide error info about which function needs the network - return fltrs, errors.Errorf("network required but not enabled with --network") - } - // merge envs from imperative and declarative - spec.Container.Env = r.mergeContainerEnv(spec.Container.Env) - - c, err := r.functionFilterProvider(*spec, api, user.Current) - if err != nil { - return nil, err - } - - if c == nil { - continue - } - cf, ok := c.(*container.Filter) - if ok { - if global { - cf.Exec.GlobalScope = true - } - cf.Exec.WorkingDir = r.WorkingDir - } - fltrs = append(fltrs, c) - } - return fltrs, nil -} - -// sortFns sorts functions so that functions with the longest paths come first -func sortFns(buff *kio.PackageBuffer) error { - var outerErr error - // sort the nodes so that we traverse them depth first - // functions deeper in the file system tree should be run first - sort.Slice(buff.Nodes, func(i, j int) bool { - if err := kioutil.CopyLegacyAnnotations(buff.Nodes[i]); err != nil { - return false - } - if err := kioutil.CopyLegacyAnnotations(buff.Nodes[j]); err != nil { - return false - } - mi, _ := buff.Nodes[i].GetMeta() - pi := filepath.ToSlash(mi.Annotations[kioutil.PathAnnotation]) - - mj, _ := buff.Nodes[j].GetMeta() - pj := filepath.ToSlash(mj.Annotations[kioutil.PathAnnotation]) - - // If the path is the same, we decide the ordering based on the - // index annotation. - if pi == pj { - iIndex, err := strconv.Atoi(mi.Annotations[kioutil.IndexAnnotation]) - if err != nil { - outerErr = err - return false - } - jIndex, err := strconv.Atoi(mj.Annotations[kioutil.IndexAnnotation]) - if err != nil { - outerErr = err - return false - } - return iIndex < jIndex - } - - if filepath.Base(path.Dir(pi)) == "functions" { - // don't count the functions dir, the functions are scoped 1 level above - pi = filepath.Dir(path.Dir(pi)) - } else { - pi = filepath.Dir(pi) - } - - if filepath.Base(path.Dir(pj)) == "functions" { - // don't count the functions dir, the functions are scoped 1 level above - pj = filepath.Dir(path.Dir(pj)) - } else { - pj = filepath.Dir(pj) - } - - // i is "less" than j (comes earlier) if its depth is greater -- e.g. run - // i before j if it is deeper in the directory structure - li := len(strings.Split(pi, "/")) - if pi == "." { - // local dir should have 0 path elements instead of 1 - li = 0 - } - lj := len(strings.Split(pj, "/")) - if pj == "." { - // local dir should have 0 path elements instead of 1 - lj = 0 - } - if li != lj { - // use greater-than because we want to sort with the longest - // paths FIRST rather than last - return li > lj - } - - // sort by path names if depths are equal - return pi < pj - }) - return outerErr -} - -// init initializes the RunFns with a containerFilterProvider. 
-func (r *RunFns) init() { - if r.NoFunctionsFromInput == nil { - // default no functions from input if any function sources are explicitly provided - nfn := len(r.FunctionPaths) > 0 || len(r.Functions) > 0 - r.NoFunctionsFromInput = &nfn - } - - // if no path is specified, default reading from stdin and writing to stdout - if r.Path == "" { - if r.Output == nil { - r.Output = os.Stdout - } - if r.Input == nil { - r.Input = os.Stdin - } - } - - // functionFilterProvider set the filter provider - if r.functionFilterProvider == nil { - r.functionFilterProvider = r.ffp - } - - // if LogSteps is enabled and LogWriter is not specified, use stderr - if r.LogSteps && r.LogWriter == nil { - r.LogWriter = os.Stderr - } -} - -type currentUserFunc func() (*user.User, error) - -// getUIDGID will return "nobody" if asCurrentUser is false. Otherwise -// return "uid:gid" according to the return from currentUser function. -func getUIDGID(asCurrentUser bool, currentUser currentUserFunc) (string, error) { - if !asCurrentUser { - return "nobody", nil - } - - u, err := currentUser() - if err != nil { - return "", err - } - return fmt.Sprintf("%s:%s", u.Uid, u.Gid), nil -} - -// ffp provides function filters -func (r *RunFns) ffp(spec runtimeutil.FunctionSpec, api *yaml.RNode, currentUser currentUserFunc) (kio.Filter, error) { - var resultsFile string - if r.ResultsDir != "" { - resultsFile = filepath.Join(r.ResultsDir, fmt.Sprintf( - "results-%v.yaml", r.resultsCount)) - atomic.AddUint32(&r.resultsCount, 1) - } - if !r.DisableContainers && spec.Container.Image != "" { - // TODO: Add a test for this behavior - uidgid, err := getUIDGID(r.AsCurrentUser, currentUser) - if err != nil { - return nil, err - } - - // Storage mounts can either come from kustomize fn run --mounts, - // or from the declarative function mounts field. - storageMounts := spec.Container.StorageMounts - storageMounts = append(storageMounts, r.StorageMounts...) 
- - c := container.NewContainer( - runtimeutil.ContainerSpec{ - Image: spec.Container.Image, - Network: spec.Container.Network, - StorageMounts: storageMounts, - Env: spec.Container.Env, - }, - uidgid, - ) - cf := &c - cf.Exec.FunctionConfig = api - cf.Exec.GlobalScope = r.GlobalScope - cf.Exec.ResultsFile = resultsFile - cf.Exec.DeferFailure = spec.DeferFailure - return cf, nil - } - if r.EnableStarlark && (spec.Starlark.Path != "" || spec.Starlark.URL != "") { - // the script path is relative to the function config file - m, err := api.GetMeta() - if err != nil { - return nil, errors.Wrap(err) - } - - var p string - if spec.Starlark.Path != "" { - pathAnno := m.Annotations[kioutil.PathAnnotation] - if pathAnno == "" { - pathAnno = m.Annotations[kioutil.LegacyPathAnnotation] - } - p = filepath.ToSlash(path.Clean(pathAnno)) - - spec.Starlark.Path = filepath.ToSlash(path.Clean(spec.Starlark.Path)) - if filepath.IsAbs(spec.Starlark.Path) || path.IsAbs(spec.Starlark.Path) { - return nil, errors.Errorf( - "absolute function path %s not allowed", spec.Starlark.Path) - } - if strings.HasPrefix(spec.Starlark.Path, "..") { - return nil, errors.Errorf( - "function path %s not allowed to start with ../", spec.Starlark.Path) - } - p = filepath.ToSlash(filepath.Join(r.Path, filepath.Dir(p), spec.Starlark.Path)) - } - - sf := &starlark.Filter{Name: spec.Starlark.Name, Path: p, URL: spec.Starlark.URL} - - sf.FunctionConfig = api - sf.GlobalScope = r.GlobalScope - sf.ResultsFile = resultsFile - sf.DeferFailure = spec.DeferFailure - return sf, nil - } - - if r.EnableExec && spec.Exec.Path != "" { - ef := &exec.Filter{ - Path: spec.Exec.Path, - WorkingDir: r.WorkingDir, - } - - ef.FunctionConfig = api - ef.GlobalScope = r.GlobalScope - ef.ResultsFile = resultsFile - ef.DeferFailure = spec.DeferFailure - return ef, nil - } - - return nil, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/sets/string.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/sets/string.go deleted file mode 100644 index 07f02afb0..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/sets/string.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package sets - -type String map[string]interface{} - -func (s String) Len() int { - return len(s) -} - -func (s String) List() []string { - var val []string - for k := range s { - val = append(val, k) - } - return val -} - -func (s String) Has(val string) bool { - _, found := s[val] - return found -} - -func (s String) Insert(vals ...string) { - for _, val := range vals { - s[val] = nil - } -} - -func (s String) Difference(s2 String) String { - s3 := String{} - for k := range s { - if _, found := s2[k]; !found { - s3.Insert(k) - } - } - return s3 -} - -func (s String) SymmetricDifference(s2 String) String { - s3 := String{} - for k := range s { - if _, found := s2[k]; !found { - s3.Insert(k) - } - } - for k := range s2 { - if _, found := s[k]; !found { - s3.Insert(k) - } - } - return s3 -} - -func (s String) Intersection(s2 String) String { - s3 := String{} - for k := range s { - if _, found := s2[k]; found { - s3.Insert(k) - } - } - return s3 -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/sets/stringlist.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/sets/stringlist.go deleted file mode 100644 index 2d75978fc..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/sets/stringlist.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package sets - -// StringList is a set, where each element of -// the set is a string slice. -type StringList [][]string - -func (s StringList) Len() int { - return len(s) -} - -func (s StringList) Insert(val []string) StringList { - if !s.Has(val) { - return append(s, val) - } - return s -} - -func (s StringList) Has(val []string) bool { - if len(s) == 0 { - return false - } - - for i := range s { - if isStringSliceEqual(s[i], val) { - return true - } - } - return false -} - -func isStringSliceEqual(s []string, t []string) bool { - if len(s) != len(t) { - return false - } - for i := range s { - if s[i] != t[i] { - return false - } - } - return true -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/sliceutil/slice.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/sliceutil/slice.go deleted file mode 100644 index 23e3ad7c2..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/sliceutil/slice.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2021 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package sliceutil - -// Contains return true if string e is present in slice s -func Contains(s []string, e string) bool { - for _, a := range s { - if a == e { - return true - } - } - return false -} - -// Remove removes the first occurrence of r in slice s -// and returns remaining slice -func Remove(s []string, r string) []string { - for i, v := range s { - if v == r { - return append(s[:i], s[i+1:]...) - } - } - return s -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/utils/pathsplitter.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/utils/pathsplitter.go deleted file mode 100644 index aa560299f..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/utils/pathsplitter.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2021 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package utils - -import "strings" - -// TODO: Move these to kyaml - -// PathSplitter splits a delimited string, permitting escaped delimiters. -func PathSplitter(path string, delimiter string) []string { - ps := strings.Split(path, delimiter) - var res []string - res = append(res, ps[0]) - for i := 1; i < len(ps); i++ { - last := len(res) - 1 - if strings.HasSuffix(res[last], `\`) { - res[last] = strings.TrimSuffix(res[last], `\`) + delimiter + ps[i] - } else { - res = append(res, ps[i]) - } - } - return res -} - -// SmarterPathSplitter splits a path, retaining bracketed elements. -// If the element is a list entry identifier (defined by the '='), -// it will retain the brackets. -// E.g. "[name=com.foo.someapp]" survives as one thing after splitting -// "spec.template.spec.containers.[name=com.foo.someapp].image" -// See kyaml/yaml/match.go for use of list entry identifiers. -// If the element is a mapping entry identifier, it will remove the -// brackets. -// E.g. "a.b.c" survives as one thing after splitting -// "metadata.annotations.[a.b.c] -// This function uses `PathSplitter`, so it also respects escaped delimiters. 
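The pathsplitter helpers removed above document their splitting rules in the comments: escaped delimiters stay inside one element, bracketed list-entry identifiers keep their brackets, and bracketed mapping keys lose them. A minimal usage sketch, assuming the upstream sigs.k8s.io/kustomize/kyaml/utils package that this vendored copy mirrors:

package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/kyaml/utils"
)

func main() {
	// Escaped delimiters are kept inside a single element.
	fmt.Println(utils.PathSplitter(`a.b\.c`, "."))
	// [a b.c]

	// "[name=com.foo.someapp]" survives as one element with brackets
	// retained because it contains "=".
	fmt.Println(utils.SmarterPathSplitter(
		"spec.template.spec.containers.[name=com.foo.someapp].image", "."))
	// [spec template spec containers [name=com.foo.someapp] image]
}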
-func SmarterPathSplitter(path string, delimiter string) []string { - var result []string - split := PathSplitter(path, delimiter) - - for i := 0; i < len(split); i++ { - elem := split[i] - if strings.HasPrefix(elem, "[") && !strings.HasSuffix(elem, "]") { - // continue until we find the matching "]" - bracketed := []string{elem} - for i < len(split)-1 { - i++ - bracketed = append(bracketed, split[i]) - if strings.HasSuffix(split[i], "]") { - break - } - } - bracketedStr := strings.Join(bracketed, delimiter) - if strings.Contains(bracketedStr, "=") { - result = append(result, bracketedStr) - } else { - result = append(result, strings.Trim(bracketedStr, "[]")) - } - } else { - result = append(result, elem) - } - } - return result -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/alias.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/alias.go deleted file mode 100644 index 48a025444..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/alias.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package yaml - -import ( - "bytes" - "io" - - "sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml" -) - -const ( - WideSequenceStyle SequenceIndentStyle = "wide" - CompactSequenceStyle SequenceIndentStyle = "compact" - DefaultIndent = 2 - // BareSeqNodeWrappingKey kyaml uses reader annotations to track resources, it is not possible to - // add them to bare sequence nodes, this key is used to wrap such bare - // sequence nodes into map node, byteio_writer unwraps it while writing back - BareSeqNodeWrappingKey = "bareSeqNodeWrappingKey" -) - -// SeqIndentType holds the indentation style for sequence nodes -type SequenceIndentStyle string - -// EncoderOptions are options that can be used to configure the encoder, -// do not expose new options without considerable justification -type EncoderOptions struct { - // SeqIndent is the indentation style for YAML Sequence nodes - SeqIndent SequenceIndentStyle -} - -// Expose the yaml.v3 functions so this package can be used as a replacement - -type Decoder = yaml.Decoder -type Encoder = yaml.Encoder -type IsZeroer = yaml.IsZeroer -type Kind = yaml.Kind -type Marshaler = yaml.Marshaler -type Node = yaml.Node -type Style = yaml.Style -type TypeError = yaml.TypeError -type Unmarshaler = yaml.Unmarshaler - -var Marshal = func(in interface{}) ([]byte, error) { - var buf bytes.Buffer - err := NewEncoder(&buf).Encode(in) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} -var Unmarshal = yaml.Unmarshal -var NewDecoder = yaml.NewDecoder -var NewEncoder = func(w io.Writer) *yaml.Encoder { - e := yaml.NewEncoder(w) - e.SetIndent(DefaultIndent) - e.CompactSeqIndent() - return e -} - -// MarshalWithOptions marshals the input interface with provided options -func MarshalWithOptions(in interface{}, opts *EncoderOptions) ([]byte, error) { - var buf bytes.Buffer - err := NewEncoderWithOptions(&buf, opts).Encode(in) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// NewEncoderWithOptions returns the encoder with provided options -func NewEncoderWithOptions(w io.Writer, opts *EncoderOptions) *yaml.Encoder { - encoder := NewEncoder(w) - encoder.SetIndent(DefaultIndent) - if opts.SeqIndent == WideSequenceStyle { - encoder.DefaultSeqIndent() - } else { - encoder.CompactSeqIndent() - } - return encoder -} - -var AliasNode yaml.Kind = yaml.AliasNode -var DocumentNode yaml.Kind = yaml.DocumentNode -var MappingNode yaml.Kind = yaml.MappingNode -var 
ScalarNode yaml.Kind = yaml.ScalarNode -var SequenceNode yaml.Kind = yaml.SequenceNode - -var DoubleQuotedStyle yaml.Style = yaml.DoubleQuotedStyle -var FlowStyle yaml.Style = yaml.FlowStyle -var FoldedStyle yaml.Style = yaml.FoldedStyle -var LiteralStyle yaml.Style = yaml.LiteralStyle -var SingleQuotedStyle yaml.Style = yaml.SingleQuotedStyle -var TaggedStyle yaml.Style = yaml.TaggedStyle - -const ( - MergeTag = "!!merge" -) diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/compatibility.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/compatibility.go deleted file mode 100644 index a47114000..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/compatibility.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package yaml - -import ( - "reflect" - "strings" - - y1_1 "gopkg.in/yaml.v2" - "k8s.io/kube-openapi/pkg/validation/spec" - y1_2 "sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml" -) - -// typeToTag maps OpenAPI schema types to yaml 1.2 tags -var typeToTag = map[string]string{ - "string": NodeTagString, - "integer": NodeTagInt, - "boolean": NodeTagBool, - "number": NodeTagFloat, -} - -// FormatNonStringStyle makes sure that values which parse as non-string values in yaml 1.1 -// are correctly formatted given the Schema type. -func FormatNonStringStyle(node *Node, schema spec.Schema) { - if len(schema.Type) != 1 { - return - } - t := schema.Type[0] - - if !IsYaml1_1NonString(node) { - return - } - switch { - case t == "string" && schema.Format != "int-or-string": - if (node.Style&DoubleQuotedStyle == 0) && (node.Style&SingleQuotedStyle == 0) { - // must quote values so they are parsed as strings - node.Style = DoubleQuotedStyle - } - case t == "boolean" || t == "integer" || t == "number": - if (node.Style&DoubleQuotedStyle != 0) || (node.Style&SingleQuotedStyle != 0) { - // must NOT quote the values so they aren't parsed as strings - node.Style = 0 - } - default: - return - } - - // if the node tag is null, make sure we don't add any non-null tags - // https://github.com/GoogleContainerTools/kpt/issues/2321 - if node.Tag == NodeTagNull { - // must NOT quote null values - node.Style = 0 - return - } - if tag, found := typeToTag[t]; found { - // make sure the right tag is set - node.Tag = tag - } -} - -// IsYaml1_1NonString returns true if the value parses as a non-string value in yaml 1.1 -// when unquoted. -// -// Note: yaml 1.2 uses different keywords than yaml 1.1. Example: yaml 1.2 interprets -// `field: on` and `field: "on"` as equivalent (both strings). However Yaml 1.1 interprets -// `field: on` as on being a bool and `field: "on"` as on being a string. -// If an input is read with `field: "on"`, and the style is changed from DoubleQuote to 0, -// it will change the type of the field from a string to a bool. For this reason, fields -// which are keywords in yaml 1.1 should never have their style changed, as it would break -// backwards compatibility with yaml 1.1 -- which is what is used by the Kubernetes apiserver. 
-func IsYaml1_1NonString(node *Node) bool { - if node.Kind != y1_2.ScalarNode { - // not a keyword - return false - } - return IsValueNonString(node.Value) -} - -func IsValueNonString(value string) bool { - if value == "" { - return false - } - if strings.Contains(value, "\n") { - // multi-line strings will fail to unmarshal - return false - } - // check if the value will unmarshal into a non-string value using a yaml 1.1 parser - var i1 interface{} - if err := y1_1.Unmarshal([]byte(value), &i1); err != nil { - return false - } - if reflect.TypeOf(i1) != stringType { - return true - } - - return false -} - -var stringType = reflect.TypeOf("string") diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/const.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/const.go deleted file mode 100644 index 6a2cc4516..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/const.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package yaml - -const ( - // NodeTagNull is the tag set for a yaml.Document that contains no data; - // e.g. it isn't a Map, Slice, Document, etc - NodeTagNull = "!!null" - NodeTagFloat = "!!float" - NodeTagString = "!!str" - NodeTagBool = "!!bool" - NodeTagInt = "!!int" - NodeTagMap = "!!map" - NodeTagSeq = "!!seq" - NodeTagEmpty = "" -) - -// Field names -const ( - AnnotationsField = "annotations" - APIVersionField = "apiVersion" - KindField = "kind" - MetadataField = "metadata" - DataField = "data" - BinaryDataField = "binaryData" - NameField = "name" - NamespaceField = "namespace" - LabelsField = "labels" -) diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/datamap.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/datamap.go deleted file mode 100644 index f4b7e6664..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/datamap.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package yaml - -import ( - "encoding/base64" - "sort" - "strings" - "unicode/utf8" -) - -// SortedMapKeys returns a sorted slice of keys to the given map. -// Writing this function never gets old. 
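The compatibility check above hinges on unquoted scalars that a YAML 1.1 parser treats as non-strings (for example "on" and "yes" parse as booleans). A simplified standalone sketch of that idea, not the vendored implementation, using gopkg.in/yaml.v2 as the YAML 1.1 parser:

package main

import (
	"fmt"
	"reflect"

	yaml "gopkg.in/yaml.v2"
)

// isYAML11NonString reports whether a scalar would be parsed as something
// other than a string by a YAML 1.1 parser.
func isYAML11NonString(value string) bool {
	var v interface{}
	if err := yaml.Unmarshal([]byte(value), &v); err != nil {
		return false
	}
	return v != nil && reflect.TypeOf(v).Kind() != reflect.String
}

func main() {
	for _, s := range []string{"on", "yes", "1.5", "nginx"} {
		fmt.Printf("%q -> %v\n", s, isYAML11NonString(s))
	}
	// "on" -> true, "yes" -> true, "1.5" -> true, "nginx" -> false
}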
-func SortedMapKeys(m map[string]string) []string { - keys := make([]string, len(m)) - i := 0 - for k := range m { - keys[i] = k - i++ - } - sort.Strings(keys) - return keys -} - -func (rn *RNode) LoadMapIntoConfigMapData(m map[string]string) error { - for _, k := range SortedMapKeys(m) { - fldName, vrN := makeConfigMapValueRNode(m[k]) - if _, err := rn.Pipe( - LookupCreate(MappingNode, fldName), - SetField(k, vrN)); err != nil { - return err - } - } - return nil -} - -func (rn *RNode) LoadMapIntoConfigMapBinaryData(m map[string]string) error { - for _, k := range SortedMapKeys(m) { - _, vrN := makeConfigMapValueRNode(m[k]) - // we know this is binary data - fldName := BinaryDataField - if _, err := rn.Pipe( - LookupCreate(MappingNode, fldName), - SetField(k, vrN)); err != nil { - return err - } - } - return nil -} - -func makeConfigMapValueRNode(s string) (field string, rN *RNode) { - yN := &Node{Kind: ScalarNode} - yN.Tag = NodeTagString - if utf8.ValidString(s) { - field = DataField - yN.Value = s - } else { - field = BinaryDataField - yN.Value = encodeBase64(s) - } - if strings.Contains(yN.Value, "\n") { - yN.Style = LiteralStyle - } - return field, NewRNode(yN) -} - -func (rn *RNode) LoadMapIntoSecretData(m map[string]string) error { - mapNode, err := rn.Pipe(LookupCreate(MappingNode, DataField)) - if err != nil { - return err - } - for _, k := range SortedMapKeys(m) { - vrN := makeSecretValueRNode(m[k]) - if _, err := mapNode.Pipe(SetField(k, vrN)); err != nil { - return err - } - } - return nil -} - -// In a secret, all data is base64 encoded, regardless of its conformance -// or lack thereof to UTF-8. -func makeSecretValueRNode(s string) *RNode { - yN := &Node{Kind: ScalarNode} - // Purposely don't use YAML tags to identify the data as being plain text or - // binary. It kubernetes Secrets the values in the `data` map are expected - // to be base64 encoded, and in ConfigMaps that same can be said for the - // values in the `binaryData` field. - yN.Tag = NodeTagString - yN.Value = encodeBase64(s) - if strings.Contains(yN.Value, "\n") { - yN.Style = LiteralStyle - } - return NewRNode(yN) -} - -// encodeBase64 encodes s as base64 that is broken up into multiple lines -// as appropriate for the resulting length. -func encodeBase64(s string) string { - const lineLen = 70 - encLen := base64.StdEncoding.EncodedLen(len(s)) - lines := encLen/lineLen + 1 - buf := make([]byte, encLen*2+lines) - in := buf[0:encLen] - out := buf[encLen:] - base64.StdEncoding.Encode(in, []byte(s)) - k := 0 - for i := 0; i < len(in); i += lineLen { - j := i + lineLen - if j > len(in) { - j = len(in) - } - k += copy(out[k:], in[i:j]) - if lines > 1 { - out[k] = '\n' - k++ - } - } - return string(out[:k]) -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/doc.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/doc.go deleted file mode 100644 index b58811cf8..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/doc.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package yaml contains libraries for manipulating individual Kubernetes Resource -// Configuration as yaml, keeping yaml structure and comments. -// -// Parsing Resources -// -// Typically Resources will be initialized as collections through the kio package libraries. -// However it is possible to directly initialize Resources using Parse. 
-// resource, err := yaml.Parse("apiVersion: apps/v1\nkind: Deployment") -// -// Processing Resources -// -// Individual Resources are manipulated using the Pipe and PipeE to apply Filter functions -// to transform the Resource data. -// err := resource.PipeE(yaml.SetAnnotation("key", "value")) -// -// If multiple Filter functions are provided to Pipe or PipeE, each function is applied to -// the result of the last function -- e.g. yaml.Lookup(...), yaml.SetField(...) -// -// Field values may also be retrieved using Pipe. -// annotationValue, err := resource.Pipe(yaml.GetAnnotation("key")) -// -// See http://www.linfo.org/filters.html for a definition of filters. -// -// Common Filters -// -// There are a number of standard filter functions provided by the yaml package. -// -// Working with annotations: -// [AnnotationSetter{}, AnnotationGetter{}, AnnotationClearer{}] -// -// Working with fields by path: -// [PathMatcher{}, PathGetter{}] -// -// Working with individual fields on Maps and Objects: -// [FieldMatcher{}, FieldSetter{}, FieldGetter{}] -// -// Working with individual elements in Sequences: -// [ElementAppender{}, ElementSetter{}, ElementMatcher{}] -// -// Writing Filters -// -// Users may implement their own filter functions. When doing so, can be necessary to work with -// the RNode directly rather than through Pipe. RNode provides a number of functions for doing -// so. See: -// [GetMeta(), Fields(), Elements(), String()] -package yaml diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/filters.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/filters.go deleted file mode 100644 index e364035e6..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/filters.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package yaml - -import ( - "fmt" - "regexp" - "sort" - "strings" -) - -// Filters is the list of serializable Pipeline Filters -var Filters = map[string]func() Filter{ - "AnnotationClearer": func() Filter { return &AnnotationClearer{} }, - "AnnotationGetter": func() Filter { return &AnnotationGetter{} }, - "AnnotationSetter": func() Filter { return &AnnotationSetter{} }, - "LabelSetter": func() Filter { return &LabelSetter{} }, - "ElementAppender": func() Filter { return &ElementAppender{} }, - "ElementMatcher": func() Filter { return &ElementMatcher{} }, - "FieldClearer": func() Filter { return &FieldClearer{} }, - "FilterMatcher": func() Filter { return &FilterMatcher{} }, - "FieldMatcher": func() Filter { return &FieldMatcher{} }, - "FieldSetter": func() Filter { return &FieldSetter{} }, - "PathGetter": func() Filter { return &PathGetter{} }, - "PathMatcher": func() Filter { return &PathMatcher{} }, - "Parser": func() Filter { return &Parser{} }, - "PrefixSetter": func() Filter { return &PrefixSetter{} }, - "ValueReplacer": func() Filter { return &ValueReplacer{} }, - "SuffixSetter": func() Filter { return &SuffixSetter{} }, - "TeePiper": func() Filter { return &TeePiper{} }, -} - -// YFilter wraps the Filter interface so the filter can be represented as -// data and can be unmarshalled into a struct from a yaml config file. -// This allows Pipelines to be expressed as data rather than code. 
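The package documentation above describes the Parse/Pipe workflow for manipulating a single resource. A minimal usage sketch built only from the calls named in that doc comment (Parse, PipeE, Pipe, SetAnnotation, GetAnnotation):

package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/kyaml/yaml"
)

func main() {
	// Parse a resource, set an annotation, then read it back.
	resource, err := yaml.Parse("apiVersion: apps/v1\nkind: Deployment\n")
	if err != nil {
		panic(err)
	}
	if err := resource.PipeE(yaml.SetAnnotation("owner", "team-a")); err != nil {
		panic(err)
	}
	value, err := resource.Pipe(yaml.GetAnnotation("owner"))
	if err != nil {
		panic(err)
	}
	fmt.Println(yaml.GetValue(value)) // team-a
}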
-type YFilter struct { - Filter -} - -func (y YFilter) MarshalYAML() (interface{}, error) { - return y.Filter, nil -} - -func (y *YFilter) UnmarshalYAML(unmarshal func(interface{}) error) error { - meta := &ResourceMeta{} - if err := unmarshal(meta); err != nil { - return err - } - filter, found := Filters[meta.Kind] - if !found { - var knownFilters []string - for k := range Filters { - knownFilters = append(knownFilters, k) - } - sort.Strings(knownFilters) - return fmt.Errorf("unsupported Filter Kind %s: may be one of: [%s]", - meta.Kind, strings.Join(knownFilters, ",")) - } - y.Filter = filter() - - if err := unmarshal(y.Filter); err != nil { - return err - } - return nil -} - -type YFilters []YFilter - -func (y YFilters) Filters() []Filter { - var f []Filter - for i := range y { - f = append(f, y[i].Filter) - } - return f -} - -type FilterMatcher struct { - Kind string `yaml:"kind"` - - // Filters are the set of Filters run by TeePiper. - Filters YFilters `yaml:"pipeline,omitempty"` -} - -func (t FilterMatcher) Filter(rn *RNode) (*RNode, error) { - v, err := rn.Pipe(t.Filters.Filters()...) - if v == nil || err != nil { - return nil, err - } - // return the original input if the pipeline resolves to true - return rn, err -} - -type ValueReplacer struct { - Kind string `yaml:"kind"` - - StringMatch string `yaml:"stringMatch"` - RegexMatch string `yaml:"regexMatch"` - Replace string `yaml:"replace"` - Count int `yaml:"count"` -} - -func (s ValueReplacer) Filter(object *RNode) (*RNode, error) { - if s.Count == 0 { - s.Count = -1 - } - switch { - case s.StringMatch != "": - object.value.Value = strings.Replace(object.value.Value, s.StringMatch, s.Replace, s.Count) - case s.RegexMatch != "": - r, err := regexp.Compile(s.RegexMatch) - if err != nil { - return nil, fmt.Errorf("ValueReplacer RegexMatch does not compile: %v", err) - } - object.value.Value = r.ReplaceAllString(object.value.Value, s.Replace) - default: - return nil, fmt.Errorf("ValueReplacer missing StringMatch and RegexMatch") - } - return object, nil -} - -type PrefixSetter struct { - Kind string `yaml:"kind"` - - Value string `yaml:"value"` -} - -func (s PrefixSetter) Filter(object *RNode) (*RNode, error) { - if !strings.HasPrefix(object.value.Value, s.Value) { - object.value.Value = s.Value + object.value.Value - } - return object, nil -} - -type SuffixSetter struct { - Kind string `yaml:"kind"` - - Value string `yaml:"value"` -} - -func (s SuffixSetter) Filter(object *RNode) (*RNode, error) { - if !strings.HasSuffix(object.value.Value, s.Value) { - object.value.Value += s.Value - } - return object, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/fns.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/fns.go deleted file mode 100644 index e693f88a1..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/fns.go +++ /dev/null @@ -1,867 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package yaml - -import ( - "fmt" - "regexp" - "strconv" - "strings" - - "github.com/davecgh/go-spew/spew" - "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml" -) - -// Append creates an ElementAppender -func Append(elements ...*yaml.Node) ElementAppender { - return ElementAppender{Elements: elements} -} - -// ElementAppender adds all element to a SequenceNode's Content. -// Returns Elements[0] if len(Elements) == 1, otherwise returns nil. 
-type ElementAppender struct { - Kind string `yaml:"kind,omitempty"` - - // Elem is the value to append. - Elements []*yaml.Node `yaml:"elements,omitempty"` -} - -func (a ElementAppender) Filter(rn *RNode) (*RNode, error) { - if err := ErrorIfInvalid(rn, yaml.SequenceNode); err != nil { - return nil, err - } - for i := range a.Elements { - rn.YNode().Content = append(rn.Content(), a.Elements[i]) - } - if len(a.Elements) == 1 { - return NewRNode(a.Elements[0]), nil - } - return nil, nil -} - -// ElementSetter sets the value for an Element in an associative list. -// ElementSetter will append, replace or delete an element in an associative list. -// To append, user a key-value pair that doesn't exist in the sequence. this -// behavior is intended to handle the case that not matching element found. It's -// not designed for this purpose. To append an element, please use ElementAppender. -// To replace, set the key-value pair and a non-nil Element. -// To delete, set the key-value pair and leave the Element as nil. -// Every key must have a corresponding value. -type ElementSetter struct { - Kind string `yaml:"kind,omitempty"` - - // Element is the new value to set -- remove the existing element if nil - Element *Node - - // Key is a list of fields on the elements. It is used to find matching elements to - // update / delete - Keys []string - - // Value is a list of field values on the elements corresponding to the keys. It is - // used to find matching elements to update / delete. - Values []string -} - -// isMappingNode returns whether node is a mapping node -func (e ElementSetter) isMappingNode(node *RNode) bool { - return ErrorIfInvalid(node, yaml.MappingNode) == nil -} - -// isMappingSetter returns is this setter intended to set a mapping node -func (e ElementSetter) isMappingSetter() bool { - return len(e.Keys) > 0 && e.Keys[0] != "" && - len(e.Values) > 0 && e.Values[0] != "" -} - -func (e ElementSetter) Filter(rn *RNode) (*RNode, error) { - if len(e.Keys) == 0 { - e.Keys = append(e.Keys, "") - } - - if err := ErrorIfInvalid(rn, SequenceNode); err != nil { - return nil, err - } - - // build the new Content slice - var newContent []*yaml.Node - matchingElementFound := false - for i := range rn.YNode().Content { - elem := rn.Content()[i] - newNode := NewRNode(elem) - - // empty elements are not valid -- they at least need an associative key - if IsMissingOrNull(newNode) || IsEmptyMap(newNode) { - continue - } - // keep non-mapping node in the Content when we want to match a mapping. 
- if !e.isMappingNode(newNode) && e.isMappingSetter() { - newContent = append(newContent, elem) - continue - } - - // check if this is the element we are matching - var val *RNode - var err error - found := true - for j := range e.Keys { - if j < len(e.Values) { - val, err = newNode.Pipe(FieldMatcher{Name: e.Keys[j], StringValue: e.Values[j]}) - } - if err != nil { - return nil, err - } - if val == nil { - found = false - break - } - } - if !found { - // not the element we are looking for, keep it in the Content - if len(e.Values) > 0 { - newContent = append(newContent, elem) - } - continue - } - matchingElementFound = true - - // deletion operation -- remove the element from the new Content - if e.Element == nil { - continue - } - // replace operation -- replace the element in the Content - newContent = append(newContent, e.Element) - } - rn.YNode().Content = newContent - - // deletion operation -- return nil - if IsMissingOrNull(NewRNode(e.Element)) { - return nil, nil - } - - // append operation -- add the element to the Content - if !matchingElementFound { - rn.YNode().Content = append(rn.YNode().Content, e.Element) - } - - return NewRNode(e.Element), nil -} - -// GetElementByIndex will return a Filter which can be applied to a sequence -// node to get the element specified by the index -func GetElementByIndex(index int) ElementIndexer { - return ElementIndexer{Index: index} -} - -// ElementIndexer picks the element with a specified index. Index starts from -// 0 to len(list) - 1. a hyphen ("-") means the last index. -type ElementIndexer struct { - Index int -} - -// Filter implements Filter -func (i ElementIndexer) Filter(rn *RNode) (*RNode, error) { - // rn.Elements will return error if rn is not a sequence node. - elems, err := rn.Elements() - if err != nil { - return nil, err - } - if i.Index < 0 { - return elems[len(elems)-1], nil - } - if i.Index >= len(elems) { - return nil, nil - } - return elems[i.Index], nil -} - -// Clear returns a FieldClearer -func Clear(name string) FieldClearer { - return FieldClearer{Name: name} -} - -// FieldClearer removes the field or map key. -// Returns a RNode with the removed field or map entry. -type FieldClearer struct { - Kind string `yaml:"kind,omitempty"` - - // Name is the name of the field or key in the map. - Name string `yaml:"name,omitempty"` - - IfEmpty bool `yaml:"ifEmpty,omitempty"` -} - -func (c FieldClearer) Filter(rn *RNode) (*RNode, error) { - if err := ErrorIfInvalid(rn, yaml.MappingNode); err != nil { - return nil, err - } - - for i := 0; i < len(rn.Content()); i += 2 { - // if name matches, remove these 2 elements from the list because - // they are treated as a fieldName/fieldValue pair. - if rn.Content()[i].Value == c.Name { - if c.IfEmpty { - if len(rn.Content()[i+1].Content) > 0 { - continue - } - } - - // save the item we are about to remove - removed := NewRNode(rn.Content()[i+1]) - if len(rn.YNode().Content) > i+2 { - l := len(rn.YNode().Content) - // remove from the middle of the list - rn.YNode().Content = rn.Content()[:i] - rn.YNode().Content = append( - rn.YNode().Content, - rn.Content()[i+2:l]...) 
- } else { - // remove from the end of the list - rn.YNode().Content = rn.Content()[:i] - } - - // return the removed field name and value - return removed, nil - } - } - // nothing removed - return nil, nil -} - -func MatchElement(field, value string) ElementMatcher { - return ElementMatcher{Keys: []string{field}, Values: []string{value}} -} - -func MatchElementList(keys []string, values []string) ElementMatcher { - return ElementMatcher{Keys: keys, Values: values} -} - -func GetElementByKey(key string) ElementMatcher { - return ElementMatcher{Keys: []string{key}, MatchAnyValue: true} -} - -// ElementMatcher returns the first element from a Sequence matching the -// specified key-value pairs. If there's no match, and no configuration error, -// the matcher returns nil, nil. -type ElementMatcher struct { - Kind string `yaml:"kind,omitempty"` - - // Keys are the list of fields upon which to match this element. - Keys []string - - // Values are the list of values upon which to match this element. - Values []string - - // Create will create the Element if it is not found - Create *RNode `yaml:"create,omitempty"` - - // MatchAnyValue indicates that matcher should only consider the key and ignore - // the actual value in the list. Values must be empty when MatchAnyValue is - // set to true. - MatchAnyValue bool `yaml:"noValue,omitempty"` -} - -func (e ElementMatcher) Filter(rn *RNode) (*RNode, error) { - if len(e.Keys) == 0 { - e.Keys = append(e.Keys, "") - } - if len(e.Values) == 0 { - e.Values = append(e.Values, "") - } - - if err := ErrorIfInvalid(rn, yaml.SequenceNode); err != nil { - return nil, err - } - if e.MatchAnyValue && len(e.Values) != 0 && e.Values[0] != "" { - return nil, fmt.Errorf("Values must be empty when MatchAnyValue is set to true") - } - - // SequenceNode Content is a slice of ScalarNodes. Each ScalarNode has a - // YNode containing the primitive data. - if len(e.Keys) == 0 || len(e.Keys[0]) == 0 { - for i := range rn.Content() { - if rn.Content()[i].Value == e.Values[0] { - return &RNode{value: rn.Content()[i]}, nil - } - } - if e.Create != nil { - return rn.Pipe(Append(e.Create.YNode())) - } - return nil, nil - } - - // SequenceNode Content is a slice of MappingNodes. Each MappingNode has Content - // with a slice of key-value pairs containing the fields. 
- for i := range rn.Content() { - // cast the entry to a RNode so we can operate on it - elem := NewRNode(rn.Content()[i]) - var field *RNode - var err error - - // only check mapping node - if err = ErrorIfInvalid(elem, yaml.MappingNode); err != nil { - continue - } - - if !e.MatchAnyValue && len(e.Keys) != len(e.Values) { - return nil, fmt.Errorf("length of keys must equal length of values when MatchAnyValue is false") - } - - matchesElement := true - for i := range e.Keys { - if e.MatchAnyValue { - field, err = elem.Pipe(Get(e.Keys[i])) - } else { - field, err = elem.Pipe(MatchField(e.Keys[i], e.Values[i])) - } - if !IsFoundOrError(field, err) { - // this is not the element we are looking for - matchesElement = false - break - } - } - if matchesElement { - return elem, err - } - } - - // create the element - if e.Create != nil { - return rn.Pipe(Append(e.Create.YNode())) - } - - return nil, nil -} - -func Get(name string) FieldMatcher { - return FieldMatcher{Name: name} -} - -func MatchField(name, value string) FieldMatcher { - return FieldMatcher{Name: name, Value: NewScalarRNode(value)} -} - -func Match(value string) FieldMatcher { - return FieldMatcher{Value: NewScalarRNode(value)} -} - -// FieldMatcher returns the value of a named field or map entry. -type FieldMatcher struct { - Kind string `yaml:"kind,omitempty"` - - // Name of the field to return - Name string `yaml:"name,omitempty"` - - // YNode of the field to return. - // Optional. Will only need to match field name if unset. - Value *RNode `yaml:"value,omitempty"` - - StringValue string `yaml:"stringValue,omitempty"` - - StringRegexValue string `yaml:"stringRegexValue,omitempty"` - - // Create will cause the field to be created with this value - // if it is set. - Create *RNode `yaml:"create,omitempty"` -} - -func (f FieldMatcher) Filter(rn *RNode) (*RNode, error) { - if f.StringValue != "" && f.Value == nil { - f.Value = NewScalarRNode(f.StringValue) - } - - // never match nil or null fields - if IsMissingOrNull(rn) { - return nil, nil - } - - if f.Name == "" { - if err := ErrorIfInvalid(rn, yaml.ScalarNode); err != nil { - return nil, err - } - switch { - case f.StringRegexValue != "": - // TODO(pwittrock): pre-compile this when unmarshalling and cache to a field - rg, err := regexp.Compile(f.StringRegexValue) - if err != nil { - return nil, err - } - if match := rg.MatchString(rn.value.Value); match { - return rn, nil - } - return nil, nil - case GetValue(rn) == GetValue(f.Value): - return rn, nil - default: - return nil, nil - } - } - - if err := ErrorIfInvalid(rn, yaml.MappingNode); err != nil { - return nil, err - } - - for i := 0; i < len(rn.Content()); i = IncrementFieldIndex(i) { - isMatchingField := rn.Content()[i].Value == f.Name - if isMatchingField { - requireMatchFieldValue := f.Value != nil - if !requireMatchFieldValue || rn.Content()[i+1].Value == f.Value.YNode().Value { - return NewRNode(rn.Content()[i+1]), nil - } - } - } - - if f.Create != nil { - return rn.Pipe(SetField(f.Name, f.Create)) - } - - return nil, nil -} - -// Lookup returns a PathGetter to lookup a field by its path. -func Lookup(path ...string) PathGetter { - return PathGetter{Path: path} -} - -// LookupCreate returns a PathGetter to lookup a field by its path and create it if it doesn't already -// exist. -func LookupCreate(kind yaml.Kind, path ...string) PathGetter { - return PathGetter{Path: path, Create: kind} -} - -// ConventionalContainerPaths is a list of paths at which containers typically appear in workload APIs. 
-// It is intended for use with LookupFirstMatch. -var ConventionalContainerPaths = [][]string{ - // e.g. Deployment, ReplicaSet, DaemonSet, Job, StatefulSet - {"spec", "template", "spec", "containers"}, - // e.g. CronJob - {"spec", "jobTemplate", "spec", "template", "spec", "containers"}, - // e.g. Pod - {"spec", "containers"}, - // e.g. PodTemplate - {"template", "spec", "containers"}, -} - -// LookupFirstMatch returns a Filter for locating a value that may exist at one of several possible paths. -// For example, it can be used with ConventionalContainerPaths to find the containers field in a standard workload resource. -// If more than one of the paths exists in the resource, the first will be returned. If none exist, -// nil will be returned. If an error is encountered during lookup, it will be returned. -func LookupFirstMatch(paths [][]string) Filter { - return FilterFunc(func(object *RNode) (*RNode, error) { - var result *RNode - var err error - for _, path := range paths { - result, err = object.Pipe(PathGetter{Path: path}) - if err != nil { - return nil, errors.Wrap(err) - } - if result != nil { - return result, nil - } - } - return nil, nil - }) -} - -// PathGetter returns the RNode under Path. -type PathGetter struct { - Kind string `yaml:"kind,omitempty"` - - // Path is a slice of parts leading to the RNode to lookup. - // Each path part may be one of: - // * FieldMatcher -- e.g. "spec" - // * Map Key -- e.g. "app.k8s.io/version" - // * List Entry -- e.g. "[name=nginx]" or "[=-jar]" or "0" or "-" - // - // Map Keys and Fields are equivalent. - // See FieldMatcher for more on Fields and Map Keys. - // - // List Entries can be specified as map entry to match [fieldName=fieldValue] - // or a positional index like 0 to get the element. - (unquoted hyphen) is - // special and means the last element. - // - // See Elem for more on List Entries. - // - // Examples: - // * spec.template.spec.container with matching name: [name=nginx] - // * spec.template.spec.container.argument matching a value: [=-jar] - Path []string `yaml:"path,omitempty"` - - // Create will cause missing path parts to be created as they are walked. - // - // * The leaf Node (final path) will be created with a Kind matching Create - // * Intermediary Nodes will be created as either a MappingNodes or - // SequenceNodes as appropriate for each's Path location. - // * If a list item is specified by a index (an offset or "-"), this item will - // not be created even Create is set. - Create yaml.Kind `yaml:"create,omitempty"` - - // Style is the style to apply to created value Nodes. - // Created key Nodes keep an unspecified Style. - Style yaml.Style `yaml:"style,omitempty"` -} - -func (l PathGetter) Filter(rn *RNode) (*RNode, error) { - var err error - fieldPath := append([]string{}, rn.FieldPath()...) - match := rn - - // iterate over path until encountering an error or missing value - l.Path = cleanPath(l.Path) - for i := range l.Path { - var part, nextPart string - part = l.Path[i] - if len(l.Path) > i+1 { - nextPart = l.Path[i+1] - } - var fltr Filter - fltr, err = l.getFilter(part, nextPart, &fieldPath) - if err != nil { - return nil, err - } - match, err = match.Pipe(fltr) - if IsMissingOrError(match, err) { - return nil, err - } - match.AppendToFieldPath(fieldPath...) 
- } - return match, nil -} - -func (l PathGetter) getFilter(part, nextPart string, fieldPath *[]string) (Filter, error) { - idx, err := strconv.Atoi(part) - switch { - case err == nil: - // part is a number - if idx < 0 { - return nil, fmt.Errorf("array index %d cannot be negative", idx) - } - return GetElementByIndex(idx), nil - case part == "-": - // part is a hyphen - return GetElementByIndex(-1), nil - case part == "*": - // PathGetter is not support for wildcard matching - return nil, errors.Errorf("wildcard is not supported in PathGetter") - case IsListIndex(part): - // part is surrounded by brackets - return l.elemFilter(part) - default: - // mapping node - *fieldPath = append(*fieldPath, part) - return l.fieldFilter(part, l.getKind(nextPart)) - } -} - -func (l PathGetter) elemFilter(part string) (Filter, error) { - name, value, err := SplitIndexNameValue(part) - if err != nil { - return nil, errors.Wrap(err) - } - if !IsCreate(l.Create) { - return MatchElement(name, value), nil - } - - var elem *RNode - primitiveElement := len(name) == 0 - if primitiveElement { - // append a ScalarNode - elem = NewScalarRNode(value) - elem.YNode().Style = l.Style - } else { - // append a MappingNode - match := NewRNode(&yaml.Node{Kind: yaml.ScalarNode, Value: value, Style: l.Style}) - elem = NewRNode(&yaml.Node{ - Kind: yaml.MappingNode, - Content: []*yaml.Node{{Kind: yaml.ScalarNode, Value: name}, match.YNode()}, - Style: l.Style, - }) - } - // Append the Node - return ElementMatcher{Keys: []string{name}, Values: []string{value}, Create: elem}, nil -} - -func (l PathGetter) fieldFilter( - name string, kind yaml.Kind) (Filter, error) { - if !IsCreate(l.Create) { - return Get(name), nil - } - return FieldMatcher{Name: name, Create: &RNode{value: &yaml.Node{Kind: kind, Style: l.Style}}}, nil -} - -func (l PathGetter) getKind(nextPart string) yaml.Kind { - if IsListIndex(nextPart) { - // if nextPart is of the form [a=b], then it is an index into a Sequence - // so the current part must be a SequenceNode - return yaml.SequenceNode - } - if nextPart == "" { - // final name in the path, use the l.Create defined Kind - return l.Create - } - - // non-sequence intermediate Node - return yaml.MappingNode -} - -func SetField(name string, value *RNode) FieldSetter { - return FieldSetter{Name: name, Value: value} -} - -func Set(value *RNode) FieldSetter { - return FieldSetter{Value: value} -} - -// MapEntrySetter sets a map entry to a value. If it finds a key with the same -// value, it will override both Key and Value RNodes, including style and any -// other metadata. If it doesn't find the key, it will insert a new map entry. -// It will set the field, even if it's empty or nil, unlike the FieldSetter. -// This is useful for rebuilding some pre-existing RNode structure. -type MapEntrySetter struct { - // Name is the name of the field or key to lookup in a MappingNode. - // If Name is unspecified, it will use the Key's Value - Name string `yaml:"name,omitempty"` - - // Value is the value to set. - Value *RNode `yaml:"value,omitempty"` - - // Key is the map key to set. 
- Key *RNode `yaml:"key,omitempty"` -} - -func (s MapEntrySetter) Filter(rn *RNode) (*RNode, error) { - if rn == nil { - return nil, errors.Errorf("Can't set map entry on a nil RNode") - } - if err := ErrorIfInvalid(rn, yaml.MappingNode); err != nil { - return nil, err - } - if s.Name == "" { - s.Name = GetValue(s.Key) - } - for i := 0; i < len(rn.Content()); i = IncrementFieldIndex(i) { - isMatchingField := rn.Content()[i].Value == s.Name - if isMatchingField { - rn.Content()[i] = s.Key.YNode() - rn.Content()[i+1] = s.Value.YNode() - return rn, nil - } - } - - // create the field - rn.YNode().Content = append( - rn.YNode().Content, - s.Key.YNode(), - s.Value.YNode()) - return rn, nil -} - -// FieldSetter sets a field or map entry to a value. -type FieldSetter struct { - Kind string `yaml:"kind,omitempty"` - - // Name is the name of the field or key to lookup in a MappingNode. - // If Name is unspecified, and the input is a ScalarNode, FieldSetter will set the - // value on the ScalarNode. - Name string `yaml:"name,omitempty"` - - // Comments for the field - Comments Comments `yaml:"comments,omitempty"` - - // Value is the value to set. - // Optional if Kind is set. - Value *RNode `yaml:"value,omitempty"` - - StringValue string `yaml:"stringValue,omitempty"` - - // OverrideStyle can be set to override the style of the existing node - // when setting it. Otherwise, if an existing node is found, the style is - // retained. - OverrideStyle bool `yaml:"overrideStyle,omitempty"` -} - -func (s FieldSetter) Filter(rn *RNode) (*RNode, error) { - if s.StringValue != "" && s.Value == nil { - s.Value = NewScalarRNode(s.StringValue) - } - - // need to set style for strings not recognized by yaml 1.1 to quoted if not previously set - // TODO: fix in upstream yaml library so this can be handled with yaml SetString - if s.Value.IsStringValue() && !s.OverrideStyle && s.Value.YNode().Style == 0 && IsYaml1_1NonString(s.Value.YNode()) { - s.Value.YNode().Style = yaml.DoubleQuotedStyle - } - - if s.Name == "" { - if err := ErrorIfInvalid(rn, yaml.ScalarNode); err != nil { - return rn, err - } - if IsMissingOrNull(s.Value) { - return rn, nil - } - // only apply the style if there is not an existing style - // or we want to override it - if !s.OverrideStyle || s.Value.YNode().Style == 0 { - // keep the original style if it exists - s.Value.YNode().Style = rn.YNode().Style - } - rn.SetYNode(s.Value.YNode()) - return rn, nil - } - - // Clear the field if it is empty, or explicitly null - if s.Value == nil || s.Value.IsTaggedNull() { - return rn.Pipe(Clear(s.Name)) - } - - field, err := rn.Pipe(FieldMatcher{Name: s.Name}) - if err != nil { - return nil, err - } - if field != nil { - // only apply the style if there is not an existing style - // or we want to override it - if !s.OverrideStyle || field.YNode().Style == 0 { - // keep the original style if it exists - s.Value.YNode().Style = field.YNode().Style - } - // need to def ref the Node since field is ephemeral - field.SetYNode(s.Value.YNode()) - return field, nil - } - - // create the field - rn.YNode().Content = append( - rn.YNode().Content, - &yaml.Node{ - Kind: yaml.ScalarNode, - Value: s.Name, - HeadComment: s.Comments.HeadComment, - LineComment: s.Comments.LineComment, - FootComment: s.Comments.FootComment, - }, - s.Value.YNode()) - return s.Value, nil -} - -// Tee calls the provided Filters, and returns its argument rather than the result -// of the filters. -// May be used to fork sub-filters from a call. -// e.g. 
locate field, set value; locate another field, set another value -func Tee(filters ...Filter) Filter { - return TeePiper{Filters: filters} -} - -// TeePiper Calls a slice of Filters and returns its input. -// May be used to fork sub-filters from a call. -// e.g. locate field, set value; locate another field, set another value -type TeePiper struct { - Kind string `yaml:"kind,omitempty"` - - // Filters are the set of Filters run by TeePiper. - Filters []Filter `yaml:"filters,omitempty"` -} - -func (t TeePiper) Filter(rn *RNode) (*RNode, error) { - _, err := rn.Pipe(t.Filters...) - return rn, err -} - -// IsCreate returns true if kind is specified -func IsCreate(kind yaml.Kind) bool { - return kind != 0 -} - -// IsMissingOrError returns true if rn is NOT found or err is non-nil -func IsMissingOrError(rn *RNode, err error) bool { - return rn == nil || err != nil -} - -// IsFoundOrError returns true if rn is found or err is non-nil -func IsFoundOrError(rn *RNode, err error) bool { - return rn != nil || err != nil -} - -func ErrorIfAnyInvalidAndNonNull(kind yaml.Kind, rn ...*RNode) error { - for i := range rn { - if IsMissingOrNull(rn[i]) { - continue - } - if err := ErrorIfInvalid(rn[i], kind); err != nil { - return err - } - } - return nil -} - -var nodeTypeIndex = map[yaml.Kind]string{ - yaml.SequenceNode: "SequenceNode", - yaml.MappingNode: "MappingNode", - yaml.ScalarNode: "ScalarNode", - yaml.DocumentNode: "DocumentNode", - yaml.AliasNode: "AliasNode", -} - -func ErrorIfInvalid(rn *RNode, kind yaml.Kind) error { - if IsMissingOrNull(rn) { - // node has no type, pass validation - return nil - } - - if rn.YNode().Kind != kind { - s, _ := rn.String() - return errors.Errorf( - "wrong Node Kind for %s expected: %v was %v: value: {%s}", - strings.Join(rn.FieldPath(), "."), - nodeTypeIndex[kind], nodeTypeIndex[rn.YNode().Kind], strings.TrimSpace(s)) - } - - if kind == yaml.MappingNode { - if len(rn.YNode().Content)%2 != 0 { - return errors.Errorf( - "yaml MappingNodes must have even length contents: %v", spew.Sdump(rn)) - } - } - - return nil -} - -// IsListIndex returns true if p is an index into a Val. -// e.g. [fieldName=fieldValue] -// e.g. [=primitiveValue] -func IsListIndex(p string) bool { - return strings.HasPrefix(p, "[") && strings.HasSuffix(p, "]") -} - -// IsIdxNumber returns true if p is an index number. -// e.g. 1 -func IsIdxNumber(p string) bool { - idx, err := strconv.Atoi(p) - return err == nil && idx >= 0 -} - -// IsWildcard returns true if p is matching every elements. -// e.g. "*" -func IsWildcard(p string) bool { - return p == "*" -} - -// SplitIndexNameValue splits a lookup part Val index into the field name -// and field value to match. -// e.g. splits [name=nginx] into (name, nginx) -// e.g. splits [=-jar] into ("", -jar) -func SplitIndexNameValue(p string) (string, string, error) { - elem := strings.TrimSuffix(p, "]") - elem = strings.TrimPrefix(elem, "[") - parts := strings.SplitN(elem, "=", 2) - if len(parts) == 1 { - return "", "", fmt.Errorf("list path element must contain fieldName=fieldValue for element to match") - } - return parts[0], parts[1], nil -} - -// IncrementFieldIndex increments i to point to the next field name element in -// a slice of Contents. 
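FieldSetter and the path filters above are normally chained through Pipe, with each filter applied to the result of the previous one. A small sketch, assuming the same kyaml yaml package API shown in this file (LookupCreate, SetField, NewScalarRNode):

package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/kyaml/yaml"
)

func main() {
	rn, err := yaml.Parse("apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: nginx\n")
	if err != nil {
		panic(err)
	}
	// Create spec if missing, then set spec.replicas on the returned node.
	err = rn.PipeE(
		yaml.LookupCreate(yaml.MappingNode, "spec"),
		yaml.SetField("replicas", yaml.NewScalarRNode("3")),
	)
	if err != nil {
		panic(err)
	}
	s, _ := rn.String()
	fmt.Print(s)
}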
-func IncrementFieldIndex(i int) int { - return i + 2 -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels/copied.deepcopy.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels/copied.deepcopy.go deleted file mode 100644 index 52f32be88..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels/copied.deepcopy.go +++ /dev/null @@ -1,44 +0,0 @@ -// Code generated by k8scopy from k8s.io/apimachinery@v0.19.8; DO NOT EDIT. -// File content copied from k8s.io/apimachinery@v0.19.8/pkg/labels/zz_generated.deepcopy.go - -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package labels - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Requirement) DeepCopyInto(out *Requirement) { - *out = *in - if in.strValues != nil { - in, out := &in.strValues, &out.strValues - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Requirement. -func (in *Requirement) DeepCopy() *Requirement { - if in == nil { - return nil - } - out := new(Requirement) - in.DeepCopyInto(out) - return out -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels/labels.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels/labels.go deleted file mode 100644 index 300014eac..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels/labels.go +++ /dev/null @@ -1,192 +0,0 @@ -// Code generated by k8scopy from k8s.io/apimachinery@v0.19.8; DO NOT EDIT. -// File content copied from k8s.io/apimachinery@v0.19.8/pkg/labels/labels.go - -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package labels - -import ( - "fmt" - "sort" - "strings" -) - -// Labels allows you to present labels independently from their storage. -type Labels interface { - // Has returns whether the provided label exists. - Has(label string) (exists bool) - - // Get returns the value for the provided label. - Get(label string) (value string) -} - -// Set is a map of label:value. It implements Labels. -type Set map[string]string - -// String returns all labels listed as a human readable string. -// Conveniently, exactly the format that ParseSelector takes. 
-func (ls Set) String() string { - selector := make([]string, 0, len(ls)) - for key, value := range ls { - selector = append(selector, key+"="+value) - } - // Sort for determinism. - sort.StringSlice(selector).Sort() - return strings.Join(selector, ",") -} - -// Has returns whether the provided label exists in the map. -func (ls Set) Has(label string) bool { - _, exists := ls[label] - return exists -} - -// Get returns the value in the map for the provided label. -func (ls Set) Get(label string) string { - return ls[label] -} - -// AsSelector converts labels into a selectors. It does not -// perform any validation, which means the server will reject -// the request if the Set contains invalid values. -func (ls Set) AsSelector() Selector { - return SelectorFromSet(ls) -} - -// AsValidatedSelector converts labels into a selectors. -// The Set is validated client-side, which allows to catch errors early. -func (ls Set) AsValidatedSelector() (Selector, error) { - return ValidatedSelectorFromSet(ls) -} - -// AsSelectorPreValidated converts labels into a selector, but -// assumes that labels are already validated and thus doesn't -// perform any validation. -// According to our measurements this is significantly faster -// in codepaths that matter at high scale. -func (ls Set) AsSelectorPreValidated() Selector { - return SelectorFromValidatedSet(ls) -} - -// FormatLabels convert label map into plain string -func FormatLabels(labelMap map[string]string) string { - l := Set(labelMap).String() - if l == "" { - l = "" - } - return l -} - -// Conflicts takes 2 maps and returns true if there a key match between -// the maps but the value doesn't match, and returns false in other cases -func Conflicts(labels1, labels2 Set) bool { - small := labels1 - big := labels2 - if len(labels2) < len(labels1) { - small = labels2 - big = labels1 - } - - for k, v := range small { - if val, match := big[k]; match { - if val != v { - return true - } - } - } - - return false -} - -// Merge combines given maps, and does not check for any conflicts -// between the maps. In case of conflicts, second map (labels2) wins -func Merge(labels1, labels2 Set) Set { - mergedMap := Set{} - - for k, v := range labels1 { - mergedMap[k] = v - } - for k, v := range labels2 { - mergedMap[k] = v - } - return mergedMap -} - -// Equals returns true if the given maps are equal -func Equals(labels1, labels2 Set) bool { - if len(labels1) != len(labels2) { - return false - } - - for k, v := range labels1 { - value, ok := labels2[k] - if !ok { - return false - } - if value != v { - return false - } - } - return true -} - -// AreLabelsInWhiteList verifies if the provided label list -// is in the provided whitelist and returns true, otherwise false. 
-func AreLabelsInWhiteList(labels, whitelist Set) bool { - if len(whitelist) == 0 { - return true - } - - for k, v := range labels { - value, ok := whitelist[k] - if !ok { - return false - } - if value != v { - return false - } - } - return true -} - -// ConvertSelectorToLabelsMap converts selector string to labels map -// and validates keys and values -func ConvertSelectorToLabelsMap(selector string) (Set, error) { - labelsMap := Set{} - - if len(selector) == 0 { - return labelsMap, nil - } - - labels := strings.Split(selector, ",") - for _, label := range labels { - l := strings.Split(label, "=") - if len(l) != 2 { - return labelsMap, fmt.Errorf("invalid selector: %s", l) - } - key := strings.TrimSpace(l[0]) - if err := validateLabelKey(key); err != nil { - return labelsMap, err - } - value := strings.TrimSpace(l[1]) - if err := validateLabelValue(key, value); err != nil { - return labelsMap, err - } - labelsMap[key] = value - } - return labelsMap, nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels/selector.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels/selector.go deleted file mode 100644 index 73c5ae6a6..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels/selector.go +++ /dev/null @@ -1,925 +0,0 @@ -// Code generated by k8scopy from k8s.io/apimachinery@v0.19.8; DO NOT EDIT. -// File content copied from k8s.io/apimachinery@v0.19.8/pkg/labels/selector.go - -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package labels - -import ( - "bytes" - "fmt" - "sort" - "strconv" - "strings" - - "log" - "sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/selection" - "sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets" - "sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation" -) - -// Requirements is AND of all requirements. -type Requirements []Requirement - -// Selector represents a label selector. -type Selector interface { - // Matches returns true if this selector matches the given set of labels. - Matches(Labels) bool - - // Empty returns true if this selector does not restrict the selection space. - Empty() bool - - // String returns a human readable string that represents this selector. - String() string - - // Add adds requirements to the Selector - Add(r ...Requirement) Selector - - // Requirements converts this interface into Requirements to expose - // more detailed selection information. - // If there are querying parameters, it will return converted requirements and selectable=true. - // If this selector doesn't want to select anything, it will return selectable=false. - Requirements() (requirements Requirements, selectable bool) - - // Make a deep copy of the selector. - DeepCopySelector() Selector - - // RequiresExactMatch allows a caller to introspect whether a given selector - // requires a single specific label to be set, and if so returns the value it - // requires. 
- RequiresExactMatch(label string) (value string, found bool) -} - -// Everything returns a selector that matches all labels. -func Everything() Selector { - return internalSelector{} -} - -type nothingSelector struct{} - -func (n nothingSelector) Matches(_ Labels) bool { return false } -func (n nothingSelector) Empty() bool { return false } -func (n nothingSelector) String() string { return "" } -func (n nothingSelector) Add(_ ...Requirement) Selector { return n } -func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false } -func (n nothingSelector) DeepCopySelector() Selector { return n } -func (n nothingSelector) RequiresExactMatch(label string) (value string, found bool) { - return "", false -} - -// Nothing returns a selector that matches no labels -func Nothing() Selector { - return nothingSelector{} -} - -// NewSelector returns a nil selector -func NewSelector() Selector { - return internalSelector(nil) -} - -type internalSelector []Requirement - -func (s internalSelector) DeepCopy() internalSelector { - if s == nil { - return nil - } - result := make([]Requirement, len(s)) - for i := range s { - s[i].DeepCopyInto(&result[i]) - } - return result -} - -func (s internalSelector) DeepCopySelector() Selector { - return s.DeepCopy() -} - -// ByKey sorts requirements by key to obtain deterministic parser -type ByKey []Requirement - -func (a ByKey) Len() int { return len(a) } - -func (a ByKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -func (a ByKey) Less(i, j int) bool { return a[i].key < a[j].key } - -// Requirement contains values, a key, and an operator that relates the key and values. -// The zero value of Requirement is invalid. -// Requirement implements both set based match and exact match -// Requirement should be initialized via NewRequirement constructor for creating a valid Requirement. -type Requirement struct { - key string - operator selection.Operator - // In huge majority of cases we have at most one value here. - // It is generally faster to operate on a single-element slice - // than on a single-element map, so we have a slice here. - strValues []string -} - -// NewRequirement is the constructor for a Requirement. -// If any of these rules is violated, an error is returned: -// (1) The operator can only be In, NotIn, Equals, DoubleEquals, NotEquals, Exists, or DoesNotExist. -// (2) If the operator is In or NotIn, the values set must be non-empty. -// (3) If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value. -// (4) If the operator is Exists or DoesNotExist, the value set must be empty. -// (5) If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer. -// (6) The key is invalid due to its length, or sequence -// of characters. See validateLabelKey for more details. -// -// The empty string is a valid value in the input values set. 
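NewRequirement enforces the operator/value rules listed above: In/NotIn need a non-empty value set, Exists/DoesNotExist forbid values, and the equality-style operators take exactly one value, with keys and values validated as qualified names and label values. A minimal sketch against the upstream k8s.io/apimachinery packages this vendored copy was generated from:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/selection"
)

func main() {
    // In/NotIn require a non-empty value set.
    if _, err := labels.NewRequirement("env", selection.In, nil); err != nil {
        fmt.Println("rejected:", err)
    }

    // Exists/DoesNotExist must carry no values.
    if _, err := labels.NewRequirement("env", selection.Exists, []string{"prod"}); err != nil {
        fmt.Println("rejected:", err)
    }

    // Equals takes exactly one value and passes key/value validation.
    r, err := labels.NewRequirement("env", selection.Equals, []string{"prod"})
    fmt.Println(r, err) // env=prod <nil>
}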
-func NewRequirement(key string, op selection.Operator, vals []string) (*Requirement, error) { - if err := validateLabelKey(key); err != nil { - return nil, err - } - switch op { - case selection.In, selection.NotIn: - if len(vals) == 0 { - return nil, fmt.Errorf("for 'in', 'notin' operators, values set can't be empty") - } - case selection.Equals, selection.DoubleEquals, selection.NotEquals: - if len(vals) != 1 { - return nil, fmt.Errorf("exact-match compatibility requires one single value") - } - case selection.Exists, selection.DoesNotExist: - if len(vals) != 0 { - return nil, fmt.Errorf("values set must be empty for exists and does not exist") - } - case selection.GreaterThan, selection.LessThan: - if len(vals) != 1 { - return nil, fmt.Errorf("for 'Gt', 'Lt' operators, exactly one value is required") - } - for i := range vals { - if _, err := strconv.ParseInt(vals[i], 10, 64); err != nil { - return nil, fmt.Errorf("for 'Gt', 'Lt' operators, the value must be an integer") - } - } - default: - return nil, fmt.Errorf("operator '%v' is not recognized", op) - } - - for i := range vals { - if err := validateLabelValue(key, vals[i]); err != nil { - return nil, err - } - } - return &Requirement{key: key, operator: op, strValues: vals}, nil -} - -func (r *Requirement) hasValue(value string) bool { - for i := range r.strValues { - if r.strValues[i] == value { - return true - } - } - return false -} - -// Matches returns true if the Requirement matches the input Labels. -// There is a match in the following cases: -// (1) The operator is Exists and Labels has the Requirement's key. -// (2) The operator is In, Labels has the Requirement's key and Labels' -// value for that key is in Requirement's value set. -// (3) The operator is NotIn, Labels has the Requirement's key and -// Labels' value for that key is not in Requirement's value set. -// (4) The operator is DoesNotExist or NotIn and Labels does not have the -// Requirement's key. -// (5) The operator is GreaterThanOperator or LessThanOperator, and Labels has -// the Requirement's key and the corresponding value satisfies mathematical inequality. -func (r *Requirement) Matches(ls Labels) bool { - switch r.operator { - case selection.In, selection.Equals, selection.DoubleEquals: - if !ls.Has(r.key) { - return false - } - return r.hasValue(ls.Get(r.key)) - case selection.NotIn, selection.NotEquals: - if !ls.Has(r.key) { - return true - } - return !r.hasValue(ls.Get(r.key)) - case selection.Exists: - return ls.Has(r.key) - case selection.DoesNotExist: - return !ls.Has(r.key) - case selection.GreaterThan, selection.LessThan: - if !ls.Has(r.key) { - return false - } - lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64) - if err != nil { - log.Printf("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) - return false - } - - // There should be only one strValue in r.strValues, and can be converted to an integer. 
- if len(r.strValues) != 1 { - log.Printf("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) - return false - } - - var rValue int64 - for i := range r.strValues { - rValue, err = strconv.ParseInt(r.strValues[i], 10, 64) - if err != nil { - log.Printf("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r) - return false - } - } - return (r.operator == selection.GreaterThan && lsValue > rValue) || (r.operator == selection.LessThan && lsValue < rValue) - default: - return false - } -} - -// Key returns requirement key -func (r *Requirement) Key() string { - return r.key -} - -// Operator returns requirement operator -func (r *Requirement) Operator() selection.Operator { - return r.operator -} - -// Values returns requirement values -func (r *Requirement) Values() sets.String { - ret := sets.String{} - for i := range r.strValues { - ret.Insert(r.strValues[i]) - } - return ret -} - -// Empty returns true if the internalSelector doesn't restrict selection space -func (lsel internalSelector) Empty() bool { - if lsel == nil { - return true - } - return len(lsel) == 0 -} - -// String returns a human-readable string that represents this -// Requirement. If called on an invalid Requirement, an error is -// returned. See NewRequirement for creating a valid Requirement. -func (r *Requirement) String() string { - var buffer bytes.Buffer - if r.operator == selection.DoesNotExist { - buffer.WriteString("!") - } - buffer.WriteString(r.key) - - switch r.operator { - case selection.Equals: - buffer.WriteString("=") - case selection.DoubleEquals: - buffer.WriteString("==") - case selection.NotEquals: - buffer.WriteString("!=") - case selection.In: - buffer.WriteString(" in ") - case selection.NotIn: - buffer.WriteString(" notin ") - case selection.GreaterThan: - buffer.WriteString(">") - case selection.LessThan: - buffer.WriteString("<") - case selection.Exists, selection.DoesNotExist: - return buffer.String() - } - - switch r.operator { - case selection.In, selection.NotIn: - buffer.WriteString("(") - } - if len(r.strValues) == 1 { - buffer.WriteString(r.strValues[0]) - } else { // only > 1 since == 0 prohibited by NewRequirement - // normalizes value order on output, without mutating the in-memory selector representation - // also avoids normalization when it is not required, and ensures we do not mutate shared data - buffer.WriteString(strings.Join(safeSort(r.strValues), ",")) - } - - switch r.operator { - case selection.In, selection.NotIn: - buffer.WriteString(")") - } - return buffer.String() -} - -// safeSort sort input strings without modification -func safeSort(in []string) []string { - if sort.StringsAreSorted(in) { - return in - } - out := make([]string, len(in)) - copy(out, in) - sort.Strings(out) - return out -} - -// Add adds requirements to the selector. It copies the current selector returning a new one -func (lsel internalSelector) Add(reqs ...Requirement) Selector { - var sel internalSelector - for ix := range lsel { - sel = append(sel, lsel[ix]) - } - for _, r := range reqs { - sel = append(sel, r) - } - sort.Sort(ByKey(sel)) - return sel -} - -// Matches for a internalSelector returns true if all -// its Requirements match the input Labels. If any -// Requirement does not match, false is returned. 
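Matches applies the per-operator rules documented above: equality-style operators require the key to be present, NotIn and DoesNotExist succeed when the key is missing, and Gt/Lt compare the label value as an integer. A sketch of that behaviour, again using the upstream package rather than the internal copy being deleted:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/selection"
)

func main() {
    pod := labels.Set{"app": "web", "replicas": "5"}

    gt, _ := labels.NewRequirement("replicas", selection.GreaterThan, []string{"3"})
    fmt.Println(gt.Matches(pod)) // true: both sides are parsed as integers, 5 > 3

    notIn, _ := labels.NewRequirement("env", selection.NotIn, []string{"prod"})
    fmt.Println(notIn.Matches(pod)) // true: a missing key satisfies NotIn

    absent, _ := labels.NewRequirement("debug", selection.DoesNotExist, nil)
    fmt.Println(absent.Matches(pod)) // true
}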
-func (lsel internalSelector) Matches(l Labels) bool { - for ix := range lsel { - if matches := lsel[ix].Matches(l); !matches { - return false - } - } - return true -} - -func (lsel internalSelector) Requirements() (Requirements, bool) { return Requirements(lsel), true } - -// String returns a comma-separated string of all -// the internalSelector Requirements' human-readable strings. -func (lsel internalSelector) String() string { - var reqs []string - for ix := range lsel { - reqs = append(reqs, lsel[ix].String()) - } - return strings.Join(reqs, ",") -} - -// RequiresExactMatch introspect whether a given selector requires a single specific field -// to be set, and if so returns the value it requires. -func (lsel internalSelector) RequiresExactMatch(label string) (value string, found bool) { - for ix := range lsel { - if lsel[ix].key == label { - switch lsel[ix].operator { - case selection.Equals, selection.DoubleEquals, selection.In: - if len(lsel[ix].strValues) == 1 { - return lsel[ix].strValues[0], true - } - } - return "", false - } - } - return "", false -} - -// Token represents constant definition for lexer token -type Token int - -const ( - // ErrorToken represents scan error - ErrorToken Token = iota - // EndOfStringToken represents end of string - EndOfStringToken - // ClosedParToken represents close parenthesis - ClosedParToken - // CommaToken represents the comma - CommaToken - // DoesNotExistToken represents logic not - DoesNotExistToken - // DoubleEqualsToken represents double equals - DoubleEqualsToken - // EqualsToken represents equal - EqualsToken - // GreaterThanToken represents greater than - GreaterThanToken - // IdentifierToken represents identifier, e.g. keys and values - IdentifierToken - // InToken represents in - InToken - // LessThanToken represents less than - LessThanToken - // NotEqualsToken represents not equal - NotEqualsToken - // NotInToken represents not in - NotInToken - // OpenParToken represents open parenthesis - OpenParToken -) - -// string2token contains the mapping between lexer Token and token literal -// (except IdentifierToken, EndOfStringToken and ErrorToken since it makes no sense) -var string2token = map[string]Token{ - ")": ClosedParToken, - ",": CommaToken, - "!": DoesNotExistToken, - "==": DoubleEqualsToken, - "=": EqualsToken, - ">": GreaterThanToken, - "in": InToken, - "<": LessThanToken, - "!=": NotEqualsToken, - "notin": NotInToken, - "(": OpenParToken, -} - -// ScannedItem contains the Token and the literal produced by the lexer. -type ScannedItem struct { - tok Token - literal string -} - -// isWhitespace returns true if the rune is a space, tab, or newline. -func isWhitespace(ch byte) bool { - return ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n' -} - -// isSpecialSymbol detect if the character ch can be an operator -func isSpecialSymbol(ch byte) bool { - switch ch { - case '=', '!', '(', ')', ',', '>', '<': - return true - } - return false -} - -// Lexer represents the Lexer struct for label selector. 
-// It contains necessary informationt to tokenize the input string -type Lexer struct { - // s stores the string to be tokenized - s string - // pos is the position currently tokenized - pos int -} - -// read return the character currently lexed -// increment the position and check the buffer overflow -func (l *Lexer) read() (b byte) { - b = 0 - if l.pos < len(l.s) { - b = l.s[l.pos] - l.pos++ - } - return b -} - -// unread 'undoes' the last read character -func (l *Lexer) unread() { - l.pos-- -} - -// scanIDOrKeyword scans string to recognize literal token (for example 'in') or an identifier. -func (l *Lexer) scanIDOrKeyword() (tok Token, lit string) { - var buffer []byte -IdentifierLoop: - for { - switch ch := l.read(); { - case ch == 0: - break IdentifierLoop - case isSpecialSymbol(ch) || isWhitespace(ch): - l.unread() - break IdentifierLoop - default: - buffer = append(buffer, ch) - } - } - s := string(buffer) - if val, ok := string2token[s]; ok { // is a literal token? - return val, s - } - return IdentifierToken, s // otherwise is an identifier -} - -// scanSpecialSymbol scans string starting with special symbol. -// special symbol identify non literal operators. "!=", "==", "=" -func (l *Lexer) scanSpecialSymbol() (Token, string) { - lastScannedItem := ScannedItem{} - var buffer []byte -SpecialSymbolLoop: - for { - switch ch := l.read(); { - case ch == 0: - break SpecialSymbolLoop - case isSpecialSymbol(ch): - buffer = append(buffer, ch) - if token, ok := string2token[string(buffer)]; ok { - lastScannedItem = ScannedItem{tok: token, literal: string(buffer)} - } else if lastScannedItem.tok != 0 { - l.unread() - break SpecialSymbolLoop - } - default: - l.unread() - break SpecialSymbolLoop - } - } - if lastScannedItem.tok == 0 { - return ErrorToken, fmt.Sprintf("error expected: keyword found '%s'", buffer) - } - return lastScannedItem.tok, lastScannedItem.literal -} - -// skipWhiteSpaces consumes all blank characters -// returning the first non blank character -func (l *Lexer) skipWhiteSpaces(ch byte) byte { - for { - if !isWhitespace(ch) { - return ch - } - ch = l.read() - } -} - -// Lex returns a pair of Token and the literal -// literal is meaningfull only for IdentifierToken token -func (l *Lexer) Lex() (tok Token, lit string) { - switch ch := l.skipWhiteSpaces(l.read()); { - case ch == 0: - return EndOfStringToken, "" - case isSpecialSymbol(ch): - l.unread() - return l.scanSpecialSymbol() - default: - l.unread() - return l.scanIDOrKeyword() - } -} - -// Parser data structure contains the label selector parser data structure -type Parser struct { - l *Lexer - scannedItems []ScannedItem - position int -} - -// ParserContext represents context during parsing: -// some literal for example 'in' and 'notin' can be -// recognized as operator for example 'x in (a)' but -// it can be recognized as value for example 'value in (in)' -type ParserContext int - -const ( - // KeyAndOperator represents key and operator - KeyAndOperator ParserContext = iota - // Values represents values - Values -) - -// lookahead func returns the current token and string. No increment of current position -func (p *Parser) lookahead(context ParserContext) (Token, string) { - tok, lit := p.scannedItems[p.position].tok, p.scannedItems[p.position].literal - if context == Values { - switch tok { - case InToken, NotInToken: - tok = IdentifierToken - } - } - return tok, lit -} - -// consume returns current token and string. 
Increments the position -func (p *Parser) consume(context ParserContext) (Token, string) { - p.position++ - tok, lit := p.scannedItems[p.position-1].tok, p.scannedItems[p.position-1].literal - if context == Values { - switch tok { - case InToken, NotInToken: - tok = IdentifierToken - } - } - return tok, lit -} - -// scan runs through the input string and stores the ScannedItem in an array -// Parser can now lookahead and consume the tokens -func (p *Parser) scan() { - for { - token, literal := p.l.Lex() - p.scannedItems = append(p.scannedItems, ScannedItem{token, literal}) - if token == EndOfStringToken { - break - } - } -} - -// parse runs the left recursive descending algorithm -// on input string. It returns a list of Requirement objects. -func (p *Parser) parse() (internalSelector, error) { - p.scan() // init scannedItems - - var requirements internalSelector - for { - tok, lit := p.lookahead(Values) - switch tok { - case IdentifierToken, DoesNotExistToken: - r, err := p.parseRequirement() - if err != nil { - return nil, fmt.Errorf("unable to parse requirement: %v", err) - } - requirements = append(requirements, *r) - t, l := p.consume(Values) - switch t { - case EndOfStringToken: - return requirements, nil - case CommaToken: - t2, l2 := p.lookahead(Values) - if t2 != IdentifierToken && t2 != DoesNotExistToken { - return nil, fmt.Errorf("found '%s', expected: identifier after ','", l2) - } - default: - return nil, fmt.Errorf("found '%s', expected: ',' or 'end of string'", l) - } - case EndOfStringToken: - return requirements, nil - default: - return nil, fmt.Errorf("found '%s', expected: !, identifier, or 'end of string'", lit) - } - } -} - -func (p *Parser) parseRequirement() (*Requirement, error) { - key, operator, err := p.parseKeyAndInferOperator() - if err != nil { - return nil, err - } - if operator == selection.Exists || operator == selection.DoesNotExist { // operator found lookahead set checked - return NewRequirement(key, operator, []string{}) - } - operator, err = p.parseOperator() - if err != nil { - return nil, err - } - var values sets.String - switch operator { - case selection.In, selection.NotIn: - values, err = p.parseValues() - case selection.Equals, selection.DoubleEquals, selection.NotEquals, selection.GreaterThan, selection.LessThan: - values, err = p.parseExactValue() - } - if err != nil { - return nil, err - } - return NewRequirement(key, operator, values.List()) - -} - -// parseKeyAndInferOperator parse literals. 
-// in case of no operator '!, in, notin, ==, =, !=' are found -// the 'exists' operator is inferred -func (p *Parser) parseKeyAndInferOperator() (string, selection.Operator, error) { - var operator selection.Operator - tok, literal := p.consume(Values) - if tok == DoesNotExistToken { - operator = selection.DoesNotExist - tok, literal = p.consume(Values) - } - if tok != IdentifierToken { - err := fmt.Errorf("found '%s', expected: identifier", literal) - return "", "", err - } - if err := validateLabelKey(literal); err != nil { - return "", "", err - } - if t, _ := p.lookahead(Values); t == EndOfStringToken || t == CommaToken { - if operator != selection.DoesNotExist { - operator = selection.Exists - } - } - return literal, operator, nil -} - -// parseOperator return operator and eventually matchType -// matchType can be exact -func (p *Parser) parseOperator() (op selection.Operator, err error) { - tok, lit := p.consume(KeyAndOperator) - switch tok { - // DoesNotExistToken shouldn't be here because it's a unary operator, not a binary operator - case InToken: - op = selection.In - case EqualsToken: - op = selection.Equals - case DoubleEqualsToken: - op = selection.DoubleEquals - case GreaterThanToken: - op = selection.GreaterThan - case LessThanToken: - op = selection.LessThan - case NotInToken: - op = selection.NotIn - case NotEqualsToken: - op = selection.NotEquals - default: - return "", fmt.Errorf("found '%s', expected: '=', '!=', '==', 'in', notin'", lit) - } - return op, nil -} - -// parseValues parses the values for set based matching (x,y,z) -func (p *Parser) parseValues() (sets.String, error) { - tok, lit := p.consume(Values) - if tok != OpenParToken { - return nil, fmt.Errorf("found '%s' expected: '('", lit) - } - tok, lit = p.lookahead(Values) - switch tok { - case IdentifierToken, CommaToken: - s, err := p.parseIdentifiersList() // handles general cases - if err != nil { - return s, err - } - if tok, _ = p.consume(Values); tok != ClosedParToken { - return nil, fmt.Errorf("found '%s', expected: ')'", lit) - } - return s, nil - case ClosedParToken: // handles "()" - p.consume(Values) - return sets.NewString(""), nil - default: - return nil, fmt.Errorf("found '%s', expected: ',', ')' or identifier", lit) - } -} - -// parseIdentifiersList parses a (possibly empty) list of -// of comma separated (possibly empty) identifiers -func (p *Parser) parseIdentifiersList() (sets.String, error) { - s := sets.NewString() - for { - tok, lit := p.consume(Values) - switch tok { - case IdentifierToken: - s.Insert(lit) - tok2, lit2 := p.lookahead(Values) - switch tok2 { - case CommaToken: - continue - case ClosedParToken: - return s, nil - default: - return nil, fmt.Errorf("found '%s', expected: ',' or ')'", lit2) - } - case CommaToken: // handled here since we can have "(," - if s.Len() == 0 { - s.Insert("") // to handle (, - } - tok2, _ := p.lookahead(Values) - if tok2 == ClosedParToken { - s.Insert("") // to handle ,) Double "" removed by StringSet - return s, nil - } - if tok2 == CommaToken { - p.consume(Values) - s.Insert("") // to handle ,, Double "" removed by StringSet - } - default: // it can be operator - return s, fmt.Errorf("found '%s', expected: ',', or identifier", lit) - } - } -} - -// parseExactValue parses the only value for exact match style -func (p *Parser) parseExactValue() (sets.String, error) { - s := sets.NewString() - tok, lit := p.lookahead(Values) - if tok == EndOfStringToken || tok == CommaToken { - s.Insert("") - return s, nil - } - tok, lit = p.consume(Values) - if tok 
== IdentifierToken { - s.Insert(lit) - return s, nil - } - return nil, fmt.Errorf("found '%s', expected: identifier", lit) -} - -// Parse takes a string representing a selector and returns a selector -// object, or an error. This parsing function differs from ParseSelector -// as they parse different selectors with different syntaxes. -// The input will cause an error if it does not follow this form: -// -// ::= | "," -// ::= [!] KEY [ | ] -// ::= "" | -// ::= | -// ::= "notin" -// ::= "in" -// ::= "(" ")" -// ::= VALUE | VALUE "," -// ::= ["="|"=="|"!="] VALUE -// -// KEY is a sequence of one or more characters following [ DNS_SUBDOMAIN "/" ] DNS_LABEL. Max length is 63 characters. -// VALUE is a sequence of zero or more characters "([A-Za-z0-9_-\.])". Max length is 63 characters. -// Delimiter is white space: (' ', '\t') -// Example of valid syntax: -// "x in (foo,,baz),y,z notin ()" -// -// Note: -// (1) Inclusion - " in " - denotes that the KEY exists and is equal to any of the -// VALUEs in its requirement -// (2) Exclusion - " notin " - denotes that the KEY is not equal to any -// of the VALUEs in its requirement or does not exist -// (3) The empty string is a valid VALUE -// (4) A requirement with just a KEY - as in "y" above - denotes that -// the KEY exists and can be any VALUE. -// (5) A requirement with just !KEY requires that the KEY not exist. -// -func Parse(selector string) (Selector, error) { - parsedSelector, err := parse(selector) - if err == nil { - return parsedSelector, nil - } - return nil, err -} - -// parse parses the string representation of the selector and returns the internalSelector struct. -// The callers of this method can then decide how to return the internalSelector struct to their -// callers. This function has two callers now, one returns a Selector interface and the other -// returns a list of requirements. -func parse(selector string) (internalSelector, error) { - p := &Parser{l: &Lexer{s: selector, pos: 0}} - items, err := p.parse() - if err != nil { - return nil, err - } - sort.Sort(ByKey(items)) // sort to grant determistic parsing - return internalSelector(items), err -} - -func validateLabelKey(k string) error { - if errs := validation.IsQualifiedName(k); len(errs) != 0 { - return fmt.Errorf("invalid label key %q: %s", k, strings.Join(errs, "; ")) - } - return nil -} - -func validateLabelValue(k, v string) error { - if errs := validation.IsValidLabelValue(v); len(errs) != 0 { - return fmt.Errorf("invalid label value: %q: at key: %q: %s", v, k, strings.Join(errs, "; ")) - } - return nil -} - -// SelectorFromSet returns a Selector which will match exactly the given Set. A -// nil and empty Sets are considered equivalent to Everything(). -// It does not perform any validation, which means the server will reject -// the request if the Set contains invalid values. -func SelectorFromSet(ls Set) Selector { - return SelectorFromValidatedSet(ls) -} - -// ValidatedSelectorFromSet returns a Selector which will match exactly the given Set. A -// nil and empty Sets are considered equivalent to Everything(). -// The Set is validated client-side, which allows to catch errors early. 
-func ValidatedSelectorFromSet(ls Set) (Selector, error) { - if ls == nil || len(ls) == 0 { - return internalSelector{}, nil - } - requirements := make([]Requirement, 0, len(ls)) - for label, value := range ls { - r, err := NewRequirement(label, selection.Equals, []string{value}) - if err != nil { - return nil, err - } - requirements = append(requirements, *r) - } - // sort to have deterministic string representation - sort.Sort(ByKey(requirements)) - return internalSelector(requirements), nil -} - -// SelectorFromValidatedSet returns a Selector which will match exactly the given Set. -// A nil and empty Sets are considered equivalent to Everything(). -// It assumes that Set is already validated and doesn't do any validation. -func SelectorFromValidatedSet(ls Set) Selector { - if ls == nil || len(ls) == 0 { - return internalSelector{} - } - requirements := make([]Requirement, 0, len(ls)) - for label, value := range ls { - requirements = append(requirements, Requirement{key: label, operator: selection.Equals, strValues: []string{value}}) - } - // sort to have deterministic string representation - sort.Sort(ByKey(requirements)) - return internalSelector(requirements) -} - -// ParseToRequirements takes a string representing a selector and returns a list of -// requirements. This function is suitable for those callers that perform additional -// processing on selector requirements. -// See the documentation for Parse() function for more details. -// TODO: Consider exporting the internalSelector type instead. -func ParseToRequirements(selector string) ([]Requirement, error) { - return parse(selector) -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/selection/operator.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/selection/operator.go deleted file mode 100644 index 29c443df4..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/selection/operator.go +++ /dev/null @@ -1,36 +0,0 @@ -// Code generated by k8scopy from k8s.io/apimachinery@v0.19.8; DO NOT EDIT. -// File content copied from k8s.io/apimachinery@v0.19.8/pkg/selection/operator.go - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package selection - -// Operator represents a key/field's relationship to value(s). -// See labels.Requirement and fields.Requirement for more details. -type Operator string - -const ( - DoesNotExist Operator = "!" 
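The Parse grammar documented above is what the Lexer/Parser removed in this hunk implement: requirements are comma-separated and AND-ed, and "!key" means the key must be absent. A short sketch of the observable behaviour, using the upstream k8s.io/apimachinery/pkg/labels package instead of the deleted internal copy:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/labels"
)

func main() {
    // x must be foo or bar, y must exist, z must not exist.
    sel, err := labels.Parse("x in (foo,bar),y,!z")
    if err != nil {
        panic(err)
    }

    fmt.Println(sel.Matches(labels.Set{"x": "foo", "y": "1"}))           // true
    fmt.Println(sel.Matches(labels.Set{"x": "baz", "y": "1"}))           // false: x not in (foo,bar)
    fmt.Println(sel.Matches(labels.Set{"x": "foo", "y": "1", "z": "0"})) // false: z exists

    // Selectors built from a Set are plain equality requirements.
    fmt.Println(labels.SelectorFromSet(labels.Set{"app": "web"}).String()) // app=web
}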
- Equals Operator = "=" - DoubleEquals Operator = "==" - In Operator = "in" - NotEquals Operator = "!=" - NotIn Operator = "notin" - Exists Operator = "exists" - GreaterThan Operator = "gt" - LessThan Operator = "lt" -) diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/errors/errors.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/errors/errors.go deleted file mode 100644 index 24d040e06..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/errors/errors.go +++ /dev/null @@ -1,252 +0,0 @@ -// Code generated by k8scopy from k8s.io/apimachinery@v0.19.8; DO NOT EDIT. -// File content copied from k8s.io/apimachinery@v0.19.8/pkg/util/errors/errors.go - -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package errors - -import ( - "errors" - "fmt" - - "sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets" -) - -// MessageCountMap contains occurrence for each error message. -type MessageCountMap map[string]int - -// Aggregate represents an object that contains multiple errors, but does not -// necessarily have singular semantic meaning. -// The aggregate can be used with `errors.Is()` to check for the occurrence of -// a specific error type. -// Errors.As() is not supported, because the caller presumably cares about a -// specific error of potentially multiple that match the given type. -type Aggregate interface { - error - Errors() []error - Is(error) bool -} - -// NewAggregate converts a slice of errors into an Aggregate interface, which -// is itself an implementation of the error interface. If the slice is empty, -// this returns nil. -// It will check if any of the element of input error list is nil, to avoid -// nil pointer panic when call Error(). -func NewAggregate(errlist []error) Aggregate { - if len(errlist) == 0 { - return nil - } - // In case of input error list contains nil - var errs []error - for _, e := range errlist { - if e != nil { - errs = append(errs, e) - } - } - if len(errs) == 0 { - return nil - } - return aggregate(errs) -} - -// This helper implements the error and Errors interfaces. Keeping it private -// prevents people from making an aggregate of 0 errors, which is not -// an error, but does satisfy the error interface. -type aggregate []error - -// Error is part of the error interface. -func (agg aggregate) Error() string { - if len(agg) == 0 { - // This should never happen, really. 
- return "" - } - if len(agg) == 1 { - return agg[0].Error() - } - seenerrs := sets.NewString() - result := "" - agg.visit(func(err error) bool { - msg := err.Error() - if seenerrs.Has(msg) { - return false - } - seenerrs.Insert(msg) - if len(seenerrs) > 1 { - result += ", " - } - result += msg - return false - }) - if len(seenerrs) == 1 { - return result - } - return "[" + result + "]" -} - -func (agg aggregate) Is(target error) bool { - return agg.visit(func(err error) bool { - return errors.Is(err, target) - }) -} - -func (agg aggregate) visit(f func(err error) bool) bool { - for _, err := range agg { - switch err := err.(type) { - case aggregate: - if match := err.visit(f); match { - return match - } - case Aggregate: - for _, nestedErr := range err.Errors() { - if match := f(nestedErr); match { - return match - } - } - default: - if match := f(err); match { - return match - } - } - } - - return false -} - -// Errors is part of the Aggregate interface. -func (agg aggregate) Errors() []error { - return []error(agg) -} - -// Matcher is used to match errors. Returns true if the error matches. -type Matcher func(error) bool - -// FilterOut removes all errors that match any of the matchers from the input -// error. If the input is a singular error, only that error is tested. If the -// input implements the Aggregate interface, the list of errors will be -// processed recursively. -// -// This can be used, for example, to remove known-OK errors (such as io.EOF or -// os.PathNotFound) from a list of errors. -func FilterOut(err error, fns ...Matcher) error { - if err == nil { - return nil - } - if agg, ok := err.(Aggregate); ok { - return NewAggregate(filterErrors(agg.Errors(), fns...)) - } - if !matchesError(err, fns...) { - return err - } - return nil -} - -// matchesError returns true if any Matcher returns true -func matchesError(err error, fns ...Matcher) bool { - for _, fn := range fns { - if fn(err) { - return true - } - } - return false -} - -// filterErrors returns any errors (or nested errors, if the list contains -// nested Errors) for which all fns return false. If no errors -// remain a nil list is returned. The resulting silec will have all -// nested slices flattened as a side effect. -func filterErrors(list []error, fns ...Matcher) []error { - result := []error{} - for _, err := range list { - r := FilterOut(err, fns...) - if r != nil { - result = append(result, r) - } - } - return result -} - -// Flatten takes an Aggregate, which may hold other Aggregates in arbitrary -// nesting, and flattens them all into a single Aggregate, recursively. -func Flatten(agg Aggregate) Aggregate { - result := []error{} - if agg == nil { - return nil - } - for _, err := range agg.Errors() { - if a, ok := err.(Aggregate); ok { - r := Flatten(a) - if r != nil { - result = append(result, r.Errors()...) - } - } else { - if err != nil { - result = append(result, err) - } - } - } - return NewAggregate(result) -} - -// CreateAggregateFromMessageCountMap converts MessageCountMap Aggregate -func CreateAggregateFromMessageCountMap(m MessageCountMap) Aggregate { - if m == nil { - return nil - } - result := make([]error, 0, len(m)) - for errStr, count := range m { - var countStr string - if count > 1 { - countStr = fmt.Sprintf(" (repeated %v times)", count) - } - result = append(result, fmt.Errorf("%v%v", errStr, countStr)) - } - return NewAggregate(result) -} - -// Reduce will return err or, if err is an Aggregate and only has one item, -// the first item in the aggregate. 
-func Reduce(err error) error { - if agg, ok := err.(Aggregate); ok && err != nil { - switch len(agg.Errors()) { - case 1: - return agg.Errors()[0] - case 0: - return nil - } - } - return err -} - -// AggregateGoroutines runs the provided functions in parallel, stuffing all -// non-nil errors into the returned Aggregate. -// Returns nil if all the functions complete successfully. -func AggregateGoroutines(funcs ...func() error) Aggregate { - errChan := make(chan error, len(funcs)) - for _, f := range funcs { - go func(f func() error) { errChan <- f() }(f) - } - errs := make([]error, 0) - for i := 0; i < cap(errChan); i++ { - if err := <-errChan; err != nil { - errs = append(errs, err) - } - } - return NewAggregate(errs) -} - -// ErrPreconditionViolated is returned when the precondition is violated -var ErrPreconditionViolated = errors.New("precondition is violated") diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets/empty.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets/empty.go deleted file mode 100644 index ef404add1..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets/empty.go +++ /dev/null @@ -1,24 +0,0 @@ -// Code generated by k8scopy from k8s.io/apimachinery@v0.19.8; DO NOT EDIT. -// File content copied from k8s.io/apimachinery@v0.19.8/pkg/util/sets/empty.go - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sets - -// Empty is public since it is used by some internal API objects for conversions between external -// string arrays and internal sets, and conversion logic requires public types today. -type Empty struct{} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets/string.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets/string.go deleted file mode 100644 index 8af1bac2a..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets/string.go +++ /dev/null @@ -1,206 +0,0 @@ -// Code generated by k8scopy from k8s.io/apimachinery@v0.19.8; DO NOT EDIT. -// File content copied from k8s.io/apimachinery@v0.19.8/pkg/util/sets/string.go - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sets - -import ( - "reflect" - "sort" -) - -// sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption. -type String map[string]Empty - -// NewString creates a String from a list of values. 
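Aggregate, as removed above, wraps a list of errors: NewAggregate drops nil entries, Error() de-duplicates identical messages, errors.Is visits every nested error, and Reduce unwraps a single-element aggregate. A sketch with the upstream k8s.io/apimachinery/pkg/util/errors package (assumed available):

package main

import (
    "errors"
    "fmt"

    utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
    errNotFound := errors.New("not found")

    agg := utilerrors.NewAggregate([]error{
        errNotFound,
        errors.New("timed out"),
        errors.New("timed out"), // duplicate messages are collapsed in Error()
        nil,                     // nil entries are dropped by NewAggregate
    })

    fmt.Println(agg.Error())                 // [not found, timed out]
    fmt.Println(errors.Is(agg, errNotFound)) // true: Is() checks every nested error

    // Reduce returns the lone error when an aggregate holds exactly one item.
    fmt.Println(utilerrors.Reduce(utilerrors.NewAggregate([]error{errNotFound}))) // not found
}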
-func NewString(items ...string) String { - ss := String{} - ss.Insert(items...) - return ss -} - -// StringKeySet creates a String from a keys of a map[string](? extends interface{}). -// If the value passed in is not actually a map, this will panic. -func StringKeySet(theMap interface{}) String { - v := reflect.ValueOf(theMap) - ret := String{} - - for _, keyValue := range v.MapKeys() { - ret.Insert(keyValue.Interface().(string)) - } - return ret -} - -// Insert adds items to the set. -func (s String) Insert(items ...string) String { - for _, item := range items { - s[item] = Empty{} - } - return s -} - -// Delete removes all items from the set. -func (s String) Delete(items ...string) String { - for _, item := range items { - delete(s, item) - } - return s -} - -// Has returns true if and only if item is contained in the set. -func (s String) Has(item string) bool { - _, contained := s[item] - return contained -} - -// HasAll returns true if and only if all items are contained in the set. -func (s String) HasAll(items ...string) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true -} - -// HasAny returns true if any items are contained in the set. -func (s String) HasAny(items ...string) bool { - for _, item := range items { - if s.Has(item) { - return true - } - } - return false -} - -// Difference returns a set of objects that are not in s2 -// For example: -// s1 = {a1, a2, a3} -// s2 = {a1, a2, a4, a5} -// s1.Difference(s2) = {a3} -// s2.Difference(s1) = {a4, a5} -func (s String) Difference(s2 String) String { - result := NewString() - for key := range s { - if !s2.Has(key) { - result.Insert(key) - } - } - return result -} - -// Union returns a new set which includes items in either s1 or s2. -// For example: -// s1 = {a1, a2} -// s2 = {a3, a4} -// s1.Union(s2) = {a1, a2, a3, a4} -// s2.Union(s1) = {a1, a2, a3, a4} -func (s1 String) Union(s2 String) String { - result := NewString() - for key := range s1 { - result.Insert(key) - } - for key := range s2 { - result.Insert(key) - } - return result -} - -// Intersection returns a new set which includes the item in BOTH s1 and s2 -// For example: -// s1 = {a1, a2} -// s2 = {a2, a3} -// s1.Intersection(s2) = {a2} -func (s1 String) Intersection(s2 String) String { - var walk, other String - result := NewString() - if s1.Len() < s2.Len() { - walk = s1 - other = s2 - } else { - walk = s2 - other = s1 - } - for key := range walk { - if other.Has(key) { - result.Insert(key) - } - } - return result -} - -// IsSuperset returns true if and only if s1 is a superset of s2. -func (s1 String) IsSuperset(s2 String) bool { - for item := range s2 { - if !s1.Has(item) { - return false - } - } - return true -} - -// Equal returns true if and only if s1 is equal (as a set) to s2. -// Two sets are equal if their membership is identical. -// (In practice, this means same elements, order doesn't matter) -func (s1 String) Equal(s2 String) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) -} - -type sortableSliceOfString []string - -func (s sortableSliceOfString) Len() int { return len(s) } -func (s sortableSliceOfString) Less(i, j int) bool { return lessString(s[i], s[j]) } -func (s sortableSliceOfString) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// List returns the contents as a sorted string slice. 
-func (s String) List() []string { - res := make(sortableSliceOfString, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.Sort(res) - return []string(res) -} - -// UnsortedList returns the slice with contents in random order. -func (s String) UnsortedList() []string { - res := make([]string, 0, len(s)) - for key := range s { - res = append(res, key) - } - return res -} - -// Returns a single element from the set. -func (s String) PopAny() (string, bool) { - for key := range s { - s.Delete(key) - return key, true - } - var zeroValue string - return zeroValue, false -} - -// Len returns the size of the set. -func (s String) Len() int { - return len(s) -} - -func lessString(lhs, rhs string) bool { - return lhs < rhs -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/field/errors.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/field/errors.go deleted file mode 100644 index 20229a5b6..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/field/errors.go +++ /dev/null @@ -1,275 +0,0 @@ -// Code generated by k8scopy from k8s.io/apimachinery@v0.19.8; DO NOT EDIT. -// File content copied from k8s.io/apimachinery@v0.19.8/pkg/util/validation/field/errors.go - -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package field - -import ( - "fmt" - "reflect" - "strconv" - "strings" - - utilerrors "sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/errors" - "sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/sets" -) - -// Error is an implementation of the 'error' interface, which represents a -// field-level validation error. -type Error struct { - Type ErrorType - Field string - BadValue interface{} - Detail string -} - -var _ error = &Error{} - -// Error implements the error interface. -func (v *Error) Error() string { - return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody()) -} - -// ErrorBody returns the error message without the field name. This is useful -// for building nice-looking higher-level error reporting. 
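sets.String, removed above, is a map-backed string set whose List() output is sorted and whose Insert/Delete mutate in place and return the receiver for chaining. A brief sketch using the upstream k8s.io/apimachinery/pkg/util/sets package:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/sets"
)

func main() {
    s1 := sets.NewString("a", "b", "c")
    s2 := sets.NewString("b", "c", "d")

    fmt.Println(s1.Has("a"), s1.HasAll("a", "d")) // true false
    fmt.Println(s1.Difference(s2).List())         // [a]
    fmt.Println(s1.Intersection(s2).List())       // [b c]
    fmt.Println(s1.Union(s2).List())              // [a b c d]

    // Insert and Delete return the (mutated) set, so calls can be chained.
    fmt.Println(s1.Insert("z").Delete("a").List()) // [b c z]
}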
-func (v *Error) ErrorBody() string { - var s string - switch v.Type { - case ErrorTypeRequired, ErrorTypeForbidden, ErrorTypeTooLong, ErrorTypeInternal: - s = v.Type.String() - default: - value := v.BadValue - valueType := reflect.TypeOf(value) - if value == nil || valueType == nil { - value = "null" - } else if valueType.Kind() == reflect.Ptr { - if reflectValue := reflect.ValueOf(value); reflectValue.IsNil() { - value = "null" - } else { - value = reflectValue.Elem().Interface() - } - } - switch t := value.(type) { - case int64, int32, float64, float32, bool: - // use simple printer for simple types - s = fmt.Sprintf("%s: %v", v.Type, value) - case string: - s = fmt.Sprintf("%s: %q", v.Type, t) - case fmt.Stringer: - // anything that defines String() is better than raw struct - s = fmt.Sprintf("%s: %s", v.Type, t.String()) - default: - // fallback to raw struct - // TODO: internal types have panic guards against json.Marshalling to prevent - // accidental use of internal types in external serialized form. For now, use - // %#v, although it would be better to show a more expressive output in the future - s = fmt.Sprintf("%s: %#v", v.Type, value) - } - } - if len(v.Detail) != 0 { - s += fmt.Sprintf(": %s", v.Detail) - } - return s -} - -// ErrorType is a machine readable value providing more detail about why -// a field is invalid. These values are expected to match 1-1 with -// CauseType in api/types.go. -type ErrorType string - -// TODO: These values are duplicated in api/types.go, but there's a circular dep. Fix it. -const ( - // ErrorTypeNotFound is used to report failure to find a requested value - // (e.g. looking up an ID). See NotFound(). - ErrorTypeNotFound ErrorType = "FieldValueNotFound" - // ErrorTypeRequired is used to report required values that are not - // provided (e.g. empty strings, null values, or empty arrays). See - // Required(). - ErrorTypeRequired ErrorType = "FieldValueRequired" - // ErrorTypeDuplicate is used to report collisions of values that must be - // unique (e.g. unique IDs). See Duplicate(). - ErrorTypeDuplicate ErrorType = "FieldValueDuplicate" - // ErrorTypeInvalid is used to report malformed values (e.g. failed regex - // match, too long, out of bounds). See Invalid(). - ErrorTypeInvalid ErrorType = "FieldValueInvalid" - // ErrorTypeNotSupported is used to report unknown values for enumerated - // fields (e.g. a list of valid values). See NotSupported(). - ErrorTypeNotSupported ErrorType = "FieldValueNotSupported" - // ErrorTypeForbidden is used to report valid (as per formatting rules) - // values which would be accepted under some conditions, but which are not - // permitted by the current conditions (such as security policy). See - // Forbidden(). - ErrorTypeForbidden ErrorType = "FieldValueForbidden" - // ErrorTypeTooLong is used to report that the given value is too long. - // This is similar to ErrorTypeInvalid, but the error will not include the - // too-long value. See TooLong(). - ErrorTypeTooLong ErrorType = "FieldValueTooLong" - // ErrorTypeTooMany is used to report "too many". This is used to - // report that a given list has too many items. This is similar to FieldValueTooLong, - // but the error indicates quantity instead of length. - ErrorTypeTooMany ErrorType = "FieldValueTooMany" - // ErrorTypeInternal is used to report other errors that are not related - // to user input. See InternalError(). - ErrorTypeInternal ErrorType = "InternalError" -) - -// String converts a ErrorType into its corresponding canonical error message. 
-func (t ErrorType) String() string { - switch t { - case ErrorTypeNotFound: - return "Not found" - case ErrorTypeRequired: - return "Required value" - case ErrorTypeDuplicate: - return "Duplicate value" - case ErrorTypeInvalid: - return "Invalid value" - case ErrorTypeNotSupported: - return "Unsupported value" - case ErrorTypeForbidden: - return "Forbidden" - case ErrorTypeTooLong: - return "Too long" - case ErrorTypeTooMany: - return "Too many" - case ErrorTypeInternal: - return "Internal error" - default: - panic(fmt.Sprintf("unrecognized validation error: %q", string(t))) - } -} - -// NotFound returns a *Error indicating "value not found". This is -// used to report failure to find a requested value (e.g. looking up an ID). -func NotFound(field *Path, value interface{}) *Error { - return &Error{ErrorTypeNotFound, field.String(), value, ""} -} - -// Required returns a *Error indicating "value required". This is used -// to report required values that are not provided (e.g. empty strings, null -// values, or empty arrays). -func Required(field *Path, detail string) *Error { - return &Error{ErrorTypeRequired, field.String(), "", detail} -} - -// Duplicate returns a *Error indicating "duplicate value". This is -// used to report collisions of values that must be unique (e.g. names or IDs). -func Duplicate(field *Path, value interface{}) *Error { - return &Error{ErrorTypeDuplicate, field.String(), value, ""} -} - -// Invalid returns a *Error indicating "invalid value". This is used -// to report malformed values (e.g. failed regex match, too long, out of bounds). -func Invalid(field *Path, value interface{}, detail string) *Error { - return &Error{ErrorTypeInvalid, field.String(), value, detail} -} - -// NotSupported returns a *Error indicating "unsupported value". -// This is used to report unknown values for enumerated fields (e.g. a list of -// valid values). -func NotSupported(field *Path, value interface{}, validValues []string) *Error { - detail := "" - if validValues != nil && len(validValues) > 0 { - quotedValues := make([]string, len(validValues)) - for i, v := range validValues { - quotedValues[i] = strconv.Quote(v) - } - detail = "supported values: " + strings.Join(quotedValues, ", ") - } - return &Error{ErrorTypeNotSupported, field.String(), value, detail} -} - -// Forbidden returns a *Error indicating "forbidden". This is used to -// report valid (as per formatting rules) values which would be accepted under -// some conditions, but which are not permitted by current conditions (e.g. -// security policy). -func Forbidden(field *Path, detail string) *Error { - return &Error{ErrorTypeForbidden, field.String(), "", detail} -} - -// TooLong returns a *Error indicating "too long". This is used to -// report that the given value is too long. This is similar to -// Invalid, but the returned error will not include the too-long -// value. -func TooLong(field *Path, value interface{}, maxLength int) *Error { - return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d bytes", maxLength)} -} - -// TooMany returns a *Error indicating "too many". This is used to -// report that a given list has too many items. This is similar to TooLong, -// but the returned error indicates quantity instead of length. -func TooMany(field *Path, actualQuantity, maxQuantity int) *Error { - return &Error{ErrorTypeTooMany, field.String(), actualQuantity, fmt.Sprintf("must have at most %d items", maxQuantity)} -} - -// InternalError returns a *Error indicating "internal error". 
This is used -// to signal that an error was found that was not directly related to user -// input. The err argument must be non-nil. -func InternalError(field *Path, err error) *Error { - return &Error{ErrorTypeInternal, field.String(), nil, err.Error()} -} - -// ErrorList holds a set of Errors. It is plausible that we might one day have -// non-field errors in this same umbrella package, but for now we don't, so -// we can keep it simple and leave ErrorList here. -type ErrorList []*Error - -// NewErrorTypeMatcher returns an errors.Matcher that returns true -// if the provided error is a Error and has the provided ErrorType. -func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher { - return func(err error) bool { - if e, ok := err.(*Error); ok { - return e.Type == t - } - return false - } -} - -// ToAggregate converts the ErrorList into an errors.Aggregate. -func (list ErrorList) ToAggregate() utilerrors.Aggregate { - errs := make([]error, 0, len(list)) - errorMsgs := sets.NewString() - for _, err := range list { - msg := fmt.Sprintf("%v", err) - if errorMsgs.Has(msg) { - continue - } - errorMsgs.Insert(msg) - errs = append(errs, err) - } - return utilerrors.NewAggregate(errs) -} - -func fromAggregate(agg utilerrors.Aggregate) ErrorList { - errs := agg.Errors() - list := make(ErrorList, len(errs)) - for i := range errs { - list[i] = errs[i].(*Error) - } - return list -} - -// Filter removes items from the ErrorList that match the provided fns. -func (list ErrorList) Filter(fns ...utilerrors.Matcher) ErrorList { - err := utilerrors.FilterOut(list.ToAggregate(), fns...) - if err == nil { - return nil - } - // FilterOut takes an Aggregate and returns an Aggregate - return fromAggregate(err.(utilerrors.Aggregate)) -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/field/path.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/field/path.go deleted file mode 100644 index 44cdf997a..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/field/path.go +++ /dev/null @@ -1,94 +0,0 @@ -// Code generated by k8scopy from k8s.io/apimachinery@v0.19.8; DO NOT EDIT. -// File content copied from k8s.io/apimachinery@v0.19.8/pkg/util/validation/field/path.go - -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package field - -import ( - "bytes" - "fmt" - "strconv" -) - -// Path represents the path from some root to a particular field. -type Path struct { - name string // the name of this field or "" if this is an index - index string // if name == "", this is a subscript (index or map key) of the previous element - parent *Path // nil if this is the root element -} - -// NewPath creates a root Path object. -func NewPath(name string, moreNames ...string) *Path { - r := &Path{name: name, parent: nil} - for _, anotherName := range moreNames { - r = &Path{name: anotherName, parent: r} - } - return r -} - -// Root returns the root element of this Path. 
-func (p *Path) Root() *Path { - for ; p.parent != nil; p = p.parent { - // Do nothing. - } - return p -} - -// Child creates a new Path that is a child of the method receiver. -func (p *Path) Child(name string, moreNames ...string) *Path { - r := NewPath(name, moreNames...) - r.Root().parent = p - return r -} - -// Index indicates that the previous Path is to be subscripted by an int. -// This sets the same underlying value as Key. -func (p *Path) Index(index int) *Path { - return &Path{index: strconv.Itoa(index), parent: p} -} - -// Key indicates that the previous Path is to be subscripted by a string. -// This sets the same underlying value as Index. -func (p *Path) Key(key string) *Path { - return &Path{index: key, parent: p} -} - -// String produces a string representation of the Path. -func (p *Path) String() string { - // make a slice to iterate - elems := []*Path{} - for ; p != nil; p = p.parent { - elems = append(elems, p) - } - - // iterate, but it has to be backwards - buf := bytes.NewBuffer(nil) - for i := range elems { - p := elems[len(elems)-1-i] - if p.parent != nil && len(p.name) > 0 { - // This is either the root or it is a subscript. - buf.WriteString(".") - } - if len(p.name) > 0 { - buf.WriteString(p.name) - } else { - fmt.Fprintf(buf, "[%s]", p.index) - } - } - return buf.String() -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/validation.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/validation.go deleted file mode 100644 index 5e1ddbc46..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/validation.go +++ /dev/null @@ -1,506 +0,0 @@ -// Code generated by k8scopy from k8s.io/apimachinery@v0.19.8; DO NOT EDIT. -// File content copied from k8s.io/apimachinery@v0.19.8/pkg/util/validation/validation.go - -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validation - -import ( - "fmt" - "math" - "net" - "regexp" - "strconv" - "strings" - - "sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/util/validation/field" -) - -const qnameCharFmt string = "[A-Za-z0-9]" -const qnameExtCharFmt string = "[-A-Za-z0-9_.]" -const qualifiedNameFmt string = "(" + qnameCharFmt + qnameExtCharFmt + "*)?" + qnameCharFmt -const qualifiedNameErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" -const qualifiedNameMaxLength int = 63 - -var qualifiedNameRegexp = regexp.MustCompile("^" + qualifiedNameFmt + "$") - -// IsQualifiedName tests whether the value passed is what Kubernetes calls a -// "qualified name". This is a format used in various places throughout the -// system. If the value is not valid, a list of error strings is returned. -// Otherwise an empty list (or nil) is returned. 
-func IsQualifiedName(value string) []string { - var errs []string - parts := strings.Split(value, "/") - var name string - switch len(parts) { - case 1: - name = parts[0] - case 2: - var prefix string - prefix, name = parts[0], parts[1] - if len(prefix) == 0 { - errs = append(errs, "prefix part "+EmptyError()) - } else if msgs := IsDNS1123Subdomain(prefix); len(msgs) != 0 { - errs = append(errs, prefixEach(msgs, "prefix part ")...) - } - default: - return append(errs, "a qualified name "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")+ - " with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')") - } - - if len(name) == 0 { - errs = append(errs, "name part "+EmptyError()) - } else if len(name) > qualifiedNameMaxLength { - errs = append(errs, "name part "+MaxLenError(qualifiedNameMaxLength)) - } - if !qualifiedNameRegexp.MatchString(name) { - errs = append(errs, "name part "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")) - } - return errs -} - -// IsFullyQualifiedName checks if the name is fully qualified. This is similar -// to IsFullyQualifiedDomainName but requires a minimum of 3 segments instead of -// 2 and does not accept a trailing . as valid. -// TODO: This function is deprecated and preserved until all callers migrate to -// IsFullyQualifiedDomainName; please don't add new callers. -func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList { - var allErrors field.ErrorList - if len(name) == 0 { - return append(allErrors, field.Required(fldPath, "")) - } - if errs := IsDNS1123Subdomain(name); len(errs) > 0 { - return append(allErrors, field.Invalid(fldPath, name, strings.Join(errs, ","))) - } - if len(strings.Split(name, ".")) < 3 { - return append(allErrors, field.Invalid(fldPath, name, "should be a domain with at least three segments separated by dots")) - } - return allErrors -} - -// IsFullyQualifiedDomainName checks if the domain name is fully qualified. This -// is similar to IsFullyQualifiedName but only requires a minimum of 2 segments -// instead of 3 and accepts a trailing . as valid. -func IsFullyQualifiedDomainName(fldPath *field.Path, name string) field.ErrorList { - var allErrors field.ErrorList - if len(name) == 0 { - return append(allErrors, field.Required(fldPath, "")) - } - if strings.HasSuffix(name, ".") { - name = name[:len(name)-1] - } - if errs := IsDNS1123Subdomain(name); len(errs) > 0 { - return append(allErrors, field.Invalid(fldPath, name, strings.Join(errs, ","))) - } - if len(strings.Split(name, ".")) < 2 { - return append(allErrors, field.Invalid(fldPath, name, "should be a domain with at least two segments separated by dots")) - } - for _, label := range strings.Split(name, ".") { - if errs := IsDNS1123Label(label); len(errs) > 0 { - return append(allErrors, field.Invalid(fldPath, label, strings.Join(errs, ","))) - } - } - return allErrors -} - -// Allowed characters in an HTTP Path as defined by RFC 3986. A HTTP path may -// contain: -// * unreserved characters (alphanumeric, '-', '.', '_', '~') -// * percent-encoded octets -// * sub-delims ("!", "$", "&", "'", "(", ")", "*", "+", ",", ";", "=") -// * a colon character (":") -const httpPathFmt string = `[A-Za-z0-9/\-._~%!$&'()*+,;=:]+` - -var httpPathRegexp = regexp.MustCompile("^" + httpPathFmt + "$") - -// IsDomainPrefixedPath checks if the given string is a domain-prefixed path -// (e.g. acme.io/foo). All characters before the first "/" must be a valid -// subdomain as defined by RFC 1123. 
All characters trailing the first "/" must -// be valid HTTP Path characters as defined by RFC 3986. -func IsDomainPrefixedPath(fldPath *field.Path, dpPath string) field.ErrorList { - var allErrs field.ErrorList - if len(dpPath) == 0 { - return append(allErrs, field.Required(fldPath, "")) - } - - segments := strings.SplitN(dpPath, "/", 2) - if len(segments) != 2 || len(segments[0]) == 0 || len(segments[1]) == 0 { - return append(allErrs, field.Invalid(fldPath, dpPath, "must be a domain-prefixed path (such as \"acme.io/foo\")")) - } - - host := segments[0] - for _, err := range IsDNS1123Subdomain(host) { - allErrs = append(allErrs, field.Invalid(fldPath, host, err)) - } - - path := segments[1] - if !httpPathRegexp.MatchString(path) { - return append(allErrs, field.Invalid(fldPath, path, RegexError("Invalid path", httpPathFmt))) - } - - return allErrs -} - -const labelValueFmt string = "(" + qualifiedNameFmt + ")?" -const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" - -// LabelValueMaxLength is a label's max length -const LabelValueMaxLength int = 63 - -var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$") - -// IsValidLabelValue tests whether the value passed is a valid label value. If -// the value is not valid, a list of error strings is returned. Otherwise an -// empty list (or nil) is returned. -func IsValidLabelValue(value string) []string { - var errs []string - if len(value) > LabelValueMaxLength { - errs = append(errs, MaxLenError(LabelValueMaxLength)) - } - if !labelValueRegexp.MatchString(value) { - errs = append(errs, RegexError(labelValueErrMsg, labelValueFmt, "MyValue", "my_value", "12345")) - } - return errs -} - -const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" -const dns1123LabelErrMsg string = "a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" - -// DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123) -const DNS1123LabelMaxLength int = 63 - -var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$") - -// IsDNS1123Label tests for a string that conforms to the definition of a label in -// DNS (RFC 1123). -func IsDNS1123Label(value string) []string { - var errs []string - if len(value) > DNS1123LabelMaxLength { - errs = append(errs, MaxLenError(DNS1123LabelMaxLength)) - } - if !dns1123LabelRegexp.MatchString(value) { - errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) - } - return errs -} - -const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*" -const dns1123SubdomainErrorMsg string = "a DNS-1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" - -// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123) -const DNS1123SubdomainMaxLength int = 253 - -var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$") - -// IsDNS1123Subdomain tests for a string that conforms to the definition of a -// subdomain in DNS (RFC 1123). 
-func IsDNS1123Subdomain(value string) []string { - var errs []string - if len(value) > DNS1123SubdomainMaxLength { - errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) - } - if !dns1123SubdomainRegexp.MatchString(value) { - errs = append(errs, RegexError(dns1123SubdomainErrorMsg, dns1123SubdomainFmt, "example.com")) - } - return errs -} - -const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?" -const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character" - -// DNS1035LabelMaxLength is a label's max length in DNS (RFC 1035) -const DNS1035LabelMaxLength int = 63 - -var dns1035LabelRegexp = regexp.MustCompile("^" + dns1035LabelFmt + "$") - -// IsDNS1035Label tests for a string that conforms to the definition of a label in -// DNS (RFC 1035). -func IsDNS1035Label(value string) []string { - var errs []string - if len(value) > DNS1035LabelMaxLength { - errs = append(errs, MaxLenError(DNS1035LabelMaxLength)) - } - if !dns1035LabelRegexp.MatchString(value) { - errs = append(errs, RegexError(dns1035LabelErrMsg, dns1035LabelFmt, "my-name", "abc-123")) - } - return errs -} - -// wildcard definition - RFC 1034 section 4.3.3. -// examples: -// - valid: *.bar.com, *.foo.bar.com -// - invalid: *.*.bar.com, *.foo.*.com, *bar.com, f*.bar.com, * -const wildcardDNS1123SubdomainFmt = "\\*\\." + dns1123SubdomainFmt -const wildcardDNS1123SubdomainErrMsg = "a wildcard DNS-1123 subdomain must start with '*.', followed by a valid DNS subdomain, which must consist of lower case alphanumeric characters, '-' or '.' and end with an alphanumeric character" - -// IsWildcardDNS1123Subdomain tests for a string that conforms to the definition of a -// wildcard subdomain in DNS (RFC 1034 section 4.3.3). -func IsWildcardDNS1123Subdomain(value string) []string { - wildcardDNS1123SubdomainRegexp := regexp.MustCompile("^" + wildcardDNS1123SubdomainFmt + "$") - - var errs []string - if len(value) > DNS1123SubdomainMaxLength { - errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) - } - if !wildcardDNS1123SubdomainRegexp.MatchString(value) { - errs = append(errs, RegexError(wildcardDNS1123SubdomainErrMsg, wildcardDNS1123SubdomainFmt, "*.example.com")) - } - return errs -} - -const cIdentifierFmt string = "[A-Za-z_][A-Za-z0-9_]*" -const identifierErrMsg string = "a valid C identifier must start with alphabetic character or '_', followed by a string of alphanumeric characters or '_'" - -var cIdentifierRegexp = regexp.MustCompile("^" + cIdentifierFmt + "$") - -// IsCIdentifier tests for a string that conforms the definition of an identifier -// in C. This checks the format, but not the length. -func IsCIdentifier(value string) []string { - if !cIdentifierRegexp.MatchString(value) { - return []string{RegexError(identifierErrMsg, cIdentifierFmt, "my_name", "MY_NAME", "MyName")} - } - return nil -} - -// IsValidPortNum tests that the argument is a valid, non-zero port number. -func IsValidPortNum(port int) []string { - if 1 <= port && port <= 65535 { - return nil - } - return []string{InclusiveRangeError(1, 65535)} -} - -// IsInRange tests that the argument is in an inclusive range. -func IsInRange(value int, min int, max int) []string { - if value >= min && value <= max { - return nil - } - return []string{InclusiveRangeError(min, max)} -} - -// Now in libcontainer UID/GID limits is 0 ~ 1<<31 - 1 -// TODO: once we have a type for UID/GID we should make these that type. 
-const ( - minUserID = 0 - maxUserID = math.MaxInt32 - minGroupID = 0 - maxGroupID = math.MaxInt32 -) - -// IsValidGroupID tests that the argument is a valid Unix GID. -func IsValidGroupID(gid int64) []string { - if minGroupID <= gid && gid <= maxGroupID { - return nil - } - return []string{InclusiveRangeError(minGroupID, maxGroupID)} -} - -// IsValidUserID tests that the argument is a valid Unix UID. -func IsValidUserID(uid int64) []string { - if minUserID <= uid && uid <= maxUserID { - return nil - } - return []string{InclusiveRangeError(minUserID, maxUserID)} -} - -var portNameCharsetRegex = regexp.MustCompile("^[-a-z0-9]+$") -var portNameOneLetterRegexp = regexp.MustCompile("[a-z]") - -// IsValidPortName check that the argument is valid syntax. It must be -// non-empty and no more than 15 characters long. It may contain only [-a-z0-9] -// and must contain at least one letter [a-z]. It must not start or end with a -// hyphen, nor contain adjacent hyphens. -// -// Note: We only allow lower-case characters, even though RFC 6335 is case -// insensitive. -func IsValidPortName(port string) []string { - var errs []string - if len(port) > 15 { - errs = append(errs, MaxLenError(15)) - } - if !portNameCharsetRegex.MatchString(port) { - errs = append(errs, "must contain only alpha-numeric characters (a-z, 0-9), and hyphens (-)") - } - if !portNameOneLetterRegexp.MatchString(port) { - errs = append(errs, "must contain at least one letter or number (a-z, 0-9)") - } - if strings.Contains(port, "--") { - errs = append(errs, "must not contain consecutive hyphens") - } - if len(port) > 0 && (port[0] == '-' || port[len(port)-1] == '-') { - errs = append(errs, "must not begin or end with a hyphen") - } - return errs -} - -// IsValidIP tests that the argument is a valid IP address. -func IsValidIP(value string) []string { - if net.ParseIP(value) == nil { - return []string{"must be a valid IP address, (e.g. 10.9.8.7)"} - } - return nil -} - -// IsValidIPv4Address tests that the argument is a valid IPv4 address. -func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList { - var allErrors field.ErrorList - ip := net.ParseIP(value) - if ip == nil || ip.To4() == nil { - allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv4 address")) - } - return allErrors -} - -// IsValidIPv6Address tests that the argument is a valid IPv6 address. -func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList { - var allErrors field.ErrorList - ip := net.ParseIP(value) - if ip == nil || ip.To4() != nil { - allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv6 address")) - } - return allErrors -} - -const percentFmt string = "[0-9]+%" -const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'" - -var percentRegexp = regexp.MustCompile("^" + percentFmt + "$") - -// IsValidPercent checks that string is in the form of a percentage -func IsValidPercent(percent string) []string { - if !percentRegexp.MatchString(percent) { - return []string{RegexError(percentErrMsg, percentFmt, "1%", "93%")} - } - return nil -} - -const httpHeaderNameFmt string = "[-A-Za-z0-9]+" -const httpHeaderNameErrMsg string = "a valid HTTP header must consist of alphanumeric characters or '-'" - -var httpHeaderNameRegexp = regexp.MustCompile("^" + httpHeaderNameFmt + "$") - -// IsHTTPHeaderName checks that a string conforms to the Go HTTP library's -// definition of a valid header field name (a stricter subset than RFC7230). 
-func IsHTTPHeaderName(value string) []string { - if !httpHeaderNameRegexp.MatchString(value) { - return []string{RegexError(httpHeaderNameErrMsg, httpHeaderNameFmt, "X-Header-Name")} - } - return nil -} - -const envVarNameFmt = "[-._a-zA-Z][-._a-zA-Z0-9]*" -const envVarNameFmtErrMsg string = "a valid environment variable name must consist of alphabetic characters, digits, '_', '-', or '.', and must not start with a digit" - -var envVarNameRegexp = regexp.MustCompile("^" + envVarNameFmt + "$") - -// IsEnvVarName tests if a string is a valid environment variable name. -func IsEnvVarName(value string) []string { - var errs []string - if !envVarNameRegexp.MatchString(value) { - errs = append(errs, RegexError(envVarNameFmtErrMsg, envVarNameFmt, "my.env-name", "MY_ENV.NAME", "MyEnvName1")) - } - - errs = append(errs, hasChDirPrefix(value)...) - return errs -} - -const configMapKeyFmt = `[-._a-zA-Z0-9]+` -const configMapKeyErrMsg string = "a valid config key must consist of alphanumeric characters, '-', '_' or '.'" - -var configMapKeyRegexp = regexp.MustCompile("^" + configMapKeyFmt + "$") - -// IsConfigMapKey tests for a string that is a valid key for a ConfigMap or Secret -func IsConfigMapKey(value string) []string { - var errs []string - if len(value) > DNS1123SubdomainMaxLength { - errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) - } - if !configMapKeyRegexp.MatchString(value) { - errs = append(errs, RegexError(configMapKeyErrMsg, configMapKeyFmt, "key.name", "KEY_NAME", "key-name")) - } - errs = append(errs, hasChDirPrefix(value)...) - return errs -} - -// MaxLenError returns a string explanation of a "string too long" validation -// failure. -func MaxLenError(length int) string { - return fmt.Sprintf("must be no more than %d characters", length) -} - -// RegexError returns a string explanation of a regex validation failure. -func RegexError(msg string, fmt string, examples ...string) string { - if len(examples) == 0 { - return msg + " (regex used for validation is '" + fmt + "')" - } - msg += " (e.g. " - for i := range examples { - if i > 0 { - msg += " or " - } - msg += "'" + examples[i] + "', " - } - msg += "regex used for validation is '" + fmt + "')" - return msg -} - -// EmptyError returns a string explanation of a "must not be empty" validation -// failure. -func EmptyError() string { - return "must be non-empty" -} - -func prefixEach(msgs []string, prefix string) []string { - for i := range msgs { - msgs[i] = prefix + msgs[i] - } - return msgs -} - -// InclusiveRangeError returns a string explanation of a numeric "must be -// between" validation failure. -func InclusiveRangeError(lo, hi int) string { - return fmt.Sprintf(`must be between %d and %d, inclusive`, lo, hi) -} - -func hasChDirPrefix(value string) []string { - var errs []string - switch { - case value == ".": - errs = append(errs, `must not be '.'`) - case value == "..": - errs = append(errs, `must not be '..'`) - case strings.HasPrefix(value, ".."): - errs = append(errs, `must not start with '..'`) - } - return errs -} - -// IsValidSocketAddr checks that string represents a valid socket address -// as defined in RFC 789. (e.g 0.0.0.0:10254 or [::]:10254)) -func IsValidSocketAddr(value string) []string { - var errs []string - ip, port, err := net.SplitHostPort(value) - if err != nil { - errs = append(errs, "must be a valid socket address format, (e.g. 0.0.0.0:10254 or [::]:10254)") - return errs - } - portInt, _ := strconv.Atoi(port) - errs = append(errs, IsValidPortNum(portInt)...) 
- errs = append(errs, IsValidIP(ip)...) - return errs -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/kfns.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/kfns.go deleted file mode 100644 index 2ae8c1665..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/kfns.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package yaml - -import ( - "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml" -) - -// AnnotationClearer removes an annotation at metadata.annotations. -// Returns nil if the annotation or field does not exist. -type AnnotationClearer struct { - Kind string `yaml:"kind,omitempty"` - Key string `yaml:"key,omitempty"` -} - -func (c AnnotationClearer) Filter(rn *RNode) (*RNode, error) { - return rn.Pipe( - PathGetter{Path: []string{MetadataField, AnnotationsField}}, - FieldClearer{Name: c.Key}) -} - -func ClearAnnotation(key string) AnnotationClearer { - return AnnotationClearer{Key: key} -} - -// ClearEmptyAnnotations clears the keys, annotations -// and metadata if they are empty/null -func ClearEmptyAnnotations(rn *RNode) error { - _, err := rn.Pipe(Lookup(MetadataField), FieldClearer{ - Name: AnnotationsField, IfEmpty: true}) - if err != nil { - return errors.Wrap(err) - } - _, err = rn.Pipe(FieldClearer{Name: MetadataField, IfEmpty: true}) - if err != nil { - return errors.Wrap(err) - } - return nil -} - -// k8sMetaSetter sets a name at metadata.{key}. -// Creates metadata if does not exist. -type k8sMetaSetter struct { - Key string `yaml:"key,omitempty"` - Value string `yaml:"value,omitempty"` -} - -func (s k8sMetaSetter) Filter(rn *RNode) (*RNode, error) { - _, err := rn.Pipe( - PathGetter{Path: []string{MetadataField}, Create: yaml.MappingNode}, - FieldSetter{Name: s.Key, Value: NewStringRNode(s.Value)}) - return rn, err -} - -func SetK8sName(value string) k8sMetaSetter { - return k8sMetaSetter{Key: NameField, Value: value} -} - -func SetK8sNamespace(value string) k8sMetaSetter { - return k8sMetaSetter{Key: NamespaceField, Value: value} -} - -// AnnotationSetter sets an annotation at metadata.annotations. -// Creates metadata.annotations if does not exist. -type AnnotationSetter struct { - Kind string `yaml:"kind,omitempty"` - Key string `yaml:"key,omitempty"` - Value string `yaml:"value,omitempty"` -} - -func (s AnnotationSetter) Filter(rn *RNode) (*RNode, error) { - v := NewStringRNode(s.Value) - // some tools get confused about the type if annotations are not quoted - v.YNode().Style = yaml.SingleQuotedStyle - if err := ClearEmptyAnnotations(rn); err != nil { - return nil, err - } - return addMetadataNode(rn, AnnotationsField, s.Key, v) -} - -func SetAnnotation(key, value string) AnnotationSetter { - return AnnotationSetter{Key: key, Value: value} -} - -// AnnotationGetter gets an annotation at metadata.annotations. -// Returns nil if metadata.annotations does not exist. -type AnnotationGetter struct { - Kind string `yaml:"kind,omitempty"` - Key string `yaml:"key,omitempty"` - Value string `yaml:"value,omitempty"` -} - -// AnnotationGetter returns the annotation value. -// Returns "", nil if the annotation does not exist. 
-func (g AnnotationGetter) Filter(rn *RNode) (*RNode, error) { - v, err := rn.Pipe( - PathGetter{Path: []string{MetadataField, AnnotationsField, g.Key}}) - if v == nil || err != nil { - return v, err - } - if g.Value == "" || v.value.Value == g.Value { - return v, err - } - return nil, err -} - -func GetAnnotation(key string) AnnotationGetter { - return AnnotationGetter{Key: key} -} - -// LabelSetter sets a label at metadata.labels. -// Creates metadata.labels if does not exist. -type LabelSetter struct { - Kind string `yaml:"kind,omitempty"` - Key string `yaml:"key,omitempty"` - Value string `yaml:"value,omitempty"` -} - -func (s LabelSetter) Filter(rn *RNode) (*RNode, error) { - v := NewStringRNode(s.Value) - // some tools get confused about the type if labels are not quoted - v.YNode().Style = yaml.SingleQuotedStyle - return addMetadataNode(rn, LabelsField, s.Key, v) -} - -func addMetadataNode(rn *RNode, field, key string, v *RNode) (*RNode, error) { - return rn.Pipe( - PathGetter{ - Path: []string{MetadataField, field}, Create: yaml.MappingNode}, - FieldSetter{Name: key, Value: v}) -} - -func SetLabel(key, value string) LabelSetter { - return LabelSetter{Key: key, Value: value} -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/mapnode.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/mapnode.go deleted file mode 100644 index 31b41b40f..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/mapnode.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package yaml - -// MapNode wraps a field key and value. -type MapNode struct { - Key *RNode - Value *RNode -} - -// IsNilOrEmpty returns true if the MapNode is nil, -// has no value, or has a value that appears empty. -func (mn *MapNode) IsNilOrEmpty() bool { - return mn == nil || mn.Value.IsNilOrEmpty() -} - -type MapNodeSlice []*MapNode - -func (m MapNodeSlice) Keys() []*RNode { - var keys []*RNode - for i := range m { - if m[i] != nil { - keys = append(keys, m[i].Key) - } - } - return keys -} - -func (m MapNodeSlice) Values() []*RNode { - var values []*RNode - for i := range m { - if m[i] != nil { - values = append(values, m[i].Value) - } else { - values = append(values, nil) - } - } - return values -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/match.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/match.go deleted file mode 100644 index d53f563d6..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/match.go +++ /dev/null @@ -1,271 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package yaml - -import ( - "regexp" - "strconv" - "strings" -) - -// PathMatcher returns all RNodes matching the path wrapped in a SequenceNode. -// Lists may have multiple elements matching the path, and each matching element -// is added to the return result. -// If Path points to a SequenceNode, the SequenceNode is wrapped in another SequenceNode -// If Path does not contain any lists, the result is still wrapped in a SequenceNode of len == 1 -type PathMatcher struct { - Kind string `yaml:"kind,omitempty"` - - // Path is a slice of parts leading to the RNode to lookup. - // Each path part may be one of: - // * FieldMatcher -- e.g. "spec" - // * Map Key -- e.g. "app.k8s.io/version" - // * List Entry -- e.g. "[name=nginx]" or "[=-jar]" - // - // Map Keys and Fields are equivalent. - // See FieldMatcher for more on Fields and Map Keys. - // - // List Entries are specified as map entry to match [fieldName=fieldValue]. 
- // See Elem for more on List Entries. - // - // Examples: - // * spec.template.spec.container with matching name: [name=nginx] -- match 'name': 'nginx' - // * spec.template.spec.container.argument matching a value: [=-jar] -- match '-jar' - Path []string `yaml:"path,omitempty"` - - // Matches is set by PathMatch to publish the matched element values for each node. - // After running PathMatcher.Filter, each node from the SequenceNode result may be - // looked up in Matches to find the field values that were matched. - Matches map[*Node][]string - - // StripComments may be set to remove the comments on the matching Nodes. - // This is useful for if the nodes are to be printed in FlowStyle. - StripComments bool - - val *RNode - field string - matchRegex string - indexNumber int -} - -func (p *PathMatcher) stripComments(n *Node) { - if n == nil { - return - } - if p.StripComments { - n.LineComment = "" - n.HeadComment = "" - n.FootComment = "" - for i := range n.Content { - p.stripComments(n.Content[i]) - } - } -} - -func (p *PathMatcher) Filter(rn *RNode) (*RNode, error) { - val, err := p.filter(rn) - if err != nil { - return nil, err - } - p.stripComments(val.YNode()) - return val, err -} - -func (p *PathMatcher) filter(rn *RNode) (*RNode, error) { - p.Matches = map[*Node][]string{} - - if len(p.Path) == 0 { - // return the element wrapped in a SequenceNode - p.appendRNode("", rn) - return p.val, nil - } - - if IsIdxNumber(p.Path[0]) { - return p.doIndexSeq(rn) - } - - if IsListIndex(p.Path[0]) { - // match seq elements - return p.doSeq(rn) - } - - if IsWildcard(p.Path[0]) { - // match every elements (*) - return p.doMatchEvery(rn) - } - // match a field - return p.doField(rn) -} - -func (p *PathMatcher) doMatchEvery(rn *RNode) (*RNode, error) { - if err := rn.VisitElements(p.visitEveryElem); err != nil { - return nil, err - } - - return p.val, nil -} - -func (p *PathMatcher) visitEveryElem(elem *RNode) error { - fieldName := p.Path[0] - // recurse on the matching element - pm := &PathMatcher{Path: p.Path[1:]} - add, err := pm.filter(elem) - for k, v := range pm.Matches { - p.Matches[k] = v - } - if err != nil || add == nil { - return err - } - p.append(fieldName, add.Content()...) - - return nil -} - -func (p *PathMatcher) doField(rn *RNode) (*RNode, error) { - // lookup the field - field, err := rn.Pipe(Get(p.Path[0])) - if err != nil || field == nil { - // if the field doesn't exist, return nil - return nil, err - } - - // recurse on the field, removing the first element of the path - pm := &PathMatcher{Path: p.Path[1:]} - p.val, err = pm.filter(field) - p.Matches = pm.Matches - return p.val, err -} - -// doIndexSeq iterates over a sequence and appends elements matching the index p.Val -func (p *PathMatcher) doIndexSeq(rn *RNode) (*RNode, error) { - // parse to index number - idx, err := strconv.Atoi(p.Path[0]) - if err != nil { - return nil, err - } - p.indexNumber = idx - - elements, err := rn.Elements() - if err != nil { - return nil, err - } - - // get target element - element := elements[idx] - - // recurse on the matching element - pm := &PathMatcher{Path: p.Path[1:]} - add, err := pm.filter(element) - for k, v := range pm.Matches { - p.Matches[k] = v - } - if err != nil || add == nil { - return nil, err - } - p.append("", add.Content()...) 
- return p.val, nil -} - -// doSeq iterates over a sequence and appends elements matching the path regex to p.Val -func (p *PathMatcher) doSeq(rn *RNode) (*RNode, error) { - // parse the field + match pair - var err error - p.field, p.matchRegex, err = SplitIndexNameValue(p.Path[0]) - if err != nil { - return nil, err - } - - if p.field == "" { - err = rn.VisitElements(p.visitPrimitiveElem) - } else { - err = rn.VisitElements(p.visitElem) - } - if err != nil || p.val == nil || len(p.val.YNode().Content) == 0 { - return nil, err - } - - return p.val, nil -} - -func (p *PathMatcher) visitPrimitiveElem(elem *RNode) error { - r, err := regexp.Compile(p.matchRegex) - if err != nil { - return err - } - - str, err := elem.String() - if err != nil { - return err - } - str = strings.TrimSpace(str) - if !r.MatchString(str) { - return nil - } - - p.appendRNode("", elem) - return nil -} - -func (p *PathMatcher) visitElem(elem *RNode) error { - r, err := regexp.Compile(p.matchRegex) - if err != nil { - return err - } - - // check if this elements field matches the regex - val := elem.Field(p.field) - if val == nil || val.Value == nil { - return nil - } - str, err := val.Value.String() - if err != nil { - return err - } - str = strings.TrimSpace(str) - if !r.MatchString(str) { - return nil - } - - // recurse on the matching element - pm := &PathMatcher{Path: p.Path[1:]} - add, err := pm.filter(elem) - for k, v := range pm.Matches { - p.Matches[k] = v - } - if err != nil || add == nil { - return err - } - p.append(str, add.Content()...) - return nil -} - -func (p *PathMatcher) appendRNode(path string, node *RNode) { - p.append(path, node.YNode()) -} - -func (p *PathMatcher) append(path string, nodes ...*Node) { - if p.val == nil { - p.val = NewRNode(&Node{Kind: SequenceNode}) - } - for i := range nodes { - node := nodes[i] - p.val.YNode().Content = append(p.val.YNode().Content, node) - // record the path if specified - if path != "" { - p.Matches[node] = append(p.Matches[node], path) - } - } -} - -func cleanPath(path []string) []string { - var p []string - for _, elem := range path { - elem = strings.TrimSpace(elem) - if len(elem) == 0 { - continue - } - p = append(p, elem) - } - return p -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/merge2.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/merge2.go deleted file mode 100644 index ab0c8244c..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/merge2.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package merge contains libraries for merging fields from one RNode to another -// RNode -package merge2 - -import ( - "sigs.k8s.io/kustomize/kyaml/openapi" - "sigs.k8s.io/kustomize/kyaml/yaml" - "sigs.k8s.io/kustomize/kyaml/yaml/walk" -) - -// Merge merges fields from src into dest. -func Merge(src, dest *yaml.RNode, mergeOptions yaml.MergeOptions) (*yaml.RNode, error) { - return walk.Walker{ - Sources: []*yaml.RNode{dest, src}, - Visitor: Merger{}, - MergeOptions: mergeOptions, - }.Walk() -} - -// Merge parses the arguments, and merges fields from srcStr into destStr. 
-func MergeStrings(srcStr, destStr string, infer bool, mergeOptions yaml.MergeOptions) (string, error) { - src, err := yaml.Parse(srcStr) - if err != nil { - return "", err - } - dest, err := yaml.Parse(destStr) - if err != nil { - return "", err - } - - result, err := walk.Walker{ - Sources: []*yaml.RNode{dest, src}, - Visitor: Merger{}, - InferAssociativeLists: infer, - MergeOptions: mergeOptions, - }.Walk() - if err != nil { - return "", err - } - - return result.String() -} - -type Merger struct { - // for forwards compatibility when new functions are added to the interface -} - -var _ walk.Visitor = Merger{} - -func (m Merger) VisitMap(nodes walk.Sources, s *openapi.ResourceSchema) (*yaml.RNode, error) { - if err := m.SetComments(nodes); err != nil { - return nil, err - } - if err := m.SetStyle(nodes); err != nil { - return nil, err - } - if yaml.IsMissingOrNull(nodes.Dest()) { - // Add - ps, _ := determineSmpDirective(nodes.Origin()) - if ps == smpDelete { - return walk.ClearNode, nil - } - - return nodes.Origin(), nil - } - if nodes.Origin().IsTaggedNull() { - // clear the value - return walk.ClearNode, nil - } - - ps, err := determineSmpDirective(nodes.Origin()) - if err != nil { - return nil, err - } - - switch ps { - case smpDelete: - return walk.ClearNode, nil - case smpReplace: - return nodes.Origin(), nil - default: - return nodes.Dest(), nil - } -} - -func (m Merger) VisitScalar(nodes walk.Sources, s *openapi.ResourceSchema) (*yaml.RNode, error) { - if err := m.SetComments(nodes); err != nil { - return nil, err - } - if err := m.SetStyle(nodes); err != nil { - return nil, err - } - // Override value - if nodes.Origin() != nil { - return nodes.Origin(), nil - } - // Keep - return nodes.Dest(), nil -} - -func (m Merger) VisitList(nodes walk.Sources, s *openapi.ResourceSchema, kind walk.ListKind) (*yaml.RNode, error) { - if err := m.SetComments(nodes); err != nil { - return nil, err - } - if err := m.SetStyle(nodes); err != nil { - return nil, err - } - if kind == walk.NonAssociateList { - // Override value - if nodes.Origin() != nil { - return nodes.Origin(), nil - } - // Keep - return nodes.Dest(), nil - } - - // Add - if yaml.IsMissingOrNull(nodes.Dest()) { - return nodes.Origin(), nil - } - // Clear - if nodes.Origin().IsTaggedNull() { - return walk.ClearNode, nil - } - - ps, err := determineSmpDirective(nodes.Origin()) - if err != nil { - return nil, err - } - - switch ps { - case smpDelete: - return walk.ClearNode, nil - case smpReplace: - return nodes.Origin(), nil - default: - return nodes.Dest(), nil - } -} - -func (m Merger) SetStyle(sources walk.Sources) error { - source := sources.Origin() - dest := sources.Dest() - if dest == nil || dest.YNode() == nil || source == nil || source.YNode() == nil { - // avoid panic - return nil - } - - // copy the style from the source. - // special case: if the dest was an empty map or seq, then it probably had - // folded style applied, but we actually want to keep the style of the origin - // in this case (even if it was the default). otherwise the merged elements - // will get folded even though this probably isn't what is desired. - if dest.YNode().Kind != yaml.ScalarNode && len(dest.YNode().Content) == 0 { - dest.YNode().Style = source.YNode().Style - } - return nil -} - -// SetComments copies the dest comments to the source comments if they are present -// on the source. 
-func (m Merger) SetComments(sources walk.Sources) error { - source := sources.Origin() - dest := sources.Dest() - if dest == nil || dest.YNode() == nil || source == nil || source.YNode() == nil { - // avoid panic - return nil - } - if source.YNode().FootComment != "" { - dest.YNode().FootComment = source.YNode().FootComment - } - if source.YNode().HeadComment != "" { - dest.YNode().HeadComment = source.YNode().HeadComment - } - if source.YNode().LineComment != "" { - dest.YNode().LineComment = source.YNode().LineComment - } - return nil -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/smpdirective.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/smpdirective.go deleted file mode 100644 index f38b188ea..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/smpdirective.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package merge2 - -import ( - "fmt" - - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// A strategic merge patch directive. -// See https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md -// -//go:generate stringer -type=smpDirective -linecomment -type smpDirective int - -const ( - smpUnknown smpDirective = iota // unknown - smpReplace // replace - smpDelete // delete - smpMerge // merge -) - -const strategicMergePatchDirectiveKey = "$patch" - -// Examine patch for a strategic merge patch directive. -// If found, return it, and remove the directive from the patch. -func determineSmpDirective(patch *yaml.RNode) (smpDirective, error) { - if patch == nil { - return smpMerge, nil - } - switch patch.YNode().Kind { - case yaml.SequenceNode: - return determineSequenceNodePatchStrategy(patch) - case yaml.MappingNode: - return determineMappingNodePatchStrategy(patch) - default: - return smpUnknown, fmt.Errorf( - "no implemented strategic merge patch strategy for '%s' ('%s')", - patch.YNode().ShortTag(), patch.MustString()) - } -} - -func determineSequenceNodePatchStrategy(patch *yaml.RNode) (smpDirective, error) { - // get the $patch element - node, err := patch.Pipe(yaml.GetElementByKey(strategicMergePatchDirectiveKey)) - // if there are more than 1 key/value pair in the map, then this $patch - // is not for the sequence - if err != nil || node == nil || node.YNode() == nil || len(node.Content()) > 2 { - return smpMerge, nil - } - // get the value - value, err := node.Pipe(yaml.Get(strategicMergePatchDirectiveKey)) - if err != nil || value == nil || value.YNode() == nil { - return smpMerge, nil - } - v := value.YNode().Value - if v == smpDelete.String() { - return smpDelete, elideSequencePatchDirective(patch, v) - } - if v == smpReplace.String() { - return smpReplace, elideSequencePatchDirective(patch, v) - } - if v == smpMerge.String() { - return smpMerge, elideSequencePatchDirective(patch, v) - } - return smpUnknown, fmt.Errorf( - "unknown patch strategy '%s'", v) -} - -func determineMappingNodePatchStrategy(patch *yaml.RNode) (smpDirective, error) { - node, err := patch.Pipe(yaml.Get(strategicMergePatchDirectiveKey)) - if err != nil || node == nil || node.YNode() == nil { - return smpMerge, nil - } - v := node.YNode().Value - if v == smpDelete.String() { - return smpDelete, elideMappingPatchDirective(patch) - } - if v == smpReplace.String() { - return smpReplace, elideMappingPatchDirective(patch) - } - if v == smpMerge.String() { - return smpMerge, elideMappingPatchDirective(patch) - } - return smpUnknown, fmt.Errorf( - 
"unknown patch strategy '%s'", v) -} - -func elideMappingPatchDirective(patch *yaml.RNode) error { - return patch.PipeE(yaml.Clear(strategicMergePatchDirectiveKey)) -} - -func elideSequencePatchDirective(patch *yaml.RNode, value string) error { - return patch.PipeE(yaml.ElementSetter{ - Element: nil, - Keys: []string{strategicMergePatchDirectiveKey}, - Values: []string{value}, - }) -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/smpdirective_string.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/smpdirective_string.go deleted file mode 100644 index b4f937f0e..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge2/smpdirective_string.go +++ /dev/null @@ -1,26 +0,0 @@ -// Code generated by "stringer -type=smpDirective -linecomment"; DO NOT EDIT. - -package merge2 - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[smpUnknown-0] - _ = x[smpReplace-1] - _ = x[smpDelete-2] - _ = x[smpMerge-3] -} - -const _smpDirective_name = "unknownreplacedeletemerge" - -var _smpDirective_index = [...]uint8{0, 7, 14, 20, 25} - -func (i smpDirective) String() string { - if i < 0 || i >= smpDirective(len(_smpDirective_index)-1) { - return "smpDirective(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _smpDirective_name[_smpDirective_index[i]:_smpDirective_index[i+1]] -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge3/merge3.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge3/merge3.go deleted file mode 100644 index 664270e5b..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge3/merge3.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package merge contains libraries for merging fields from one RNode to another -// RNode -package merge3 - -import ( - "sigs.k8s.io/kustomize/kyaml/yaml" - "sigs.k8s.io/kustomize/kyaml/yaml/walk" -) - -func Merge(dest, original, update *yaml.RNode) (*yaml.RNode, error) { - // if update == nil && original != nil => declarative deletion - - return walk.Walker{ - Visitor: Visitor{}, - VisitKeysAsScalars: true, - Sources: []*yaml.RNode{dest, original, update}}.Walk() -} - -func MergeStrings(dest, original, update string, infer bool) (string, error) { - srcOriginal, err := yaml.Parse(original) - if err != nil { - return "", err - } - srcUpdated, err := yaml.Parse(update) - if err != nil { - return "", err - } - d, err := yaml.Parse(dest) - if err != nil { - return "", err - } - - result, err := walk.Walker{ - InferAssociativeLists: infer, - Visitor: Visitor{}, - VisitKeysAsScalars: true, - Sources: []*yaml.RNode{d, srcOriginal, srcUpdated}}.Walk() - if err != nil { - return "", err - } - return result.String() -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge3/visitor.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge3/visitor.go deleted file mode 100644 index 978deff0c..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/merge3/visitor.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package merge3 - -import ( - "sigs.k8s.io/kustomize/kyaml/openapi" - "sigs.k8s.io/kustomize/kyaml/yaml" - "sigs.k8s.io/kustomize/kyaml/yaml/walk" -) - -type ConflictStrategy uint - -const ( - // TODO: Support more strategies - TakeUpdate ConflictStrategy = 1 + iota -) - -type Visitor struct{} - -func (m Visitor) VisitMap(nodes walk.Sources, s *openapi.ResourceSchema) (*yaml.RNode, error) { - if nodes.Updated().IsTaggedNull() || nodes.Dest().IsTaggedNull() { - // explicitly cleared from either dest or update - return walk.ClearNode, nil - } - if nodes.Dest() == nil && nodes.Updated() == nil { - // implicitly cleared missing from both dest and update - return walk.ClearNode, nil - } - - if nodes.Dest() == nil { - // not cleared, but missing from the dest - // initialize a new value that can be recursively merged - return yaml.NewRNode(&yaml.Node{Kind: yaml.MappingNode}), nil - } - - // recursively merge the dest with the original and updated - return nodes.Dest(), nil -} - -func (m Visitor) visitAList(nodes walk.Sources, _ *openapi.ResourceSchema) (*yaml.RNode, error) { - if yaml.IsMissingOrNull(nodes.Updated()) && !yaml.IsMissingOrNull(nodes.Origin()) { - // implicitly cleared from update -- element was deleted - return walk.ClearNode, nil - } - if yaml.IsMissingOrNull(nodes.Dest()) { - // not cleared, but missing from the dest - // initialize a new value that can be recursively merged - return yaml.NewRNode(&yaml.Node{Kind: yaml.SequenceNode}), nil - } - - // recursively merge the dest with the original and updated - return nodes.Dest(), nil -} - -func (m Visitor) VisitScalar(nodes walk.Sources, s *openapi.ResourceSchema) (*yaml.RNode, error) { - if nodes.Updated().IsTaggedNull() || nodes.Dest().IsTaggedNull() { - // explicitly cleared from either dest or update - return nil, nil - } - if yaml.IsMissingOrNull(nodes.Updated()) != yaml.IsMissingOrNull(nodes.Origin()) { - // value added or removed in update - return nodes.Updated(), nil - } - if yaml.IsMissingOrNull(nodes.Updated()) && yaml.IsMissingOrNull(nodes.Origin()) { - // value added or removed in update - return nodes.Dest(), nil - } - - values, err := m.getStrValues(nodes) - if err != nil { - return nil, err - } - - if (values.Dest == "" || values.Dest == values.Origin) && values.Origin != values.Update { - // if local is nil or is unchanged but there is new update - return nodes.Updated(), nil - } - - if nodes.Updated().YNode().Value != nodes.Origin().YNode().Value { - // value changed in update - return nodes.Updated(), nil - } - - // unchanged between origin and update, keep the dest - return nodes.Dest(), nil -} - -func (m Visitor) visitNAList(nodes walk.Sources) (*yaml.RNode, error) { - if nodes.Updated().IsTaggedNull() || nodes.Dest().IsTaggedNull() { - // explicitly cleared from either dest or update - return walk.ClearNode, nil - } - - if yaml.IsMissingOrNull(nodes.Updated()) != yaml.IsMissingOrNull(nodes.Origin()) { - // value added or removed in update - return nodes.Updated(), nil - } - if yaml.IsMissingOrNull(nodes.Updated()) && yaml.IsMissingOrNull(nodes.Origin()) { - // value not present in source or dest - return nodes.Dest(), nil - } - - // compare origin and update values to see if they have changed - values, err := m.getStrValues(nodes) - if err != nil { - return nil, err - } - if values.Update != values.Origin { - // value changed in update - return nodes.Updated(), nil - } - - // unchanged between origin and update, keep the dest - return nodes.Dest(), nil -} - -func (m 
Visitor) VisitList(nodes walk.Sources, s *openapi.ResourceSchema, kind walk.ListKind) (*yaml.RNode, error) { - if kind == walk.AssociativeList { - return m.visitAList(nodes, s) - } - // non-associative list - return m.visitNAList(nodes) -} - -func (m Visitor) getStrValues(nodes walk.Sources) (strValues, error) { - var uStr, oStr, dStr string - var err error - if nodes.Updated() != nil && nodes.Updated().YNode() != nil { - s := nodes.Updated().YNode().Style - defer func() { - nodes.Updated().YNode().Style = s - }() - nodes.Updated().YNode().Style = yaml.FlowStyle | yaml.SingleQuotedStyle - uStr, err = nodes.Updated().String() - if err != nil { - return strValues{}, err - } - } - if nodes.Origin() != nil && nodes.Origin().YNode() != nil { - s := nodes.Origin().YNode().Style - defer func() { - nodes.Origin().YNode().Style = s - }() - nodes.Origin().YNode().Style = yaml.FlowStyle | yaml.SingleQuotedStyle - oStr, err = nodes.Origin().String() - if err != nil { - return strValues{}, err - } - } - if nodes.Dest() != nil && nodes.Dest().YNode() != nil { - s := nodes.Dest().YNode().Style - defer func() { - nodes.Dest().YNode().Style = s - }() - nodes.Dest().YNode().Style = yaml.FlowStyle | yaml.SingleQuotedStyle - dStr, err = nodes.Dest().String() - if err != nil { - return strValues{}, err - } - } - - return strValues{Origin: oStr, Update: uStr, Dest: dStr}, nil -} - -type strValues struct { - Origin string - Update string - Dest string -} - -var _ walk.Visitor = Visitor{} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/order.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/order.go deleted file mode 100644 index 4e01c6489..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/order.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package yaml - -// fieldSortOrder contains the relative ordering of fields when formatting an -// object. 
-var fieldSortOrder = []string{ - // top-level metadata - "name", "generateName", "namespace", "clusterName", - "apiVersion", "kind", "metadata", "type", - "labels", "annotations", - "spec", "status", - - // secret and configmap - "stringData", "data", "binaryData", - - // cronjobspec, daemonsetspec, deploymentspec, statefulsetspec, - // jobspec fields - "parallelism", "completions", "activeDeadlineSeconds", "backoffLimit", - "replicas", "selector", "manualSelector", "template", - "ttlSecondsAfterFinished", "volumeClaimTemplates", "service", "serviceName", - "podManagementPolicy", "updateStrategy", "strategy", "minReadySeconds", - "revision", "revisionHistoryLimit", "paused", "progressDeadlineSeconds", - - // podspec - // podspec scalars - "restartPolicy", "terminationGracePeriodSeconds", - "activeDeadlineSeconds", "dnsPolicy", "serviceAccountName", - "serviceAccount", "automountServiceAccountToken", "nodeName", - "hostNetwork", "hostPID", "hostIPC", "shareProcessNamespace", "hostname", - "subdomain", "schedulerName", "priorityClassName", "priority", - "runtimeClassName", "enableServiceLinks", - - // podspec lists and maps - "nodeSelector", "hostAliases", - - // podspec objects - "initContainers", "containers", "volumes", "securityContext", - "imagePullSecrets", "affinity", "tolerations", "dnsConfig", - "readinessGates", - - // containers - "image", "command", "args", "workingDir", "ports", "envFrom", "env", - "resources", "volumeMounts", "volumeDevices", "livenessProbe", - "readinessProbe", "lifecycle", "terminationMessagePath", - "terminationMessagePolicy", "imagePullPolicy", "securityContext", - "stdin", "stdinOnce", "tty", - - // service - "clusterIP", "externalIPs", "loadBalancerIP", "loadBalancerSourceRanges", - "externalName", "externalTrafficPolicy", "sessionAffinity", - - // ports - "protocol", "port", "targetPort", "hostPort", "containerPort", "hostIP", - - // volumemount - "readOnly", "mountPath", "subPath", "subPathExpr", "mountPropagation", - - // envvar + envvarsource - "value", "valueFrom", "fieldRef", "resourceFieldRef", "configMapKeyRef", - "secretKeyRef", "prefix", "configMapRef", "secretRef", -} - -type set map[string]interface{} - -func newSet(values ...string) set { - m := map[string]interface{}{} - for _, value := range values { - m[value] = nil - } - return m -} - -func (s set) Has(key string) bool { - _, found := s[key] - return found -} - -// WhitelistedListSortKinds contains the set of kinds that are whitelisted -// for sorting list field elements -var WhitelistedListSortKinds = newSet( - "CronJob", "DaemonSet", "Deployment", "Job", "ReplicaSet", "StatefulSet", - "ValidatingWebhookConfiguration") - -// WhitelistedListSortApis contains the set of apis that are whitelisted for -// sorting list field elements -var WhitelistedListSortApis = newSet( - "apps/v1", "apps/v1beta1", "apps/v1beta2", "batch/v1", "batch/v1beta1", - "extensions/v1beta1", "v1", "admissionregistration.k8s.io/v1") - -// WhitelistedListSortFields contains json paths to list fields that should -// be sorted, and the field they should be sorted by -var WhitelistedListSortFields = map[string]string{ - ".spec.template.spec.containers": "name", - ".webhooks.rules.operations": "", -} - -// FieldOrder indexes fields and maps them to relative precedence -var FieldOrder = func() map[string]int { - // create an index of field orderings - fo := map[string]int{} - for i, f := range fieldSortOrder { - fo[f] = i + 1 - } - return fo -}() diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/rnode.go 
b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/rnode.go deleted file mode 100644 index 07bb8a0e1..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/rnode.go +++ /dev/null @@ -1,1294 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package yaml - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "log" - "regexp" - "strconv" - "strings" - - "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml" - "sigs.k8s.io/kustomize/kyaml/sliceutil" - "sigs.k8s.io/kustomize/kyaml/utils" - "sigs.k8s.io/kustomize/kyaml/yaml/internal/k8sgen/pkg/labels" -) - -// MakeNullNode returns an RNode that represents an empty document. -func MakeNullNode() *RNode { - return NewRNode(&Node{Tag: NodeTagNull}) -} - -// IsMissingOrNull is true if the RNode is nil or explicitly tagged null. -// TODO: make this a method on RNode. -func IsMissingOrNull(node *RNode) bool { - return node.IsNil() || node.YNode().Tag == NodeTagNull -} - -// IsEmptyMap returns true if the RNode is an empty node or an empty map. -// TODO: make this a method on RNode. -func IsEmptyMap(node *RNode) bool { - return IsMissingOrNull(node) || IsYNodeEmptyMap(node.YNode()) -} - -// GetValue returns underlying yaml.Node Value field -func GetValue(node *RNode) string { - if IsMissingOrNull(node) { - return "" - } - return node.YNode().Value -} - -// Parse parses a yaml string into an *RNode. -// To parse multiple resources, consider a kio.ByteReader -func Parse(value string) (*RNode, error) { - return Parser{Value: value}.Filter(nil) -} - -// ReadFile parses a single Resource from a yaml file. -// To parse multiple resources, consider a kio.ByteReader -func ReadFile(path string) (*RNode, error) { - b, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - return Parse(string(b)) -} - -// WriteFile writes a single Resource to a yaml file -func WriteFile(node *RNode, path string) error { - out, err := node.String() - if err != nil { - return err - } - return ioutil.WriteFile(path, []byte(out), 0600) -} - -// UpdateFile reads the file at path, applies the filter to it, and write the result back. -// path must contain a exactly 1 resource (YAML). -func UpdateFile(filter Filter, path string) error { - // Read the yaml - y, err := ReadFile(path) - if err != nil { - return err - } - - // Update the yaml - if err := y.PipeE(filter); err != nil { - return err - } - - // Write the yaml - return WriteFile(y, path) -} - -// MustParse parses a yaml string into an *RNode and panics if there is an error -func MustParse(value string) *RNode { - v, err := Parser{Value: value}.Filter(nil) - if err != nil { - panic(err) - } - return v -} - -// NewScalarRNode returns a new Scalar *RNode containing the provided scalar value. -func NewScalarRNode(value string) *RNode { - return &RNode{ - value: &yaml.Node{ - Kind: yaml.ScalarNode, - Value: value, - }} -} - -// NewStringRNode returns a new Scalar *RNode containing the provided string. -// If the string is non-utf8, it will be base64 encoded, and the tag -// will indicate binary data. -func NewStringRNode(value string) *RNode { - n := yaml.Node{Kind: yaml.ScalarNode} - n.SetString(value) - return NewRNode(&n) -} - -// NewListRNode returns a new List *RNode containing the provided scalar values. 
-func NewListRNode(values ...string) *RNode { - seq := &RNode{value: &yaml.Node{Kind: yaml.SequenceNode}} - for _, v := range values { - seq.value.Content = append(seq.value.Content, &yaml.Node{ - Kind: yaml.ScalarNode, - Value: v, - }) - } - return seq -} - -// NewMapRNode returns a new Map *RNode containing the provided values -func NewMapRNode(values *map[string]string) *RNode { - m := &RNode{value: &yaml.Node{ - Kind: yaml.MappingNode, - }} - if values == nil { - return m - } - - for k, v := range *values { - m.value.Content = append(m.value.Content, &yaml.Node{ - Kind: yaml.ScalarNode, - Value: k, - }, &yaml.Node{ - Kind: yaml.ScalarNode, - Value: v, - }) - } - - return m -} - -// SyncMapNodesOrder sorts the map node keys in 'to' node to match the order of -// map node keys in 'from' node, additional keys are moved to the end -func SyncMapNodesOrder(from, to *RNode) { - to.Copy() - res := &RNode{value: &yaml.Node{ - Kind: to.YNode().Kind, - Style: to.YNode().Style, - Tag: to.YNode().Tag, - Anchor: to.YNode().Anchor, - Alias: to.YNode().Alias, - HeadComment: to.YNode().HeadComment, - LineComment: to.YNode().LineComment, - FootComment: to.YNode().FootComment, - Line: to.YNode().Line, - Column: to.YNode().Column, - }} - - fromFieldNames, err := from.Fields() - if err != nil { - return - } - - toFieldNames, err := to.Fields() - if err != nil { - return - } - - for _, fieldName := range fromFieldNames { - if !sliceutil.Contains(toFieldNames, fieldName) { - continue - } - // append the common nodes in the order defined in 'from' node - res.value.Content = append(res.value.Content, to.Field(fieldName).Key.YNode(), to.Field(fieldName).Value.YNode()) - toFieldNames = sliceutil.Remove(toFieldNames, fieldName) - } - - for _, fieldName := range toFieldNames { - // append the residual nodes which are not present in 'from' node - res.value.Content = append(res.value.Content, to.Field(fieldName).Key.YNode(), to.Field(fieldName).Value.YNode()) - } - - to.SetYNode(res.YNode()) -} - -// NewRNode returns a new RNode pointer containing the provided Node. -func NewRNode(value *yaml.Node) *RNode { - return &RNode{value: value} -} - -// RNode provides functions for manipulating Kubernetes Resources -// Objects unmarshalled into *yaml.Nodes -type RNode struct { - // fieldPath contains the path from the root of the KubernetesObject to - // this field. - // Only field names are captured in the path. - // e.g. a image field in a Deployment would be - // 'spec.template.spec.containers.image' - fieldPath []string - - // FieldValue contains the value. - // FieldValue is always set: - // field: field value - // list entry: list entry value - // object root: object root - value *yaml.Node - - Match []string -} - -// Copy returns a distinct copy. -func (rn *RNode) Copy() *RNode { - if rn == nil { - return nil - } - result := *rn - result.value = CopyYNode(rn.value) - return &result -} - -var ErrMissingMetadata = fmt.Errorf("missing Resource metadata") - -// IsNil is true if the node is nil, or its underlying YNode is nil. -func (rn *RNode) IsNil() bool { - return rn == nil || rn.YNode() == nil -} - -// IsTaggedNull is true if a non-nil node is explicitly tagged Null. -func (rn *RNode) IsTaggedNull() bool { - return !rn.IsNil() && IsYNodeTaggedNull(rn.YNode()) -} - -// IsNilOrEmpty is true if the node is nil, -// has no YNode, or has YNode that appears empty. 
-func (rn *RNode) IsNilOrEmpty() bool { - return rn.IsNil() || - IsYNodeTaggedNull(rn.YNode()) || - IsYNodeEmptyMap(rn.YNode()) || - IsYNodeEmptySeq(rn.YNode()) || - IsYNodeZero(rn.YNode()) -} - -// IsStringValue is true if the RNode is not nil and is scalar string node -func (rn *RNode) IsStringValue() bool { - return !rn.IsNil() && IsYNodeString(rn.YNode()) -} - -// GetMeta returns the ResourceMeta for an RNode -func (rn *RNode) GetMeta() (ResourceMeta, error) { - if IsMissingOrNull(rn) { - return ResourceMeta{}, nil - } - missingMeta := true - n := rn - if n.YNode().Kind == DocumentNode { - // get the content is this is the document node - n = NewRNode(n.Content()[0]) - } - - // don't decode into the struct directly or it will fail on UTF-8 issues - // which appear in comments - m := ResourceMeta{} - - // TODO: consider optimizing this parsing - if f := n.Field(APIVersionField); !f.IsNilOrEmpty() { - m.APIVersion = GetValue(f.Value) - missingMeta = false - } - if f := n.Field(KindField); !f.IsNilOrEmpty() { - m.Kind = GetValue(f.Value) - missingMeta = false - } - - mf := n.Field(MetadataField) - if mf.IsNilOrEmpty() { - if missingMeta { - return m, ErrMissingMetadata - } - return m, nil - } - meta := mf.Value - - if f := meta.Field(NameField); !f.IsNilOrEmpty() { - m.Name = f.Value.YNode().Value - missingMeta = false - } - if f := meta.Field(NamespaceField); !f.IsNilOrEmpty() { - m.Namespace = GetValue(f.Value) - missingMeta = false - } - - if f := meta.Field(LabelsField); !f.IsNilOrEmpty() { - m.Labels = map[string]string{} - _ = f.Value.VisitFields(func(node *MapNode) error { - m.Labels[GetValue(node.Key)] = GetValue(node.Value) - return nil - }) - missingMeta = false - } - if f := meta.Field(AnnotationsField); !f.IsNilOrEmpty() { - m.Annotations = map[string]string{} - _ = f.Value.VisitFields(func(node *MapNode) error { - m.Annotations[GetValue(node.Key)] = GetValue(node.Value) - return nil - }) - missingMeta = false - } - - if missingMeta { - return m, ErrMissingMetadata - } - return m, nil -} - -// Pipe sequentially invokes each Filter, and passes the result to the next -// Filter. -// -// Analogous to http://www.linfo.org/pipes.html -// -// * rn is provided as input to the first Filter. -// * if any Filter returns an error, immediately return the error -// * if any Filter returns a nil RNode, immediately return nil, nil -// * if all Filters succeed with non-empty results, return the final result -func (rn *RNode) Pipe(functions ...Filter) (*RNode, error) { - // check if rn is nil to make chaining Pipe calls easier - if rn == nil { - return nil, nil - } - - var v *RNode - var err error - if rn.value != nil && rn.value.Kind == yaml.DocumentNode { - // the first node may be a DocumentNode containing a single MappingNode - v = &RNode{value: rn.value.Content[0]} - } else { - v = rn - } - - // return each fn in sequence until encountering an error or missing value - for _, c := range functions { - v, err = c.Filter(v) - if err != nil || v == nil { - return v, errors.Wrap(err) - } - } - return v, err -} - -// PipeE runs Pipe, dropping the *RNode return value. -// Useful for directly returning the Pipe error value from functions. -func (rn *RNode) PipeE(functions ...Filter) error { - _, err := rn.Pipe(functions...) - return errors.Wrap(err) -} - -// Document returns the Node for the value. -func (rn *RNode) Document() *yaml.Node { - return rn.value -} - -// YNode returns the yaml.Node value. 
If the yaml.Node value is a DocumentNode, -// YNode will return the DocumentNode Content entry instead of the DocumentNode. -func (rn *RNode) YNode() *yaml.Node { - if rn == nil || rn.value == nil { - return nil - } - if rn.value.Kind == yaml.DocumentNode { - return rn.value.Content[0] - } - return rn.value -} - -// SetYNode sets the yaml.Node value on an RNode. -func (rn *RNode) SetYNode(node *yaml.Node) { - if rn.value == nil || node == nil { - rn.value = node - return - } - *rn.value = *node -} - -// GetKind returns the kind, if it exists, else empty string. -func (rn *RNode) GetKind() string { - if node := rn.getMapFieldValue(KindField); node != nil { - return node.Value - } - return "" -} - -// SetKind sets the kind. -func (rn *RNode) SetKind(k string) { - rn.SetMapField(NewScalarRNode(k), KindField) -} - -// GetApiVersion returns the apiversion, if it exists, else empty string. -func (rn *RNode) GetApiVersion() string { - if node := rn.getMapFieldValue(APIVersionField); node != nil { - return node.Value - } - return "" -} - -// SetApiVersion sets the apiVersion. -func (rn *RNode) SetApiVersion(av string) { - rn.SetMapField(NewScalarRNode(av), APIVersionField) -} - -// getMapFieldValue returns the value (*yaml.Node) of a mapping field. -// The value might be nil. Also, the function returns nil, not an error, -// if this node is not a mapping node, or if this node does not have the -// given field, so this function cannot be used to make distinctions -// between these cases. -func (rn *RNode) getMapFieldValue(field string) *yaml.Node { - for i := 0; i < len(rn.Content()); i = IncrementFieldIndex(i) { - if rn.Content()[i].Value == field { - return rn.Content()[i+1] - } - } - return nil -} - -// GetName returns the name, or empty string if -// field not found. The setter is more restrictive. -func (rn *RNode) GetName() string { - return rn.getMetaStringField(NameField) -} - -// getMetaStringField returns the value of a string field in metadata. -func (rn *RNode) getMetaStringField(fName string) string { - md := rn.getMetaData() - if md == nil { - return "" - } - f := md.Field(fName) - if f.IsNilOrEmpty() { - return "" - } - return GetValue(f.Value) -} - -// getMetaData returns the RNode holding the value of the metadata field. -// Return nil if field not found (no error). -func (rn *RNode) getMetaData() *RNode { - if IsMissingOrNull(rn) { - return nil - } - var n *RNode - if rn.YNode().Kind == DocumentNode { - // get the content if this is the document node - n = NewRNode(rn.Content()[0]) - } else { - n = rn - } - mf := n.Field(MetadataField) - if mf.IsNilOrEmpty() { - return nil - } - return mf.Value -} - -// SetName sets the metadata name field. -func (rn *RNode) SetName(name string) error { - return rn.SetMapField(NewScalarRNode(name), MetadataField, NameField) -} - -// GetNamespace gets the metadata namespace field, or empty string if -// field not found. The setter is more restrictive. -func (rn *RNode) GetNamespace() string { - return rn.getMetaStringField(NamespaceField) -} - -// SetNamespace tries to set the metadata namespace field. If the argument -// is empty, the field is dropped. -func (rn *RNode) SetNamespace(ns string) error { - meta, err := rn.Pipe(Lookup(MetadataField)) - if err != nil { - return err - } - if ns == "" { - if rn == nil { - return nil - } - return meta.PipeE(Clear(NamespaceField)) - } - return rn.SetMapField( - NewScalarRNode(ns), MetadataField, NamespaceField) -} - -// GetAnnotations gets the metadata annotations field. 
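A small, hedged example of the identity accessors defined above (GetKind, GetApiVersion, GetName, SetName, SetNamespace); the Service manifest and the new name and namespace values are made up for illustration.

```go
package main

import (
	"fmt"
	"log"

	"sigs.k8s.io/kustomize/kyaml/yaml"
)

func main() {
	rn := yaml.MustParse(`
apiVersion: v1
kind: Service
metadata:
  name: web
`)

	// Read the identity fields documented above.
	fmt.Println(rn.GetKind(), rn.GetApiVersion(), rn.GetName())

	// SetName and SetNamespace write through metadata, creating the
	// metadata map if it does not already exist.
	if err := rn.SetName("web-v2"); err != nil {
		log.Fatal(err)
	}
	if err := rn.SetNamespace("prod"); err != nil {
		log.Fatal(err)
	}
	fmt.Println(rn.MustString())
}
```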
-// If the field is missing, returns an empty map. -// Use another method to check for missing metadata. -func (rn *RNode) GetAnnotations() map[string]string { - meta := rn.getMetaData() - if meta == nil { - return make(map[string]string) - } - return rn.getMapFromMeta(meta, AnnotationsField) -} - -// SetAnnotations tries to set the metadata annotations field. -func (rn *RNode) SetAnnotations(m map[string]string) error { - return rn.setMapInMetadata(m, AnnotationsField) -} - -// GetLabels gets the metadata labels field. -// If the field is missing, returns an empty map. -// Use another method to check for missing metadata. -func (rn *RNode) GetLabels() map[string]string { - meta := rn.getMetaData() - if meta == nil { - return make(map[string]string) - } - return rn.getMapFromMeta(meta, LabelsField) -} - -// getMapFromMeta returns map, sometimes empty, from metadata. -func (rn *RNode) getMapFromMeta(meta *RNode, fName string) map[string]string { - result := make(map[string]string) - if f := meta.Field(fName); !f.IsNilOrEmpty() { - _ = f.Value.VisitFields(func(node *MapNode) error { - result[GetValue(node.Key)] = GetValue(node.Value) - return nil - }) - } - return result -} - -// SetLabels sets the metadata labels field. -func (rn *RNode) SetLabels(m map[string]string) error { - return rn.setMapInMetadata(m, LabelsField) -} - -// This established proper quoting on string values, and sorts by key. -func (rn *RNode) setMapInMetadata(m map[string]string, field string) error { - meta, err := rn.Pipe(LookupCreate(MappingNode, MetadataField)) - if err != nil { - return err - } - if err = meta.PipeE(Clear(field)); err != nil { - return err - } - if len(m) == 0 { - return nil - } - mapNode, err := meta.Pipe(LookupCreate(MappingNode, field)) - if err != nil { - return err - } - for _, k := range SortedMapKeys(m) { - if _, err := mapNode.Pipe( - SetField(k, NewStringRNode(m[k]))); err != nil { - return err - } - } - return nil -} - -func (rn *RNode) SetMapField(value *RNode, path ...string) error { - return rn.PipeE( - LookupCreate(yaml.MappingNode, path[0:len(path)-1]...), - SetField(path[len(path)-1], value), - ) -} - -func (rn *RNode) GetDataMap() map[string]string { - n, err := rn.Pipe(Lookup(DataField)) - if err != nil { - return nil - } - result := map[string]string{} - _ = n.VisitFields(func(node *MapNode) error { - result[GetValue(node.Key)] = GetValue(node.Value) - return nil - }) - return result -} - -func (rn *RNode) GetBinaryDataMap() map[string]string { - n, err := rn.Pipe(Lookup(BinaryDataField)) - if err != nil { - return nil - } - result := map[string]string{} - _ = n.VisitFields(func(node *MapNode) error { - result[GetValue(node.Key)] = GetValue(node.Value) - return nil - }) - return result -} - -// GetValidatedDataMap retrieves the data map and returns an error if the data -// map contains entries which are not included in the expectedKeys set. 
-func (rn *RNode) GetValidatedDataMap(expectedKeys []string) (map[string]string, error) { - dataMap := rn.GetDataMap() - err := rn.validateDataMap(dataMap, expectedKeys) - return dataMap, err -} - -func (rn *RNode) validateDataMap(dataMap map[string]string, expectedKeys []string) error { - if dataMap == nil { - return fmt.Errorf("The datamap is unassigned") - } - for key := range dataMap { - found := false - for _, expected := range expectedKeys { - if expected == key { - found = true - } - } - if !found { - return fmt.Errorf("an unexpected key (%v) was found", key) - } - } - return nil -} - -func (rn *RNode) SetDataMap(m map[string]string) { - if rn == nil { - log.Fatal("cannot set data map on nil Rnode") - } - if err := rn.PipeE(Clear(DataField)); err != nil { - log.Fatal(err) - } - if len(m) == 0 { - return - } - if err := rn.LoadMapIntoConfigMapData(m); err != nil { - log.Fatal(err) - } -} - -func (rn *RNode) SetBinaryDataMap(m map[string]string) { - if rn == nil { - log.Fatal("cannot set binaryData map on nil Rnode") - } - if err := rn.PipeE(Clear(BinaryDataField)); err != nil { - log.Fatal(err) - } - if len(m) == 0 { - return - } - if err := rn.LoadMapIntoConfigMapBinaryData(m); err != nil { - log.Fatal(err) - } -} - -// AppendToFieldPath appends a field name to the FieldPath. -func (rn *RNode) AppendToFieldPath(parts ...string) { - rn.fieldPath = append(rn.fieldPath, parts...) -} - -// FieldPath returns the field path from the Resource root node, to rn. -// Does not include list indexes. -func (rn *RNode) FieldPath() []string { - return rn.fieldPath -} - -// String returns string representation of the RNode -func (rn *RNode) String() (string, error) { - if rn == nil { - return "", nil - } - return String(rn.value) -} - -// MustString returns string representation of the RNode or panics if there is an error -func (rn *RNode) MustString() string { - s, err := rn.String() - if err != nil { - panic(err) - } - return s -} - -// Content returns Node Content field. -func (rn *RNode) Content() []*yaml.Node { - if rn == nil { - return nil - } - return rn.YNode().Content -} - -// Fields returns the list of field names for a MappingNode. -// Returns an error for non-MappingNodes. -func (rn *RNode) Fields() ([]string, error) { - if err := ErrorIfInvalid(rn, yaml.MappingNode); err != nil { - return nil, errors.Wrap(err) - } - var fields []string - for i := 0; i < len(rn.Content()); i += 2 { - fields = append(fields, rn.Content()[i].Value) - } - return fields, nil -} - -// FieldRNodes returns the list of field key RNodes for a MappingNode. -// Returns an error for non-MappingNodes. -func (rn *RNode) FieldRNodes() ([]*RNode, error) { - if err := ErrorIfInvalid(rn, yaml.MappingNode); err != nil { - return nil, errors.Wrap(err) - } - var fields []*RNode - for i := 0; i < len(rn.Content()); i += 2 { - yNode := rn.Content()[i] - // for each key node in the input mapping node contents create equivalent rNode - rNode := &RNode{} - rNode.SetYNode(yNode) - fields = append(fields, rNode) - } - return fields, nil -} - -// Field returns a fieldName, fieldValue pair for MappingNodes. -// Returns nil for non-MappingNodes. 
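A brief sketch of the metadata and data-map setters shown above, assuming a plain ConfigMap; the label keys and data entries are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"sigs.k8s.io/kustomize/kyaml/yaml"
)

func main() {
	cm := yaml.MustParse(`
apiVersion: v1
kind: ConfigMap
metadata:
  name: settings
`)

	// SetLabels writes metadata.labels with sorted keys and proper quoting.
	if err := cm.SetLabels(map[string]string{"app": "demo", "tier": "backend"}); err != nil {
		log.Fatal(err)
	}

	// SetDataMap replaces the data field wholesale.
	cm.SetDataMap(map[string]string{"LOG_LEVEL": "debug"})

	// Fields lists the top-level keys of the mapping node.
	fields, err := cm.Fields()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(fields)
	fmt.Println(cm.MustString())
}
```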
-func (rn *RNode) Field(field string) *MapNode { - if rn.YNode().Kind != yaml.MappingNode { - return nil - } - for i := 0; i < len(rn.Content()); i = IncrementFieldIndex(i) { - isMatchingField := rn.Content()[i].Value == field - if isMatchingField { - return &MapNode{Key: NewRNode(rn.Content()[i]), Value: NewRNode(rn.Content()[i+1])} - } - } - return nil -} - -// VisitFields calls fn for each field in the RNode. -// Returns an error for non-MappingNodes. -func (rn *RNode) VisitFields(fn func(node *MapNode) error) error { - // get the list of srcFieldNames - srcFieldNames, err := rn.Fields() - if err != nil { - return errors.Wrap(err) - } - - // visit each field - for _, fieldName := range srcFieldNames { - if err := fn(rn.Field(fieldName)); err != nil { - return errors.Wrap(err) - } - } - return nil -} - -// Elements returns the list of elements in the RNode. -// Returns an error for non-SequenceNodes. -func (rn *RNode) Elements() ([]*RNode, error) { - if err := ErrorIfInvalid(rn, yaml.SequenceNode); err != nil { - return nil, errors.Wrap(err) - } - var elements []*RNode - for i := 0; i < len(rn.Content()); i++ { - elements = append(elements, NewRNode(rn.Content()[i])) - } - return elements, nil -} - -// ElementValues returns a list of all observed values for a given field name -// in a list of elements. -// Returns error for non-SequenceNodes. -func (rn *RNode) ElementValues(key string) ([]string, error) { - if err := ErrorIfInvalid(rn, yaml.SequenceNode); err != nil { - return nil, errors.Wrap(err) - } - var elements []string - for i := 0; i < len(rn.Content()); i++ { - field := NewRNode(rn.Content()[i]).Field(key) - if !field.IsNilOrEmpty() { - elements = append(elements, field.Value.YNode().Value) - } - } - return elements, nil -} - -// ElementValuesList returns a list of lists, where each list is a set of -// values corresponding to each key in keys. -// Returns error for non-SequenceNodes. -func (rn *RNode) ElementValuesList(keys []string) ([][]string, error) { - if err := ErrorIfInvalid(rn, yaml.SequenceNode); err != nil { - return nil, errors.Wrap(err) - } - elements := make([][]string, len(rn.Content())) - - for i := 0; i < len(rn.Content()); i++ { - for _, key := range keys { - field := NewRNode(rn.Content()[i]).Field(key) - if field.IsNilOrEmpty() { - elements[i] = append(elements[i], "") - } else { - elements[i] = append(elements[i], field.Value.YNode().Value) - } - } - } - return elements, nil -} - -// Element returns the element in the list which contains the field matching the value. -// Returns nil for non-SequenceNodes or if no Element matches. -func (rn *RNode) Element(key, value string) *RNode { - if rn.YNode().Kind != yaml.SequenceNode { - return nil - } - elem, err := rn.Pipe(MatchElement(key, value)) - if err != nil { - return nil - } - return elem -} - -// ElementList returns the element in the list in which all fields keys[i] matches all -// corresponding values[i]. -// Returns nil for non-SequenceNodes or if no Element matches. -func (rn *RNode) ElementList(keys []string, values []string) *RNode { - if rn.YNode().Kind != yaml.SequenceNode { - return nil - } - elem, err := rn.Pipe(MatchElementList(keys, values)) - if err != nil { - return nil - } - return elem -} - -// VisitElements calls fn for each element in a SequenceNode. 
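The sequence helpers above (Elements, ElementValues, Element) are easiest to see on a small container list; a hedged sketch, with image names chosen only for illustration:

```go
package main

import (
	"fmt"
	"log"

	"sigs.k8s.io/kustomize/kyaml/yaml"
)

func main() {
	containers := yaml.MustParse(`
- name: app
  image: nginx:1.25
- name: sidecar
  image: envoy:v1.29
`)

	// ElementValues collects one field across every element of the sequence.
	images, err := containers.ElementValues("image")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(images) // [nginx:1.25 envoy:v1.29]

	// Element finds the entry whose "name" field matches "sidecar".
	if sidecar := containers.Element("name", "sidecar"); sidecar != nil {
		fmt.Println(sidecar.MustString())
	}
}
```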
-// Returns an error for non-SequenceNodes -func (rn *RNode) VisitElements(fn func(node *RNode) error) error { - elements, err := rn.Elements() - if err != nil { - return errors.Wrap(err) - } - - for i := range elements { - if err := fn(elements[i]); err != nil { - return errors.Wrap(err) - } - } - return nil -} - -// AssociativeSequenceKeys is a map of paths to sequences that have associative keys. -// The order sets the precedence of the merge keys -- if multiple keys are present -// in Resources in a list, then the FIRST key which ALL elements in the list have is used as the -// associative key for merging that list. -// Only infer name as a merge key. -var AssociativeSequenceKeys = []string{"name"} - -// IsAssociative returns true if the RNode contains an AssociativeSequenceKey as a field. -func (rn *RNode) IsAssociative() bool { - return rn.GetAssociativeKey() != "" -} - -// GetAssociativeKey returns the AssociativeSequenceKey used to merge the elements in the -// SequenceNode, or "" if the list is not associative. -func (rn *RNode) GetAssociativeKey() string { - // look for any associative keys in the first element - for _, key := range AssociativeSequenceKeys { - if checkKey(key, rn.Content()) { - return key - } - } - - // element doesn't have an associative keys - return "" -} - -// MarshalJSON creates a byte slice from the RNode. -func (rn *RNode) MarshalJSON() ([]byte, error) { - s, err := rn.String() - if err != nil { - return nil, err - } - - if rn.YNode().Kind == SequenceNode { - var a []interface{} - if err := Unmarshal([]byte(s), &a); err != nil { - return nil, err - } - return json.Marshal(a) - } - - m := map[string]interface{}{} - if err := Unmarshal([]byte(s), &m); err != nil { - return nil, err - } - return json.Marshal(m) -} - -// UnmarshalJSON overwrites this RNode with data from []byte. -func (rn *RNode) UnmarshalJSON(b []byte) error { - m := map[string]interface{}{} - if err := json.Unmarshal(b, &m); err != nil { - return err - } - r, err := FromMap(m) - if err != nil { - return err - } - rn.value = r.value - return nil -} - -// DeAnchor inflates all YAML aliases with their anchor values. -// All YAML anchor data is permanently removed (feel free to call Copy first). -func (rn *RNode) DeAnchor() (err error) { - rn.value, err = deAnchor(rn.value) - return -} - -// deAnchor removes all AliasNodes from the yaml.Node's tree, replacing -// them with what they point to. All Anchor fields (these are used to mark -// anchor definitions) are cleared. -func deAnchor(yn *yaml.Node) (res *yaml.Node, err error) { - if yn == nil { - return nil, nil - } - if yn.Anchor != "" { - // This node defines an anchor. Clear the field so that it - // doesn't show up when marshalling. - if yn.Kind == yaml.AliasNode { - // Maybe this is OK, but for now treating it as a bug. 
- return nil, fmt.Errorf( - "anchor %q defined using alias %v", yn.Anchor, yn.Alias) - } - yn.Anchor = "" - } - switch yn.Kind { - case yaml.ScalarNode: - return yn, nil - case yaml.AliasNode: - return deAnchor(yn.Alias) - case yaml.MappingNode: - toMerge, err := removeMergeTags(yn) - if err != nil { - return nil, err - } - err = mergeAll(yn, toMerge) - if err != nil { - return nil, err - } - fallthrough - case yaml.DocumentNode, yaml.SequenceNode: - for i := range yn.Content { - yn.Content[i], err = deAnchor(yn.Content[i]) - if err != nil { - return nil, err - } - } - return yn, nil - default: - return nil, fmt.Errorf("cannot deAnchor kind %q", yn.Kind) - } -} - -// isMerge returns if the node is tagged with !!merge -func isMerge(yn *yaml.Node) bool { - return yn.Tag == MergeTag -} - -// findMergeValues receives either a MappingNode, a AliasNode or a potentially -// mixed list of MappingNodes and AliasNodes. It returns a list of MappingNodes. -func findMergeValues(yn *yaml.Node) ([]*yaml.Node, error) { - if yn == nil { - return []*yaml.Node{}, nil - } - switch yn.Kind { - case MappingNode: - return []*yaml.Node{yn}, nil - case AliasNode: - if yn.Alias != nil && yn.Alias.Kind != MappingNode { - return nil, errors.Errorf("invalid map merge: received alias for a non-map value") - } - return []*yaml.Node{yn.Alias}, nil - case SequenceNode: - mergeValues := []*yaml.Node{} - for i := 0; i < len(yn.Content); i++ { - if yn.Content[i].Kind == SequenceNode { - return nil, errors.Errorf("invalid map merge: received a nested sequence") - } - newMergeValues, err := findMergeValues(yn.Content[i]) - if err != nil { - return nil, err - } - mergeValues = append(newMergeValues, mergeValues...) - } - return mergeValues, nil - default: - return nil, errors.Errorf("map merge requires map or sequence of maps as the value") - } -} - -// getMergeTagValue receives a MappingNode yaml node, and it searches for -// merge tagged keys and return its value yaml node. If the key is duplicated, -// it fails. -func getMergeTagValue(yn *yaml.Node) (*yaml.Node, error) { - var result *yaml.Node - for i := 0; i < len(yn.Content); i += 2 { - key := yn.Content[i] - value := yn.Content[i+1] - if isMerge(key) { - if result != nil { - return nil, fmt.Errorf("duplicate merge key") - } - result = value - } - } - return result, nil -} - -// removeMergeTags removes all merge tags and returns a ordered list of yaml -// nodes to merge and a error -func removeMergeTags(yn *yaml.Node) ([]*yaml.Node, error) { - if yn == nil || yn.Content == nil { - return nil, nil - } - if yn.Kind != yaml.MappingNode { - return nil, nil - } - value, err := getMergeTagValue(yn) - if err != nil { - return nil, err - } - toMerge, err := findMergeValues(value) - if err != nil { - return nil, err - } - err = NewRNode(yn).PipeE(Clear("<<")) - if err != nil { - return nil, err - } - return toMerge, nil -} - -func mergeAll(yn *yaml.Node, toMerge []*yaml.Node) error { - // We only need to start with a copy of the existing node because we need to - // maintain duplicated keys and style - rn := NewRNode(yn).Copy() - toMerge = append(toMerge, yn) - for i := range toMerge { - rnToMerge := NewRNode(toMerge[i]).Copy() - err := rnToMerge.VisitFields(func(node *MapNode) error { - return rn.PipeE(MapEntrySetter{Key: node.Key, Value: node.Value}) - }) - if err != nil { - return err - } - } - *yn = *rn.value - return nil -} - -// GetValidatedMetadata returns metadata after subjecting it to some tests. 
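DeAnchor, together with the merge-tag handling above, expands aliases and `<<` merge keys in place; a minimal sketch with an invented defaults/app document:

```go
package main

import (
	"fmt"
	"log"

	"sigs.k8s.io/kustomize/kyaml/yaml"
)

func main() {
	rn := yaml.MustParse(`
defaults: &defaults
  cpu: 100m
  memory: 64Mi
app:
  <<: *defaults
  memory: 128Mi
`)

	// DeAnchor replaces *defaults and the "<<" merge key with the values
	// they point to, and clears the anchor markers so they do not appear
	// when the node is re-serialized.
	if err := rn.DeAnchor(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(rn.MustString())
}
```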
-func (rn *RNode) GetValidatedMetadata() (ResourceMeta, error) { - m, err := rn.GetMeta() - if err != nil { - return m, err - } - if m.Kind == "" { - return m, fmt.Errorf("missing kind in object %v", m) - } - if strings.HasSuffix(m.Kind, "List") { - // A list doesn't require a name. - return m, nil - } - if m.NameMeta.Name == "" { - return m, fmt.Errorf("missing metadata.name in object %v", m) - } - return m, nil -} - -// MatchesAnnotationSelector returns true on a selector match to annotations. -func (rn *RNode) MatchesAnnotationSelector(selector string) (bool, error) { - s, err := labels.Parse(selector) - if err != nil { - return false, err - } - return s.Matches(labels.Set(rn.GetAnnotations())), nil -} - -// MatchesLabelSelector returns true on a selector match to labels. -func (rn *RNode) MatchesLabelSelector(selector string) (bool, error) { - s, err := labels.Parse(selector) - if err != nil { - return false, err - } - return s.Matches(labels.Set(rn.GetLabels())), nil -} - -// HasNilEntryInList returns true if the RNode contains a list which has -// a nil item, along with the path to the missing item. -// TODO(broken): This doesn't do what it claims to do. -// (see TODO in unit test and pr 1513). -func (rn *RNode) HasNilEntryInList() (bool, string) { - return hasNilEntryInList(rn.value) -} - -func hasNilEntryInList(in interface{}) (bool, string) { - switch v := in.(type) { - case map[string]interface{}: - for key, s := range v { - if result, path := hasNilEntryInList(s); result { - return result, key + "/" + path - } - } - case []interface{}: - for index, s := range v { - if s == nil { - return true, "" - } - if result, path := hasNilEntryInList(s); result { - return result, "[" + strconv.Itoa(index) + "]/" + path - } - } - } - return false, "" -} - -func FromMap(m map[string]interface{}) (*RNode, error) { - c, err := Marshal(m) - if err != nil { - return nil, err - } - return Parse(string(c)) -} - -func (rn *RNode) Map() (map[string]interface{}, error) { - if rn == nil || rn.value == nil { - return make(map[string]interface{}), nil - } - var result map[string]interface{} - if err := rn.value.Decode(&result); err != nil { - // Should not be able to create an RNode that cannot be decoded; - // this is an unrecoverable error. - str, _ := rn.String() - return nil, fmt.Errorf("received error %w for the following resource:\n%s", err, str) - } - return result, nil -} - -// ConvertJSONToYamlNode parses input json string and returns equivalent yaml node -func ConvertJSONToYamlNode(jsonStr string) (*RNode, error) { - var body map[string]interface{} - err := json.Unmarshal([]byte(jsonStr), &body) - if err != nil { - return nil, err - } - yml, err := yaml.Marshal(body) - if err != nil { - return nil, err - } - node, err := Parse(string(yml)) - if err != nil { - return nil, err - } - return node, nil -} - -// checkKey returns true if all elems have the key -func checkKey(key string, elems []*Node) bool { - count := 0 - for i := range elems { - elem := NewRNode(elems[i]) - if elem.Field(key) != nil { - count++ - } - } - return count == len(elems) -} - -// GetSlice returns the contents of the slice field at the given path. -func (rn *RNode) GetSlice(path string) ([]interface{}, error) { - value, err := rn.GetFieldValue(path) - if err != nil { - return nil, err - } - if sliceValue, ok := value.([]interface{}); ok { - return sliceValue, nil - } - return nil, fmt.Errorf("node %s is not a slice", path) -} - -// GetString returns the contents of the string field at the given path. 
-func (rn *RNode) GetString(path string) (string, error) { - value, err := rn.GetFieldValue(path) - if err != nil { - return "", err - } - if v, ok := value.(string); ok { - return v, nil - } - return "", fmt.Errorf("node %s is not a string: %v", path, value) -} - -// GetFieldValue finds period delimited fields. -// TODO: When doing kustomize var replacement, which is likely a -// a primary use of this function and the reason it returns interface{} -// rather than string, we do conversion from Nodes to Go types and back -// to nodes. We should figure out how to do replacement using raw nodes, -// assuming we keep the var feature in kustomize. -// The other end of this is: refvar.go:updateNodeValue. -func (rn *RNode) GetFieldValue(path string) (interface{}, error) { - fields := convertSliceIndex(utils.SmarterPathSplitter(path, ".")) - rn, err := rn.Pipe(Lookup(fields...)) - if err != nil { - return nil, err - } - if rn == nil { - return nil, NoFieldError{path} - } - yn := rn.YNode() - - // If this is an alias node, resolve it - if yn.Kind == yaml.AliasNode { - yn = yn.Alias - } - - // Return value as map for DocumentNode and MappingNode kinds - if yn.Kind == yaml.DocumentNode || yn.Kind == yaml.MappingNode { - var result map[string]interface{} - if err := yn.Decode(&result); err != nil { - return nil, err - } - return result, err - } - - // Return value as slice for SequenceNode kind - if yn.Kind == yaml.SequenceNode { - var result []interface{} - if err := yn.Decode(&result); err != nil { - return nil, err - } - return result, nil - } - if yn.Kind != yaml.ScalarNode { - return nil, fmt.Errorf("expected ScalarNode, got Kind=%d", yn.Kind) - } - - switch yn.Tag { - case NodeTagString: - return yn.Value, nil - case NodeTagInt: - return strconv.Atoi(yn.Value) - case NodeTagFloat: - return strconv.ParseFloat(yn.Value, 64) - case NodeTagBool: - return strconv.ParseBool(yn.Value) - default: - // Possibly this should be an error or log. - return yn.Value, nil - } -} - -// convertSliceIndex traverses the items in `fields` and find -// if there is a slice index in the item and change it to a -// valid Lookup field path. For example, 'ports[0]' will be -// converted to 'ports' and '0'. -func convertSliceIndex(fields []string) []string { - var res []string - for _, s := range fields { - if !strings.HasSuffix(s, "]") { - res = append(res, s) - continue - } - re := regexp.MustCompile(`^(.*)\[(\d+)\]$`) - groups := re.FindStringSubmatch(s) - if len(groups) == 0 { - // no match, add to result - res = append(res, s) - continue - } - if groups[1] != "" { - res = append(res, groups[1]) - } - res = append(res, groups[2]) - } - return res -} - -type NoFieldError struct { - Field string -} - -func (e NoFieldError) Error() string { - return fmt.Sprintf("no field named '%s'", e.Field) -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/schema/schema.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/schema/schema.go deleted file mode 100644 index 9ee592f84..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/schema/schema.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// Package schema contains libraries for working with the yaml and openapi packages. -package schema - -import ( - "strings" - - "sigs.k8s.io/kustomize/kyaml/openapi" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// IsAssociative returns true if all elements in the list contain an -// AssociativeSequenceKey as a field. 
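GetString and GetFieldValue accept period-delimited paths, and convertSliceIndex rewrites index suffixes such as containers[0] into Lookup segments; a short sketch using an illustrative Deployment:

```go
package main

import (
	"fmt"
	"log"

	"sigs.k8s.io/kustomize/kyaml/yaml"
)

func main() {
	rn := yaml.MustParse(`
apiVersion: apps/v1
kind: Deployment
metadata:
  name: demo
spec:
  replicas: 2
  template:
    spec:
      containers:
      - name: app
        image: nginx:1.25
`)

	// "[0]" is split off by convertSliceIndex before the Lookup filter runs.
	image, err := rn.GetString("spec.template.spec.containers[0].image")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(image) // nginx:1.25

	// Scalars are converted by tag: !!int becomes int, !!bool becomes bool.
	replicas, err := rn.GetFieldValue("spec.replicas")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%v (%T)\n", replicas, replicas) // 2 (int)
}
```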
-func IsAssociative(schema *openapi.ResourceSchema, nodes []*yaml.RNode, infer bool) bool { - if schema != nil { - return schemaHasMergeStrategy(schema) - } - if !infer { - return false - } - for i := range nodes { - node := nodes[i] - if yaml.IsMissingOrNull(node) { - continue - } - if node.IsAssociative() { - return true - } - } - return false -} - -func schemaHasMergeStrategy(schema *openapi.ResourceSchema) bool { - tmp, _ := schema.PatchStrategyAndKey() - strategies := strings.Split(tmp, ",") - for _, s := range strategies { - if s == "merge" { - return true - } - } - return false -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/types.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/types.go deleted file mode 100644 index 5fbbba1b0..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/types.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package yaml - -import ( - "bytes" - "strings" - - "sigs.k8s.io/kustomize/kyaml/errors" - "sigs.k8s.io/kustomize/kyaml/internal/forked/github.com/go-yaml/yaml" - "sigs.k8s.io/kustomize/kyaml/sets" -) - -// CopyYNode returns a distinct copy of its argument. -// Use https://github.com/jinzhu/copier instead? -func CopyYNode(n *yaml.Node) *yaml.Node { - if n == nil { - return nil - } - c := *n - if len(n.Content) > 0 { - // Using Go 'copy' here doesn't yield independent slices. - c.Content = make([]*Node, len(n.Content)) - for i, item := range n.Content { - c.Content[i] = CopyYNode(item) - } - } - return &c -} - -// IsYNodeTaggedNull returns true if the node is explicitly tagged Null. -func IsYNodeTaggedNull(n *yaml.Node) bool { - return n != nil && n.Tag == NodeTagNull -} - -// IsYNodeEmptyMap is true if the Node is a non-nil empty map. -func IsYNodeEmptyMap(n *yaml.Node) bool { - return n != nil && n.Kind == yaml.MappingNode && len(n.Content) == 0 -} - -// IsYNodeEmptyMap is true if the Node is a non-nil empty sequence. -func IsYNodeEmptySeq(n *yaml.Node) bool { - return n != nil && n.Kind == yaml.SequenceNode && len(n.Content) == 0 -} - -// IsYNodeEmptyDoc is true if the node is a Document with no content. -// E.g.: "---\n---" -func IsYNodeEmptyDoc(n *yaml.Node) bool { - return n.Kind == yaml.DocumentNode && n.Content[0].Tag == NodeTagNull -} - -func IsYNodeString(n *yaml.Node) bool { - return n.Kind == yaml.ScalarNode && - (n.Tag == NodeTagString || n.Tag == NodeTagEmpty) -} - -// IsYNodeZero is true if all the public fields in the Node are empty. -// Which means it's not initialized and should be omitted when marshal. -// The Node itself has a method IsZero but it is not released -// in yaml.v3. https://pkg.go.dev/gopkg.in/yaml.v3#Node.IsZero -func IsYNodeZero(n *yaml.Node) bool { - // TODO: Change this to use IsZero when it's avaialable. - return n != nil && n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && - n.Anchor == "" && n.Alias == nil && n.Content == nil && - n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && - n.Line == 0 && n.Column == 0 -} - -// Parser parses values into configuration. 
-type Parser struct { - Kind string `yaml:"kind,omitempty"` - Value string `yaml:"value,omitempty"` -} - -func (p Parser) Filter(_ *RNode) (*RNode, error) { - d := yaml.NewDecoder(bytes.NewBuffer([]byte(p.Value))) - o := &RNode{value: &yaml.Node{}} - return o, d.Decode(o.value) -} - -// TODO(pwittrock): test this -func GetStyle(styles ...string) Style { - var style Style - for _, s := range styles { - switch s { - case "TaggedStyle": - style |= TaggedStyle - case "DoubleQuotedStyle": - style |= DoubleQuotedStyle - case "SingleQuotedStyle": - style |= SingleQuotedStyle - case "LiteralStyle": - style |= LiteralStyle - case "FoldedStyle": - style |= FoldedStyle - case "FlowStyle": - style |= FlowStyle - } - } - return style -} - -// Filter defines a function to manipulate an individual RNode such as by changing -// its values, or returning a field. -// -// When possible, Filters should be serializable to yaml so that they can be described -// declaratively as data. -// -// Analogous to http://www.linfo.org/filters.html -type Filter interface { - Filter(object *RNode) (*RNode, error) -} - -type FilterFunc func(object *RNode) (*RNode, error) - -func (f FilterFunc) Filter(object *RNode) (*RNode, error) { - return f(object) -} - -// TypeMeta partially copies apimachinery/pkg/apis/meta/v1.TypeMeta -// No need for a direct dependence; the fields are stable. -type TypeMeta struct { - // APIVersion is the apiVersion field of a Resource - APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty"` - // Kind is the kind field of a Resource - Kind string `json:"kind,omitempty" yaml:"kind,omitempty"` -} - -// NameMeta contains name information. -type NameMeta struct { - // Name is the metadata.name field of a Resource - Name string `json:"name,omitempty" yaml:"name,omitempty"` - // Namespace is the metadata.namespace field of a Resource - Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` -} - -// ResourceMeta contains the metadata for a both Resource Type and Resource. -type ResourceMeta struct { - TypeMeta `json:",inline" yaml:",inline"` - // ObjectMeta is the metadata field of a Resource - ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"` -} - -// ObjectMeta contains metadata about a Resource -type ObjectMeta struct { - NameMeta `json:",inline" yaml:",inline"` - // Labels is the metadata.labels field of a Resource - Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"` - // Annotations is the metadata.annotations field of a Resource. - Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"` -} - -// GetIdentifier returns a ResourceIdentifier that includes -// the information needed to uniquely identify a resource in a cluster. -func (m *ResourceMeta) GetIdentifier() ResourceIdentifier { - return ResourceIdentifier{ - TypeMeta: m.TypeMeta, - NameMeta: m.NameMeta, - } -} - -// ResourceIdentifier contains the information needed to uniquely -// identify a resource in a cluster. 
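Because FilterFunc adapts any func(*RNode) (*RNode, error) to the Filter interface, ad-hoc transformations can be chained through Pipe; a hedged sketch in which the annotation key and value are invented:

```go
package main

import (
	"fmt"
	"log"

	"sigs.k8s.io/kustomize/kyaml/yaml"
)

// addOwner is a FilterFunc: the function type itself satisfies Filter, so it
// can be passed to Pipe or PipeE like any built-in filter.
var addOwner yaml.FilterFunc = func(rn *yaml.RNode) (*yaml.RNode, error) {
	anns := rn.GetAnnotations()
	anns["owner"] = "platform-team"
	return rn, rn.SetAnnotations(anns)
}

func main() {
	rn := yaml.MustParse(`
apiVersion: v1
kind: Namespace
metadata:
  name: demo
`)
	if err := rn.PipeE(addOwner); err != nil {
		log.Fatal(err)
	}
	fmt.Println(rn.MustString())
}
```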
-type ResourceIdentifier struct { - TypeMeta `json:",inline" yaml:",inline"` - NameMeta `json:",inline" yaml:",inline"` -} - -// Comments struct is comment yaml comment types -type Comments struct { - LineComment string `yaml:"lineComment,omitempty"` - HeadComment string `yaml:"headComment,omitempty"` - FootComment string `yaml:"footComment,omitempty"` -} - -func (r *ResourceIdentifier) GetName() string { - return r.Name -} - -func (r *ResourceIdentifier) GetNamespace() string { - return r.Namespace -} - -func (r *ResourceIdentifier) GetAPIVersion() string { - return r.APIVersion -} - -func (r *ResourceIdentifier) GetKind() string { - return r.Kind -} - -const ( - Trim = "Trim" - Flow = "Flow" -) - -// String returns a string value for a Node, applying the supplied formatting options -func String(node *yaml.Node, opts ...string) (string, error) { - if node == nil { - return "", nil - } - optsSet := sets.String{} - optsSet.Insert(opts...) - if optsSet.Has(Flow) { - oldStyle := node.Style - defer func() { - node.Style = oldStyle - }() - node.Style = yaml.FlowStyle - } - - b := &bytes.Buffer{} - e := NewEncoder(b) - err := e.Encode(node) - errClose := e.Close() - if err == nil { - err = errClose - } - val := b.String() - if optsSet.Has(Trim) { - val = strings.TrimSpace(val) - } - return val, errors.Wrap(err) -} - -// MergeOptionsListIncreaseDirection is the type of list growth in merge -type MergeOptionsListIncreaseDirection int - -const ( - MergeOptionsListAppend MergeOptionsListIncreaseDirection = iota - MergeOptionsListPrepend -) - -// MergeOptions is a struct which contains the options for merge -type MergeOptions struct { - // ListIncreaseDirection indicates should merge function prepend the items from - // source list to destination or append. - ListIncreaseDirection MergeOptionsListIncreaseDirection -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/util.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/util.go deleted file mode 100644 index 8c9439342..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/util.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2021 The Kubernetes Authors. 
-// SPDX-License-Identifier: Apache-2.0 - -package yaml - -import ( - "strings" -) - -// DeriveSeqIndentStyle derives the sequence indentation annotation value for the resource, -// originalYAML is the input yaml string, -// the style is decided by deriving the existing sequence indentation of first sequence node -func DeriveSeqIndentStyle(originalYAML string) string { - lines := strings.Split(originalYAML, "\n") - for i, line := range lines { - elems := strings.SplitN(line, "- ", 2) - if len(elems) != 2 { - continue - } - // prefix of "- " must be sequence of spaces - if strings.Trim(elems[0], " ") != "" { - continue - } - numSpacesBeforeSeqElem := len(elems[0]) - - // keyLine is the line before the first sequence element - keyLine := keyLineBeforeSeqElem(lines, i) - if keyLine == "" { - // there is no keyLine for this sequence node - // all of those lines are comments - continue - } - numSpacesBeforeKeyElem := len(keyLine) - len(strings.TrimLeft(keyLine, " ")) - trimmedKeyLine := strings.Trim(keyLine, " ") - if strings.Count(trimmedKeyLine, ":") != 1 || !strings.HasSuffix(trimmedKeyLine, ":") { - // if the key line doesn't contain only one : that too at the end, - // this is not a sequence node, it is a wrapped sequence node string - // ignore it - continue - } - - if numSpacesBeforeSeqElem == numSpacesBeforeKeyElem { - return string(CompactSequenceStyle) - } - - if numSpacesBeforeSeqElem-numSpacesBeforeKeyElem == 2 { - return string(WideSequenceStyle) - } - } - - return string(CompactSequenceStyle) -} - -// keyLineBeforeSeqElem iterates through the lines before the first seqElement -// and tries to find the non-comment key line for the sequence node -func keyLineBeforeSeqElem(lines []string, seqElemIndex int) string { - // start with the previous line of sequence element - i := seqElemIndex - 1 - for ; i >= 0; i-- { - line := lines[i] - trimmedLine := strings.Trim(line, " ") - if strings.HasPrefix(trimmedLine, "#") { // commented line - continue - } - // we have a non-commented line which can have a trailing comment - parts := strings.SplitN(line, "#", 2) - return parts[0] // throw away the trailing comment part - } - return "" -} diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/associative_sequence.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/associative_sequence.go deleted file mode 100644 index 7657ebc0f..000000000 --- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/associative_sequence.go +++ /dev/null @@ -1,385 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -package walk - -import ( - "strings" - - "github.com/go-errors/errors" - "sigs.k8s.io/kustomize/kyaml/openapi" - "sigs.k8s.io/kustomize/kyaml/sets" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// appendListNode will append the nodes from src to dst and return dst. -// src and dst should be both sequence node. key is used to call ElementSetter. -// ElementSetter will use key-value pair to find and set the element in sequence -// node. -func appendListNode(dst, src *yaml.RNode, keys []string) (*yaml.RNode, error) { - var err error - for _, elem := range src.Content() { - // If key is empty, we know this is a scalar value and we can directly set the - // node - if keys[0] == "" { - _, err = dst.Pipe(yaml.ElementSetter{ - Element: elem, - Keys: []string{""}, - Values: []string{elem.Value}, - }) - if err != nil { - return nil, err - } - continue - } - - // we need to get the value for key so that we can find the element to set - // in sequence. 
- v := []string{} - for _, key := range keys { - tmpNode := yaml.NewRNode(elem) - valueNode, err := tmpNode.Pipe(yaml.Get(key)) - if err != nil { - return nil, err - } - if valueNode.IsNil() { - // no key found, directly append to dst - err = dst.PipeE(yaml.Append(elem)) - if err != nil { - return nil, err - } - continue - } - v = append(v, valueNode.YNode().Value) - } - - // When there are multiple keys, ElementSetter appends the node to dst - // even if the output is already in dst. We remove the node from dst to - // prevent duplicates. - if len(keys) > 1 { - _, err = dst.Pipe(yaml.ElementSetter{ - Keys: keys, - Values: v, - }) - if err != nil { - return nil, err - } - } - - // We use the key and value from elem to find the corresponding element in dst. - // Then we will use ElementSetter to replace the element with elem. If we cannot - // find the item, the element will be appended. - _, err = dst.Pipe(yaml.ElementSetter{ - Element: elem, - Keys: keys, - Values: v, - }) - if err != nil { - return nil, err - } - } - return dst, nil -} - -// validateKeys returns a list of valid key-value pairs -// if secondary merge key values are missing, use only the available merge keys -func validateKeys(valuesList [][]string, values []string, keys []string) ([]string, []string) { - validKeys := make([]string, 0) - validValues := make([]string, 0) - validKeySet := sets.String{} - for _, values := range valuesList { - for i, v := range values { - if v != "" { - validKeySet.Insert(keys[i]) - } - } - } - if validKeySet.Len() == 0 { // if values missing, fall back to primary keys - return keys, values - } - for _, k := range keys { - if validKeySet.Has(k) { - validKeys = append(validKeys, k) - } - } - for i, v := range values { - if v != "" || validKeySet.Has(keys[i]) { - validValues = append(validValues, v) - } - } - return validKeys, validValues -} - -// mergeValues merges values together - e.g. 
if two containerPorts -// have the same port and targetPort but one has an empty protocol -// and the other doesn't, they are treated as the same containerPort -func mergeValues(valuesList [][]string) [][]string { - for i, values1 := range valuesList { - for j, values2 := range valuesList { - if matched, values := match(values1, values2); matched { - valuesList[i] = values - valuesList[j] = values - } - } - } - return valuesList -} - -// two values match if they have at least one common element and -// corresponding elements only differ if one is an empty string -func match(values1 []string, values2 []string) (bool, []string) { - if len(values1) != len(values2) { - return false, nil - } - var commonElement bool - var res []string - for i := range values1 { - if values1[i] == values2[i] { - commonElement = true - res = append(res, values1[i]) - continue - } - if values1[i] != "" && values2[i] != "" { - return false, nil - } - if values1[i] != "" { - res = append(res, values1[i]) - } else { - res = append(res, values2[i]) - } - } - return commonElement, res -} - -// setAssociativeSequenceElements recursively set the elements in the list -func (l *Walker) setAssociativeSequenceElements(valuesList [][]string, keys []string, dest *yaml.RNode) (*yaml.RNode, error) { - // itemsToBeAdded contains the items that will be added to dest - itemsToBeAdded := yaml.NewListRNode() - var schema *openapi.ResourceSchema - if l.Schema != nil { - schema = l.Schema.Elements() - } - if len(keys) > 1 { - valuesList = mergeValues(valuesList) - } - - // each element in valuesList is a list of values corresponding to the keys - // for example, for the following yaml: - // - containerPort: 8080 - // protocol: UDP - // - containerPort: 8080 - // protocol: TCP - // `keys` would be [containerPort, protocol] - // and `valuesList` would be [ [8080, UDP], [8080, TCP] ] - var validKeys []string - var validValues []string - for _, values := range valuesList { - if len(values) == 0 { - continue - } - - validKeys, validValues = validateKeys(valuesList, values, keys) - val, err := Walker{ - VisitKeysAsScalars: l.VisitKeysAsScalars, - InferAssociativeLists: l.InferAssociativeLists, - Visitor: l, - Schema: schema, - Sources: l.elementValueList(validKeys, validValues), - MergeOptions: l.MergeOptions, - }.Walk() - if err != nil { - return nil, err - } - - exit := false - for i, key := range validKeys { - // delete the node from **dest** if it's null or empty - if yaml.IsMissingOrNull(val) || yaml.IsEmptyMap(val) { - _, err = dest.Pipe(yaml.ElementSetter{ - Keys: validKeys, - Values: validValues, - }) - if err != nil { - return nil, err - } - exit = true - } else if val.Field(key) == nil && validValues[i] != "" { - // make sure the key is set on the field - _, err = val.Pipe(yaml.SetField(key, yaml.NewScalarRNode(validValues[i]))) - if err != nil { - return nil, err - } - } - } - if exit { - continue - } - - // Add the val to the sequence. val will replace the item in the sequence if - // there is an item that matches all key-value pairs. Otherwise val will be appended - // the the sequence. - _, err = itemsToBeAdded.Pipe(yaml.ElementSetter{ - Element: val.YNode(), - Keys: validKeys, - Values: validValues, - }) - if err != nil { - return nil, err - } - } - - var err error - if len(valuesList) > 0 { - if l.MergeOptions.ListIncreaseDirection == yaml.MergeOptionsListPrepend { - // items from patches are needed to be prepended. 
so we append the - // dest to itemsToBeAdded - dest, err = appendListNode(itemsToBeAdded, dest, validKeys) - } else { - // append the items - dest, err = appendListNode(dest, itemsToBeAdded, validKeys) - } - } - - if err != nil { - return nil, err - } - // sequence is empty - if yaml.IsMissingOrNull(dest) { - return nil, nil - } - return dest, nil -} - -func (l *Walker) walkAssociativeSequence() (*yaml.RNode, error) { - // may require initializing the dest node - dest, err := l.Sources.setDestNode(l.VisitList(l.Sources, l.Schema, AssociativeList)) - if dest == nil || err != nil { - return nil, err - } - - // get the merge key(s) from schema - var strategy string - var keys []string - if l.Schema != nil { - strategy, keys = l.Schema.PatchStrategyAndKeyList() - } - if strategy == "" && len(keys) == 0 { // neither strategy nor keys present in the schema -- infer the key - // find the list of elements we need to recursively walk - key, err := l.elementKey() - if err != nil { - return nil, err - } - if key != "" { - keys = append(keys, key) - } - } - - // non-primitive associative list -- merge the elements - values := l.elementValues(keys) - if len(values) != 0 || len(keys) > 0 { - return l.setAssociativeSequenceElements(values, keys, dest) - } - - // primitive associative list -- merge the values - return l.setAssociativeSequenceElements(l.elementPrimitiveValues(), []string{""}, dest) -} - -// elementKey returns the merge key to use for the associative list -func (l Walker) elementKey() (string, error) { - var key string - for i := range l.Sources { - if l.Sources[i] != nil && len(l.Sources[i].Content()) > 0 { - newKey := l.Sources[i].GetAssociativeKey() - if key != "" && key != newKey { - return "", errors.Errorf( - "conflicting merge keys [%s,%s] for field %s", - key, newKey, strings.Join(l.Path, ".")) - } - key = newKey - } - } - if key == "" { - return "", errors.Errorf("no merge key found for field %s", - strings.Join(l.Path, ".")) - } - return key, nil -} - -// elementValues returns a slice containing all values for the field across all elements -// from all sources. -// Return value slice is ordered using the original ordering from the elements, where -// elements missing from earlier sources appear later. -func (l Walker) elementValues(keys []string) [][]string { - // use slice to to keep elements in the original order - var returnValues [][]string - var seen sets.StringList - - // if we are doing append, dest node should be the first. - // otherwise dest node should be the last. - beginIdx := 0 - if l.MergeOptions.ListIncreaseDirection == yaml.MergeOptionsListPrepend { - beginIdx = 1 - } - for i := range l.Sources { - src := l.Sources[(i+beginIdx)%len(l.Sources)] - if src == nil { - continue - } - - // add the value of the field for each element - // don't check error, we know this is a list node - values, _ := src.ElementValuesList(keys) - for _, s := range values { - if len(s) == 0 || seen.Has(s) { - continue - } - returnValues = append(returnValues, s) - seen = seen.Insert(s) - } - } - return returnValues -} - -// elementPrimitiveValues returns the primitive values in an associative list -- eg. finalizers -func (l Walker) elementPrimitiveValues() [][]string { - // use slice to to keep elements in the original order - var returnValues [][]string - seen := sets.String{} - // if we are doing append, dest node should be the first. - // otherwise dest node should be the last. 
-	beginIdx := 0
-	if l.MergeOptions.ListIncreaseDirection == yaml.MergeOptionsListPrepend {
-		beginIdx = 1
-	}
-	for i := range l.Sources {
-		src := l.Sources[(i+beginIdx)%len(l.Sources)]
-		if src == nil {
-			continue
-		}
-
-		// add the value of the field for each element
-		// don't check error, we know this is a list node
-		for _, item := range src.YNode().Content {
-			if seen.Has(item.Value) {
-				continue
-			}
-			returnValues = append(returnValues, []string{item.Value})
-			seen.Insert(item.Value)
-		}
-	}
-	return returnValues
-}
-
-// fieldValue returns a slice containing each source's value for fieldName
-func (l Walker) elementValueList(keys []string, values []string) []*yaml.RNode {
-	keys, values = validateKeys([][]string{values}, values, keys)
-	var fields []*yaml.RNode
-	for i := range l.Sources {
-		if l.Sources[i] == nil {
-			fields = append(fields, nil)
-			continue
-		}
-		fields = append(fields, l.Sources[i].ElementList(keys, values))
-	}
-	return fields
-}
diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/map.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/map.go
deleted file mode 100644
index 998af6d32..000000000
--- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/map.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2019 The Kubernetes Authors.
-// SPDX-License-Identifier: Apache-2.0
-
-package walk
-
-import (
-	"sort"
-
-	"sigs.k8s.io/kustomize/kyaml/fieldmeta"
-	"sigs.k8s.io/kustomize/kyaml/openapi"
-	"sigs.k8s.io/kustomize/kyaml/sets"
-	"sigs.k8s.io/kustomize/kyaml/yaml"
-)
-
-// walkMap returns the value of VisitMap
-//
-// - call VisitMap
-// - set the return value on l.Dest
-// - walk each source field
-// - set each source field value on l.Dest
-func (l Walker) walkMap() (*yaml.RNode, error) {
-	// get the new map value
-	dest, err := l.Sources.setDestNode(l.VisitMap(l.Sources, l.Schema))
-	if dest == nil || err != nil {
-		return nil, err
-	}
-
-	// recursively set the field values on the map
-	for _, key := range l.fieldNames() {
-		var res *yaml.RNode
-		var keys []*yaml.RNode
-		if l.VisitKeysAsScalars {
-			// visit the map keys as if they were scalars,
-			// this is necessary if doing things such as copying
-			// comments
-			for i := range l.Sources {
-				// construct the sources from the keys
-				if l.Sources[i] == nil {
-					keys = append(keys, nil)
-					continue
-				}
-				field := l.Sources[i].Field(key)
-				if field == nil || yaml.IsMissingOrNull(field.Key) {
-					keys = append(keys, nil)
-					continue
-				}
-				keys = append(keys, field.Key)
-			}
-			// visit the sources as a scalar
-			// keys don't have any schema --pass in nil
-			res, err = l.Visitor.VisitScalar(keys, nil)
-			if err != nil {
-				return nil, err
-			}
-		}
-
-		var s *openapi.ResourceSchema
-		if l.Schema != nil {
-			s = l.Schema.Field(key)
-		}
-		fv, commentSch := l.fieldValue(key)
-		if commentSch != nil {
-			s = commentSch
-		}
-		val, err := Walker{
-			VisitKeysAsScalars: l.VisitKeysAsScalars,
-			InferAssociativeLists: l.InferAssociativeLists,
-			Visitor: l,
-			Schema: s,
-			Sources: fv,
-			MergeOptions: l.MergeOptions,
-			Path: append(l.Path, key)}.Walk()
-		if err != nil {
-			return nil, err
-		}
-
-		// transfer the comments of res to dest node
-		var comments yaml.Comments
-		if !yaml.IsMissingOrNull(res) {
-			comments = yaml.Comments{
-				LineComment: res.YNode().LineComment,
-				HeadComment: res.YNode().HeadComment,
-				FootComment: res.YNode().FootComment,
-			}
-			if len(keys) > 0 && !yaml.IsMissingOrNull(keys[DestIndex]) {
-				keys[DestIndex].YNode().HeadComment = res.YNode().HeadComment
-				keys[DestIndex].YNode().LineComment = res.YNode().LineComment
-				keys[DestIndex].YNode().FootComment = res.YNode().FootComment
-			}
-		}
-
-		// this handles empty and non-empty values
-		_, err = dest.Pipe(yaml.FieldSetter{Name: key, Comments: comments, Value: val})
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return dest, nil
-}
-
-// valueIfPresent returns node.Value if node is non-nil, otherwise returns nil
-func (l Walker) valueIfPresent(node *yaml.MapNode) (*yaml.RNode, *openapi.ResourceSchema) {
-	if node == nil {
-		return nil, nil
-	}
-
-	// parse the schema for the field if present
-	var s *openapi.ResourceSchema
-	fm := fieldmeta.FieldMeta{}
-	var err error
-	// check the value for a schema
-	if err = fm.Read(node.Value); err == nil {
-		s = &openapi.ResourceSchema{Schema: &fm.Schema}
-		if fm.Schema.Ref.String() != "" {
-			r, err := openapi.Resolve(&fm.Schema.Ref, openapi.Schema())
-			if err == nil && r != nil {
-				s.Schema = r
-			}
-		}
-	}
-	// check the key for a schema -- this will be used
-	// when the value is a Sequence (comments are attached)
-	// to the key
-	if fm.IsEmpty() {
-		if err = fm.Read(node.Key); err == nil {
-			s = &openapi.ResourceSchema{Schema: &fm.Schema}
-		}
-		if fm.Schema.Ref.String() != "" {
-			r, err := openapi.Resolve(&fm.Schema.Ref, openapi.Schema())
-			if err == nil && r != nil {
-				s.Schema = r
-			}
-		}
-	}
-	return node.Value, s
-}
-
-// fieldNames returns a sorted slice containing the names of all fields that appear in any of
-// the sources
-func (l Walker) fieldNames() []string {
-	fields := sets.String{}
-	for _, s := range l.Sources {
-		if s == nil {
-			continue
-		}
-		// don't check error, we know this is a mapping node
-		sFields, _ := s.Fields()
-		fields.Insert(sFields...)
-	}
-	result := fields.List()
-	sort.Strings(result)
-	return result
-}
-
-// fieldValue returns a slice containing each source's value for fieldName
-func (l Walker) fieldValue(fieldName string) ([]*yaml.RNode, *openapi.ResourceSchema) {
-	var fields []*yaml.RNode
-	var sch *openapi.ResourceSchema
-	for i := range l.Sources {
-		if l.Sources[i] == nil {
-			fields = append(fields, nil)
-			continue
-		}
-		field := l.Sources[i].Field(fieldName)
-		f, s := l.valueIfPresent(field)
-		fields = append(fields, f)
-		if sch == nil && !s.IsMissingOrNull() {
-			sch = s
-		}
-	}
-	return fields, sch
-}
diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/nonassociative_sequence.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/nonassociative_sequence.go
deleted file mode 100644
index 91b187e5b..000000000
--- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/nonassociative_sequence.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2019 The Kubernetes Authors.
-// SPDX-License-Identifier: Apache-2.0
-
-package walk
-
-import (
-	"sigs.k8s.io/kustomize/kyaml/yaml"
-)
-
-// walkNonAssociativeSequence returns the value of VisitList
-func (l Walker) walkNonAssociativeSequence() (*yaml.RNode, error) {
-	return l.VisitList(l.Sources, l.Schema, NonAssociateList)
-}
diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/scalar.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/scalar.go
deleted file mode 100644
index 1a26f6dff..000000000
--- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/scalar.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2019 The Kubernetes Authors.
-// SPDX-License-Identifier: Apache-2.0
-
-package walk
-
-import "sigs.k8s.io/kustomize/kyaml/yaml"
-
-// walkScalar returns the value of VisitScalar
-func (l Walker) walkScalar() (*yaml.RNode, error) {
-	return l.VisitScalar(l.Sources, l.Schema)
-}
diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/visitor.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/visitor.go
deleted file mode 100644
index 153ac2945..000000000
--- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/visitor.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2019 The Kubernetes Authors.
-// SPDX-License-Identifier: Apache-2.0
-
-package walk
-
-import (
-	"sigs.k8s.io/kustomize/kyaml/openapi"
-	"sigs.k8s.io/kustomize/kyaml/yaml"
-)
-
-type ListKind int32
-
-const (
-	AssociativeList ListKind = 1 + iota
-	NonAssociateList
-)
-
-// Visitor is invoked by walk with source and destination node pairs
-type Visitor interface {
-	VisitMap(Sources, *openapi.ResourceSchema) (*yaml.RNode, error)
-
-	VisitScalar(Sources, *openapi.ResourceSchema) (*yaml.RNode, error)
-
-	VisitList(Sources, *openapi.ResourceSchema, ListKind) (*yaml.RNode, error)
-}
-
-// ClearNode is returned if GrepFilter should do nothing after calling Set
-var ClearNode *yaml.RNode
diff --git a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/walk.go b/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/walk.go
deleted file mode 100644
index 68de1324e..000000000
--- a/src/vendor/sigs.k8s.io/kustomize/kyaml/yaml/walk/walk.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright 2019 The Kubernetes Authors.
-// SPDX-License-Identifier: Apache-2.0
-
-package walk
-
-import (
-	"fmt"
-	"os"
-	"strings"
-
-	"sigs.k8s.io/kustomize/kyaml/fieldmeta"
-	"sigs.k8s.io/kustomize/kyaml/openapi"
-	"sigs.k8s.io/kustomize/kyaml/yaml"
-	"sigs.k8s.io/kustomize/kyaml/yaml/schema"
-)
-
-// Walker walks the Source RNode and modifies the RNode provided to GrepFilter.
-type Walker struct {
-	// Visitor is invoked by GrepFilter
-	Visitor
-
-	Schema *openapi.ResourceSchema
-
-	// Source is the RNode to walk. All Source fields and associative list elements
-	// will be visited.
-	Sources Sources
-
-	// Path is the field path to the current Source Node.
-	Path []string
-
-	// InferAssociativeLists if set to true will infer merge strategies for
-	// fields which it doesn't have the schema based on the fields in the
-	// list elements.
-	InferAssociativeLists bool
-
-	// VisitKeysAsScalars if true will call VisitScalar on map entry keys,
-	// providing nil as the OpenAPI schema.
-	VisitKeysAsScalars bool
-
-	// MergeOptions is a struct to store options for merge
-	MergeOptions yaml.MergeOptions
-}
-
-// Kind returns the kind of the first non-null node in Sources.
-func (l Walker) Kind() yaml.Kind {
-	for _, s := range l.Sources {
-		if !yaml.IsMissingOrNull(s) {
-			return s.YNode().Kind
-		}
-	}
-	return 0
-}
-
-// Walk will recursively traverse every item in the Sources and perform corresponding
-// actions on them
-func (l Walker) Walk() (*yaml.RNode, error) {
-	l.Schema = l.GetSchema()
-
-	// invoke the handler for the corresponding node type
-	switch l.Kind() {
-	case yaml.MappingNode:
-		if err := yaml.ErrorIfAnyInvalidAndNonNull(yaml.MappingNode, l.Sources...); err != nil {
-			return nil, err
-		}
-		return l.walkMap()
-	case yaml.SequenceNode:
-		if err := yaml.ErrorIfAnyInvalidAndNonNull(yaml.SequenceNode, l.Sources...); err != nil {
-			return nil, err
-		}
-		// AssociativeSequence means the items in the sequence are associative. They can be merged
-		// according to merge key.
-		if schema.IsAssociative(l.Schema, l.Sources, l.InferAssociativeLists) {
-			return l.walkAssociativeSequence()
-		}
-		return l.walkNonAssociativeSequence()
-
-	case yaml.ScalarNode:
-		if err := yaml.ErrorIfAnyInvalidAndNonNull(yaml.ScalarNode, l.Sources...); err != nil {
-			return nil, err
-		}
-		return l.walkScalar()
-	case 0:
-		// walk empty nodes as maps
-		return l.walkMap()
-	default:
-		return nil, nil
-	}
-}
-
-func (l Walker) GetSchema() *openapi.ResourceSchema {
-	for i := range l.Sources {
-		r := l.Sources[i]
-		if yaml.IsMissingOrNull(r) {
-			continue
-		}
-
-		fm := fieldmeta.FieldMeta{}
-		if err := fm.Read(r); err == nil && !fm.IsEmpty() {
-			// per-field schema, this is fine
-			if fm.Schema.Ref.String() != "" {
-				// resolve the reference
-				s, err := openapi.Resolve(&fm.Schema.Ref, openapi.Schema())
-				if err == nil && s != nil {
-					fm.Schema = *s
-				}
-			}
-			return &openapi.ResourceSchema{Schema: &fm.Schema}
-		}
-	}
-
-	if l.Schema != nil {
-		return l.Schema
-	}
-	for i := range l.Sources {
-		r := l.Sources[i]
-		if yaml.IsMissingOrNull(r) {
-			continue
-		}
-
-		m, _ := r.GetMeta()
-		if m.Kind == "" || m.APIVersion == "" {
-			continue
-		}
-
-		s := openapi.SchemaForResourceType(yaml.TypeMeta{Kind: m.Kind, APIVersion: m.APIVersion})
-		if s != nil {
-			return s
-		}
-	}
-	return nil
-}
-
-const (
-	DestIndex = iota
-	OriginIndex
-	UpdatedIndex
-)
-
-// Sources are a list of RNodes. First item is the dest node, followed by
-// multiple source nodes.
-type Sources []*yaml.RNode
-
-// Dest returns the destination node
-func (s Sources) Dest() *yaml.RNode {
-	if len(s) <= DestIndex {
-		return nil
-	}
-	return s[DestIndex]
-}
-
-// Origin returns the origin node
-func (s Sources) Origin() *yaml.RNode {
-	if len(s) <= OriginIndex {
-		return nil
-	}
-	return s[OriginIndex]
-}
-
-// Updated returns the updated node
-func (s Sources) Updated() *yaml.RNode {
-	if len(s) <= UpdatedIndex {
-		return nil
-	}
-	return s[UpdatedIndex]
-}
-
-func (s Sources) String() string {
-	var values []string
-	for i := range s {
-		str, err := s[i].String()
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "%v\n", err)
-		}
-		values = append(values, str)
-	}
-	return strings.Join(values, "\n")
-}
-
-// setDestNode sets the destination source node
-func (s Sources) setDestNode(node *yaml.RNode, err error) (*yaml.RNode, error) {
-	if err != nil {
-		return nil, err
-	}
-	s[0] = node
-	return node, nil
-}